Merge "Adding the column family metrics API" from Amnon
"The column family matrics is a set of data related to the column family. This series adds an API based on the ColumnFamilyMetrics mbean. It has a stub implementation, just so the JMX proxy would get a response."
This commit is contained in commit 3cef1a6f7a.
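For context, here is a minimal sketch (editorial, not part of this commit) of how a JMX client would read one of the new metrics once the proxy responds. The host, port ("localhost:7199"), keyspace "ks1" and column family "cf1" are assumptions for illustration; the mbean name follows the pattern built by ColumnFamilyMetricNameFactory later in this series, and yammer gauges expose their reading as a single "Value" attribute.

import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class ReadColumnFamilyMetric {
    public static void main(String[] args) throws Exception {
        // Connect to the JMX proxy (port 7199 is an assumption here)
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection mbs = connector.getMBeanServerConnection();
            // Name pattern built by ColumnFamilyMetricNameFactory below
            ObjectName name = new ObjectName(
                    "org.apache.cassandra.metrics:type=ColumnFamily,"
                            + "keyspace=ks1,scope=cf1,name=MemtableColumnsCount");
            // Yammer gauges are exported with a single "Value" attribute
            System.out.println(mbs.getAttribute(name, "Value"));
        }
    }
}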
src/main/java/com/cloudius/urchin/api/APIClient.java
@@ -186,6 +186,24 @@ public class APIClient {
        return join(arr, ",");
    }

+    public static String mapToString(Map<String, String> mp, String pairJoin,
+            String joiner) {
+        String res = "";
+        if (mp != null) {
+            for (String name : mp.keySet()) {
+                if (!res.equals("")) {
+                    res = res + joiner;
+                }
+                res = res + name + pairJoin + mp.get(name);
+            }
+        }
+        return res;
+    }
+
+    public static String mapToString(Map<String, String> mp) {
+        return mapToString(mp, "=", ",");
+    }
+
    public static boolean set_query_param(
            MultivaluedMap<String, String> queryParams, String key, String value) {
        if (queryParams != null && key != null && value != null
@@ -384,9 +402,20 @@ public class APIClient {
        return null;
    }

+    public int[] getIntArrValue(String string,
+            MultivaluedMap<String, String> queryParams) {
+        JsonReader reader = getReader(string, queryParams);
+        JsonArray arr = reader.readArray();
+        int[] res = new int[arr.size()];
+        for (int i = 0; i < arr.size(); i++) {
+            res[i] = arr.getInt(i);
+        }
+        reader.close();
+        return res;
+    }
+
    public int[] getIntArrValue(String string) {
-        // TODO Auto-generated method stub
-        return null;
+        return getIntArrValue(string, null);
    }

    public Map<String, Long> getListMapStringLongValue(String string,
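The new mapToString helper above serializes a map into key=value pairs joined by commas; it is used later in this commit to pass compression options as a single "opts" query parameter. A short illustration (editorial; the option names are only examples):

Map<String, String> opts = new LinkedHashMap<String, String>();
opts.put("sstable_compression", "LZ4Compressor");
opts.put("chunk_length_kb", "64");
String s = APIClient.mapToString(opts);
// s == "sstable_compression=LZ4Compressor,chunk_length_kb=64"
// mapToString iterates mp.keySet(), so pair order follows the map's
// iteration order; a LinkedHashMap keeps insertion order here.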
src/main/java/com/cloudius/urchin/utils/EstimatedHistogram.java (new file, 306 lines)
@@ -0,0 +1,306 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */

package com.cloudius.urchin.utils;

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLongArray;

import com.google.common.base.Objects;

import org.slf4j.Logger;

public class EstimatedHistogram {
    /**
     * The series of values to which the counts in `buckets` correspond: 1, 2,
     * 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc. Thus, a `buckets` of [0, 0, 1,
     * 10] would mean we had seen one value of 3 and 10 values of 4.
     *
     * The series starts at 1 and grows by 1.2 each time (rounding and removing
     * duplicates). It goes from 1 to around 36M by default (creating 90+1
     * buckets), which will give us timing resolution from microseconds to 36
     * seconds, with less precision as the numbers get larger.
     *
     * Each bucket represents values from (previous bucket offset, current
     * offset].
     */
    private final long[] bucketOffsets;

    // buckets is one element longer than bucketOffsets -- the last element is
    // values greater than the last offset
    final AtomicLongArray buckets;

    public EstimatedHistogram() {
        this(90);
    }

    public EstimatedHistogram(int bucketCount) {
        bucketOffsets = newOffsets(bucketCount);
        buckets = new AtomicLongArray(bucketOffsets.length + 1);
    }

    public EstimatedHistogram(long[] offsets, long[] bucketData) {
        assert bucketData.length == offsets.length + 1;
        bucketOffsets = offsets;
        buckets = new AtomicLongArray(bucketData);
    }

    private static long[] newOffsets(int size) {
        long[] result = new long[size];
        long last = 1;
        result[0] = last;
        for (int i = 1; i < size; i++) {
            long next = Math.round(last * 1.2);
            if (next == last)
                next++;
            result[i] = next;
            last = next;
        }

        return result;
    }

    /**
     * @return the histogram values corresponding to each bucket index
     */
    public long[] getBucketOffsets() {
        return bucketOffsets;
    }

    /**
     * Increments the count of the bucket closest to n, rounding UP.
     *
     * @param n
     */
    public void add(long n) {
        int index = Arrays.binarySearch(bucketOffsets, n);
        if (index < 0) {
            // inexact match, take the first bucket higher than n
            index = -index - 1;
        }
        // else exact match; we're good
        buckets.incrementAndGet(index);
    }

    /**
     * @return the count in the given bucket
     */
    long get(int bucket) {
        return buckets.get(bucket);
    }

    /**
     * @param reset
     *            zero out buckets afterwards if true
     * @return a long[] containing the current histogram buckets
     */
    public long[] getBuckets(boolean reset) {
        final int len = buckets.length();
        long[] rv = new long[len];

        if (reset)
            for (int i = 0; i < len; i++)
                rv[i] = buckets.getAndSet(i, 0L);
        else
            for (int i = 0; i < len; i++)
                rv[i] = buckets.get(i);

        return rv;
    }

    /**
     * @return the smallest value that could have been added to this histogram
     */
    public long min() {
        for (int i = 0; i < buckets.length(); i++) {
            if (buckets.get(i) > 0)
                return i == 0 ? 0 : 1 + bucketOffsets[i - 1];
        }
        return 0;
    }

    /**
     * @return the largest value that could have been added to this histogram.
     *         If the histogram overflowed, returns Long.MAX_VALUE.
     */
    public long max() {
        int lastBucket = buckets.length() - 1;
        if (buckets.get(lastBucket) > 0)
            return Long.MAX_VALUE;

        for (int i = lastBucket - 1; i >= 0; i--) {
            if (buckets.get(i) > 0)
                return bucketOffsets[i];
        }
        return 0;
    }

    /**
     * @param percentile
     * @return estimated value at given percentile
     */
    public long percentile(double percentile) {
        assert percentile >= 0 && percentile <= 1.0;
        int lastBucket = buckets.length() - 1;
        if (buckets.get(lastBucket) > 0)
            throw new IllegalStateException(
                    "Unable to compute when histogram overflowed");

        long pcount = (long) Math.floor(count() * percentile);
        if (pcount == 0)
            return 0;

        long elements = 0;
        for (int i = 0; i < lastBucket; i++) {
            elements += buckets.get(i);
            if (elements >= pcount)
                return bucketOffsets[i];
        }
        return 0;
    }

    /**
     * @return the mean histogram value (average of bucket offsets, weighted by
     *         count)
     * @throws IllegalStateException
     *             if any values were greater than the largest bucket threshold
     */
    public long mean() {
        int lastBucket = buckets.length() - 1;
        if (buckets.get(lastBucket) > 0)
            throw new IllegalStateException(
                    "Unable to compute ceiling for max when histogram overflowed");

        long elements = 0;
        long sum = 0;
        for (int i = 0; i < lastBucket; i++) {
            long bCount = buckets.get(i);
            elements += bCount;
            sum += bCount * bucketOffsets[i];
        }

        return (long) Math.ceil((double) sum / elements);
    }

    /**
     * @return the total number of non-zero values
     */
    public long count() {
        long sum = 0L;
        for (int i = 0; i < buckets.length(); i++)
            sum += buckets.get(i);
        return sum;
    }

    /**
     * @return true if this histogram has overflowed -- that is, a value larger
     *         than our largest bucket could bound was added
     */
    public boolean isOverflowed() {
        return buckets.get(buckets.length() - 1) > 0;
    }

    /**
     * log.debug() every record in the histogram
     *
     * @param log
     */
    public void log(Logger log) {
        // only print overflow if there is any
        int nameCount;
        if (buckets.get(buckets.length() - 1) == 0)
            nameCount = buckets.length() - 1;
        else
            nameCount = buckets.length();
        String[] names = new String[nameCount];

        int maxNameLength = 0;
        for (int i = 0; i < nameCount; i++) {
            names[i] = nameOfRange(bucketOffsets, i);
            maxNameLength = Math.max(maxNameLength, names[i].length());
        }

        // emit log records
        String formatstr = "%" + maxNameLength + "s: %d";
        for (int i = 0; i < nameCount; i++) {
            long count = buckets.get(i);
            // sort-of-hack to not print empty ranges at the start that are
            // only used to demarcate the first populated range. for code
            // clarity we don't omit this record from the maxNameLength
            // calculation, and accept the unnecessary whitespace prefixes
            // that will occasionally occur
            if (i == 0 && count == 0)
                continue;
            log.debug(String.format(formatstr, names[i], count));
        }
    }

    private static String nameOfRange(long[] bucketOffsets, int index) {
        StringBuilder sb = new StringBuilder();
        appendRange(sb, bucketOffsets, index);
        return sb.toString();
    }

    private static void appendRange(StringBuilder sb, long[] bucketOffsets,
            int index) {
        sb.append("[");
        if (index == 0)
            if (bucketOffsets[0] > 0)
                // by original definition, this histogram is for values greater
                // than zero only; if values of 0 or less are required, an
                // entry of lb-1 must be inserted at the start
                sb.append("1");
            else
                sb.append("-Inf");
        else
            sb.append(bucketOffsets[index - 1] + 1);
        sb.append("..");
        if (index == bucketOffsets.length)
            sb.append("Inf");
        else
            sb.append(bucketOffsets[index]);
        sb.append("]");
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;

        if (!(o instanceof EstimatedHistogram))
            return false;

        EstimatedHistogram that = (EstimatedHistogram) o;
        return Arrays.equals(getBucketOffsets(), that.getBucketOffsets())
                && Arrays.equals(getBuckets(false), that.getBuckets(false));
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(getBucketOffsets(), getBuckets(false));
    }
}
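A worked example (editorial, not part of the commit) of the bucket scheme documented in the class above. The offsets run 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, ..., and add() rounds a value up into the first bucket whose offset is not smaller than it:

EstimatedHistogram h = new EstimatedHistogram();
h.add(3);          // exact offset match: counts in the bucket for 3
h.add(9);          // no offset 9: rounds up into the bucket for 10
h.add(9);
h.add(9);
assert h.count() == 4;
assert h.min() == 3;    // smallest value that could have been added
assert h.max() == 10;   // largest bucket offset with a non-zero count
// floor(4 * 0.5) = 2 values are first reached in the bucket for offset 10
assert h.percentile(0.5) == 10;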
src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -16,7 +16,6 @@
 * limitations under the License.
 */

-
/*
 * Copyright 2015 Cloudius Systems
 *
@@ -31,8 +30,12 @@ import java.util.concurrent.*;
import javax.json.JsonArray;
import javax.json.JsonObject;
import javax.management.*;
import javax.ws.rs.core.MultivaluedMap;
+
+import org.apache.cassandra.metrics.ColumnFamilyMetrics;
+
import com.cloudius.urchin.api.APIClient;
+import com.sun.jersey.core.util.MultivaluedMapImpl;

public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
    private static final java.util.logging.Logger logger = java.util.logging.Logger
@@ -42,7 +45,8 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
    private String keyspace;
    private String name;
    private String mbeanName;
-    static final int INTERVAL = 1000; //update every 1second
+    static final int INTERVAL = 1000; // update every 1second
+    public final ColumnFamilyMetrics metric;

    private static Map<String, ColumnFamilyStore> cf = new HashMap<String, ColumnFamilyStore>();
    private static Timer timer = new Timer("Column Family");
@@ -56,10 +60,10 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
        timer.scheduleAtFixedRate(taskToExecute, 100, INTERVAL);
    }

-    public ColumnFamilyStore(String _type, String _keyspace, String _name) {
-        type = _type;
-        keyspace = _keyspace;
-        name = _name;
+    public ColumnFamilyStore(String type, String keyspace, String name) {
+        this.type = type;
+        this.keyspace = keyspace;
+        this.name = name;
        mbeanName = getName(type, keyspace, name);
        try {
            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
@@ -68,6 +72,24 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
+        metric = new ColumnFamilyMetrics(this);
    }

+    /** true if this CFS contains secondary index data */
+    /*
+     * It is hard coded to false until secondary index is supported
+     */
+    public boolean isIndex() {
+        return false;
+    }
+
+    /**
+     * Get the column family name in the API format
+     *
+     * @return the name in the keyspace:name form
+     */
+    public String getCFName() {
+        return keyspace + ":" + name;
+    }
+
    private static String getName(String type, String keyspace, String name) {
@@ -92,12 +114,12 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
                        mbean.getString("type"), mbean.getString("ks"),
                        mbean.getString("cf"));
                cf.put(name, cfs);
            }
        }
        all_cf.add(name);
    }
-    //removing deleted column family
+    // removing deleted column family
    for (String n : cf.keySet()) {
-        if (! all_cf.contains(n)) {
+        if (!all_cf.contains(n)) {
            cf.remove(n);
        }
    }
@@ -322,6 +344,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
    public void forceMajorCompaction() throws ExecutionException,
            InterruptedException {
        log(" forceMajorCompaction() throws ExecutionException, InterruptedException");
+        c.post("column_family/major_compaction/" + getCFName());
    }

    /**
@@ -431,7 +454,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public int getMinimumCompactionThreshold() {
        log(" getMinimumCompactionThreshold()");
-        return c.getIntValue("");
+        return c.getIntValue("column_family/minimum_compaction/" + getCFName());
    }

    /**
@@ -439,6 +462,9 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public void setMinimumCompactionThreshold(int threshold) {
        log(" setMinimumCompactionThreshold(int threshold)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedMapImpl();
+        queryParams.add("value", Integer.toString(threshold));
+        c.post("column_family/minimum_compaction/" + getCFName(), queryParams);
    }

    /**
@@ -446,7 +472,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public int getMaximumCompactionThreshold() {
        log(" getMaximumCompactionThreshold()");
-        return c.getIntValue("");
+        return c.getIntValue("column_family/maximum_compaction/" + getCFName());
    }

    /**
@@ -455,6 +481,10 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public void setCompactionThresholds(int minThreshold, int maxThreshold) {
        log(" setCompactionThresholds(int minThreshold, int maxThreshold)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedMapImpl();
+        queryParams.add("minimum", Integer.toString(minThreshold));
+        queryParams.add("maximum", Integer.toString(maxThreshold));
+        c.post("column_family/compaction/" + getCFName(), queryParams);
    }

    /**
@@ -462,16 +492,22 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public void setMaximumCompactionThreshold(int threshold) {
        log(" setMaximumCompactionThreshold(int threshold)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedMapImpl();
+        queryParams.add("value", Integer.toString(threshold));
+        c.post("column_family/maximum_compaction/" + getCFName(), queryParams);
    }

    /**
     * Sets the compaction strategy by class name
     *
     * @param className
     *            the name of the compaction strategy class
     */
    public void setCompactionStrategyClass(String className) {
        log(" setCompactionStrategyClass(String className)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedMapImpl();
+        queryParams.add("class_name", className);
+        c.post("column_family/compaction_strategy/" + getCFName(), queryParams);
    }

    /**
@@ -479,7 +515,8 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public String getCompactionStrategyClass() {
        log(" getCompactionStrategyClass()");
-        return c.getStringValue("");
+        return c.getStringValue("column_family/compaction_strategy/"
+                + getCFName());
    }

    /**
@@ -487,17 +524,22 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public Map<String, String> getCompressionParameters() {
        log(" getCompressionParameters()");
-        return c.getMapStrValue("");
+        return c.getMapStrValue("column_family/compression_parameters/"
+                + getCFName());
    }

    /**
     * Set the compression parameters
     *
     * @param opts
     *            map of string names to values
     */
    public void setCompressionParameters(Map<String, String> opts) {
        log(" setCompressionParameters(Map<String,String> opts)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedMapImpl();
+        queryParams.add("opts", APIClient.mapToString(opts));
+        c.post("column_family/compression_parameters/" + getCFName(),
+                queryParams);
    }

    /**
@@ -505,11 +547,14 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public void setCrcCheckChance(double crcCheckChance) {
        log(" setCrcCheckChance(double crcCheckChance)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedMapImpl();
+        queryParams.add("check_chance", Double.toString(crcCheckChance));
+        c.post("column_family/crc_check_chance/" + getCFName(), queryParams);
    }

    public boolean isAutoCompactionDisabled() {
        log(" isAutoCompactionDisabled()");
-        return c.getBooleanValue("");
+        return c.getBooleanValue("column_family/autocompaction/" + getCFName());
    }

    /** Number of tombstoned cells retrieved during the last slice query */
@@ -528,7 +573,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {

    public long estimateKeys() {
        log(" estimateKeys()");
-        return c.getLongValue("");
+        return c.getLongValue("column_family/estimate_keys/" + getCFName());
    }

    /**
@@ -560,23 +605,26 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {

    /**
     * Returns a list of the names of the built column indexes for current store
     *
     * @return list of the index names
     */
    public List<String> getBuiltIndexes() {
        log(" getBuiltIndexes()");
-        return c.getListStrValue("");
+        return c.getListStrValue("column_family/built_indexes/" + getCFName());
    }

    /**
     * Returns a list of filenames that contain the given key on this node
     *
     * @param key
     * @return list of filenames containing the key
     */
    public List<String> getSSTablesForKey(String key) {
        log(" getSSTablesForKey(String key)");
-        return c.getListStrValue("");
+        MultivaluedMap<String, String> queryParams = new MultivaluedMapImpl();
+        queryParams.add("key", key);
+        return c.getListStrValue(
+                "column_family/sstables/by_key/" + getCFName(), queryParams);
    }

    /**
@@ -585,6 +633,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public void loadNewSSTables() {
        log(" loadNewSSTables()");
+        c.post("column_family/sstable/" + getCFName());
    }

    /**
@@ -593,7 +642,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public int getUnleveledSSTables() {
        log(" getUnleveledSSTables()");
-        return c.getIntValue("");
+        return c.getIntValue("column_family/sstables/unleveled/" + getCFName());
    }

    /**
@@ -603,18 +652,19 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public int[] getSSTableCountPerLevel() {
        log(" getSSTableCountPerLevel()");
-        return c.getIntArrValue("");
+        return c.getIntArrValue("column_family/sstables/per_level/"
+                + getCFName());
    }

    /**
     * Get the ratio of droppable tombstones to real columns (and non-droppable
     * tombstones)
     *
     * @return ratio
     */
    public double getDroppableTombstoneRatio() {
        log(" getDroppableTombstoneRatio()");
-        return c.getDoubleValue("");
+        return c.getDoubleValue("column_family/droppable_ratio/" + getCFName());
    }

    /**
@@ -623,7 +673,11 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public long trueSnapshotsSize() {
        log(" trueSnapshotsSize()");
-        return c.getLongValue("");
+        return c.getLongValue("column_family/snapshots_size/" + getCFName());
    }

+    public String getKeyspace() {
+        return keyspace;
+    }
+
}
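Taken together, the hunks above turn each ColumnFamilyStoreMBean operation into a REST call keyed by getCFName(), i.e. "keyspace:name". A hedged sketch of the resulting traffic (editorial; the "ColumnFamilies" type string, keyspace and name are illustrative):

ColumnFamilyStore cfs = new ColumnFamilyStore("ColumnFamilies", "ks1", "cf1");
// POST column_family/minimum_compaction/ks1:cf1 with value=4
cfs.setMinimumCompactionThreshold(4);
// GET column_family/minimum_compaction/ks1:cf1
int min = cfs.getMinimumCompactionThreshold();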
src/main/java/org/apache/cassandra/metrics/ColumnFamilyMetrics.java (new file, 559 lines)
@@ -0,0 +1,559 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */
package org.apache.cassandra.metrics;

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

import org.apache.cassandra.db.ColumnFamilyStore;

import com.cloudius.urchin.api.APIClient;
import com.cloudius.urchin.metrics.APIMetrics;
import com.cloudius.urchin.metrics.MetricNameFactory;
import com.cloudius.urchin.utils.EstimatedHistogram;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.*;

/**
 * Metrics for {@link ColumnFamilyStore}.
 */
public class ColumnFamilyMetrics {
    private APIClient c = new APIClient();
    /**
     * Total amount of data stored in the memtable that resides on-heap,
     * including column related overhead and overwritten rows.
     */
    public final Gauge<Long> memtableOnHeapSize;
    /**
     * Total amount of data stored in the memtable that resides off-heap,
     * including column related overhead and overwritten rows.
     */
    public final Gauge<Long> memtableOffHeapSize;
    /**
     * Total amount of live data stored in the memtable, excluding any data
     * structure overhead
     */
    public final Gauge<Long> memtableLiveDataSize;
    /**
     * Total amount of data stored in the memtables (2i and pending flush
     * memtables included) that resides on-heap.
     */
    public final Gauge<Long> allMemtablesOnHeapSize;
    /**
     * Total amount of data stored in the memtables (2i and pending flush
     * memtables included) that resides off-heap.
     */
    public final Gauge<Long> allMemtablesOffHeapSize;
    /**
     * Total amount of live data stored in the memtables (2i and pending flush
     * memtables included) that resides off-heap, excluding any data structure
     * overhead
     */
    public final Gauge<Long> allMemtablesLiveDataSize;
    /** Total number of columns present in the memtable. */
    public final Gauge<Long> memtableColumnsCount;
    /** Number of times flush has resulted in the memtable being switched out. */
    public final Counter memtableSwitchCount;
    /** Current compression ratio for all SSTables */
    public final Gauge<Double> compressionRatio;
    /** Histogram of estimated row size (in bytes). */
    public final Gauge<long[]> estimatedRowSizeHistogram;
    /** Histogram of estimated number of columns. */
    public final Gauge<long[]> estimatedColumnCountHistogram;
    /** Histogram of the number of sstable data files accessed per read */
    public final ColumnFamilyHistogram sstablesPerReadHistogram;
    /** (Local) read metrics */
    public final LatencyMetrics readLatency;
    /** (Local) range slice metrics */
    public final LatencyMetrics rangeLatency;
    /** (Local) write metrics */
    public final LatencyMetrics writeLatency;
    /** Estimated number of tasks pending for this column family */
    public final Counter pendingFlushes;
    /** Estimate of number of pending compactions for this CF */
    public final Gauge<Integer> pendingCompactions;
    /** Number of SSTables on disk for this CF */
    public final Gauge<Integer> liveSSTableCount;
    /** Disk space used by SSTables belonging to this CF */
    public final Counter liveDiskSpaceUsed;
    /**
     * Total disk space used by SSTables belonging to this CF, including
     * obsolete ones waiting to be GC'd
     */
    public final Counter totalDiskSpaceUsed;
    /** Size of the smallest compacted row */
    public final Gauge<Long> minRowSize;
    /** Size of the largest compacted row */
    public final Gauge<Long> maxRowSize;
    /** Size of the average compacted row */
    public final Gauge<Long> meanRowSize;
    /** Number of false positives in bloom filter */
    public final Gauge<Long> bloomFilterFalsePositives;
    /** Number of false positives in bloom filter from last read */
    public final Gauge<Long> recentBloomFilterFalsePositives;
    /** False positive ratio of bloom filter */
    public final Gauge<Double> bloomFilterFalseRatio;
    /** False positive ratio of bloom filter from last read */
    public final Gauge<Double> recentBloomFilterFalseRatio;
    /** Disk space used by bloom filter */
    public final Gauge<Long> bloomFilterDiskSpaceUsed;
    /** Off heap memory used by bloom filter */
    public final Gauge<Long> bloomFilterOffHeapMemoryUsed;
    /** Off heap memory used by index summary */
    public final Gauge<Long> indexSummaryOffHeapMemoryUsed;
    /** Off heap memory used by compression meta data */
    public final Gauge<Long> compressionMetadataOffHeapMemoryUsed;
    /** Key cache hit rate for this CF */
    public final Gauge<Double> keyCacheHitRate;
    /** Tombstones scanned in queries on this CF */
    public final ColumnFamilyHistogram tombstoneScannedHistogram;
    /** Live cells scanned in queries on this CF */
    public final ColumnFamilyHistogram liveScannedHistogram;
    /** Column update time delta on this CF */
    public final ColumnFamilyHistogram colUpdateTimeDeltaHistogram;
    /** Disk space used by snapshot files which */
    public final Gauge<Long> trueSnapshotsSize;
    /** Row cache hits, but result out of range */
    public final Counter rowCacheHitOutOfRange;
    /** Number of row cache hits */
    public final Counter rowCacheHit;
    /** Number of row cache misses */
    public final Counter rowCacheMiss;
    /** CAS Prepare metrics */
    public final LatencyMetrics casPrepare;
    /** CAS Propose metrics */
    public final LatencyMetrics casPropose;
    /** CAS Commit metrics */
    public final LatencyMetrics casCommit;

    public final Timer coordinatorReadLatency;
    public final Timer coordinatorScanLatency;

    /** Time spent waiting for free memtable space, either on- or off-heap */
    public final Timer waitingOnFreeMemtableSpace;

    private final MetricNameFactory factory;
    private static final MetricNameFactory globalNameFactory = new AllColumnFamilyMetricNameFactory();

    public final Counter speculativeRetries;

    // for backward compatibility
    @Deprecated
    public final EstimatedHistogram sstablesPerRead = new EstimatedHistogram(35);
    @Deprecated
    public final EstimatedHistogram recentSSTablesPerRead = new EstimatedHistogram(
            35);
    private String cfName;

    public final static LatencyMetrics globalReadLatency = new LatencyMetrics(
            "/column_family/metrics/read_latency", globalNameFactory, "Read");
    public final static LatencyMetrics globalWriteLatency = new LatencyMetrics(
            "/column_family/metrics/write_latency", globalNameFactory, "Write");
    public final static LatencyMetrics globalRangeLatency = new LatencyMetrics(
            "/column_family/metrics/range_latency", globalNameFactory, "Range");

    /**
     * stores metrics that will be rolled into a single global metric
     */
    public final static ConcurrentMap<String, Set<Metric>> allColumnFamilyMetrics = Maps
            .newConcurrentMap();

    /**
     * Stores all metric names created that can be used when unregistering
     */
    public final static Set<String> all = Sets.newHashSet();

    /**
     * Creates metrics for given {@link ColumnFamilyStore}.
     *
     * @param cfs
     *            ColumnFamilyStore to measure metrics
     */
    public ColumnFamilyMetrics(final ColumnFamilyStore cfs) {
        factory = new ColumnFamilyMetricNameFactory(cfs);
        cfName = cfs.getCFName();
        memtableColumnsCount = createColumnFamilyGauge(
                "/column_family/metrics/memtable_columns_count",
                "MemtableColumnsCount");
        memtableOnHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/memtable_on_heap_size",
                "MemtableOnHeapSize");
        memtableOffHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/memtable_off_heap_size",
                "MemtableOffHeapSize");
        memtableLiveDataSize = createColumnFamilyGauge(
                "/column_family/metrics/memtable_live_data_size",
                "MemtableLiveDataSize");
        allMemtablesOnHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/all_memtables_on_heap_size",
                "AllMemtablesHeapSize");
        allMemtablesOffHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/all_memtables_off_heap_size",
                "AllMemtablesOffHeapSize");
        allMemtablesLiveDataSize = createColumnFamilyGauge(
                "/column_family/metrics/all_memtables_live_data_size",
                "AllMemtablesLiveDataSize");
        memtableSwitchCount = createColumnFamilyCounter(
                "/column_family/metrics/memtable_switch_count",
                "MemtableSwitchCount");
        estimatedRowSizeHistogram = Metrics.newGauge(
                factory.createMetricName("EstimatedRowSizeHistogram"),
                new Gauge<long[]>() {
                    public long[] value() {
                        return c.getLongArrValue("/column_family/metrics/estimated_row_size_histogram/"
                                + cfName);
                    }
                });
        estimatedColumnCountHistogram = Metrics.newGauge(
                factory.createMetricName("EstimatedColumnCountHistogram"),
                new Gauge<long[]>() {
                    public long[] value() {
                        return c.getLongArrValue("/column_family/metrics/estimated_column_count_histogram/"
                                + cfName);
                    }
                });
        sstablesPerReadHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/sstables_per_read_histogram",
                "SSTablesPerReadHistogram");
        compressionRatio = createColumnFamilyGauge("CompressionRatio",
                new Gauge<Double>() {
                    public Double value() {
                        return c.getDoubleValue("/column_family/metrics/compression_ratio/"
                                + cfName);
                    }
                }, new Gauge<Double>() // global gauge
                {
                    public Double value() {
                        return c.getDoubleValue("/column_family/metrics/compression_ratio/");
                    }
                });
        readLatency = new LatencyMetrics("/column_family/metrics/read_latency/"
                + cfName, factory, "Read");
        writeLatency = new LatencyMetrics(
                "/column_family/metrics/write_latency/" + cfName, factory,
                "Write");
        rangeLatency = new LatencyMetrics(
                "/column_family/metrics/range_latency/" + cfName, factory,
                "Range");
        pendingFlushes = createColumnFamilyCounter(
                "/column_family/metrics/pending_flushes", "PendingFlushes");
        pendingCompactions = createColumnFamilyGaugeInt(
                "/column_family/metrics/pending_compactions",
                "PendingCompactions");
        liveSSTableCount = createColumnFamilyGaugeInt(
                "/column_family/metrics/live_ss_table_count",
                "LiveSSTableCount");
        liveDiskSpaceUsed = createColumnFamilyCounter(
                "/column_family/metrics/live_disk_space_used",
                "LiveDiskSpaceUsed");
        totalDiskSpaceUsed = createColumnFamilyCounter(
                "/column_family/metrics/total_disk_space_used",
                "TotalDiskSpaceUsed");
        minRowSize = createColumnFamilyGauge(
                "/column_family/metrics/min_row_size", "MinRowSize");
        maxRowSize = createColumnFamilyGauge(
                "/column_family/metrics/max_row_size", "MaxRowSize");
        meanRowSize = createColumnFamilyGauge(
                "/column_family/metrics/mean_row_size", "MeanRowSize");
        bloomFilterFalsePositives = createColumnFamilyGauge(
                "/column_family/metrics/bloom_filter_false_positives",
                "BloomFilterFalsePositives");
        recentBloomFilterFalsePositives = createColumnFamilyGauge(
                "/column_family/metrics/recent_bloom_filter_false_positives",
                "RecentBloomFilterFalsePositives");
        bloomFilterFalseRatio = createColumnFamilyGaugeDouble(
                "/column_family/metrics/bloom_filter_false_ratio",
                "BloomFilterFalseRatio");
        recentBloomFilterFalseRatio = createColumnFamilyGaugeDouble(
                "/column_family/metrics/recent_bloom_filter_false_ratio",
                "RecentBloomFilterFalseRatio");
        bloomFilterDiskSpaceUsed = createColumnFamilyGauge(
                "/column_family/metrics/bloom_filter_disk_space_used",
                "BloomFilterDiskSpaceUsed");
        bloomFilterOffHeapMemoryUsed = createColumnFamilyGauge(
                "/column_family/metrics/bloom_filter_off_heap_memory_used",
                "BloomFilterOffHeapMemoryUsed");
        indexSummaryOffHeapMemoryUsed = createColumnFamilyGauge(
                "/column_family/metrics/index_summary_off_heap_memory_used",
                "IndexSummaryOffHeapMemoryUsed");
        compressionMetadataOffHeapMemoryUsed = createColumnFamilyGauge(
                "/column_family/metrics/compression_metadata_off_heap_memory_used",
                "CompressionMetadataOffHeapMemoryUsed");
        speculativeRetries = createColumnFamilyCounter(
                "/column_family/metrics/speculative_retries",
                "SpeculativeRetries");
        keyCacheHitRate = Metrics.newGauge(
                factory.createMetricName("KeyCacheHitRate"),
                new Gauge<Double>() {
                    @Override
                    public Double value() {
                        return c.getDoubleValue("/column_family/metrics/key_cache_hit_rate/"
                                + cfName);
                    }
                });
        tombstoneScannedHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/tombstone_scanned_histogram",
                "TombstoneScannedHistogram");
        liveScannedHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/live_scanned_histogram",
                "LiveScannedHistogram");
        colUpdateTimeDeltaHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/col_update_time_delta_histogram",
                "ColUpdateTimeDeltaHistogram");
        coordinatorReadLatency = Metrics.newTimer(
                factory.createMetricName("CoordinatorReadLatency"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
        coordinatorScanLatency = Metrics.newTimer(
                factory.createMetricName("CoordinatorScanLatency"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
        waitingOnFreeMemtableSpace = Metrics.newTimer(
                factory.createMetricName("WaitingOnFreeMemtableSpace"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);

        trueSnapshotsSize = createColumnFamilyGauge(
                "/column_family/metrics/true_snapshots_size", "SnapshotsSize");
        rowCacheHitOutOfRange = createColumnFamilyCounter(
                "/column_family/metrics/row_cache_hit_out_of_range",
                "RowCacheHitOutOfRange");
        rowCacheHit = createColumnFamilyCounter(
                "/column_family/metrics/row_cache_hit", "RowCacheHit");
        rowCacheMiss = createColumnFamilyCounter(
                "/column_family/metrics/row_cache_miss", "RowCacheMiss");

        casPrepare = new LatencyMetrics("/column_family/metrics/cas_prepare/"
                + cfName, factory, "CasPrepare");
        casPropose = new LatencyMetrics("/column_family/metrics/cas_propose/"
                + cfName, factory, "CasPropose");
        casCommit = new LatencyMetrics("/column_family/metrics/cas_commit/"
                + cfName, factory, "CasCommit");
    }

    /**
     * Release all associated metrics.
     */
    public void release() {
        for (String name : all) {
            allColumnFamilyMetrics.get(name).remove(
                    Metrics.defaultRegistry().allMetrics()
                            .get(factory.createMetricName(name)));
            Metrics.defaultRegistry().removeMetric(
                    factory.createMetricName(name));
        }
        readLatency.release();
        writeLatency.release();
        rangeLatency.release();
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("EstimatedRowSizeHistogram"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("EstimatedColumnCountHistogram"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("KeyCacheHitRate"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("CoordinatorReadLatency"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("CoordinatorScanLatency"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("WaitingOnFreeMemtableSpace"));
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected Gauge<Double> createColumnFamilyGaugeDouble(String url,
            final String name) {
        Gauge<Double> gauge = new Gauge<Double>() {
            public Double value() {
                return c.getDoubleValue(url + "/" + cfName);
            }
        };
        return createColumnFamilyGauge(url, name, gauge);
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected Gauge<Long> createColumnFamilyGauge(String url, final String name) {
        Gauge<Long> gauge = new Gauge<Long>() {
            public Long value() {
                return c.getLongValue(url + "/" + cfName);
            }
        };
        return createColumnFamilyGauge(url, name, gauge);
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected Gauge<Integer> createColumnFamilyGaugeInt(String url,
            final String name) {
        Gauge<Integer> gauge = new Gauge<Integer>() {
            public Integer value() {
                return c.getIntValue(url + "/" + cfName);
            }
        };
        return createColumnFamilyGauge(url, name, gauge);
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected <T extends Number> Gauge<T> createColumnFamilyGauge(String url,
            final String name, Gauge<T> gauge) {
        return createColumnFamilyGauge(name, gauge, new Gauge<Long>() {
            public Long value() {
                // This is an optimization: call once for all column families
                // instead of iterating over all of them
                return c.getLongValue(url);
            }
        });
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge is defined as the globalGauge parameter
     */
    protected <G, T> Gauge<T> createColumnFamilyGauge(String name,
            Gauge<T> gauge, Gauge<G> globalGauge) {
        Gauge<T> cfGauge = APIMetrics.newGauge(factory.createMetricName(name),
                gauge);
        if (register(name, cfGauge)) {
            Metrics.newGauge(globalNameFactory.createMetricName(name),
                    globalGauge);
        }
        return cfGauge;
    }

    /**
     * Creates a counter that will also have a global counter that's the sum
     * of all counters across different column families
     */
    protected Counter createColumnFamilyCounter(String url, final String name) {
        Counter cfCounter = APIMetrics.newCounter(url + "/" + cfName,
                factory.createMetricName(name));
        if (register(name, cfCounter)) {
            Metrics.newGauge(globalNameFactory.createMetricName(name),
                    new Gauge<Long>() {
                        public Long value() {
                            // This is an optimization: call once for all
                            // column families instead of iterating over all
                            // of them
                            return c.getLongValue(url);
                        }
                    });
        }
        return cfCounter;
    }

    /**
     * Create a histogram-like interface that will register a CF, keyspace
     * and global level histogram and forward any updates to all of them
     */
    protected ColumnFamilyHistogram createColumnFamilyHistogram(String url,
            String name) {
        Histogram cfHistogram = APIMetrics.newHistogram(url + "/" + cfName,
                factory.createMetricName(name), true);
        register(name, cfHistogram);

        // TBD add keyspace and global histograms
        // keyspaceHistogram,
        // Metrics.newHistogram(globalNameFactory.createMetricName(name),
        // true));
        return new ColumnFamilyHistogram(cfHistogram, null, null);
    }

    /**
     * Registers a metric to be removed when unloading CF.
     *
     * @return true if first time metric with that name has been registered
     */
    private boolean register(String name, Metric metric) {
        boolean ret = allColumnFamilyMetrics.putIfAbsent(name,
                new HashSet<Metric>()) == null;
        allColumnFamilyMetrics.get(name).add(metric);
        all.add(name);
        return ret;
    }

    public class ColumnFamilyHistogram {
        public final Histogram[] all;
        public final Histogram cf;

        private ColumnFamilyHistogram(Histogram cf, Histogram keyspace,
                Histogram global) {
            this.cf = cf;
            this.all = new Histogram[] { cf, keyspace, global };
        }
    }

    class ColumnFamilyMetricNameFactory implements MetricNameFactory {
        private final String keyspaceName;
        private final String columnFamilyName;
        private final boolean isIndex;

        ColumnFamilyMetricNameFactory(ColumnFamilyStore cfs) {
            this.keyspaceName = cfs.getKeyspace();
            this.columnFamilyName = cfs.getColumnFamilyName();
            isIndex = cfs.isIndex();
        }

        public MetricName createMetricName(String metricName) {
            String groupName = ColumnFamilyMetrics.class.getPackage().getName();
            String type = isIndex ? "IndexColumnFamily" : "ColumnFamily";

            StringBuilder mbeanName = new StringBuilder();
            mbeanName.append(groupName).append(":");
            mbeanName.append("type=").append(type);
            mbeanName.append(",keyspace=").append(keyspaceName);
            mbeanName.append(",scope=").append(columnFamilyName);
            mbeanName.append(",name=").append(metricName);
            return new MetricName(groupName, type, metricName, keyspaceName
                    + "." + columnFamilyName, mbeanName.toString());
        }
    }

    static class AllColumnFamilyMetricNameFactory implements MetricNameFactory {
        public MetricName createMetricName(String metricName) {
            String groupName = ColumnFamilyMetrics.class.getPackage().getName();
            StringBuilder mbeanName = new StringBuilder();
            mbeanName.append(groupName).append(":");
            mbeanName.append("type=ColumnFamily");
            mbeanName.append(",name=").append(metricName);
            return new MetricName(groupName, "ColumnFamily", metricName, "all",
                    mbeanName.toString());
        }
    }
}
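The createColumnFamilyGauge/Counter helpers above implement a two-level pattern: every column family registers its own metric under its factory name, and the first registration of a given name also creates one global metric that queries the aggregate URL (without the cfName suffix) in a single API call instead of summing the per-CF values. A short sketch (editorial; it assumes two already-constructed stores cfs1 = ks1:cf1 and cfs2 = ks1:cf2):

ColumnFamilyMetrics m1 = new ColumnFamilyMetrics(cfs1); // registers per-CF gauges
                                                        // plus the global ones
ColumnFamilyMetrics m2 = new ColumnFamilyMetrics(cfs2); // register() now returns
                                                        // false for known names,
                                                        // so globals are created once
long one = m1.memtableColumnsCount.value();
// issues GET /column_family/metrics/memtable_columns_count/ks1:cf1;
// the matching global gauge instead reads
// GET /column_family/metrics/memtable_columns_count (all CFs in one call)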
src/main/java/org/apache/cassandra/metrics/LatencyMetrics.java
@@ -98,7 +98,7 @@ public class LatencyMetrics {
        latency = APIMetrics.newTimer(
                factory.createMetricName(namePrefix + "Latency"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
-        totalLatency = APIMetrics.newCounter(url + "total_latency",
+        totalLatency = APIMetrics.newCounter(url,
                factory.createMetricName(namePrefix + "TotalLatency"));
    }