Merge "Complete the nodetool cfstats support" from Amnon

"This series complete the scylla series to support the nodetool cfstatus support.

After this series it will be possible to call nodetool cfstats and get meaningful output.

An output example:
./bin/nodetool cfstats keyspace1
Keyspace: keyspace1
        Read Count: 87657
        Read Latency: 1.1418900715287998 ms.
        Write Count: 87177
        Write Latency: 0.022303761313190406 ms.
        Pending Flushes: 0
                Table: standard1
                SSTable count: 8
                SSTables in each level: [
                Space used (live): 92356832
                Space used (total): 92356832
                Space used by snapshots (total): 0
                Off heap memory used (total): 106430512
                SSTable Compression Ratio: 0.0
                Number of keys (estimate): 328672
                Memtable cell count: 100000
                Memtable data size: 84800254
                Memtable off heap memory used: 105906176
                Memtable switch count: 4
                Local read count: 92854
                Local read latency: 1.039 ms
                Local write count: 93880
                Local write latency: 1.045 ms
                Pending flushes: 0
                Bloom filter false positives: 0
                Bloom filter false ratio: 0.00000
                Bloom filter space used: 208416
                Bloom filter off heap memory used: 524336
                Index summary off heap memory used: 0
                Compression metadata off heap memory used: 0
                Compacted partition minimum bytes: 259
                Compacted partition maximum bytes: 310"
Merged by Pekka Enberg on 2015-10-06 12:23:09 +03:00
commit a90e080ee6
6 changed files with 35 additions and 20 deletions


@@ -551,11 +551,13 @@ public class APIClient {
         res.min = obj.getJsonNumber("min").longValue();
         res.sum = obj.getJsonNumber("sum").longValue();
         res.variance = obj.getJsonNumber("variance").doubleValue();
-        res.svariance = obj.getJsonNumber("svariance").doubleValue();
+        res.mean = obj.getJsonNumber("mean").doubleValue();
         JsonArray arr = obj.getJsonArray("sample");
-        res.sample = new long[arr.size()];
-        for (int i = 0; i < arr.size(); i++) {
-            res.sample[i] = arr.getJsonNumber(i).longValue();
+        if (arr != null) {
+            res.sample = new long[arr.size()];
+            for (int i = 0; i < arr.size(); i++) {
+                res.sample[i] = arr.getJsonNumber(i).longValue();
+            }
         }
         return res;
     }

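The null check above matters because the REST API may return a histogram without a "sample" array. A minimal, self-contained sketch of the same parsing pattern (the class name and the JSON body are invented for illustration; javax.json is what the surrounding code already uses):

import java.io.StringReader;
import javax.json.Json;
import javax.json.JsonArray;
import javax.json.JsonObject;

// Illustrative sketch, not the project's APIClient: parses the histogram
// fields and guards against a missing "sample" array.
public class HistogramJsonSketch {
    public static void main(String[] args) {
        // "sample" is deliberately absent here, as the API may omit it.
        String body = "{\"count\":2,\"min\":1,\"max\":5,\"sum\":6,"
                + "\"variance\":4.0,\"mean\":3.0}";
        JsonObject obj = Json.createReader(new StringReader(body)).readObject();

        long count = obj.getJsonNumber("count").longValue();
        double mean = obj.getJsonNumber("mean").doubleValue();

        long[] sample = new long[0];
        JsonArray arr = obj.getJsonArray("sample"); // null when the key is missing
        if (arr != null) {
            sample = new long[arr.size()];
            for (int i = 0; i < arr.size(); i++) {
                sample[i] = arr.getJsonNumber(i).longValue();
            }
        }
        System.out.println(count + " values, mean " + mean
                + ", sample size " + sample.length);
    }
}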

@@ -42,6 +42,12 @@ public class APIHistogram extends Histogram {
             sampleField.setAccessible(true);
             countField = Histogram.class.getDeclaredField("count");
             countField.setAccessible(true);
+            try {
+                getCount().set(0);
+            } catch (IllegalArgumentException | IllegalAccessException e) {
+                // There's no reason to get here
+                // and there's nothing we can do even if we would
+            }
         } catch (NoSuchFieldException | SecurityException e) {
             e.printStackTrace();
         }
@@ -104,16 +110,18 @@ public class APIHistogram extends Histogram {
         clear();
         HistogramValues vals = c.getHistogramValue(url);
         try {
-            for (long v : vals.sample) {
-                getSample().update(v);
+            if (vals.sample != null) {
+                for (long v : vals.sample) {
+                    getSample().update(v);
+                }
             }
             getCount().set(vals.count);
             getMax().set(vals.max);
             getMin().set(vals.min);
             getSum().set(vals.sum);
             double[] newValue = new double[2];
-            newValue[0] = vals.variance;
-            newValue[1] = vals.svariance;
+            newValue[0] = vals.mean;
+            newValue[1] = vals.variance;
             getVariance().getAndSet(newValue);
         } catch (IllegalArgumentException | IllegalAccessException e) {
             e.printStackTrace();

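The added getCount().set(0) call earlier in this file's diff works because APIHistogram reaches into its Histogram superclass through reflection (the sampleField/countField setup shown above). A standalone sketch of that reflection pattern, with invented class names rather than the project's:

import java.lang.reflect.Field;
import java.util.concurrent.atomic.AtomicLong;

// Illustrative only: mirrors how a private counter living in a superclass
// can be reset through reflection. BaseCounter/ReflectionSketch are made up.
public class ReflectionSketch {
    static class BaseCounter {
        private final AtomicLong count = new AtomicLong(42);
        long getCount() { return count.get(); }
    }

    public static void main(String[] args) throws Exception {
        BaseCounter counter = new BaseCounter();
        Field countField = BaseCounter.class.getDeclaredField("count");
        countField.setAccessible(true);
        // Read the private AtomicLong and mutate it, as getCount().set(0) does.
        ((AtomicLong) countField.get(counter)).set(0);
        System.out.println(counter.getCount()); // prints 0
    }
}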

@@ -6,6 +6,6 @@ public class HistogramValues {
     public long max;
     public long sum;
     public double variance;
-    public double svariance;
+    public double mean;
     public long sample[];
 }


@@ -60,7 +60,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     public static void register_mbeans() {
         TimerTask taskToExecute = new CheckRegistration();
-        timer.scheduleAtFixedRate(taskToExecute, 100, INTERVAL);
+        timer.schedule(taskToExecute, 100, INTERVAL);
     }
 
     public ColumnFamilyStore(String type, String keyspace, String name) {

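The scheduleAtFixedRate to schedule switch changes java.util.Timer from fixed-rate to fixed-delay execution: a fixed-rate task fires in rapid catch-up bursts after a slow run, while fixed-delay simply waits the full period again. A small sketch (task, delay and period are invented, not the registration check's real values):

import java.util.Timer;
import java.util.TimerTask;

// Illustrative only: contrasts fixed-delay (schedule) with fixed-rate
// (scheduleAtFixedRate).
public class TimerSketch {
    public static void main(String[] args) throws InterruptedException {
        Timer timer = new Timer(true);
        TimerTask check = new TimerTask() {
            @Override
            public void run() {
                System.out.println("check at " + System.currentTimeMillis());
            }
        };
        // Fixed-delay: each run is scheduled relative to when the previous run
        // actually executed, so a slow run does not trigger catch-up bursts.
        timer.schedule(check, 100, 1000);
        // Fixed-rate alternative (the behaviour the commit moves away from):
        // timer.scheduleAtFixedRate(check, 100, 1000);
        Thread.sleep(3500);
        timer.cancel();
    }
}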

@@ -83,6 +83,8 @@ public class ColumnFamilyMetrics {
     public final Gauge<Double> compressionRatio;
     /** Histogram of estimated row size (in bytes). */
     public final Gauge<long[]> estimatedRowSizeHistogram;
+    /** Approximate number of keys in table. */
+    public final Gauge<Long> estimatedRowCount;
     /** Histogram of estimated number of columns. */
     public final Gauge<long[]> estimatedColumnCountHistogram;
     /** Histogram of the number of sstable data files accessed per read */
@@ -229,6 +231,15 @@ public class ColumnFamilyMetrics {
                                 + cfName);
                     }
                 });
+        estimatedRowCount= Metrics.newGauge(
+                factory.createMetricName("EstimatedRowCount"),
+                new Gauge<Long>() {
+                    public Long value() {
+                        return c.getLongValue("/column_family/metrics/estimated_row_count/"
+                                + cfName);
+                    }
+                });
         estimatedColumnCountHistogram = Metrics.newGauge(
                 factory.createMetricName("EstimatedColumnCountHistogram"),
                 new Gauge<long[]>() {

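The new gauge is what feeds the "Number of keys (estimate)" line in the cfstats output quoted above. A minimal sketch of registering such a gauge, assuming the Yammer metrics-core 2.x API that the surrounding code appears to use (the class name and the canned value are invented; the real gauge polls the Scylla REST API instead):

import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Gauge;

// Illustrative only: registers a Gauge<Long> in the style of the diff above,
// but returns a fixed value rather than querying
// /column_family/metrics/estimated_row_count/<cf>.
public class GaugeSketch {
    public static void main(String[] args) {
        Gauge<Long> estimatedRowCount = Metrics.newGauge(
                GaugeSketch.class, "EstimatedRowCount",
                new Gauge<Long>() {
                    @Override
                    public Long value() {
                        return 328672L; // canned value for the sketch
                    }
                });
        System.out.println("Number of keys (estimate): " + estimatedRowCount.value());
    }
}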

@@ -92,14 +92,7 @@ public class LatencyMetrics {
      */
     public LatencyMetrics(String url, MetricNameFactory factory,
             String namePrefix) {
-        this.factory = factory;
-        this.namePrefix = namePrefix;
-        latency = APIMetrics.newTimer(url + "/histogram",
-                factory.createMetricName(namePrefix + "Latency"),
-                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
-        totalLatency = APIMetrics.newCounter(url,
-                factory.createMetricName(namePrefix + "TotalLatency"));
+        this(url, null, factory, namePrefix);
     }
 
     public LatencyMetrics(String url, String paramName,
@@ -107,10 +100,11 @@ public class LatencyMetrics {
         this.factory = factory;
         this.namePrefix = namePrefix;
-        latency = APIMetrics.newTimer(url + "/histogram/" + paramName,
+        paramName = (paramName == null)? "" : "/" + paramName;
+        latency = APIMetrics.newTimer(url + "/histogram" + paramName,
                 factory.createMetricName(namePrefix + "Latency"),
                 TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
-        totalLatency = APIMetrics.newCounter(url + "/" + paramName,
+        totalLatency = APIMetrics.newCounter(url + paramName,
                 factory.createMetricName(namePrefix + "TotalLatency"));
     }
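
The net effect of these two hunks is that the shorter constructor delegates to the fuller one, which folds a null paramName into an empty URL suffix instead of producing a stray "/". A standalone sketch of that pattern (class and field names are invented for illustration):

// Illustrative only: constructor delegation plus optional URL segment,
// in the style the commit switches LatencyMetrics to.
public class UrlBuilderSketch {
    private final String histogramUrl;

    public UrlBuilderSketch(String url) {
        this(url, null); // delegate instead of duplicating the setup
    }

    public UrlBuilderSketch(String url, String paramName) {
        paramName = (paramName == null) ? "" : "/" + paramName;
        this.histogramUrl = url + "/histogram" + paramName;
    }

    public static void main(String[] args) {
        System.out.println(new UrlBuilderSketch("/column_family/metrics/read").histogramUrl);
        // -> /column_family/metrics/read/histogram
        System.out.println(new UrlBuilderSketch("/column_family/metrics/read", "cf").histogramUrl);
        // -> /column_family/metrics/read/histogram/cf
    }
}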