From 9a44228c71dc7a3ae42959ce65bb73f0f861a428 Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:53:09 +0000 Subject: [PATCH 01/32] APIClient: Add some "post" overloads --- src/main/java/com/scylladb/jmx/api/APIClient.java | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/main/java/com/scylladb/jmx/api/APIClient.java b/src/main/java/com/scylladb/jmx/api/APIClient.java index 52e7a56..6d7b49a 100644 --- a/src/main/java/com/scylladb/jmx/api/APIClient.java +++ b/src/main/java/com/scylladb/jmx/api/APIClient.java @@ -106,8 +106,12 @@ public class APIClient { } public Response post(String path, MultivaluedMap queryParams) { + return post(path, queryParams, null); + } + + public Response post(String path, MultivaluedMap queryParams, Object object, String type) { try { - Response response = get(path, queryParams).post(Entity.entity(null, MediaType.TEXT_PLAIN)); + Response response = get(path, queryParams).post(Entity.entity(object, type)); if (response.getStatus() != Response.Status.OK.getStatusCode() ) { throw getException("Scylla API server HTTP POST to URL '" + path + "' failed", response.readEntity(String.class)); } @@ -117,6 +121,10 @@ public class APIClient { } } + public Response post(String path, MultivaluedMap queryParams, Object object) { + return post(path, queryParams, object, MediaType.TEXT_PLAIN); + } + public void post(String path) { post(path, null); } From 85b39d7fbe3e892b223f65c633bea3f324243586 Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:30:04 +0000 Subject: [PATCH 02/32] ColumnFamilyStore: update to c3 compat Note: some calls still unimplemented --- .../cassandra/db/ColumnFamilyStore.java | 110 +++++-- .../cassandra/db/ColumnFamilyStoreMBean.java | 303 ++---------------- 2 files changed, 114 insertions(+), 299 deletions(-) diff --git a/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java index ad62dad..0bee7af 
100644 --- a/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ b/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -23,29 +23,43 @@ */ package org.apache.cassandra.db; +import static java.lang.String.valueOf; +import static javax.json.Json.createObjectBuilder; +import static javax.json.Json.createReader; +import static javax.ws.rs.core.MediaType.APPLICATION_JSON; + +import java.io.StringReader; import java.lang.management.ManagementFactory; -import java.net.ConnectException; -import java.util.*; -import java.util.concurrent.*; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Set; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.ExecutionException; import javax.json.JsonArray; import javax.json.JsonObject; -import javax.management.*; +import javax.json.JsonObjectBuilder; +import javax.json.JsonValue; +import javax.management.MBeanServer; +import javax.management.ObjectName; import javax.management.openmbean.CompositeData; import javax.management.openmbean.OpenDataException; -import javax.ws.rs.ProcessingException; import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; import org.apache.cassandra.metrics.ColumnFamilyMetrics; -import com.google.common.base.Throwables; import com.scylladb.jmx.api.APIClient; public class ColumnFamilyStore implements ColumnFamilyStoreMBean { private static final java.util.logging.Logger logger = java.util.logging.Logger .getLogger(ColumnFamilyStore.class.getName()); private APIClient c = new APIClient(); + @SuppressWarnings("unused") private String type; private String keyspace; private String name; @@ -156,6 +170,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { /** * @return the name of the column family */ + @Override public String getColumnFamilyName() { log(" getColumnFamilyName()"); return name; @@ -476,6 +491,7 @@ public 
class ColumnFamilyStore implements ColumnFamilyStoreMBean { /** * Gets the minimum number of sstables in queue before compaction kicks off */ + @Override public int getMinimumCompactionThreshold() { log(" getMinimumCompactionThreshold()"); return c.getIntValue("column_family/minimum_compaction/" + getCFName()); @@ -484,6 +500,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { /** * Sets the minimum number of sstables in queue before compaction kicks off */ + @Override public void setMinimumCompactionThreshold(int threshold) { log(" setMinimumCompactionThreshold(int threshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -494,6 +511,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { /** * Gets the maximum number of sstables in queue before compaction kicks off */ + @Override public int getMaximumCompactionThreshold() { log(" getMaximumCompactionThreshold()"); return c.getIntValue("column_family/maximum_compaction/" + getCFName()); @@ -503,6 +521,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { * Sets the maximum and maximum number of SSTables in queue before * compaction kicks off */ + @Override public void setCompactionThresholds(int minThreshold, int maxThreshold) { log(" setCompactionThresholds(int minThreshold, int maxThreshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -514,6 +533,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { /** * Sets the maximum number of sstables in queue before compaction kicks off */ + @Override public void setMaximumCompactionThreshold(int threshold) { log(" setMaximumCompactionThreshold(int threshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -546,6 +566,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { /** * Get the compression parameters */ + @Override public Map getCompressionParameters() { log(" getCompressionParameters()"); return c.getMapStrValue( @@ -558,6 +579,7 @@ 
public class ColumnFamilyStore implements ColumnFamilyStoreMBean { * @param opts * map of string names to values */ + @Override public void setCompressionParameters(Map opts) { log(" setCompressionParameters(Map opts)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -569,6 +591,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { /** * Set new crc check chance */ + @Override public void setCrcCheckChance(double crcCheckChance) { log(" setCrcCheckChance(double crcCheckChance)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -576,6 +599,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { c.post("column_family/crc_check_chance/" + getCFName(), queryParams); } + @Override public boolean isAutoCompactionDisabled() { log(" isAutoCompactionDisabled()"); return c.getBooleanValue("column_family/autocompaction/" + getCFName()); @@ -595,6 +619,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { return c.getDoubleValue(""); } + @Override public long estimateKeys() { log(" estimateKeys()"); return c.getLongValue("column_family/estimate_keys/" + getCFName()); @@ -632,6 +657,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { * * @return list of the index names */ + @Override public List getBuiltIndexes() { log(" getBuiltIndexes()"); return c.getListStrValue("column_family/built_indexes/" + getCFName()); @@ -643,6 +669,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { * @param key * @return list of filenames containing the key */ + @Override public List getSSTablesForKey(String key) { log(" getSSTablesForKey(String key)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -655,6 +682,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { * Scan through Keyspace/ColumnFamily's data directory determine which * SSTables should be loaded and load them */ + @Override public void loadNewSSTables() { log(" loadNewSSTables()"); 
c.post("column_family/sstable/" + getCFName()); @@ -664,6 +692,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { * @return the number of SSTables in L0. Always return 0 if Leveled * compaction is not enabled. */ + @Override public int getUnleveledSSTables() { log(" getUnleveledSSTables()"); return c.getIntValue("column_family/sstables/unleveled/" + getCFName()); @@ -674,6 +703,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { * used. array index corresponds to level(int[0] is for level 0, * ...). */ + @Override public int[] getSSTableCountPerLevel() { log(" getSSTableCountPerLevel()"); int[] res = c.getIntArrValue( @@ -692,6 +722,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { * * @return ratio */ + @Override public double getDroppableTombstoneRatio() { log(" getDroppableTombstoneRatio()"); return c.getDoubleValue("column_family/droppable_ratio/" + getCFName()); @@ -701,6 +732,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { * @return the size of SSTables in "snapshots" subdirectory which aren't * live anymore */ + @Override public long trueSnapshotsSize() { log(" trueSnapshotsSize()"); return c.getLongValue("column_family/metrics/snapshots_size/" + getCFName()); @@ -709,41 +741,70 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { public String getKeyspace() { return keyspace; } - + @Override - public long getRangeCount() { - log("getRangeCount()"); - return metric.rangeLatency.latency.count(); + public String getTableName() { + log(" getTableName()"); + return name; } @Override - public long getTotalRangeLatencyMicros() { - log("getTotalRangeLatencyMicros()"); - return metric.rangeLatency.totalLatency.count(); + public void forceMajorCompaction(boolean splitOutput) throws ExecutionException, InterruptedException { + log(" forceMajorCompaction(boolean) throws ExecutionException, InterruptedException"); + MultivaluedMap queryParams = new 
MultivaluedHashMap(); + queryParams.putSingle("value", valueOf(splitOutput)); + c.post("column_family/major_compaction/" + getCFName(), queryParams); } @Override - public long[] getLifetimeRangeLatencyHistogramMicros() { - log("getLifetimeRangeLatencyHistogramMicros()"); - return metric.rangeLatency.totalLatencyHistogram.getBuckets(false); + public void setCompactionParametersJson(String options) { + log(" setCompactionParametersJson"); + c.post("column_family/compaction_parameters/" + getCFName(), null, options, APPLICATION_JSON); } @Override - public long[] getRecentRangeLatencyHistogramMicros() { - log("getRecentRangeLatencyHistogramMicros()"); - return metric.rangeLatency.getRecentLatencyHistogram(); + public String getCompactionParametersJson() { + log(" getCompactionParametersJson"); + return c.getStringValue("column_family/compaction_parameters/" + getCFName()); } @Override - public double getRecentRangeLatencyMicros() { - log("getRecentRangeLatencyMicros()"); - return metric.rangeLatency.getRecentLatency(); + public void setCompactionParameters(Map options) { + JsonObjectBuilder b = createObjectBuilder(); + for (Map.Entry e : options.entrySet()) { + b.add(e.getKey(), e.getValue()); + } + setCompactionParametersJson(b.build().toString()); + } + + @Override + public Map getCompactionParameters() { + String s = getCompactionParametersJson(); + JsonObject o = createReader(new StringReader(s)).readObject(); + HashMap res = new HashMap<>(); + for (Entry e : o.entrySet()) { + res.put(e.getKey(), e.getValue().toString()); + } + return res; + } + + @Override + public boolean isCompactionDiskSpaceCheckEnabled() { + // TODO Auto-generated method stub + log(" isCompactionDiskSpaceCheckEnabled()"); + return false; + } + + @Override + public void compactionDiskSpaceCheck(boolean enable) { + // TODO Auto-generated method stub + log(" compactionDiskSpaceCheck()"); } @Override public void beginLocalSampling(String sampler, int capacity) { // TODO Auto-generated method stub 
- log("beginLocalSampling()"); + log(" beginLocalSampling()"); } @@ -751,8 +812,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { public CompositeData finishLocalSampling(String sampler, int count) throws OpenDataException { // TODO Auto-generated method stub - log("finishLocalSampling()"); + log(" finishLocalSampling()"); return null; } - } diff --git a/src/main/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java b/src/main/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java index 4df593b..a74316e 100644 --- a/src/main/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java +++ b/src/main/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java @@ -32,258 +32,17 @@ public interface ColumnFamilyStoreMBean /** * @return the name of the column family */ + @Deprecated public String getColumnFamilyName(); - /** - * Returns the total amount of data stored in the memtable, including - * column related overhead. - * - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableOnHeapSize - * @return The size in bytes. - * @deprecated - */ - @Deprecated - public long getMemtableDataSize(); - - /** - * Returns the total number of columns present in the memtable. - * - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableColumnsCount - * @return The number of columns. - */ - @Deprecated - public long getMemtableColumnsCount(); - - /** - * Returns the number of times that a flush has resulted in the - * memtable being switched out. 
- * - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableSwitchCount - * @return the number of memtable switches - */ - @Deprecated - public int getMemtableSwitchCount(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentSSTablesPerRead - * @return a histogram of the number of sstable data files accessed per read: reading this property resets it - */ - @Deprecated - public long[] getRecentSSTablesPerReadHistogram(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#sstablesPerReadHistogram - * @return a histogram of the number of sstable data files accessed per read - */ - @Deprecated - public long[] getSSTablesPerReadHistogram(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency - * @return the number of read operations on this column family - */ - @Deprecated - public long getReadCount(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency - * @return total read latency (divide by getReadCount() for average) - */ - @Deprecated - public long getTotalReadLatencyMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency - * @return an array representing the latency histogram - */ - @Deprecated - public long[] getLifetimeReadLatencyHistogramMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency - * @return an array representing the latency histogram - */ - @Deprecated - public long[] getRecentReadLatencyHistogramMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency - * @return average latency per read operation since the last call - */ - @Deprecated - public double getRecentReadLatencyMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency - * @return the number of write operations on this column family - */ - @Deprecated - public long getWriteCount(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency - * 
@return total write latency (divide by getReadCount() for average) - */ - @Deprecated - public long getTotalWriteLatencyMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency - * @return an array representing the latency histogram - */ - @Deprecated - public long[] getLifetimeWriteLatencyHistogramMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency - * @return an array representing the latency histogram - */ - @Deprecated - public long[] getRecentWriteLatencyHistogramMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency - * @return average latency per write operation since the last call - */ - @Deprecated - public double getRecentWriteLatencyMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency - * @return the number of range slice operations on this column family - */ - @Deprecated - public long getRangeCount(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency - * @return total range slice latency (divide by getRangeCount() for average) - */ - @Deprecated - public long getTotalRangeLatencyMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency - * @return an array representing the latency histogram - */ - @Deprecated - public long[] getLifetimeRangeLatencyHistogramMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency - * @return an array representing the latency histogram - */ - @Deprecated - public long[] getRecentRangeLatencyHistogramMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency - * @return average latency per range slice operation since the last call - */ - @Deprecated - public double getRecentRangeLatencyMicros(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#pendingFlushes - * @return the estimated number of tasks pending for this column family - */ - @Deprecated 
- public int getPendingTasks(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveSSTableCount - * @return the number of SSTables on disk for this CF - */ - @Deprecated - public int getLiveSSTableCount(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveDiskSpaceUsed - * @return disk space used by SSTables belonging to this CF - */ - @Deprecated - public long getLiveDiskSpaceUsed(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#totalDiskSpaceUsed - * @return total disk space used by SSTables belonging to this CF, including obsolete ones waiting to be GC'd - */ - @Deprecated - public long getTotalDiskSpaceUsed(); + public String getTableName(); /** * force a major compaction of this column family + * + * @param splitOutput true if the output of the major compaction should be split in several sstables */ - public void forceMajorCompaction() throws ExecutionException, InterruptedException; - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#minRowSize - * @return the size of the smallest compacted row - */ - @Deprecated - public long getMinRowSize(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#maxRowSize - * @return the size of the largest compacted row - */ - @Deprecated - public long getMaxRowSize(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#meanRowSize - * @return the average row size across all the sstables - */ - @Deprecated - public long getMeanRowSize(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalsePositives - */ - @Deprecated - public long getBloomFilterFalsePositives(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalsePositives - */ - @Deprecated - public long getRecentBloomFilterFalsePositives(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalseRatio - */ - @Deprecated - public double getBloomFilterFalseRatio(); - - /** - * @see 
org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalseRatio - */ - @Deprecated - public double getRecentBloomFilterFalseRatio(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterDiskSpaceUsed - */ - @Deprecated - public long getBloomFilterDiskSpaceUsed(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterOffHeapMemoryUsed - */ - @Deprecated - public long getBloomFilterOffHeapMemoryUsed(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#indexSummaryOffHeapMemoryUsed - */ - @Deprecated - public long getIndexSummaryOffHeapMemoryUsed(); - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionMetadataOffHeapMemoryUsed - */ - @Deprecated - public long getCompressionMetadataOffHeapMemoryUsed(); + public void forceMajorCompaction(boolean splitOutput) throws ExecutionException, InterruptedException; /** * Gets the minimum number of sstables in queue before compaction kicks off @@ -311,15 +70,24 @@ public interface ColumnFamilyStoreMBean public void setMaximumCompactionThreshold(int threshold); /** - * Sets the compaction strategy by class name - * @param className the name of the compaction strategy class + * Sets the compaction parameters locally for this node + * + * Note that this will be set until an ALTER with compaction = {..} is executed or the node is restarted + * + * @param options compaction options with the same syntax as when doing ALTER ... 
WITH compaction = {..} */ - public void setCompactionStrategyClass(String className); + public void setCompactionParametersJson(String options); + public String getCompactionParametersJson(); /** - * Gets the compaction strategy class name + * Sets the compaction parameters locally for this node + * + * Note that this will be set until an ALTER with compaction = {..} is executed or the node is restarted + * + * @param options compaction options map */ - public String getCompactionStrategyClass(); + public void setCompactionParameters(Map options); + public Map getCompactionParameters(); /** * Get the compression parameters @@ -339,31 +107,8 @@ public interface ColumnFamilyStoreMBean public boolean isAutoCompactionDisabled(); - /** Number of tombstoned cells retreived during the last slicequery */ - @Deprecated - public double getTombstonesPerSlice(); - - /** Number of live cells retreived during the last slicequery */ - @Deprecated - public double getLiveCellsPerSlice(); - public long estimateKeys(); - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedRowSizeHistogram - */ - @Deprecated - public long[] getEstimatedRowSizeHistogram(); - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedColumnCountHistogram - */ - @Deprecated - public long[] getEstimatedColumnCountHistogram(); - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionRatio - */ - @Deprecated - public double getCompressionRatio(); /** * Returns a list of the names of the built column indexes for current store @@ -416,4 +161,14 @@ public interface ColumnFamilyStoreMBean * @return top count items for the sampler since beginLocalSampling was called */ public CompositeData finishLocalSampling(String sampler, int count) throws OpenDataException; + + /* + Is Compaction space check enabled + */ + public boolean isCompactionDiskSpaceCheckEnabled(); + + /* + Enable/Disable compaction space check + */ + public void compactionDiskSpaceCheck(boolean 
enable); } From 39e4cd8f3f537cb52fe57e249b48639b26279bb8 Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:30:46 +0000 Subject: [PATCH 03/32] CommitLog: update to c3 compat --- .../cassandra/db/commitlog/CommitLog.java | 61 ++++++++----------- .../db/commitlog/CommitLogMBean.java | 46 ++++++-------- 2 files changed, 45 insertions(+), 62 deletions(-) diff --git a/src/main/java/org/apache/cassandra/db/commitlog/CommitLog.java b/src/main/java/org/apache/cassandra/db/commitlog/CommitLog.java index 784b8bb..e943351 100644 --- a/src/main/java/org/apache/cassandra/db/commitlog/CommitLog.java +++ b/src/main/java/org/apache/cassandra/db/commitlog/CommitLog.java @@ -22,9 +22,14 @@ */ package org.apache.cassandra.db.commitlog; -import java.io.*; +import java.io.IOException; import java.lang.management.ManagementFactory; -import java.util.*; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import javax.management.MBeanServer; import javax.management.ObjectName; @@ -65,39 +70,6 @@ public class CommitLog implements CommitLogMBean { } } - /** - * Get the number of completed tasks - * - * @see org.apache.cassandra.metrics.CommitLogMetrics#completedTasks - */ - @Deprecated - public long getCompletedTasks() { - log(" getCompletedTasks()"); - return c.getLongValue(""); - } - - /** - * Get the number of tasks waiting to be executed - * - * @see org.apache.cassandra.metrics.CommitLogMetrics#pendingTasks - */ - @Deprecated - public long getPendingTasks() { - log(" getPendingTasks()"); - return c.getLongValue(""); - } - - /** - * Get the current size used by all the commitlog segments. - * - * @see org.apache.cassandra.metrics.CommitLogMetrics#totalCommitLogSize - */ - @Deprecated - public long getTotalCommitlogSize() { - log(" getTotalCommitlogSize()"); - return c.getLongValue(""); - } - /** * Recover a single file. 
*/ @@ -170,4 +142,23 @@ public class CommitLog implements CommitLogMBean { return c.getStringValue(""); } + @Override + public long getActiveContentSize() { + // scylla does not compress commit log, so this is equivalent + return getActiveOnDiskSize(); + } + + @Override + public long getActiveOnDiskSize() { + return c.getLongValue("/commitlog/metrics/total_commit_log_size"); + } + + @Override + public Map getActiveSegmentCompressionRatios() { + HashMap res = new HashMap<>(); + for (String name : getActiveSegmentNames()) { + res.put(name, 1.0); + } + return res; + } } diff --git a/src/main/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java b/src/main/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java index 1ab3a91..e0cfd3c 100644 --- a/src/main/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java +++ b/src/main/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java @@ -17,36 +17,14 @@ */ package org.apache.cassandra.db.commitlog; + import java.io.IOException; import java.util.List; +import java.util.Map; public interface CommitLogMBean { /** - * Get the number of completed tasks - * - * @see org.apache.cassandra.metrics.CommitLogMetrics#completedTasks - */ - @Deprecated - public long getCompletedTasks(); - - /** - * Get the number of tasks waiting to be executed - * - * @see org.apache.cassandra.metrics.CommitLogMetrics#pendingTasks - */ - @Deprecated - public long getPendingTasks(); - - /** - * Get the current size used by all the commitlog segments. - * - * @see org.apache.cassandra.metrics.CommitLogMetrics#totalCommitLogSize - */ - @Deprecated - public long getTotalCommitlogSize(); - - /** - * Command to execute to archive a commitlog segment. Blank to disabled. + * Command to execute to archive a commitlog segment. Blank to disabled. */ public String getArchiveCommand(); @@ -88,8 +66,22 @@ public interface CommitLogMBean { public List getActiveSegmentNames(); /** - * @return Files which are pending for archival attempt. 
Does NOT include - * failed archive attempts. + * @return Files which are pending for archival attempt. Does NOT include failed archive attempts. */ public List getArchivingSegmentNames(); + + /** + * @return The size of the mutations in all active commit log segments (uncompressed). + */ + public long getActiveContentSize(); + + /** + * @return The space taken on disk by the commit log (compressed). + */ + public long getActiveOnDiskSize(); + + /** + * @return A map between active log segments and the compression ratio achieved for each. + */ + public Map getActiveSegmentCompressionRatios(); } From 3efcd5103b40c0074c0480a66959dc5d99de15a0 Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:31:25 +0000 Subject: [PATCH 04/32] CompactionManager: update to c3 compat --- .../db/compaction/CompactionManager.java | 61 +++++++------------ .../db/compaction/CompactionManagerMBean.java | 39 +++--------- 2 files changed, 31 insertions(+), 69 deletions(-) diff --git a/src/main/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/main/java/org/apache/cassandra/db/compaction/CompactionManager.java index 7cd95be..7df536e 100644 --- a/src/main/java/org/apache/cassandra/db/compaction/CompactionManager.java +++ b/src/main/java/org/apache/cassandra/db/compaction/CompactionManager.java @@ -76,6 +76,7 @@ public class CompactionManager implements CompactionManagerMBean { } /** List of running compaction objects. */ + @Override public List> getCompactions() { log(" getCompactions()"); List> results = new ArrayList>(); @@ -95,12 +96,14 @@ public class CompactionManager implements CompactionManagerMBean { } /** List of running compaction summary strings. 
*/ + @Override public List getCompactionSummary() { log(" getCompactionSummary()"); return c.getListStrValue("compaction_manager/compaction_summary"); } /** compaction history **/ + @Override public TabularData getCompactionHistory() { log(" getCompactionHistory()"); try { @@ -110,46 +113,6 @@ public class CompactionManager implements CompactionManagerMBean { } } - /** - * @see org.apache.cassandra.metrics.CompactionMetrics#pendingTasks - * @return estimated number of compactions remaining to perform - */ - @Deprecated - public int getPendingTasks() { - log(" getPendingTasks()"); - return c.getIntValue(""); - } - - /** - * @see org.apache.cassandra.metrics.CompactionMetrics#completedTasks - * @return number of completed compactions since server [re]start - */ - @Deprecated - public long getCompletedTasks() { - log(" getCompletedTasks()"); - return c.getLongValue(""); - } - - /** - * @see org.apache.cassandra.metrics.CompactionMetrics#bytesCompacted - * @return total number of bytes compacted since server [re]start - */ - @Deprecated - public long getTotalBytesCompacted() { - log(" getTotalBytesCompacted()"); - return c.getLongValue(""); - } - - /** - * @see org.apache.cassandra.metrics.CompactionMetrics#totalCompactionsCompleted - * @return total number of compactions since server [re]start - */ - @Deprecated - public long getTotalCompactionsCompleted() { - log(" getTotalCompactionsCompleted()"); - return c.getLongValue(""); - } - /** * Triggers the compaction of user specified sstables. You can specify files * from various keyspaces and columnfamilies. If you do so, user defined @@ -161,6 +124,7 @@ public class CompactionManager implements CompactionManagerMBean { * contain keyspace and columnfamily name in path(for 2.1+) or * file name itself. 
*/ + @Override public void forceUserDefinedCompaction(String dataFiles) { log(" forceUserDefinedCompaction(String dataFiles)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -175,6 +139,7 @@ public class CompactionManager implements CompactionManagerMBean { * the type of compaction to stop. Can be one of: - COMPACTION - * VALIDATION - CLEANUP - SCRUB - INDEX_BUILD */ + @Override public void stopCompaction(String type) { log(" stopCompaction(String type)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -185,6 +150,7 @@ public class CompactionManager implements CompactionManagerMBean { /** * Returns core size of compaction thread pool */ + @Override public int getCoreCompactorThreads() { log(" getCoreCompactorThreads()"); return c.getIntValue(""); @@ -196,6 +162,7 @@ public class CompactionManager implements CompactionManagerMBean { * @param number * New maximum of compaction threads */ + @Override public void setCoreCompactorThreads(int number) { log(" setCoreCompactorThreads(int number)"); } @@ -203,6 +170,7 @@ public class CompactionManager implements CompactionManagerMBean { /** * Returns maximum size of compaction thread pool */ + @Override public int getMaximumCompactorThreads() { log(" getMaximumCompactorThreads()"); return c.getIntValue(""); @@ -214,6 +182,7 @@ public class CompactionManager implements CompactionManagerMBean { * @param number * New maximum of compaction threads */ + @Override public void setMaximumCompactorThreads(int number) { log(" setMaximumCompactorThreads(int number)"); } @@ -221,6 +190,7 @@ public class CompactionManager implements CompactionManagerMBean { /** * Returns core size of validation thread pool */ + @Override public int getCoreValidationThreads() { log(" getCoreValidationThreads()"); return c.getIntValue(""); @@ -232,6 +202,7 @@ public class CompactionManager implements CompactionManagerMBean { * @param number * New maximum of compaction threads */ + @Override public void setCoreValidationThreads(int 
number) { log(" setCoreValidationThreads(int number)"); } @@ -239,6 +210,7 @@ public class CompactionManager implements CompactionManagerMBean { /** * Returns size of validator thread pool */ + @Override public int getMaximumValidatorThreads() { log(" getMaximumValidatorThreads()"); return c.getIntValue(""); @@ -250,8 +222,17 @@ public class CompactionManager implements CompactionManagerMBean { * @param number * New maximum of validator threads */ + @Override public void setMaximumValidatorThreads(int number) { log(" setMaximumValidatorThreads(int number)"); } + @Override + public void stopCompactionById(String compactionId) { + // scylla has neither compaction ids nor the file described in: + // "Ids can be found in the transaction log files whose name starts with compaction_, located in the table transactions folder" + // (nodetool) + // TODO: throw? + log(" stopCompactionById"); + } } diff --git a/src/main/java/org/apache/cassandra/db/compaction/CompactionManagerMBean.java b/src/main/java/org/apache/cassandra/db/compaction/CompactionManagerMBean.java index fcf514b..f101245 100644 --- a/src/main/java/org/apache/cassandra/db/compaction/CompactionManagerMBean.java +++ b/src/main/java/org/apache/cassandra/db/compaction/CompactionManagerMBean.java @@ -21,7 +21,8 @@ import java.util.List; import java.util.Map; import javax.management.openmbean.TabularData; -public interface CompactionManagerMBean { +public interface CompactionManagerMBean +{ /** List of running compaction objects. 
*/ public List> getCompactions(); @@ -31,34 +32,6 @@ public interface CompactionManagerMBean { /** compaction history **/ public TabularData getCompactionHistory(); - /** - * @see org.apache.cassandra.metrics.CompactionMetrics#pendingTasks - * @return estimated number of compactions remaining to perform - */ - @Deprecated - public int getPendingTasks(); - - /** - * @see org.apache.cassandra.metrics.CompactionMetrics#completedTasks - * @return number of completed compactions since server [re]start - */ - @Deprecated - public long getCompletedTasks(); - - /** - * @see org.apache.cassandra.metrics.CompactionMetrics#bytesCompacted - * @return total number of bytes compacted since server [re]start - */ - @Deprecated - public long getTotalBytesCompacted(); - - /** - * @see org.apache.cassandra.metrics.CompactionMetrics#totalCompactionsCompleted - * @return total number of compactions since server [re]start - */ - @Deprecated - public long getTotalCompactionsCompleted(); - /** * Triggers the compaction of user specified sstables. You can specify files * from various keyspaces and columnfamilies. If you do so, user defined @@ -79,6 +52,14 @@ public interface CompactionManagerMBean { */ public void stopCompaction(String type); + /** + * Stop an individual running compaction using the compactionId. + * @param compactionId Compaction ID of compaction to stop. Such IDs can be found in + * the transaction log files whose name starts with compaction_, + * located in the table transactions folder. 
+ */ + public void stopCompactionById(String compactionId); + /** * Returns core size of compaction thread pool */ From b7a6554ee91fa6629e07bc9ca693e93a0df25fb7 Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:32:00 +0000 Subject: [PATCH 05/32] FailureDetector: update to c3 compat --- .../apache/cassandra/gms/FailureDetector.java | 53 ++++++++++++++++++- .../cassandra/gms/FailureDetectorMBean.java | 5 ++ 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/src/main/java/org/apache/cassandra/gms/FailureDetector.java b/src/main/java/org/apache/cassandra/gms/FailureDetector.java index e1a3012..3762100 100644 --- a/src/main/java/org/apache/cassandra/gms/FailureDetector.java +++ b/src/main/java/org/apache/cassandra/gms/FailureDetector.java @@ -26,12 +26,23 @@ package org.apache.cassandra.gms; import java.lang.management.ManagementFactory; import java.net.UnknownHostException; -import java.util.*; +import java.util.HashMap; +import java.util.Map; import javax.json.JsonArray; import javax.json.JsonObject; +import javax.json.JsonValue; import javax.management.MBeanServer; import javax.management.ObjectName; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.CompositeDataSupport; +import javax.management.openmbean.CompositeType; +import javax.management.openmbean.OpenDataException; +import javax.management.openmbean.OpenType; +import javax.management.openmbean.SimpleType; +import javax.management.openmbean.TabularData; +import javax.management.openmbean.TabularDataSupport; +import javax.management.openmbean.TabularType; import com.scylladb.jmx.api.APIClient; @@ -62,19 +73,23 @@ public class FailureDetector implements FailureDetectorMBean { } } + @Override public void dumpInterArrivalTimes() { log(" dumpInterArrivalTimes()"); } + @Override public void setPhiConvictThreshold(double phi) { log(" setPhiConvictThreshold(double phi)"); } + @Override public double getPhiConvictThreshold() { log(" 
getPhiConvictThreshold()"); return c.getDoubleValue("/failure_detector/phi"); } + @Override public String getAllEndpointStates() { log(" getAllEndpointStates()"); @@ -117,24 +132,60 @@ public class FailureDetector implements FailureDetectorMBean { return res; } + @Override public String getEndpointState(String address) throws UnknownHostException { log(" getEndpointState(String address) throws UnknownHostException"); return c.getStringValue("/failure_detector/endpoints/states/" + address); } + @Override public Map getSimpleStates() { log(" getSimpleStates()"); return c.getMapStrValue("/failure_detector/simple_states"); } + @Override public int getDownEndpointCount() { log(" getDownEndpointCount()"); return c.getIntValue("/failure_detector/count/endpoint/down"); } + @Override public int getUpEndpointCount() { log(" getUpEndpointCount()"); return c.getIntValue("/failure_detector/count/endpoint/up"); } + // From origin: + // this is useless except to provide backwards compatibility in phi_convict_threshold, + // because everyone seems pretty accustomed to the default of 8, and users who have + // already tuned their phi_convict_threshold for their own environments won't need to + // change. + private final double PHI_FACTOR = 1.0 / Math.log(10.0); // 0.434... 
+ + @Override + public TabularData getPhiValues() throws OpenDataException { + final CompositeType ct = new CompositeType("Node", "Node", new String[] { "Endpoint", "PHI" }, + new String[] { "IP of the endpoint", "PHI value" }, + new OpenType[] { SimpleType.STRING, SimpleType.DOUBLE }); + final TabularDataSupport results = new TabularDataSupport( + new TabularType("PhiList", "PhiList", ct, new String[] { "Endpoint" })); + final JsonArray arr = c.getJsonArray("/failure_detector/endpoint_phi_values"); + + for (JsonValue v : arr) { + JsonObject o = (JsonObject) v; + String endpoint = o.getString("endpoint"); + double phi = Double.parseDouble(o.getString("phi")); + + if (phi != Double.MIN_VALUE) { + // returned values are scaled by PHI_FACTOR so that they are on + // the same scale as PhiConvictThreshold + final CompositeData data = new CompositeDataSupport(ct, new String[] { "Endpoint", "PHI" }, + new Object[] { endpoint, phi * PHI_FACTOR }); + results.put(data); + } + } + + return results; + } } diff --git a/src/main/java/org/apache/cassandra/gms/FailureDetectorMBean.java b/src/main/java/org/apache/cassandra/gms/FailureDetectorMBean.java index 45250b4..23fae3a 100644 --- a/src/main/java/org/apache/cassandra/gms/FailureDetectorMBean.java +++ b/src/main/java/org/apache/cassandra/gms/FailureDetectorMBean.java @@ -20,6 +20,9 @@ package org.apache.cassandra.gms; import java.net.UnknownHostException; import java.util.Map; +import javax.management.openmbean.OpenDataException; +import javax.management.openmbean.TabularData; + public interface FailureDetectorMBean { public void dumpInterArrivalTimes(); @@ -37,4 +40,6 @@ public interface FailureDetectorMBean public int getDownEndpointCount(); public int getUpEndpointCount(); + + public TabularData getPhiValues() throws OpenDataException; } From 68ce437b03ad91711b0d9b03bc134e605882b633 Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:33:00 +0000 Subject: [PATCH 06/32] Gossiper: update to c3 compat --- 
src/main/java/org/apache/cassandra/gms/Gossiper.java | 4 ++++ src/main/java/org/apache/cassandra/gms/GossiperMBean.java | 2 ++ 2 files changed, 6 insertions(+) diff --git a/src/main/java/org/apache/cassandra/gms/Gossiper.java b/src/main/java/org/apache/cassandra/gms/Gossiper.java index 5b5f7a4..b1c81e7 100644 --- a/src/main/java/org/apache/cassandra/gms/Gossiper.java +++ b/src/main/java/org/apache/cassandra/gms/Gossiper.java @@ -77,17 +77,20 @@ public class Gossiper implements GossiperMBean { } } + @Override public long getEndpointDowntime(String address) throws UnknownHostException { log(" getEndpointDowntime(String address) throws UnknownHostException"); return c.getLongValue("gossiper/downtime/" + address); } + @Override public int getCurrentGenerationNumber(String address) throws UnknownHostException { log(" getCurrentGenerationNumber(String address) throws UnknownHostException"); return c.getIntValue("gossiper/generation_number/" + address); } + @Override public void unsafeAssassinateEndpoint(String address) throws UnknownHostException { log(" unsafeAssassinateEndpoint(String address) throws UnknownHostException"); @@ -96,6 +99,7 @@ public class Gossiper implements GossiperMBean { c.post("gossiper/assassinate/" + address, queryParams); } + @Override public void assassinateEndpoint(String address) throws UnknownHostException { log(" assassinateEndpoint(String address) throws UnknownHostException"); c.post("gossiper/assassinate/" + address, null); diff --git a/src/main/java/org/apache/cassandra/gms/GossiperMBean.java b/src/main/java/org/apache/cassandra/gms/GossiperMBean.java index 521fd21..c4b244c 100644 --- a/src/main/java/org/apache/cassandra/gms/GossiperMBean.java +++ b/src/main/java/org/apache/cassandra/gms/GossiperMBean.java @@ -27,4 +27,6 @@ public interface GossiperMBean public void unsafeAssassinateEndpoint(String address) throws UnknownHostException; + public void assassinateEndpoint(String address) throws UnknownHostException; + } \ No newline at end 
of file From f4759f05e79706f6774f326772b45a8d275a3684 Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:33:25 +0000 Subject: [PATCH 07/32] EndpointSnitchInfo: update to c3 compat --- .../org/apache/cassandra/locator/EndpointSnitchInfo.java | 9 +++++++++ .../cassandra/locator/EndpointSnitchInfoMBean.java | 9 +++++++++ 2 files changed, 18 insertions(+) diff --git a/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java b/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java index 02b4f15..0cf82bc 100644 --- a/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java +++ b/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java @@ -101,4 +101,13 @@ public class EndpointSnitchInfo implements EndpointSnitchInfoMBean { return c.getStringValue("/snitch/name"); } + @Override + public String getRack() { + return c.getStringValue("/snitch/rack", null, 10000); + } + + @Override + public String getDatacenter() { + return c.getStringValue("/snitch/datacenter", null, 10000); + } } diff --git a/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfoMBean.java b/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfoMBean.java index 84d2499..6de5022 100644 --- a/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfoMBean.java +++ b/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfoMBean.java @@ -38,6 +38,15 @@ public interface EndpointSnitchInfoMBean */ public String getDatacenter(String host) throws UnknownHostException; + /** + * Provides the Rack name depending on the respective snitch used for this node + */ + public String getRack(); + + /** + * Provides the Datacenter name depending on the respective snitch used for this node + */ + public String getDatacenter(); /** * Provides the snitch name of the cluster From 85e1b0754420147fad063cfb03b0ccab7c0d711f Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:36:02 +0000 Subject: [PATCH 08/32] MessagingService: update 
to c3 compat Note: c3 adds configurable size threshold counting of messages sent, dividing into "large"/"small" partitions (+gossiper). Message bulk queries in v3 mbean reflects this. Scylla does not (yet?) have such a threshold divider, so this is highly incomplete and just delegates to old apis that "sort-of" fit. --- .../cassandra/net/MessagingService.java | 73 ++++++++++++++++++- .../cassandra/net/MessagingServiceMBean.java | 56 +++++++++++++- 2 files changed, 126 insertions(+), 3 deletions(-) diff --git a/src/main/java/org/apache/cassandra/net/MessagingService.java b/src/main/java/org/apache/cassandra/net/MessagingService.java index d8d4b1f..a663030 100644 --- a/src/main/java/org/apache/cassandra/net/MessagingService.java +++ b/src/main/java/org/apache/cassandra/net/MessagingService.java @@ -22,9 +22,12 @@ */ package org.apache.cassandra.net; +import static java.util.Collections.emptyMap; + import java.lang.management.ManagementFactory; -import java.net.*; -import java.util.*; +import java.net.UnknownHostException; +import java.util.HashMap; +import java.util.Map; import java.util.Map.Entry; import javax.json.JsonArray; @@ -125,6 +128,7 @@ public final class MessagingService implements MessagingServiceMBean { /** * Pending tasks for Command(Mutations, Read etc) TCP Connections */ + @Override public Map getCommandPendingTasks() { log(" getCommandPendingTasks()"); return c.getMapStringIntegerValue("/messaging_service/messages/pending"); @@ -133,6 +137,7 @@ public final class MessagingService implements MessagingServiceMBean { /** * Completed tasks for Command(Mutations, Read etc) TCP Connections */ + @Override public Map getCommandCompletedTasks() { log("getCommandCompletedTasks()"); Map res = c @@ -143,6 +148,7 @@ public final class MessagingService implements MessagingServiceMBean { /** * Dropped tasks for Command(Mutations, Read etc) TCP Connections */ + @Override public Map getCommandDroppedTasks() { log(" getCommandDroppedTasks()"); return 
c.getMapStringLongValue("/messaging_service/messages/dropped"); @@ -151,6 +157,7 @@ public final class MessagingService implements MessagingServiceMBean { /** * Pending tasks for Response(GOSSIP & RESPONSE) TCP Connections */ + @Override public Map getResponsePendingTasks() { log(" getResponsePendingTasks()"); return c.getMapStringIntegerValue("/messaging_service/messages/respond_pending"); @@ -159,6 +166,7 @@ public final class MessagingService implements MessagingServiceMBean { /** * Completed tasks for Response(GOSSIP & RESPONSE) TCP Connections */ + @Override public Map getResponseCompletedTasks() { log(" getResponseCompletedTasks()"); return c.getMapStringLongValue("/messaging_service/messages/respond_completed"); @@ -167,6 +175,7 @@ public final class MessagingService implements MessagingServiceMBean { /** * dropped message counts for server lifetime */ + @Override public Map getDroppedMessages() { log(" getDroppedMessages()"); Map res = new HashMap(); @@ -181,6 +190,8 @@ public final class MessagingService implements MessagingServiceMBean { /** * dropped message counts since last called */ + @SuppressWarnings("deprecation") + @Override public Map getRecentlyDroppedMessages() { log(" getRecentlyDroppedMessages()"); Map map = new HashMap(); @@ -192,6 +203,7 @@ public final class MessagingService implements MessagingServiceMBean { /** * Total number of timeouts happened on this node */ + @Override public long getTotalTimeouts() { log(" getTotalTimeouts()"); Map timeouts = getTimeoutsPerHost(); @@ -205,6 +217,7 @@ public final class MessagingService implements MessagingServiceMBean { /** * Number of timeouts per host */ + @Override public Map getTimeoutsPerHost() { log(" getTimeoutsPerHost()"); return c.getMapStringLongValue("/messaging_service/messages/timeout"); @@ -213,6 +226,7 @@ public final class MessagingService implements MessagingServiceMBean { /** * Number of timeouts since last check. 
*/ + @Override public long getRecentTotalTimouts() { log(" getRecentTotalTimouts()"); long timeoutCount = getTotalTimeouts(); @@ -224,6 +238,7 @@ public final class MessagingService implements MessagingServiceMBean { /** * Number of timeouts since last check per host. */ + @Override public Map getRecentTimeoutsPerHost() { log(" getRecentTimeoutsPerHost()"); Map timeouts = getTimeoutsPerHost(); @@ -238,9 +253,63 @@ public final class MessagingService implements MessagingServiceMBean { return result; } + @Override public int getVersion(String address) throws UnknownHostException { log(" getVersion(String address) throws UnknownHostException"); return c.getIntValue(""); } + @Override + public Map getLargeMessagePendingTasks() { + // TODO: implement for realsies + return getCommandPendingTasks(); + } + + @Override + public Map getLargeMessageCompletedTasks() { + // TODO: implement for realsies + return getCommandCompletedTasks(); + } + + @Override + public Map getLargeMessageDroppedTasks() { + // TODO: implement for realsies + return getCommandDroppedTasks(); + } + + @Override + public Map getSmallMessagePendingTasks() { + // TODO: implement for realsies + return getResponsePendingTasks(); + } + + @Override + public Map getSmallMessageCompletedTasks() { + // TODO: implement for realsies + return getResponseCompletedTasks(); + } + + @Override + public Map getSmallMessageDroppedTasks() { + // TODO: implement for realsies + return emptyMap(); + } + + @Override + public Map getGossipMessagePendingTasks() { + // TODO: implement for realsies + return emptyMap(); + } + + @Override + public Map getGossipMessageCompletedTasks() { + // TODO: implement for realsies + return emptyMap(); + } + + @Override + public Map getGossipMessageDroppedTasks() { + // TODO: implement for realsies + return emptyMap(); + } } diff --git a/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java b/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java index 3fbd5c1..5a508e0 
100644 --- a/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java +++ b/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java @@ -15,8 +15,17 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + +/* + * Copyright 2015 Cloudius Systems + * + * Modified by Cloudius Systems + */ + package org.apache.cassandra.net; + + import java.net.UnknownHostException; import java.util.Map; @@ -25,6 +34,51 @@ import java.util.Map; * Command/Response - Pending/Completed Tasks */ public interface MessagingServiceMBean { + /** + * Pending tasks for large message TCP Connections + */ + public Map getLargeMessagePendingTasks(); + + /** + * Completed tasks for large message TCP Connections + */ + public Map getLargeMessageCompletedTasks(); + + /** + * Dropped tasks for large message TCP Connections + */ + public Map getLargeMessageDroppedTasks(); + + /** + * Pending tasks for small message TCP Connections + */ + public Map getSmallMessagePendingTasks(); + + /** + * Completed tasks for small message TCP Connections + */ + public Map getSmallMessageCompletedTasks(); + + /** + * Dropped tasks for small message TCP Connections + */ + public Map getSmallMessageDroppedTasks(); + + /** + * Pending tasks for gossip message TCP Connections + */ + public Map getGossipMessagePendingTasks(); + + /** + * Completed tasks for gossip message TCP Connections + */ + public Map getGossipMessageCompletedTasks(); + + /** + * Dropped tasks for gossip message TCP Connections + */ + public Map getGossipMessageDroppedTasks(); + /** * Pending tasks for Command(Mutations, Read etc) TCP Connections */ @@ -79,6 +133,6 @@ public interface MessagingServiceMBean { * Number of timeouts since last check per host. 
*/ public Map getRecentTimeoutsPerHost(); - + public int getVersion(String address) throws UnknownHostException; } From 3a4adcb67639aea9261b0bfb2cd60c8401ba6383 Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:38:55 +0000 Subject: [PATCH 09/32] CacheService: update to c3 compat --- .../cassandra/service/CacheService.java | 150 +++--------------- .../cassandra/service/CacheServiceMBean.java | 96 ++--------- 2 files changed, 28 insertions(+), 218 deletions(-) diff --git a/src/main/java/org/apache/cassandra/service/CacheService.java b/src/main/java/org/apache/cassandra/service/CacheService.java index 6655e1e..ceea0ac 100644 --- a/src/main/java/org/apache/cassandra/service/CacheService.java +++ b/src/main/java/org/apache/cassandra/service/CacheService.java @@ -70,11 +70,13 @@ public class CacheService implements CacheServiceMBean { counterCache = new CacheMetrics("CounterCache", null); } + @Override public int getRowCacheSavePeriodInSeconds() { log(" getRowCacheSavePeriodInSeconds()"); return c.getIntValue("cache_service/row_cache_save_period"); } + @Override public void setRowCacheSavePeriodInSeconds(int rcspis) { log(" setRowCacheSavePeriodInSeconds(int rcspis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -82,11 +84,13 @@ public class CacheService implements CacheServiceMBean { c.post("cache_service/row_cache_save_period", queryParams); } + @Override public int getKeyCacheSavePeriodInSeconds() { log(" getKeyCacheSavePeriodInSeconds()"); return c.getIntValue("cache_service/key_cache_save_period"); } + @Override public void setKeyCacheSavePeriodInSeconds(int kcspis) { log(" setKeyCacheSavePeriodInSeconds(int kcspis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -94,11 +98,13 @@ public class CacheService implements CacheServiceMBean { c.post("cache_service/key_cache_save_period", queryParams); } + @Override public int getCounterCacheSavePeriodInSeconds() { log(" getCounterCacheSavePeriodInSeconds()"); return 
c.getIntValue("cache_service/counter_cache_save_period"); } + @Override public void setCounterCacheSavePeriodInSeconds(int ccspis) { log(" setCounterCacheSavePeriodInSeconds(int ccspis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -106,11 +112,13 @@ public class CacheService implements CacheServiceMBean { c.post("cache_service/counter_cache_save_period", queryParams); } + @Override public int getRowCacheKeysToSave() { log(" getRowCacheKeysToSave()"); return c.getIntValue("cache_service/row_cache_keys_to_save"); } + @Override public void setRowCacheKeysToSave(int rckts) { log(" setRowCacheKeysToSave(int rckts)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -118,11 +126,13 @@ public class CacheService implements CacheServiceMBean { c.post("cache_service/row_cache_keys_to_save", queryParams); } + @Override public int getKeyCacheKeysToSave() { log(" getKeyCacheKeysToSave()"); return c.getIntValue("cache_service/key_cache_keys_to_save"); } + @Override public void setKeyCacheKeysToSave(int kckts) { log(" setKeyCacheKeysToSave(int kckts)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -130,11 +140,13 @@ public class CacheService implements CacheServiceMBean { c.post("cache_service/key_cache_keys_to_save", queryParams); } + @Override public int getCounterCacheKeysToSave() { log(" getCounterCacheKeysToSave()"); return c.getIntValue("cache_service/counter_cache_keys_to_save"); } + @Override public void setCounterCacheKeysToSave(int cckts) { log(" setCounterCacheKeysToSave(int cckts)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -145,6 +157,7 @@ public class CacheService implements CacheServiceMBean { /** * invalidate the key cache; for use after invalidating row cache */ + @Override public void invalidateKeyCache() { log(" invalidateKeyCache()"); c.post("cache_service/invalidate_key_cache"); @@ -153,16 +166,19 @@ public class CacheService implements CacheServiceMBean { /** * invalidate the row cache; for use after 
bulk loading via BinaryMemtable */ + @Override public void invalidateRowCache() { log(" invalidateRowCache()"); c.post("cache_service/invalidate_row_cache"); } + @Override public void invalidateCounterCache() { log(" invalidateCounterCache()"); c.post("cache_service/invalidate_counter_cache"); } + @Override public void setRowCacheCapacityInMB(long capacity) { log(" setRowCacheCapacityInMB(long capacity)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -170,6 +186,7 @@ public class CacheService implements CacheServiceMBean { c.post("cache_service/row_cache_capacity", queryParams); } + @Override public void setKeyCacheCapacityInMB(long capacity) { log(" setKeyCacheCapacityInMB(long capacity)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -177,6 +194,7 @@ public class CacheService implements CacheServiceMBean { c.post("cache_service/key_cache_capacity", queryParams); } + @Override public void setCounterCacheCapacityInMB(long capacity) { log(" setCounterCacheCapacityInMB(long capacity)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -195,139 +213,9 @@ public class CacheService implements CacheServiceMBean { * and the thread is interrupted, either before or during the * activity. 
*/ + @Override public void saveCaches() throws ExecutionException, InterruptedException { log(" saveCaches() throws ExecutionException, InterruptedException"); c.post("cache_service/save_caches"); } - - // - // remaining methods are provided for backwards compatibility; modern - // clients should use CacheMetrics instead - // - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#hits - */ - @Deprecated - public long getKeyCacheHits() { - log(" getKeyCacheHits()"); - return keyCache.hits.count(); - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#hits - */ - @Deprecated - public long getRowCacheHits() { - log(" getRowCacheHits()"); - return rowCache.hits.count(); - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#requests - */ - @Deprecated - public long getKeyCacheRequests() { - log(" getKeyCacheRequests()"); - return keyCache.requests.count(); - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#requests - */ - @Deprecated - public long getRowCacheRequests() { - log(" getRowCacheRequests()"); - return rowCache.requests.count(); - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#hitRate - */ - @Deprecated - public double getKeyCacheRecentHitRate() { - log(" getKeyCacheRecentHitRate()"); - return keyCache.getRecentHitRate(); - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#hitRate - */ - @Deprecated - public double getRowCacheRecentHitRate() { - log(" getRowCacheRecentHitRate()"); - return rowCache.getRecentHitRate(); - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#capacity - */ - @Deprecated - public long getRowCacheCapacityInMB() { - log(" getRowCacheCapacityInMB()"); - return getRowCacheCapacityInBytes() / 1024 / 1024; - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#capacity - */ - @Deprecated - public long getRowCacheCapacityInBytes() { - log(" getRowCacheCapacityInBytes()"); - return rowCache.capacity.value(); - } - - /** - * @see 
org.apache.cassandra.metrics.CacheMetrics#capacity - */ - @Deprecated - public long getKeyCacheCapacityInMB() { - log(" getKeyCacheCapacityInMB()"); - return getKeyCacheCapacityInBytes() / 1024 / 1024; - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#capacity - */ - @Deprecated - public long getKeyCacheCapacityInBytes() { - log(" getKeyCacheCapacityInBytes()"); - return keyCache.capacity.value(); - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#size - */ - @Deprecated - public long getRowCacheSize() { - log(" getRowCacheSize()"); - return rowCache.size.value(); - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#entries - */ - @Deprecated - public long getRowCacheEntries() { - log(" getRowCacheEntries()"); - return rowCache.size.value(); - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#size - */ - @Deprecated - public long getKeyCacheSize() { - log(" getKeyCacheSize()"); - return keyCache.size.value(); - } - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#entries - */ - @Deprecated - public long getKeyCacheEntries() { - log(" getKeyCacheEntries()"); - return keyCache.size.value(); - } } diff --git a/src/main/java/org/apache/cassandra/service/CacheServiceMBean.java b/src/main/java/org/apache/cassandra/service/CacheServiceMBean.java index 44a6119..a28d2d1 100644 --- a/src/main/java/org/apache/cassandra/service/CacheServiceMBean.java +++ b/src/main/java/org/apache/cassandra/service/CacheServiceMBean.java @@ -15,6 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + +/* + * Copyright 2015 Cloudius Systems + * + * Modified by Cloudius Systems + */ + + + package org.apache.cassandra.service; import java.util.concurrent.ExecutionException; @@ -31,7 +40,6 @@ public interface CacheServiceMBean public void setCounterCacheSavePeriodInSeconds(int ccspis); public int getRowCacheKeysToSave(); - public void setRowCacheKeysToSave(int rckts); public int getKeyCacheKeysToSave(); @@ -65,90 +73,4 @@ public interface CacheServiceMBean * @throws InterruptedException when a thread is waiting, sleeping, or otherwise occupied, and the thread is interrupted, either before or during the activity. */ public void saveCaches() throws ExecutionException, InterruptedException; - - // - // remaining methods are provided for backwards compatibility; modern clients should use CacheMetrics instead - // - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#hits - */ - @Deprecated - public long getKeyCacheHits(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#hits - */ - @Deprecated - public long getRowCacheHits(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#requests - */ - @Deprecated - public long getKeyCacheRequests(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#requests - */ - @Deprecated - public long getRowCacheRequests(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#hitRate - */ - @Deprecated - public double getKeyCacheRecentHitRate(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#hitRate - */ - @Deprecated - public double getRowCacheRecentHitRate(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#capacity - */ - @Deprecated - public long getRowCacheCapacityInMB(); - /** - * @see org.apache.cassandra.metrics.CacheMetrics#capacity - */ - @Deprecated - public long getRowCacheCapacityInBytes(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#capacity - */ - @Deprecated - public long getKeyCacheCapacityInMB(); - /** - * @see 
org.apache.cassandra.metrics.CacheMetrics#capacity - */ - @Deprecated - public long getKeyCacheCapacityInBytes(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#size - */ - @Deprecated - public long getRowCacheSize(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#entries - */ - @Deprecated - public long getRowCacheEntries(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#size - */ - @Deprecated - public long getKeyCacheSize(); - - /** - * @see org.apache.cassandra.metrics.CacheMetrics#entries - */ - @Deprecated - public long getKeyCacheEntries(); } From de28e685322ad2830e0789b50ede05c4d90413bd Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:39:48 +0000 Subject: [PATCH 10/32] GCInspector: Add SuppressWarnings("restriction") --- src/main/java/org/apache/cassandra/service/GCInspector.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/main/java/org/apache/cassandra/service/GCInspector.java b/src/main/java/org/apache/cassandra/service/GCInspector.java index 3e7e564..9b50316 100644 --- a/src/main/java/org/apache/cassandra/service/GCInspector.java +++ b/src/main/java/org/apache/cassandra/service/GCInspector.java @@ -32,6 +32,7 @@ import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; + import javax.management.MBeanServer; import javax.management.Notification; import javax.management.NotificationListener; @@ -44,6 +45,7 @@ import org.slf4j.LoggerFactory; import com.sun.management.GarbageCollectionNotificationInfo; import com.sun.management.GcInfo; +@SuppressWarnings("restriction") public class GCInspector implements NotificationListener, GCInspectorMXBean { public static final String MBEAN_NAME = "org.apache.cassandra.service:type=GCInspector"; From b4e483b1795b9b688bf619b5ea7e8be7683300bc Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:40:59 +0000 Subject: [PATCH 11/32] StorageProxy: update to c3 
compat --- .../cassandra/service/StorageProxy.java | 173 ++++++------------ .../cassandra/service/StorageProxyMBean.java | 73 ++------ 2 files changed, 65 insertions(+), 181 deletions(-) diff --git a/src/main/java/org/apache/cassandra/service/StorageProxy.java b/src/main/java/org/apache/cassandra/service/StorageProxy.java index 29e31a3..04e41e2 100644 --- a/src/main/java/org/apache/cassandra/service/StorageProxy.java +++ b/src/main/java/org/apache/cassandra/service/StorageProxy.java @@ -23,8 +23,12 @@ */ package org.apache.cassandra.service; +import static java.util.Collections.emptySet; + import java.lang.management.ManagementFactory; -import java.util.*; +import java.util.List; +import java.util.Map; +import java.util.Set; import javax.management.MBeanServer; import javax.management.ObjectName; @@ -33,8 +37,6 @@ import javax.ws.rs.core.MultivaluedMap; import com.scylladb.jmx.api.APIClient; -import org.apache.cassandra.metrics.*; - public class StorageProxy implements StorageProxyMBean { public static final String MBEAN_NAME = "org.apache.cassandra.db:type=StorageProxy"; private static final java.util.logging.Logger logger = java.util.logging.Logger @@ -54,19 +56,6 @@ public class StorageProxy implements StorageProxyMBean { public static final String UNREACHABLE = "UNREACHABLE"; - private static final ClientRequestMetrics readMetrics = new ClientRequestMetrics( - "storage_proxy/metrics/read", "Read"); - private static final ClientRequestMetrics rangeMetrics = new ClientRequestMetrics( - "storage_proxy/metrics/range", "RangeSlice"); - private static final ClientRequestMetrics writeMetrics = new ClientRequestMetrics( - "storage_proxy/metrics/write", "Write"); - private static final CASClientRequestMetrics casWriteMetrics = new CASClientRequestMetrics( - "storage_proxy/metrics/cas_write", "CASWrite"); - private static final CASClientRequestMetrics casReadMetrics = new CASClientRequestMetrics( - "storage_proxy/metrics/cas_read", "CASRead"); - - private static final 
double CONCURRENT_SUBREQUESTS_MARGIN = 0.10; - private StorageProxy() { } @@ -80,127 +69,26 @@ public class StorageProxy implements StorageProxyMBean { } - /** - * @see org.apache.cassandra.metrics.LatencyMetrics#lastOpCount - */ - @Deprecated - public long getReadOperations() { - log(" getReadOperations()"); - return readMetrics.latency.count(); - } - - /** - * @see org.apache.cassandra.metrics.LatencyMetrics#totalLatencyHistogram - */ - @Deprecated - public long getTotalReadLatencyMicros() { - log(" getTotalReadLatencyMicros()"); - return readMetrics.totalLatency.count(); - } - - /** - * @see org.apache.cassandra.metrics.LatencyMetrics#recentLatencyHistogram - */ - @Deprecated - public double getRecentReadLatencyMicros() { - log(" getRecentReadLatencyMicros()"); - return readMetrics.getRecentLatency(); - } - - /** - * @see org.apache.cassandra.metrics.LatencyMetrics#totalLatencyHistogram - */ - @Deprecated - public long[] getTotalReadLatencyHistogramMicros() { - log(" getTotalReadLatencyHistogramMicros()"); - return readMetrics.totalLatencyHistogram.getBuckets(false); - } - - /** - * @see org.apache.cassandra.metrics.LatencyMetrics#recentLatencyHistogram - */ - @Deprecated - public long[] getRecentReadLatencyHistogramMicros() { - log(" getRecentReadLatencyHistogramMicros()"); - return readMetrics.getRecentLatencyHistogram(); - } - - @Deprecated - public long getRangeOperations() { - log(" getRangeOperations()"); - return rangeMetrics.latency.count(); - } - - @Deprecated - public long getTotalRangeLatencyMicros() { - log(" getTotalRangeLatencyMicros()"); - return rangeMetrics.totalLatency.count(); - } - - @Deprecated - public double getRecentRangeLatencyMicros() { - log(" getRecentRangeLatencyMicros()"); - return rangeMetrics.getRecentLatency(); - } - - @Deprecated - public long[] getTotalRangeLatencyHistogramMicros() { - log(" getTotalRangeLatencyHistogramMicros()"); - return rangeMetrics.totalLatencyHistogram.getBuckets(false); - } - - @Deprecated - public 
long[] getRecentRangeLatencyHistogramMicros() { - log(" getRecentRangeLatencyHistogramMicros()"); - return rangeMetrics.getRecentLatencyHistogram(); - } - - @Deprecated - public long getWriteOperations() { - log(" getWriteOperations()"); - return writeMetrics.latency.count(); - } - - @Deprecated - public long getTotalWriteLatencyMicros() { - log(" getTotalWriteLatencyMicros()"); - return writeMetrics.totalLatency.count(); - } - - @Deprecated - public double getRecentWriteLatencyMicros() { - log(" getRecentWriteLatencyMicros()"); - return writeMetrics.getRecentLatency(); - } - - @Deprecated - public long[] getTotalWriteLatencyHistogramMicros() { - log(" getTotalWriteLatencyHistogramMicros()"); - return writeMetrics.totalLatencyHistogram.getBuckets(false); - } - - @Deprecated - public long[] getRecentWriteLatencyHistogramMicros() { - log(" getRecentWriteLatencyHistogramMicros()"); - return writeMetrics.getRecentLatencyHistogram(); - } - + @Override public long getTotalHints() { log(" getTotalHints()"); return c.getLongValue("storage_proxy/total_hints"); } + @Override public boolean getHintedHandoffEnabled() { log(" getHintedHandoffEnabled()"); return c.getBooleanValue("storage_proxy/hinted_handoff_enabled"); } + @Override public Set getHintedHandoffEnabledByDC() { log(" getHintedHandoffEnabledByDC()"); return c.getSetStringValue( "storage_proxy/hinted_handoff_enabled_by_dc"); } + @Override public void setHintedHandoffEnabled(boolean b) { log(" setHintedHandoffEnabled(boolean b)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -208,6 +96,7 @@ public class StorageProxy implements StorageProxyMBean { c.post("storage_proxy/hinted_handoff_enabled", queryParams); } + @Override public void setHintedHandoffEnabledByDCList(String dcs) { log(" setHintedHandoffEnabledByDCList(String dcs)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -215,11 +104,13 @@ public class StorageProxy implements StorageProxyMBean { 
c.post("storage_proxy/hinted_handoff_enabled_by_dc_list"); } + @Override public int getMaxHintWindow() { log(" getMaxHintWindow()"); return c.getIntValue("storage_proxy/max_hint_window"); } + @Override public void setMaxHintWindow(int ms) { log(" setMaxHintWindow(int ms)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -227,11 +118,13 @@ public class StorageProxy implements StorageProxyMBean { c.post("storage_proxy/max_hint_window", queryParams); } + @Override public int getMaxHintsInProgress() { log(" getMaxHintsInProgress()"); return c.getIntValue("storage_proxy/max_hints_in_progress"); } + @Override public void setMaxHintsInProgress(int qs) { log(" setMaxHintsInProgress(int qs)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -239,16 +132,19 @@ public class StorageProxy implements StorageProxyMBean { c.post("storage_proxy/max_hints_in_progress", queryParams); } + @Override public int getHintsInProgress() { log(" getHintsInProgress()"); return c.getIntValue("storage_proxy/hints_in_progress"); } + @Override public Long getRpcTimeout() { log(" getRpcTimeout()"); return c.getLongValue("storage_proxy/rpc_timeout"); } + @Override public void setRpcTimeout(Long timeoutInMillis) { log(" setRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -256,11 +152,13 @@ public class StorageProxy implements StorageProxyMBean { c.post("storage_proxy/rpc_timeout", queryParams); } + @Override public Long getReadRpcTimeout() { log(" getReadRpcTimeout()"); return c.getLongValue("storage_proxy/read_rpc_timeout"); } + @Override public void setReadRpcTimeout(Long timeoutInMillis) { log(" setReadRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -268,11 +166,13 @@ public class StorageProxy implements StorageProxyMBean { c.post("storage_proxy/read_rpc_timeout", queryParams); } + @Override public Long getWriteRpcTimeout() { log(" getWriteRpcTimeout()"); return 
c.getLongValue("storage_proxy/write_rpc_timeout"); } + @Override public void setWriteRpcTimeout(Long timeoutInMillis) { log(" setWriteRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -280,11 +180,13 @@ public class StorageProxy implements StorageProxyMBean { c.post("storage_proxy/write_rpc_timeout", queryParams); } + @Override public Long getCounterWriteRpcTimeout() { log(" getCounterWriteRpcTimeout()"); return c.getLongValue("storage_proxy/counter_write_rpc_timeout"); } + @Override public void setCounterWriteRpcTimeout(Long timeoutInMillis) { log(" setCounterWriteRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -292,11 +194,13 @@ public class StorageProxy implements StorageProxyMBean { c.post("storage_proxy/counter_write_rpc_timeout", queryParams); } + @Override public Long getCasContentionTimeout() { log(" getCasContentionTimeout()"); return c.getLongValue("storage_proxy/cas_contention_timeout"); } + @Override public void setCasContentionTimeout(Long timeoutInMillis) { log(" setCasContentionTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -304,11 +208,13 @@ public class StorageProxy implements StorageProxyMBean { c.post("storage_proxy/cas_contention_timeout", queryParams); } + @Override public Long getRangeRpcTimeout() { log(" getRangeRpcTimeout()"); return c.getLongValue("storage_proxy/range_rpc_timeout"); } + @Override public void setRangeRpcTimeout(Long timeoutInMillis) { log(" setRangeRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -316,11 +222,13 @@ public class StorageProxy implements StorageProxyMBean { c.post("storage_proxy/range_rpc_timeout", queryParams); } + @Override public Long getTruncateRpcTimeout() { log(" getTruncateRpcTimeout()"); return c.getLongValue("storage_proxy/truncate_rpc_timeout"); } + @Override public void setTruncateRpcTimeout(Long timeoutInMillis) { log(" 
setTruncateRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -328,27 +236,32 @@ public class StorageProxy implements StorageProxyMBean { c.post("storage_proxy/truncate_rpc_timeout", queryParams); } + @Override public void reloadTriggerClasses() { log(" reloadTriggerClasses()"); c.post("storage_proxy/reload_trigger_classes"); } + @Override public long getReadRepairAttempted() { log(" getReadRepairAttempted()"); return c.getLongValue("storage_proxy/read_repair_attempted"); } + @Override public long getReadRepairRepairedBlocking() { log(" getReadRepairRepairedBlocking()"); return c.getLongValue("storage_proxy/read_repair_repaired_blocking"); } + @Override public long getReadRepairRepairedBackground() { log(" getReadRepairRepairedBackground()"); return c.getLongValue("storage_proxy/read_repair_repaired_background"); } /** Returns each live node's schema version */ + @Override public Map> getSchemaVersions() { log(" getSchemaVersions()"); return c.getMapStringListStrValue("storage_proxy/schema_versions"); @@ -369,4 +282,22 @@ public class StorageProxy implements StorageProxyMBean { return c.getLongValue(""); } + @Override + public void enableHintsForDC(String dc) { + // TODO if/when scylla uses hints + log(" enableHintsForDC()"); + } + + @Override + public void disableHintsForDC(String dc) { + // TODO if/when scylla uses hints + log(" disableHintsForDC()"); + } + + @Override + public Set getHintedHandoffDisabledDCs() { + // TODO if/when scylla uses hints + log(" getHintedHandoffDisabledDCs()"); + return emptySet(); + } } diff --git a/src/main/java/org/apache/cassandra/service/StorageProxyMBean.java b/src/main/java/org/apache/cassandra/service/StorageProxyMBean.java index 07efe03..fd086fa 100644 --- a/src/main/java/org/apache/cassandra/service/StorageProxyMBean.java +++ b/src/main/java/org/apache/cassandra/service/StorageProxyMBean.java @@ -15,6 +15,13 @@ * See the License for the specific language governing permissions and * 
limitations under the License. */ + +/* + * Copyright 2015 Cloudius Systems + * + * Modified by Cloudius Systems + */ + package org.apache.cassandra.service; import java.util.List; @@ -22,66 +29,6 @@ import java.util.Map; import java.util.Set; public interface StorageProxyMBean { - /** - * @see org.apache.cassandra.metrics.LatencyMetrics#lastOpCount - */ - @Deprecated - public long getReadOperations(); - - /** - * @see org.apache.cassandra.metrics.LatencyMetrics#totalLatencyHistogram - */ - @Deprecated - public long getTotalReadLatencyMicros(); - - /** - * @see org.apache.cassandra.metrics.LatencyMetrics#recentLatencyHistogram - */ - @Deprecated - public double getRecentReadLatencyMicros(); - - /** - * @see org.apache.cassandra.metrics.LatencyMetrics#totalLatencyHistogram - */ - @Deprecated - public long[] getTotalReadLatencyHistogramMicros(); - - /** - * @see org.apache.cassandra.metrics.LatencyMetrics#recentLatencyHistogram - */ - @Deprecated - public long[] getRecentReadLatencyHistogramMicros(); - - @Deprecated - public long getRangeOperations(); - - @Deprecated - public long getTotalRangeLatencyMicros(); - - @Deprecated - public double getRecentRangeLatencyMicros(); - - @Deprecated - public long[] getTotalRangeLatencyHistogramMicros(); - - @Deprecated - public long[] getRecentRangeLatencyHistogramMicros(); - - @Deprecated - public long getWriteOperations(); - - @Deprecated - public long getTotalWriteLatencyMicros(); - - @Deprecated - public double getRecentWriteLatencyMicros(); - - @Deprecated - public long[] getTotalWriteLatencyHistogramMicros(); - - @Deprecated - public long[] getRecentWriteLatencyHistogramMicros(); - public long getTotalHints(); public boolean getHintedHandoffEnabled(); @@ -92,6 +39,12 @@ public interface StorageProxyMBean { public void setHintedHandoffEnabledByDCList(String dcs); + public void enableHintsForDC(String dc); + + public void disableHintsForDC(String dc); + + public Set getHintedHandoffDisabledDCs(); + public int 
getMaxHintWindow(); public void setMaxHintWindow(int ms); From 3e146845b42cb6bafbcff336d70e3c846d5b1528 Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Wed, 17 Aug 2016 08:43:28 +0000 Subject: [PATCH 12/32] StorageService: update to c3 compat Note: some calls that are not (yet) applicable to scylla are unimplemented. --- .../cassandra/service/StorageService.java | 226 +++++++++++++++--- .../service/StorageServiceMBean.java | 176 ++++++++------ 2 files changed, 296 insertions(+), 106 deletions(-) diff --git a/src/main/java/org/apache/cassandra/service/StorageService.java b/src/main/java/org/apache/cassandra/service/StorageService.java index effff96..a10880c 100644 --- a/src/main/java/org/apache/cassandra/service/StorageService.java +++ b/src/main/java/org/apache/cassandra/service/StorageService.java @@ -22,21 +22,33 @@ */ package org.apache.cassandra.service; -import java.io.*; +import java.io.IOException; import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; -import java.util.*; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; import java.util.Map.Entry; -import java.util.concurrent.*; +import java.util.Set; +import java.util.Timer; +import java.util.TimerTask; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; -import javax.json.Json; import javax.json.JsonArray; import javax.json.JsonObject; -import javax.json.JsonWriter; -import javax.management.*; +import javax.management.MBeanServer; +import javax.management.Notification; +import javax.management.NotificationBroadcasterSupport; +import javax.management.ObjectName; import javax.management.openmbean.TabularData; import javax.ws.rs.core.MultivaluedHashMap; import 
javax.ws.rs.core.MultivaluedMap; @@ -45,11 +57,9 @@ import org.apache.cassandra.metrics.StorageMetrics; import org.apache.cassandra.repair.RepairParallelism; import org.apache.cassandra.streaming.StreamManager; -import com.scylladb.jmx.api.APIClient; -import com.scylladb.jmx.api.APIConfig; -import com.scylladb.jmx.utils.FileUtils; - import com.google.common.base.Joiner; +import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.utils.FileUtils; /** * This abstraction contains the token/identifier of this node on the identifier @@ -63,6 +73,7 @@ public class StorageService extends NotificationBroadcasterSupport private APIClient c = new APIClient(); private static Timer timer = new Timer("Storage Service Repair"); + @SuppressWarnings("unused") private StorageMetrics metrics = new StorageMetrics(); public static final StorageService instance = new StorageService(); @@ -104,6 +115,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return set of IP addresses, as Strings */ + @Override public List getLiveNodes() { log(" getLiveNodes()"); return c.getListStrValue("/gossiper/endpoint/live"); @@ -115,6 +127,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return set of IP addresses, as Strings */ + @Override public List getUnreachableNodes() { log(" getUnreachableNodes()"); return c.getListStrValue("/gossiper/endpoint/down"); @@ -125,6 +138,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return set of IP addresses, as Strings */ + @Override public List getJoiningNodes() { log(" getJoiningNodes()"); return c.getListStrValue("/storage_service/nodes/joining"); @@ -135,6 +149,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return set of IP addresses, as Strings */ + @Override public List getLeavingNodes() { log(" getLeavingNodes()"); return c.getListStrValue("/storage_service/nodes/leaving"); @@ -145,6 +160,7 @@ public class StorageService extends 
NotificationBroadcasterSupport * * @return set of IP addresses, as Strings */ + @Override public List getMovingNodes() { log(" getMovingNodes()"); return c.getListStrValue("/storage_service/nodes/moving"); @@ -155,6 +171,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return a collection of tokens formatted as strings */ + @Override public List getTokens() { log(" getTokens()"); try { @@ -173,6 +190,7 @@ public class StorageService extends NotificationBroadcasterSupport * string representation of an node * @return a collection of tokens formatted as strings */ + @Override public List getTokens(String endpoint) throws UnknownHostException { log(" getTokens(String endpoint) throws UnknownHostException"); return c.getListStrValue("/storage_service/tokens/" + endpoint); @@ -183,6 +201,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return A string representation of the Cassandra version. */ + @Override public String getReleaseVersion() { log(" getReleaseVersion()"); return c.getStringValue("/storage_service/release_version"); @@ -193,6 +212,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return A string representation of the Schema version. 
*/ + @Override public String getSchemaVersion() { log(" getSchemaVersion()"); return c.getStringValue("/storage_service/schema_version"); @@ -203,6 +223,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return String array of all locations */ + @Override public String[] getAllDataFileLocations() { log(" getAllDataFileLocations()"); return c.getStringArrValue("/storage_service/data_file/locations"); @@ -213,6 +234,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return a string path */ + @Override public String getCommitLogLocation() { log(" getCommitLogLocation()"); return c.getStringValue("/storage_service/commitlog"); @@ -223,6 +245,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return a string path */ + @Override public String getSavedCachesLocation() { log(" getSavedCachesLocation()"); return c.getStringValue("/storage_service/saved_caches/location"); @@ -234,6 +257,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return mapping of ranges to end points */ + @Override public Map, List> getRangeToEndpointMap( String keyspace) { log(" getRangeToEndpointMap(String keyspace)"); @@ -246,6 +270,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return mapping of ranges to rpc addresses */ + @Override public Map, List> getRangeToRpcaddressMap( String keyspace) { log(" getRangeToRpcaddressMap(String keyspace)"); @@ -265,6 +290,7 @@ public class StorageService extends NotificationBroadcasterSupport * @return a List of TokenRange(s) converted to String for the given * keyspace */ + @Override public List describeRingJMX(String keyspace) throws IOException { log(" describeRingJMX(String keyspace) throws IOException"); JsonArray arr = c.getJsonArray("/storage_service/describe_ring/" + keyspace); @@ -325,6 +351,7 @@ public class StorageService extends NotificationBroadcasterSupport * the keyspace to get the pending range map for. 
* @return a map of pending ranges to endpoints */ + @Override public Map, List> getPendingRangeToEndpointMap( String keyspace) { log(" getPendingRangeToEndpointMap(String keyspace)"); @@ -337,6 +364,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return a map of tokens to endpoints in ascending order */ + @Override public Map getTokenToEndpointMap() { log(" getTokenToEndpointMap()"); Map mapInetAddress = c.getMapStrValue("/storage_service/tokens_endpoint"); @@ -352,6 +380,7 @@ public class StorageService extends NotificationBroadcasterSupport } /** Retrieve this hosts unique ID */ + @Override public String getLocalHostId() { log(" getLocalHostId()"); return c.getStringValue("/storage_service/hostid/local"); @@ -365,6 +394,7 @@ public class StorageService extends NotificationBroadcasterSupport return getHostIdToAddressMap().get(getLocalHostId()); } /** Retrieve the mapping of endpoint to host ID */ + @Override public Map getHostIdMap() { log(" getHostIdMap()"); return c.getMapStrValue("/storage_service/host_id"); @@ -388,12 +418,14 @@ public class StorageService extends NotificationBroadcasterSupport } /** Human-readable load value */ + @Override public String getLoadString() { log(" getLoadString()"); return FileUtils.stringifyFileSize(getLoad()); } /** Human-readable load value. Keys are IP addresses. 
*/ + @Override public Map getLoadMap() { log(" getLoadMap()"); Map load = getLoadMapAsDouble(); @@ -415,6 +447,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return generation number */ + @Override public int getCurrentGenerationNumber() { log(" getCurrentGenerationNumber()"); return c.getIntValue("/storage_service/generation_number"); @@ -432,6 +465,7 @@ public class StorageService extends NotificationBroadcasterSupport * - key for which we need to find the endpoint return value - * the endpoint responsible for this key */ + @Override public List getNaturalEndpoints(String keyspaceName, String cf, String key) { log(" getNaturalEndpoints(String keyspaceName, String cf, String key)"); @@ -443,6 +477,7 @@ public class StorageService extends NotificationBroadcasterSupport queryParams); } + @Override public List getNaturalEndpoints(String keyspaceName, ByteBuffer key) { log(" getNaturalEndpoints(String keyspaceName, ByteBuffer key)"); @@ -458,6 +493,7 @@ public class StorageService extends NotificationBroadcasterSupport * @param keyspaceNames * the name of the keyspaces to snapshot; empty means "all." */ + @Override public void takeSnapshot(String tag, String... keyspaceNames) throws IOException { log(" takeSnapshot(String tag, String... keyspaceNames) throws IOException"); @@ -479,6 +515,7 @@ public class StorageService extends NotificationBroadcasterSupport * @param tag * the tag given to the snapshot; may not be null or empty */ + @Override public void takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException { log(" takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException"); @@ -499,6 +536,7 @@ public class StorageService extends NotificationBroadcasterSupport * Remove the snapshot with the given name from the given keyspaces. If no * tag is specified we will remove all snapshots. */ + @Override public void clearSnapshot(String tag, String... 
keyspaceNames) throws IOException { log(" clearSnapshot(String tag, String... keyspaceNames) throws IOException"); @@ -514,6 +552,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return A map of snapshotName to all its details in Tabular form. */ + @Override public Map getSnapshotDetails() { log(" getSnapshotDetails()"); return c.getMapStringSnapshotTabularDataValue( @@ -546,6 +585,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return True size taken by all the snapshots. */ + @Override public long trueSnapshotsSize() { log(" trueSnapshotsSize()"); return c.getLongValue("/storage_service/snapshots/size/true"); @@ -568,6 +608,7 @@ public class StorageService extends NotificationBroadcasterSupport /** * Trigger a cleanup of keys on a single keyspace */ + @Override public int forceKeyspaceCleanup(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException { @@ -586,6 +627,7 @@ public class StorageService extends NotificationBroadcasterSupport * * Scrubbed CFs will be snapshotted first, if disableSnapshot is false */ + @Override public int scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException { @@ -613,6 +655,7 @@ public class StorageService extends NotificationBroadcasterSupport * Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip * bad rows and do not snapshot sstables first. */ + @Override public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... columnFamilies) throws IOException, ExecutionException, @@ -635,6 +678,7 @@ public class StorageService extends NotificationBroadcasterSupport * @param columnFamilies * @throws IOException */ + @Override public void forceKeyspaceFlush(String keyspaceName, String... 
columnFamilies) throws IOException, ExecutionException, InterruptedException { @@ -721,6 +765,7 @@ public class StorageService extends NotificationBroadcasterSupport * repair option. * @return Repair command number, or 0 if nothing to repair */ + @Override public int repairAsync(String keyspace, Map options) { log(" repairAsync(String keyspace, Map options)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -748,6 +793,7 @@ public class StorageService extends NotificationBroadcasterSupport return repairAsync(keyspace, options); } + @Override public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection dataCenters, Collection hosts, @@ -756,15 +802,6 @@ public class StorageService extends NotificationBroadcasterSupport return c.getIntValue(""); } - @Deprecated - public int forceRepairRangeAsync(String beginToken, String endToken, - String keyspaceName, RepairParallelism parallelismDegree, - Collection dataCenters, Collection hosts, - boolean fullRepair, String... columnFamilies) { - log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, RepairParallelism parallelismDegree, Collection dataCenters, Collection hosts, boolean fullRepair, String... 
columnFamilies)"); - return c.getIntValue(""); - } - @Override public int forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, @@ -774,6 +811,7 @@ public class StorageService extends NotificationBroadcasterSupport return repairAsync(keyspace, options); } + @Override @Deprecated public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, boolean isLocal, @@ -782,6 +820,7 @@ public class StorageService extends NotificationBroadcasterSupport return c.getIntValue(""); } + @Override public void forceTerminateAllRepairSessions() { log(" forceTerminateAllRepairSessions()"); c.post("/storage_service/force_terminate"); @@ -790,6 +829,7 @@ public class StorageService extends NotificationBroadcasterSupport /** * transfer this node's data to other machines and remove it from service. */ + @Override public void decommission() throws InterruptedException { log(" decommission() throws InterruptedException"); c.post("/storage_service/decommission"); @@ -800,6 +840,7 @@ public class StorageService extends NotificationBroadcasterSupport * token to move this node to. This node will unload its data * onto its neighbors, and bootstrap to the new token. */ + @Override public void move(String newToken) throws IOException { log(" move(String newToken) throws IOException"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -812,6 +853,7 @@ public class StorageService extends NotificationBroadcasterSupport * it) from the ring * @param hostIdString the host id to remove */ + @Override public void removeNode(String hostIdString) { log(" removeNode(String token)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -822,6 +864,7 @@ public class StorageService extends NotificationBroadcasterSupport /** * Get the status of a token removal. 
*/ + @Override public String getRemovalStatus() { log(" getRemovalStatus()"); return c.getStringValue("/storage_service/removal_status"); @@ -830,6 +873,7 @@ public class StorageService extends NotificationBroadcasterSupport /** * Force a remove operation to finish. */ + @Override public void forceRemoveCompletion() { log(" forceRemoveCompletion()"); c.post("/storage_service/force_remove_completion"); @@ -854,6 +898,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @see ch.qos.logback.classic.Level#toLevel(String) */ + @Override public void setLoggingLevel(String classQualifier, String level) throws Exception { log(" setLoggingLevel(String classQualifier, String level) throws Exception"); @@ -863,6 +908,7 @@ public class StorageService extends NotificationBroadcasterSupport } /** get the runtime logging levels */ + @Override public Map getLoggingLevels() { log(" getLoggingLevels()"); return c.getMapStrValue("/storage_service/logging_level"); @@ -872,18 +918,21 @@ public class StorageService extends NotificationBroadcasterSupport * get the operational mode (leaving, joining, normal, decommissioned, * client) **/ + @Override public String getOperationMode() { log(" getOperationMode()"); return c.getStringValue("/storage_service/operation_mode"); } /** Returns whether the storage service is starting or not */ + @Override public boolean isStarting() { log(" isStarting()"); return c.getBooleanValue("/storage_service/is_starting"); } /** get the progress of a drain operation */ + @Override public String getDrainProgress() { log(" getDrainProgress()"); // FIXME @@ -897,6 +946,7 @@ public class StorageService extends NotificationBroadcasterSupport * makes node unavailable for writes, flushes memtables and replays * commitlog. 
*/ + @Override public void drain() throws IOException, InterruptedException, ExecutionException { log(" drain() throws IOException, InterruptedException, ExecutionException"); @@ -915,6 +965,7 @@ public class StorageService extends NotificationBroadcasterSupport * @param columnFamily * The column family to delete data from. */ + @Override public void truncate(String keyspace, String columnFamily) throws TimeoutException, IOException { log(" truncate(String keyspace, String columnFamily)throws TimeoutException, IOException"); @@ -927,6 +978,7 @@ public class StorageService extends NotificationBroadcasterSupport * given a list of tokens (representing the nodes in the cluster), returns a * mapping from "token -> %age of cluster owned by that token" */ + @Override public Map getOwnership() { log(" getOwnership()"); return c.getMapInetAddressFloatValue("/storage_service/ownership/"); @@ -939,6 +991,7 @@ public class StorageService extends NotificationBroadcasterSupport * the same replication strategies and if yes then we will use the first * else a empty Map is returned. 
*/ + @Override public Map effectiveOwnership(String keyspace) throws IllegalStateException { log(" effectiveOwnership(String keyspace) throws IllegalStateException"); @@ -949,11 +1002,10 @@ public class StorageService extends NotificationBroadcasterSupport } } + @Override public List getKeyspaces() { log(" getKeyspaces()"); - MultivaluedMap queryParams = new MultivaluedHashMap(); - queryParams.add("non_system", "true"); - return c.getListStrValue("/storage_service/keyspaces", queryParams); + return c.getListStrValue("/storage_service/keyspaces"); } public Map> getColumnFamilyPerKeyspace() { @@ -973,9 +1025,12 @@ public class StorageService extends NotificationBroadcasterSupport return res; } + @Override public List getNonSystemKeyspaces() { log(" getNonSystemKeyspaces()"); - return c.getListStrValue("/storage_service/keyspaces"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("type", "user"); + return c.getListStrValue("/storage_service/keyspaces", queryParams); } /** @@ -994,6 +1049,7 @@ public class StorageService extends NotificationBroadcasterSupport * @param dynamicBadnessThreshold * double, (default 0.0) */ + @Override public void updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException { @@ -1018,85 +1074,93 @@ public class StorageService extends NotificationBroadcasterSupport } // allows a user to forcibly 'kill' a sick node + @Override public void stopGossiping() { log(" stopGossiping()"); c.delete("/storage_service/gossiping"); } // allows a user to recover a forcibly 'killed' node + @Override public void startGossiping() { log(" startGossiping()"); c.post("/storage_service/gossiping"); } // allows a user to see whether gossip is running or not + @Override public boolean isGossipRunning() { log(" isGossipRunning()"); return c.getBooleanValue("/storage_service/gossiping"); } // allows a user to forcibly 
completely stop cassandra + @Override public void stopDaemon() { log(" stopDaemon()"); c.post("/storage_service/stop_daemon"); } // to determine if gossip is disabled + @Override public boolean isInitialized() { log(" isInitialized()"); return c.getBooleanValue("/storage_service/is_initialized"); } // allows a user to disable thrift + @Override public void stopRPCServer() { log(" stopRPCServer()"); c.delete("/storage_service/rpc_server"); } // allows a user to reenable thrift + @Override public void startRPCServer() { log(" startRPCServer()"); c.post("/storage_service/rpc_server"); } // to determine if thrift is running + @Override public boolean isRPCServerRunning() { log(" isRPCServerRunning()"); return c.getBooleanValue("/storage_service/rpc_server"); } + @Override public void stopNativeTransport() { log(" stopNativeTransport()"); c.delete("/storage_service/native_transport"); } + @Override public void startNativeTransport() { log(" startNativeTransport()"); c.post("/storage_service/native_transport"); } + @Override public boolean isNativeTransportRunning() { log(" isNativeTransportRunning()"); return c.getBooleanValue("/storage_service/native_transport"); } // allows a node that have been started without joining the ring to join it + @Override public void joinRing() throws IOException { log(" joinRing() throws IOException"); c.post("/storage_service/join_ring"); } + @Override public boolean isJoined() { log(" isJoined()"); return c.getBooleanValue("/storage_service/join_ring"); } - @Deprecated - public int getExceptionCount() { - log(" getExceptionCount()"); - return c.getIntValue(""); - } - + @Override public void setStreamThroughputMbPerSec(int value) { log(" setStreamThroughputMbPerSec(int value)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -1104,6 +1168,7 @@ public class StorageService extends NotificationBroadcasterSupport c.post("/storage_service/stream_throughput", queryParams); } + @Override public int getStreamThroughputMbPerSec() { 
log(" getStreamThroughputMbPerSec()"); return c.getIntValue("/storage_service/stream_throughput"); @@ -1114,6 +1179,7 @@ public class StorageService extends NotificationBroadcasterSupport return c.getIntValue("/storage_service/compaction_throughput"); } + @Override public void setCompactionThroughputMbPerSec(int value) { log(" setCompactionThroughputMbPerSec(int value)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -1121,11 +1187,13 @@ public class StorageService extends NotificationBroadcasterSupport c.post("/storage_service/compaction_throughput", queryParams); } + @Override public boolean isIncrementalBackupsEnabled() { log(" isIncrementalBackupsEnabled()"); return c.getBooleanValue("/storage_service/incremental_backups"); } + @Override public void setIncrementalBackupsEnabled(boolean value) { log(" setIncrementalBackupsEnabled(boolean value)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -1143,6 +1211,7 @@ public class StorageService extends NotificationBroadcasterSupport * Name of DC from which to select sources for streaming or null * to pick any node */ + @Override public void rebuild(String sourceDc) { log(" rebuild(String sourceDc)"); if (sourceDc != null) { @@ -1155,6 +1224,7 @@ public class StorageService extends NotificationBroadcasterSupport } /** Starts a bulk load and blocks until it completes. */ + @Override public void bulkLoad(String directory) { log(" bulkLoad(String directory)"); c.post("/storage_service/bulk_load/" + directory); @@ -1164,12 +1234,14 @@ public class StorageService extends NotificationBroadcasterSupport * Starts a bulk load asynchronously and returns the String representation * of the planID for the new streaming session. 
*/ + @Override public String bulkLoadAsync(String directory) { log(" bulkLoadAsync(String directory)"); return c.getStringValue( "/storage_service/bulk_load_async/" + directory); } + @Override public void rescheduleFailedDeletions() { log(" rescheduleFailedDeletions()"); c.post("/storage_service/reschedule_failed_deletions"); @@ -1183,6 +1255,7 @@ public class StorageService extends NotificationBroadcasterSupport * @param cfName * The ColumnFamily name where SSTables belong */ + @Override public void loadNewSSTables(String ksName, String cfName) { log(" loadNewSSTables(String ksName, String cfName)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -1200,6 +1273,7 @@ public class StorageService extends NotificationBroadcasterSupport * * @return set of Tokens as Strings */ + @Override public List sampleKeyRange() { log(" sampleKeyRange()"); return c.getListStrValue("/storage_service/sample_key_range"); @@ -1208,11 +1282,13 @@ public class StorageService extends NotificationBroadcasterSupport /** * rebuild the specified indexes */ + @Override public void rebuildSecondaryIndex(String ksName, String cfName, String... idxNames) { log(" rebuildSecondaryIndex(String ksName, String cfName, String... idxNames)"); } + @Override public void resetLocalSchema() throws IOException { log(" resetLocalSchema() throws IOException"); c.post("/storage_service/relocal_schema"); @@ -1228,6 +1304,7 @@ public class StorageService extends NotificationBroadcasterSupport * enable tracing for all requests (which mich severely cripple * the system) */ + @Override public void setTraceProbability(double probability) { log(" setTraceProbability(double probability)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -1238,11 +1315,13 @@ public class StorageService extends NotificationBroadcasterSupport /** * Returns the configured tracing probability. 
*/ + @Override public double getTraceProbability() { log(" getTraceProbability()"); return c.getDoubleValue("/storage_service/trace_probability"); } + @Override public void disableAutoCompaction(String ks, String... columnFamilies) throws IOException { log("disableAutoCompaction(String ks, String... columnFamilies)"); @@ -1252,6 +1331,7 @@ public class StorageService extends NotificationBroadcasterSupport c.delete("/storage_service/auto_compaction/", queryParams); } + @Override public void enableAutoCompaction(String ks, String... columnFamilies) throws IOException { log("enableAutoCompaction(String ks, String... columnFamilies)"); @@ -1267,6 +1347,7 @@ public class StorageService extends NotificationBroadcasterSupport } + @Override public void deliverHints(String host) throws UnknownHostException { log(" deliverHints(String host) throws UnknownHostException"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -1275,24 +1356,28 @@ public class StorageService extends NotificationBroadcasterSupport } /** Returns the name of the cluster */ + @Override public String getClusterName() { log(" getClusterName()"); return c.getStringValue("/storage_service/cluster_name"); } /** Returns the cluster partitioner */ + @Override public String getPartitionerName() { log(" getPartitionerName()"); return c.getStringValue("/storage_service/partitioner_name"); } /** Returns the threshold for warning of queries with many tombstones */ + @Override public int getTombstoneWarnThreshold() { log(" getTombstoneWarnThreshold()"); return c.getIntValue("/storage_service/tombstone_warn_threshold"); } /** Sets the threshold for warning queries with many tombstones */ + @Override public void setTombstoneWarnThreshold(int tombstoneDebugThreshold) { log(" setTombstoneWarnThreshold(int tombstoneDebugThreshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -1302,12 +1387,14 @@ public class StorageService extends NotificationBroadcasterSupport } /** Returns the threshold for 
abandoning queries with many tombstones */ + @Override public int getTombstoneFailureThreshold() { log(" getTombstoneFailureThreshold()"); return c.getIntValue("/storage_service/tombstone_failure_threshold"); } /** Sets the threshold for abandoning queries with many tombstones */ + @Override public void setTombstoneFailureThreshold(int tombstoneDebugThreshold) { log(" setTombstoneFailureThreshold(int tombstoneDebugThreshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -1317,12 +1404,14 @@ public class StorageService extends NotificationBroadcasterSupport } /** Returns the threshold for rejecting queries due to a large batch size */ + @Override public int getBatchSizeFailureThreshold() { log(" getBatchSizeFailureThreshold()"); return c.getIntValue("/storage_service/batch_size_failure_threshold"); } /** Sets the threshold for rejecting queries due to a large batch size */ + @Override public void setBatchSizeFailureThreshold(int batchSizeDebugThreshold) { log(" setBatchSizeFailureThreshold(int batchSizeDebugThreshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -1333,6 +1422,7 @@ public class StorageService extends NotificationBroadcasterSupport /** * Sets the hinted handoff throttle in kb per second, per delivery thread. 
*/ + @Override public void setHintedHandoffThrottleInKB(int throttleInKB) { log(" setHintedHandoffThrottleInKB(int throttleInKB)"); MultivaluedMap queryParams = new MultivaluedHashMap(); @@ -1445,9 +1535,81 @@ public class StorageService extends NotificationBroadcasterSupport } @Override - public double getTracingProbability() { + public Map getEndpointToHostId() { + return getHostIdMap(); + } + + @Override + public Map getHostIdToEndpoint() { + return getHostIdToAddressMap(); + } + + @Override + public void refreshSizeEstimates() throws ExecutionException { + // TODO Auto-generated method stub + log(" refreshSizeEstimates"); + } + + @Override + public void forceKeyspaceCompaction(boolean splitOutput, String keyspaceName, String... tableNames) + throws IOException, ExecutionException, InterruptedException { + // "splitOutput" afaik not relevant for scylla (yet?...) + forceKeyspaceCompaction(keyspaceName, tableNames); + } + + @Override + public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables) + throws IOException, ExecutionException, InterruptedException { + // "jobs" not (yet) relevant for scylla. (though possibly useful...) + return forceKeyspaceCleanup(keyspaceName, tables); + } + + @Override + public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, int jobs, String keyspaceName, + String... columnFamilies) throws IOException, ExecutionException, InterruptedException { + // "jobs" not (yet) relevant for scylla. (though possibly useful...) + return scrub(disableSnapshot, skipCorrupted, checkData, 0, keyspaceName, columnFamilies); + } + + @Override + public int verify(boolean extendedVerify, String keyspaceName, String... 
tableNames) + throws IOException, ExecutionException, InterruptedException { // TODO Auto-generated method stub - log(" getTracingProbability()"); - return c.getDoubleValue(""); + log(" verify"); + return 0; + } + + @Override + public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) + throws IOException, ExecutionException, InterruptedException { + // "jobs" not (yet) relevant for scylla. (though possibly useful...) + return upgradeSSTables(keyspaceName, excludeCurrentVersion, tableNames); + } + + @Override + public List getNonLocalStrategyKeyspaces() { + log(" getNonLocalStrategyKeyspaces"); + MultivaluedMap queryParams = new MultivaluedHashMap(); + queryParams.add("type", "non_local_strategy"); + return c.getListStrValue("/storage_service/keyspaces", queryParams); + } + + @Override + public void setInterDCStreamThroughputMbPerSec(int value) { + // TODO Auto-generated method stub + log(" setInterDCStreamThroughputMbPerSec"); + } + + @Override + public int getInterDCStreamThroughputMbPerSec() { + // TODO Auto-generated method stub + log(" getInterDCStreamThroughputMbPerSec"); + return 0; + } + + @Override + public boolean resumeBootstrap() { + log(" resumeBootstrap"); + return false; } } diff --git a/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java b/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java index 9acbbc8..4234a88 100644 --- a/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java +++ b/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java @@ -15,6 +15,13 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ + +/* + * Copyright 2015 Cloudius Systems + * + * Modified by Cloudius Systems + */ + package org.apache.cassandra.service; import java.io.IOException; @@ -173,13 +180,11 @@ public interface StorageServiceMBean extends NotificationEmitter { /** Retrieve the mapping of endpoint to host ID */ public Map getHostIdMap(); - /** - * Numeric load value. - * - * @see org.apache.cassandra.metrics.StorageMetrics#load - */ - @Deprecated - public double getLoad(); + /** Retrieve the mapping of endpoint to host ID */ + public Map getEndpointToHostId(); + + /** Retrieve the mapping of host ID to endpoint */ + public Map getHostIdToEndpoint(); /** Human-readable load value */ public String getLoadString(); @@ -224,6 +229,17 @@ public interface StorageServiceMBean extends NotificationEmitter { public void takeSnapshot(String tag, String... keyspaceNames) throws IOException; + /** + * Takes the snapshot of a specific column family. A snapshot name must be specified. + * + * @param keyspaceName the keyspace which holds the specified column family + * @param tableName the table to snapshot + * @param tag the tag given to the snapshot; may not be null or empty + */ + default void takeTableSnapshot(String keyspaceName, String tableName, String tag) throws IOException { + takeColumnFamilySnapshot(keyspaceName, tableName, tag); + } + /** * Takes the snapshot of a specific column family. A snapshot name must be * specified. @@ -272,19 +288,22 @@ public interface StorageServiceMBean extends NotificationEmitter { */ public long trueSnapshotsSize(); + /** + * Forces refresh of values stored in system.size_estimates of all column families. + */ + public void refreshSizeEstimates() throws ExecutionException; + /** * Forces major compaction of a single keyspace */ - public void forceKeyspaceCompaction(String keyspaceName, - String... 
columnFamilies) throws IOException, ExecutionException, - InterruptedException; + public void forceKeyspaceCompaction(boolean splitOutput, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException; /** * Trigger a cleanup of keys on a single keyspace */ - public int forceKeyspaceCleanup(String keyspaceName, - String... columnFamilies) throws IOException, ExecutionException, - InterruptedException; + @Deprecated + public int forceKeyspaceCleanup(String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException; + public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException; /** * Scrub (deserialize + reserialize at the latest version, skipping bad rows @@ -294,23 +313,26 @@ public interface StorageServiceMBean extends NotificationEmitter { * Scrubbed CFs will be snapshotted first, if disableSnapshot is false */ @Deprecated - public int scrub(boolean disableSnapshot, boolean skipCorrupted, - String keyspaceName, String... columnFamilies) throws IOException, - ExecutionException, InterruptedException; + public int scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException; + @Deprecated + public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException; + public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, int jobs, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException; - public int scrub(boolean disableSnapshot, boolean skipCorrupted, - boolean checkData, String keyspaceName, String... 
columnFamilies) - throws IOException, ExecutionException, - InterruptedException; + /** + * Verify (checksums of) the given keyspace. + * If tableNames array is empty, all CFs are verified. + * + * The entire sstable will be read to ensure each cell validates if extendedVerify is true + */ + public int verify(boolean extendedVerify, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException; /** * Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip * bad rows and do not snapshot sstables first. */ - public int upgradeSSTables(String keyspaceName, - boolean excludeCurrentVersion, String... columnFamilies) - throws IOException, ExecutionException, - InterruptedException; + @Deprecated + public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... tableNames) throws IOException, ExecutionException, InterruptedException; + public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException; /** * Flush all memtables for the given column families, or all columnfamilies @@ -325,70 +347,66 @@ public interface StorageServiceMBean extends NotificationEmitter { InterruptedException; /** - * Invoke repair asynchronously. You can track repair progress by - * subscribing JMX notification sent from this StorageServiceMBean. - * Notification format is: type: "repair" userObject: int array of length 2, - * [0]=command number, [1]=ordinal of ActiveRepairService.Status + * Invoke repair asynchronously. + * You can track repair progress by subscribing JMX notification sent from this StorageServiceMBean. + * Notification format is: + * type: "repair" + * userObject: int array of length 2, [0]=command number, [1]=ordinal of ActiveRepairService.Status * + * @param keyspace Keyspace name to repair. Should not be null. + * @param options repair option. 
* @return Repair command number, or 0 if nothing to repair */ - public int forceRepairAsync(String keyspace, boolean isSequential, - Collection dataCenters, Collection hosts, - boolean primaryRange, boolean repairedAt, String... columnFamilies) - throws IOException; + public int repairAsync(String keyspace, Map options); /** - * Invoke repair asynchronously. You can track repair progress by - * subscribing JMX notification sent from this StorageServiceMBean. - * Notification format is: type: "repair" userObject: int array of length 2, - * [0]=command number, [1]=ordinal of ActiveRepairService.Status + * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. + */ + @Deprecated + public int forceRepairAsync(String keyspace, boolean isSequential, Collection dataCenters, Collection hosts, boolean primaryRange, boolean fullRepair, String... tableNames) throws IOException; + + /** + * Invoke repair asynchronously. + * You can track repair progress by subscribing JMX notification sent from this StorageServiceMBean. + * Notification format is: + * type: "repair" + * userObject: int array of length 2, [0]=command number, [1]=ordinal of ActiveRepairService.Status * - * @param parallelismDegree - * 0: sequential, 1: parallel, 2: DC parallel + * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. + * + * @param parallelismDegree 0: sequential, 1: parallel, 2: DC parallel * @return Repair command number, or 0 if nothing to repair */ - public int forceRepairAsync(String keyspace, int parallelismDegree, - Collection dataCenters, Collection hosts, - boolean primaryRange, boolean fullRepair, String... columnFamilies); + @Deprecated + public int forceRepairAsync(String keyspace, int parallelismDegree, Collection dataCenters, Collection hosts, boolean primaryRange, boolean fullRepair, String... 
tableNames); - public int forceRepairAsync(String keyspace); /** - * Same as forceRepairAsync, but handles a specified range + * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. */ - public int forceRepairRangeAsync(String beginToken, String endToken, - String keyspaceName, boolean isSequential, - Collection dataCenters, Collection hosts, - boolean repairedAt, String... columnFamilies) throws IOException; + @Deprecated + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection dataCenters, Collection hosts, boolean fullRepair, String... tableNames) throws IOException; /** * Same as forceRepairAsync, but handles a specified range * - * @param parallelismDegree - * 0: sequential, 1: parallel, 2: DC parallel - */ - public int forceRepairRangeAsync(String beginToken, String endToken, - String keyspaceName, int parallelismDegree, - Collection dataCenters, Collection hosts, - boolean fullRepair, String... columnFamilies); - - /** - * Invoke repair asynchronously. You can track repair progress by - * subscribing JMX notification sent from this StorageServiceMBean. - * Notification format is: type: "repair" userObject: int array of length 2, - * [0]=command number, [1]=ordinal of ActiveRepairService.Status + * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. * - * @return Repair command number, or 0 if nothing to repair + * @param parallelismDegree 0: sequential, 1: parallel, 2: DC parallel */ - public int forceRepairAsync(String keyspace, boolean isSequential, - boolean isLocal, boolean primaryRange, boolean fullRepair, - String... columnFamilies); + @Deprecated + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, int parallelismDegree, Collection dataCenters, Collection hosts, boolean fullRepair, String... 
tableNames); /** - * Same as forceRepairAsync, but handles a specified range + * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. */ - public int forceRepairRangeAsync(String beginToken, String endToken, - String keyspaceName, boolean isSequential, boolean isLocal, - boolean repairedAt, String... columnFamilies); + @Deprecated + public int forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, String... tableNames); + + /** + * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. + */ + @Deprecated + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, boolean isLocal, boolean fullRepair, String... tableNames); public void forceTerminateAllRepairSessions(); @@ -497,6 +515,10 @@ public interface StorageServiceMBean extends NotificationEmitter { public List getKeyspaces(); + public List getNonSystemKeyspaces(); + + public List getNonLocalStrategyKeyspaces(); + /** * Change endpointsnitch class and dynamic-ness (and dynamic attributes) at * runtime @@ -552,14 +574,12 @@ public interface StorageServiceMBean extends NotificationEmitter { public boolean isJoined(); - @Deprecated - public int getExceptionCount(); - public void setStreamThroughputMbPerSec(int value); public int getStreamThroughputMbPerSec(); - public int getCompactionThroughputMbPerSec(); + public void setInterDCStreamThroughputMbPerSec(int value); + public int getInterDCStreamThroughputMbPerSec(); public void setCompactionThroughputMbPerSec(int value); @@ -635,7 +655,7 @@ public interface StorageServiceMBean extends NotificationEmitter { /** * Returns the configured tracing probability. */ - public double getTracingProbability(); + public double getTraceProbability(); void disableAutoCompaction(String ks, String... 
columnFamilies) throws IOException; @@ -663,8 +683,16 @@ public interface StorageServiceMBean extends NotificationEmitter { /** Sets the threshold for abandoning queries with many tombstones */ public void setTombstoneFailureThreshold(int tombstoneDebugThreshold); + /** Returns the threshold for rejecting queries due to a large batch size */ + public int getBatchSizeFailureThreshold(); + /** Sets the threshold for rejecting queries due to a large batch size */ + public void setBatchSizeFailureThreshold(int batchSizeDebugThreshold); + + /** Sets the hinted handoff throttle in kb per second, per delivery thread. */ + public void setHintedHandoffThrottleInKB(int throttleInKB); + /** * Sets the hinted handoff throttle in kb per second, per delivery thread. */ - public void setHintedHandoffThrottleInKB(int throttleInKB); + public boolean resumeBootstrap(); } From a44c18c621f70042b73201a178062740ff2f33e6 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 13:47:43 +0200 Subject: [PATCH 13/32] Add metric/mbean base types + metrics JMX object factory --- .../com/scylladb/jmx/metrics/APIMBean.java | 177 ++++ .../scylladb/jmx/metrics/MetricsMBean.java | 91 ++ .../cassandra/metrics/MetricNameFactory.java | 40 + .../org/apache/cassandra/metrics/Metrics.java | 38 + .../cassandra/metrics/MetricsRegistry.java | 792 ++++++++++++++++++ 5 files changed, 1138 insertions(+) create mode 100644 src/main/java/com/scylladb/jmx/metrics/APIMBean.java create mode 100644 src/main/java/com/scylladb/jmx/metrics/MetricsMBean.java create mode 100644 src/main/java/org/apache/cassandra/metrics/MetricNameFactory.java create mode 100644 src/main/java/org/apache/cassandra/metrics/Metrics.java create mode 100644 src/main/java/org/apache/cassandra/metrics/MetricsRegistry.java diff --git a/src/main/java/com/scylladb/jmx/metrics/APIMBean.java b/src/main/java/com/scylladb/jmx/metrics/APIMBean.java new file mode 100644 index 0000000..23d5d3c --- /dev/null +++ 
b/src/main/java/com/scylladb/jmx/metrics/APIMBean.java @@ -0,0 +1,177 @@ +package com.scylladb.jmx.metrics; + +import java.lang.reflect.Field; +import java.util.Set; +import java.util.function.Function; +import java.util.function.Predicate; + +import javax.management.BadAttributeValueExpException; +import javax.management.BadBinaryOpValueExpException; +import javax.management.BadStringOperationException; +import javax.management.InstanceAlreadyExistsException; +import javax.management.InstanceNotFoundException; +import javax.management.InvalidApplicationException; +import javax.management.MBeanRegistration; +import javax.management.MBeanRegistrationException; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.NotCompliantMBeanException; +import javax.management.ObjectName; +import javax.management.QueryExp; + +import com.scylladb.jmx.api.APIClient; + +/** + * Base type for MBeans in scylla-jmx. Wraps auto naming and {@link APIClient} + * holding. + * + * @author calle + * + */ +public class APIMBean implements MBeanRegistration { + protected final APIClient client; + protected final String mbeanName; + + public APIMBean(APIClient client) { + this(null, client); + } + + public APIMBean(String mbeanName, APIClient client) { + this.mbeanName = mbeanName; + this.client = client; + } + + /** + * Helper method to add/remove dynamically created MBeans from a server + * instance. + * + * @param server + * The {@link MBeanServer} to check + * @param all + * All {@link ObjectName}s that should be bound + * @param predicate + * {@link QueryExp} predicate to filter relevant object names. 
+ * @param generator + * {@link Function} to create a new MBean instance for a given + * {@link ObjectName} + * + * @return + * @throws MalformedObjectNameException + */ + public static boolean checkRegistration(MBeanServer server, Set all, + final Predicate predicate, Function generator) + throws MalformedObjectNameException { + Set registered = queryNames(server, predicate); + for (ObjectName name : registered) { + if (!all.contains(name)) { + try { + server.unregisterMBean(name); + } catch (MBeanRegistrationException | InstanceNotFoundException e) { + } + } + } + + int added = 0; + for (ObjectName name : all) { + if (!registered.contains(name)) { + try { + server.registerMBean(generator.apply(name), name); + added++; + } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) { + } + } + } + return added > 0; + } + + /** + * Helper method to query {@link ObjectName}s from an {@link MBeanServer} + * based on {@link Predicate} + * + * @param server + * @param predicate + * @return + */ + public static Set queryNames(MBeanServer server, final Predicate predicate) { + @SuppressWarnings("serial") + Set registered = server.queryNames(null, new QueryExp() { + @Override + public void setMBeanServer(MBeanServer s) { + } + + @Override + public boolean apply(ObjectName name) throws BadStringOperationException, BadBinaryOpValueExpException, + BadAttributeValueExpException, InvalidApplicationException { + return predicate.test(name); + } + }); + return registered; + } + + MBeanServer server; + ObjectName name; + + protected final ObjectName getBoundName() { + return name; + } + + /** + * Figure out an {@link ObjectName} for this object based on either + * contructor parameter, static field, or just package/class name. 
+ * + * @return + * @throws MalformedObjectNameException + */ + protected ObjectName generateName() throws MalformedObjectNameException { + String mbeanName = this.mbeanName; + if (mbeanName == null) { + Field f; + try { + f = getClass().getDeclaredField("MBEAN_NAME"); + f.setAccessible(true); + mbeanName = (String) f.get(null); + } catch (Throwable t) { + } + } + if (mbeanName == null) { + String name = getClass().getName(); + int i = name.lastIndexOf('.'); + mbeanName = name.substring(0, i) + ":type=" + name.substring(i + 1); + } + return new ObjectName(mbeanName); + } + + /** + * Keeps track of bound server and optionally generates an + * {@link ObjectName} for this instance. + */ + @Override + public ObjectName preRegister(MBeanServer server, ObjectName name) throws Exception { + if (this.server != null) { + throw new IllegalStateException("Can only exist in a single MBeanServer"); + } + this.server = server; + if (name == null) { + name = generateName(); + } + this.name = name; + + return name; + } + + @Override + public void postRegister(Boolean registrationDone) { + } + + @Override + public void preDeregister() throws Exception { + } + + @Override + public void postDeregister() { + assert server != null; + assert name != null; + this.server = null; + this.name = null; + } +} diff --git a/src/main/java/com/scylladb/jmx/metrics/MetricsMBean.java b/src/main/java/com/scylladb/jmx/metrics/MetricsMBean.java new file mode 100644 index 0000000..ae4a87f --- /dev/null +++ b/src/main/java/com/scylladb/jmx/metrics/MetricsMBean.java @@ -0,0 +1,91 @@ +package com.scylladb.jmx.metrics; + +import static java.util.Arrays.asList; + +import java.util.Collection; +import java.util.function.Predicate; +import java.util.function.Supplier; + +import javax.management.InstanceNotFoundException; +import javax.management.MBeanRegistrationException; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; + 
+import org.apache.cassandra.metrics.Metrics; +import org.apache.cassandra.metrics.MetricsRegistry; + +import com.scylladb.jmx.api.APIClient; + +/** + * Base type for MBeans containing {@link Metrics}. + * + * @author calle + * + */ +public abstract class MetricsMBean extends APIMBean { + private final Collection metrics; + + public MetricsMBean(APIClient client, Metrics... metrics) { + this(null, client, metrics); + } + + public MetricsMBean(String mbeanName, APIClient client, Metrics... metrics) { + this(mbeanName, client, asList(metrics)); + } + + public MetricsMBean(String mbeanName, APIClient client, Collection metrics) { + super(mbeanName, client); + this.metrics = metrics; + } + + protected Predicate getTypePredicate() { + String domain = name.getDomain(); + String type = name.getKeyProperty("type"); + return n -> { + return domain.equals(n.getDomain()) && type.equals(n.getKeyProperty("type")); + }; + } + + private void register(MetricsRegistry registry, MBeanServer server) throws MalformedObjectNameException { + // Check if we're the first/last of our type bound/removed. + boolean empty = queryNames(server, getTypePredicate()).isEmpty(); + for (Metrics m : metrics) { + if (empty) { + m.registerGlobals(registry); + } + m.register(registry); + } + } + + @Override + public ObjectName preRegister(MBeanServer server, ObjectName name) throws Exception { + // Get name etc. + name = super.preRegister(server, name); + // Register all metrics in server + register(new MetricsRegistry(client, server), server); + return name; + } + + @Override + public void postDeregister() { + // We're officially unbound. Remove all metrics we added. + try { + register(new MetricsRegistry(client, server) { + // Unbind instead of bind. Yes. + @Override + public void register(Supplier s, ObjectName... 
objectNames) { + for (ObjectName name : objectNames) { + try { + server.unregisterMBean(name); + } catch (MBeanRegistrationException | InstanceNotFoundException e) { + } + } + } + }, server); + } catch (MalformedObjectNameException e) { + // TODO : log? + } + super.postDeregister(); + } +} diff --git a/src/main/java/org/apache/cassandra/metrics/MetricNameFactory.java b/src/main/java/org/apache/cassandra/metrics/MetricNameFactory.java new file mode 100644 index 0000000..3b1de95 --- /dev/null +++ b/src/main/java/org/apache/cassandra/metrics/MetricNameFactory.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.cassandra.metrics; + +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; + +/** + * Simplified version of {@link Metrics} naming factory paradigm, simply + * generating {@link ObjectName} and nothing more. + * + * @author calle + * + */ +public interface MetricNameFactory { + /** + * Create a qualified name from given metric name. + * + * @param metricName + * part of qualified name. + * @return new String with given metric name. 
+ * @throws MalformedObjectNameException + */ + ObjectName createMetricName(String metricName) throws MalformedObjectNameException; +} diff --git a/src/main/java/org/apache/cassandra/metrics/Metrics.java b/src/main/java/org/apache/cassandra/metrics/Metrics.java new file mode 100644 index 0000000..80b8b3f --- /dev/null +++ b/src/main/java/org/apache/cassandra/metrics/Metrics.java @@ -0,0 +1,38 @@ +package org.apache.cassandra.metrics; + +import java.util.function.Function; + +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; + +/** + * Action interface for any type that encapsulates n metrics. + * + * @author calle + * + */ +public interface Metrics { + /** + * Implementors should issue + * {@link MetricsRegistry#register(java.util.function.Supplier, javax.management.ObjectName...)} + * for every {@link Metrics} they generate. This method is called in both + * bind (create) and unbind (remove) phase, so an appropriate use of + * {@link Function} binding is advisable. + * + * @param registry + * @throws MalformedObjectNameException + */ + void register(MetricsRegistry registry) throws MalformedObjectNameException; + + /** + * Same as {{@link #register(MetricsRegistry)}, but for {@link Metric}s that + * are "global" (i.e. static - not bound to an individual bean instance. + * This method is called whenever the first encapsulating MBean is + * added/removed from a {@link MBeanServer}. 
+ * + * @param registry + * @throws MalformedObjectNameException + */ + default void registerGlobals(MetricsRegistry registry) throws MalformedObjectNameException { + } +} diff --git a/src/main/java/org/apache/cassandra/metrics/MetricsRegistry.java b/src/main/java/org/apache/cassandra/metrics/MetricsRegistry.java new file mode 100644 index 0000000..f23ef66 --- /dev/null +++ b/src/main/java/org/apache/cassandra/metrics/MetricsRegistry.java @@ -0,0 +1,792 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.cassandra.metrics; + +import static com.scylladb.jmx.api.APIClient.getReader; +import static java.lang.Math.floor; +import static java.util.logging.Level.SEVERE; + +import java.util.Arrays; +import java.util.Locale; +import java.util.concurrent.TimeUnit; +import java.util.function.BiFunction; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.logging.Logger; + +import javax.json.JsonArray; +import javax.json.JsonNumber; +import javax.json.JsonObject; +import javax.management.InstanceAlreadyExistsException; +import javax.management.MBeanRegistrationException; +import javax.management.MBeanServer; +import javax.management.NotCompliantMBeanException; +import javax.management.ObjectName; + +import com.scylladb.jmx.api.APIClient; + +/** + * Makes integrating 3.0 metrics API with 2.0. + *

+ * The 3.0 API comes with poor JMX integration + *

+ */ +public class MetricsRegistry { + private static final long CACHE_DURATION = 1000; + private static final long UPDATE_INTERVAL = 50; + + private static final Logger logger = Logger.getLogger(MetricsRegistry.class.getName()); + + private final APIClient client; + private final MBeanServer mBeanServer; + + public MetricsRegistry(APIClient client, MBeanServer mBeanServer) { + this.client = client; + this.mBeanServer = mBeanServer; + } + + public MetricsRegistry(MetricsRegistry other) { + this(other.client, other.mBeanServer); + } + + public MetricMBean gauge(String url) { + return gauge(Long.class, url); + } + + public MetricMBean gauge(Class type, final String url) { + return gauge(getReader(type), url); + } + + public MetricMBean gauge(final BiFunction function, final String url) { + return gauge(c -> function.apply(c, url)); + } + + public MetricMBean gauge(final Function function) { + return gauge(() -> function.apply(client)); + } + + private class JmxGauge implements JmxGaugeMBean { + private final Supplier function; + + public JmxGauge(Supplier function) { + this.function = function; + } + + @Override + public Object getValue() { + return function.get(); + } + } + + public MetricMBean gauge(final Supplier function) { + return new JmxGauge(function); + } + + /** + * Default approach to register is to actually register/add to + * {@link MBeanServer} For unbind phase, override here. + * + * @param bean + * @param objectNames + */ + public void register(Supplier f, ObjectName... 
objectNames) { + MetricMBean bean = f.get(); + for (ObjectName name : objectNames) { + try { + mBeanServer.registerMBean(bean, name); + } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) { + logger.log(SEVERE, "Could not register mbean", e); + } + } + } + + private class JmxCounter implements JmxCounterMBean { + private final String url; + + public JmxCounter(String url) { + super(); + this.url = url; + } + + @Override + public long getCount() { + return client.getLongValue(url); + } + } + + public MetricMBean counter(final String url) { + return new JmxCounter(url); + } + + private abstract class IntermediatelyUpdated { + private final long interval; + private final Supplier supplier; + private long lastUpdate; + + public IntermediatelyUpdated(String url, long interval) { + this.supplier = () -> client.getJsonObj(url, null); + this.interval = interval; + } + + public IntermediatelyUpdated(Supplier supplier, long interval) { + this.supplier = supplier; + this.interval = interval; + } + + public abstract void update(JsonObject obj); + + public final void update() { + long now = System.currentTimeMillis(); + if (now - lastUpdate < interval) { + return; + } + try { + JsonObject obj = supplier.get(); + update(obj); + } finally { + lastUpdate = now; + } + } + } + + private static class Meter { + public final long count; + public final double oneMinuteRate; + public final double fiveMinuteRate; + public final double fifteenMinuteRate; + public final double meanRate; + + public Meter(long count, double oneMinuteRate, double fiveMinuteRate, double fifteenMinuteRate, + double meanRate) { + this.count = count; + this.oneMinuteRate = oneMinuteRate; + this.fiveMinuteRate = fiveMinuteRate; + this.fifteenMinuteRate = fifteenMinuteRate; + this.meanRate = meanRate; + } + + public Meter() { + this(0, 0, 0, 0, 0); + } + + public Meter(JsonObject obj) { + JsonArray rates = obj.getJsonArray("rates"); + oneMinuteRate = 
rates.getJsonNumber(0).doubleValue(); + fiveMinuteRate = rates.getJsonNumber(1).doubleValue(); + fifteenMinuteRate = rates.getJsonNumber(2).doubleValue(); + meanRate = obj.getJsonNumber("mean_rate").doubleValue(); + count = obj.getJsonNumber("count").longValue(); + } + } + + private static final TimeUnit RATE_UNIT = TimeUnit.SECONDS; + private static final TimeUnit DURATION_UNIT = TimeUnit.MICROSECONDS; + private static final TimeUnit API_DURATION_UNIT = TimeUnit.NANOSECONDS; + private static final double DURATION_FACTOR = 1.0 / API_DURATION_UNIT.convert(1, DURATION_UNIT); + + private static double toDuration(double nanos) { + return nanos * DURATION_FACTOR; + } + + private static String unitString(TimeUnit u) { + String s = u.toString().toLowerCase(Locale.US); + return s.substring(0, s.length() - 1); + } + + private class JmxMeter extends IntermediatelyUpdated implements JmxMeterMBean { + private Meter meter = new Meter(); + + public JmxMeter(String url, long interval) { + super(url, interval); + } + + public JmxMeter(Supplier supplier, long interval) { + super(supplier, interval); + } + + @Override + public void update(JsonObject obj) { + meter = new Meter(obj); + } + + @Override + public long getCount() { + update(); + return meter.count; + } + + @Override + public double getMeanRate() { + update(); + return meter.meanRate; + } + + @Override + public double getOneMinuteRate() { + update(); + return meter.oneMinuteRate; + } + + @Override + public double getFiveMinuteRate() { + update(); + return meter.fiveMinuteRate; + } + + @Override + public double getFifteenMinuteRate() { + update(); + return meter.fifteenMinuteRate; + } + + @Override + public String getRateUnit() { + return "event/" + unitString(RATE_UNIT); + } + } + + public MetricMBean meter(String url) { + return new JmxMeter(url, CACHE_DURATION); + } + + private static long[] asLongArray(JsonArray a) { + return a.getValuesAs(JsonNumber.class).stream().mapToLong(n -> n.longValue()).toArray(); + } + + 
private static interface Samples { + default double getValue(double quantile) { + return 0; + } + + default long[] getValues() { + return new long[0]; + } + } + + private static class BufferSamples implements Samples { + private final long[] samples; + + public BufferSamples(long[] samples) { + this.samples = samples; + Arrays.sort(this.samples); + } + + @Override + public long[] getValues() { + return samples; + } + + @Override + public double getValue(double quantile) { + if (quantile < 0.0 || quantile > 1.0) { + throw new IllegalArgumentException(quantile + " is not in [0..1]"); + } + + if (samples.length == 0) { + return 0.0; + } + + final double pos = quantile * (samples.length + 1); + + if (pos < 1) { + return samples[0]; + } + + if (pos >= samples.length) { + return samples[samples.length - 1]; + } + + final double lower = samples[(int) pos - 1]; + final double upper = samples[(int) pos]; + return lower + (pos - floor(pos)) * (upper - lower); + } + } + + private static class Histogram { + private final long count; + private final long min; + private final long max; + private final double mean; + private final double stdDev; + + private final Samples samples; + + public Histogram(long count, long min, long max, double mean, double stdDev, Samples samples) { + this.count = count; + this.min = min; + this.max = max; + this.mean = mean; + this.stdDev = stdDev; + this.samples = samples; + } + + public Histogram() { + this(0, 0, 0, 0, 0, new Samples() { + }); + } + + public Histogram(JsonObject obj) { + this(obj.getJsonNumber("count").longValue(), obj.getJsonNumber("min").longValue(), + obj.getJsonNumber("max").longValue(), obj.getJsonNumber("mean").doubleValue(), + obj.getJsonNumber("variance").doubleValue(), new BufferSamples(getValues(obj))); + } + + public Histogram(EstimatedHistogram h) { + this(h.count(), h.min(), h.max(), h.mean(), 0, h); + } + + private static long[] getValues(JsonObject obj) { + JsonArray arr = obj.getJsonArray("sample"); + if (arr != 
null) { + return asLongArray(arr); + } + return new long[0]; + } + + public long[] getValues() { + return samples.getValues(); + } + + // Origin (and previous iterations of scylla-jxm) + // uses biased/ExponentiallyDecaying measurements + // for the history & quantile resolution. + // However, for use that is just gobbletigook, since + // we, at occasions of being asked, and when certain time + // has passed, ask the actual scylla server for a + // "values" buffer. A buffer with no information whatsoever + // on how said values correlate to actual sampling + // time. + // So, applying time weights at this level is just + // wrong. We can just as well treat this as a uniform + // distribution. + // Obvious improvement: Send time/value tuples instead. + public double getValue(double quantile) { + return samples.getValue(quantile); + } + + public long getCount() { + return count; + } + + public long getMin() { + return min; + } + + public long getMax() { + return max; + } + + public double getMean() { + return mean; + } + + public double getStdDev() { + return stdDev; + } + } + + private static class EstimatedHistogram implements Samples { + /** + * The series of values to which the counts in `buckets` correspond: 1, + * 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc. Thus, a `buckets` of + * [0, 0, 1, 10] would mean we had seen one value of 3 and 10 values of + * 4. + * + * The series starts at 1 and grows by 1.2 each time (rounding and + * removing duplicates). It goes from 1 to around 36M by default + * (creating 90+1 buckets), which will give us timing resolution from + * microseconds to 36 seconds, with less precision as the numbers get + * larger. + * + * Each bucket represents values from (previous bucket offset, current + * offset]. 
+ */ + private final long[] bucketOffsets; + // buckets is one element longer than bucketOffsets -- the last element + // is + // values greater than the last offset + private long[] buckets; + + public EstimatedHistogram(JsonObject obj) { + this(asLongArray(obj.getJsonArray("bucket_offsets")), asLongArray(obj.getJsonArray("buckets"))); + } + + public EstimatedHistogram(long[] offsets, long[] bucketData) { + assert bucketData.length == offsets.length + 1; + bucketOffsets = offsets; + buckets = bucketData; + } + + /** + * @return the smallest value that could have been added to this + * histogram + */ + public long min() { + for (int i = 0; i < buckets.length; i++) { + if (buckets[i] > 0) { + return i == 0 ? 0 : 1 + bucketOffsets[i - 1]; + } + } + return 0; + } + + /** + * @return the largest value that could have been added to this + * histogram. If the histogram overflowed, returns + * Long.MAX_VALUE. + */ + public long max() { + int lastBucket = buckets.length - 1; + if (buckets[lastBucket] > 0) { + return Long.MAX_VALUE; + } + + for (int i = lastBucket - 1; i >= 0; i--) { + if (buckets[i] > 0) { + return bucketOffsets[i]; + } + } + return 0; + } + + @Override + public long[] getValues() { + return buckets; + } + + /** + * @param percentile + * @return estimated value at given percentile + */ + @Override + public double getValue(double percentile) { + assert percentile >= 0 && percentile <= 1.0; + int lastBucket = buckets.length - 1; + if (buckets[lastBucket] > 0) { + throw new IllegalStateException("Unable to compute when histogram overflowed"); + } + + long pcount = (long) Math.floor(count() * percentile); + if (pcount == 0) { + return 0; + } + + long elements = 0; + for (int i = 0; i < lastBucket; i++) { + elements += buckets[i]; + if (elements >= pcount) { + return bucketOffsets[i]; + } + } + return 0; + } + + /** + * @return the mean histogram value (average of bucket offsets, weighted + * by count) + * @throws IllegalStateException + * if any values were 
greater than the largest bucket + * threshold + */ + public long mean() { + int lastBucket = buckets.length - 1; + if (buckets[lastBucket] > 0) { + throw new IllegalStateException("Unable to compute ceiling for max when histogram overflowed"); + } + + long elements = 0; + long sum = 0; + for (int i = 0; i < lastBucket; i++) { + long bCount = buckets[i]; + elements += bCount; + sum += bCount * bucketOffsets[i]; + } + + return (long) Math.ceil((double) sum / elements); + } + + /** + * @return the total number of non-zero values + */ + public long count() { + return Arrays.stream(buckets).sum(); + } + + /** + * @return true if this histogram has overflowed -- that is, a value + * larger than our largest bucket could bound was added + */ + @SuppressWarnings("unused") + public boolean isOverflowed() { + return buckets[buckets.length - 1] > 0; + } + + } + + private class JmxHistogram extends IntermediatelyUpdated implements JmxHistogramMBean { + private Histogram histogram = new Histogram(); + + public JmxHistogram(String url, long interval) { + super(url, interval); + } + + @Override + public void update(JsonObject obj) { + if (obj.containsKey("hist")) { + obj = obj.getJsonObject("hist"); + } + if (obj.containsKey("buckets")) { + histogram = new Histogram(new EstimatedHistogram(obj)); + } else { + histogram = new Histogram(obj); + } + } + + @Override + public long getCount() { + update(); + return histogram.getCount(); + } + + @Override + public long getMin() { + update(); + return histogram.getMin(); + } + + @Override + public long getMax() { + update(); + return histogram.getMax(); + } + + @Override + public double getMean() { + update(); + return histogram.getMean(); + } + + @Override + public double getStdDev() { + update(); + return histogram.getStdDev(); + } + + @Override + public double get50thPercentile() { + update(); + return histogram.getValue(.5); + } + + @Override + public double get75thPercentile() { + update(); + return histogram.getValue(.75); + } + + 
@Override + public double get95thPercentile() { + update(); + return histogram.getValue(.95); + } + + @Override + public double get98thPercentile() { + update(); + return histogram.getValue(.98); + } + + @Override + public double get99thPercentile() { + update(); + return histogram.getValue(.99); + } + + @Override + public double get999thPercentile() { + update(); + return histogram.getValue(.999); + } + + @Override + public long[] values() { + update(); + return histogram.getValues(); + } + } + + public MetricMBean histogram(String url, boolean considerZeroes) { + return new JmxHistogram(url, UPDATE_INTERVAL); + } + + private class JmxTimer extends JmxMeter implements JmxTimerMBean { + private Histogram histogram = new Histogram(); + + public JmxTimer(String url, long interval) { + super(url, interval); + } + + @Override + public void update(JsonObject obj) { + // TODO: this is not atomic. + super.update(obj.getJsonObject("meter")); + histogram = new Histogram(obj.getJsonObject("hist")); + } + + @Override + public double getMin() { + return toDuration(histogram.getMin()); + } + + @Override + public double getMax() { + return toDuration(histogram.getMax()); + } + + @Override + public double getMean() { + return toDuration(histogram.getMean()); + } + + @Override + public double getStdDev() { + return toDuration(histogram.getStdDev()); + } + + @Override + public double get50thPercentile() { + return toDuration(histogram.getValue(.5)); + } + + @Override + public double get75thPercentile() { + return toDuration(histogram.getValue(.75)); + } + + @Override + public double get95thPercentile() { + return toDuration(histogram.getValue(.95)); + } + + @Override + public double get98thPercentile() { + return toDuration(histogram.getValue(.98)); + } + + @Override + public double get99thPercentile() { + return toDuration(histogram.getValue(.99)); + } + + @Override + public double get999thPercentile() { + return toDuration(histogram.getValue(.999)); + } + + @Override + public 
long[] values() { + return histogram.getValues(); + } + + @Override + public String getDurationUnit() { + return DURATION_UNIT.toString().toLowerCase(Locale.US); + } + } + + public MetricMBean timer(String url) { + return new JmxTimer(url, UPDATE_INTERVAL); + } + + public interface MetricMBean { + } + + public static interface JmxGaugeMBean extends MetricMBean { + Object getValue(); + } + + public interface JmxHistogramMBean extends MetricMBean { + long getCount(); + + long getMin(); + + long getMax(); + + double getMean(); + + double getStdDev(); + + double get50thPercentile(); + + double get75thPercentile(); + + double get95thPercentile(); + + double get98thPercentile(); + + double get99thPercentile(); + + double get999thPercentile(); + + long[] values(); + } + + public interface JmxCounterMBean extends MetricMBean { + long getCount(); + } + + public interface JmxMeterMBean extends MetricMBean { + long getCount(); + + double getMeanRate(); + + double getOneMinuteRate(); + + double getFiveMinuteRate(); + + double getFifteenMinuteRate(); + + String getRateUnit(); + } + + public interface JmxTimerMBean extends JmxMeterMBean { + double getMin(); + + double getMax(); + + double getMean(); + + double getStdDev(); + + double get50thPercentile(); + + double get75thPercentile(); + + double get95thPercentile(); + + double get98thPercentile(); + + double get99thPercentile(); + + double get999thPercentile(); + + long[] values(); + + String getDurationUnit(); + } +} From 319dadb79c90e77f043c1abde797bb5ec592add7 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 13:48:24 +0200 Subject: [PATCH 14/32] Add TableMetrics - c3 version of ColumnFamilyMetrics Using new, slimmer, metrics binding --- .../cassandra/metrics/TableMetrics.java | 310 ++++++++++++++++++ 1 file changed, 310 insertions(+) create mode 100644 src/main/java/org/apache/cassandra/metrics/TableMetrics.java diff --git a/src/main/java/org/apache/cassandra/metrics/TableMetrics.java 
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.cassandra.metrics;

import static com.scylladb.jmx.api.APIClient.getReader;

import java.util.function.BiFunction;
import java.util.function.Function;

import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

import org.apache.cassandra.db.ColumnFamilyStore;

import com.scylladb.jmx.api.APIClient;

/**
 * Metrics for {@link ColumnFamilyStore}.
 *
 * Binds per-table ("Table" / legacy "ColumnFamily") metric MBeans to the
 * Scylla REST API endpoints under {@code /column_family/metrics/...}.
 */
public class TableMetrics implements Metrics {
    private final MetricNameFactory factory;
    private final MetricNameFactory aliasFactory;
    private static final MetricNameFactory globalFactory = new AllTableMetricNameFactory("Table");
    private static final MetricNameFactory globalAliasFactory = new AllTableMetricNameFactory("ColumnFamily");
    // BUGFIX: "Write" and "Range" previously used compose("read_latency")
    // (copy/paste error) — they now point at their own endpoints, matching
    // the per-table latencyMetrics array below.
    private static final LatencyMetrics[] globalLatency = new LatencyMetrics[] {
            new LatencyMetrics("Read", compose("read_latency"), globalFactory, globalAliasFactory),
            new LatencyMetrics("Write", compose("write_latency"), globalFactory, globalAliasFactory),
            new LatencyMetrics("Range", compose("range_latency"), globalFactory, globalAliasFactory), };

    // "keyspace:table" — appended to metric URLs to scope them to this table.
    private final String cfName;
    private final LatencyMetrics[] latencyMetrics;

    /**
     * @param keyspace     owning keyspace name
     * @param columnFamily table name
     * @param isIndex      whether this table is a secondary index (changes the
     *                     JMX "type" key to Index{type})
     */
    public TableMetrics(String keyspace, String columnFamily, boolean isIndex) {
        this.factory = new TableMetricNameFactory(keyspace, columnFamily, isIndex, "Table");
        this.aliasFactory = new TableMetricNameFactory(keyspace, columnFamily, isIndex, "ColumnFamily");
        this.cfName = keyspace + ":" + columnFamily;

        latencyMetrics = new LatencyMetrics[] {
                new LatencyMetrics("Read", compose("read_latency"), cfName, factory, aliasFactory),
                new LatencyMetrics("Write", compose("write_latency"), cfName, factory, aliasFactory),
                new LatencyMetrics("Range", compose("range_latency"), cfName, factory, aliasFactory),

                new LatencyMetrics("CasPrepare", compose("cas_prepare"), cfName, factory, aliasFactory),
                new LatencyMetrics("CasPropose", compose("cas_propose"), cfName, factory, aliasFactory),
                new LatencyMetrics("CasCommit", compose("cas_commit"), cfName, factory, aliasFactory), };
    }

    @Override
    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
        Registry r = new Registry(registry, factory, aliasFactory, cfName);
        registerCommon(r);
        registerLocal(r);
    }

    @Override
    public void registerGlobals(MetricsRegistry registry) throws MalformedObjectNameException {
        // Global (cross-table) variants use the "AllTable" name factories and
        // a null cfName so URLs are not scoped to a single table.
        Registry r = new Registry(registry, globalFactory, globalAliasFactory, null);
        registerCommon(r);
        for (LatencyMetrics l : globalLatency) {
            l.register(registry);
        }
    }

    /** Builds "/column_family/metrics/{base}[/{name}]". */
    private static String compose(String base, String name) {
        String s = "/column_family/metrics/" + base;
        return name != null ? s + "/" + name : s;
    }

    private static String compose(String base) {
        return compose(base, null);
    }

    /**
     * Registration helper: carries the name factories and optional table
     * scope ({@code cfName}), and offers convenience creators for the
     * gauge/counter/histogram/timer MBeans of a table.
     */
    static class Registry extends MetricsRegistry {
        @SuppressWarnings("unused")
        private Function newGauge(final String url) {
            return newGauge(Long.class, url);
        }

        public Function newGauge(BiFunction function, String url) {
            return c -> {
                return function.apply(c, url);
            };
        }

        private Function newGauge(Class type, final String url) {
            return newGauge(getReader(type), url);
        }

        final MetricNameFactory factory;
        final MetricNameFactory aliasFactory;
        final String cfName; // null for the global (all-tables) registry

        public Registry(MetricsRegistry other, MetricNameFactory factory, MetricNameFactory aliasFactory,
                String cfName) {
            super(other);
            this.cfName = cfName;
            this.factory = factory;
            this.aliasFactory = aliasFactory;
        }

        public void createTableGauge(String name, String uri) throws MalformedObjectNameException {
            createTableGauge(name, name, uri);
        }

        public void createTableGauge(String name, String alias, String uri) throws MalformedObjectNameException {
            createTableGauge(Long.class, name, alias, uri);
        }

        public void createTableGauge(Class c, String name, String uri) throws MalformedObjectNameException {
            createTableGauge(c, c, name, name, uri);
        }

        public void createTableGauge(Class c, String name, String alias, String uri)
                throws MalformedObjectNameException {
            createTableGauge(c, name, alias, uri, getReader(c));
        }

        public void createTableGauge(Class c, String name, String uri, BiFunction f)
                throws MalformedObjectNameException {
            createTableGauge(c, name, name, uri, f);
        }

        public void createTableGauge(Class c, String name, String alias, String uri, BiFunction f)
                throws MalformedObjectNameException {
            // Register under both the new ("Table") and legacy
            // ("ColumnFamily") names.
            register(() -> gauge(newGauge(f, compose(uri, cfName))), factory.createMetricName(name),
                    aliasFactory.createMetricName(alias));
        }

        public void createTableGauge(Class c1, Class c2, String name, String alias, String uri)
                throws MalformedObjectNameException {
            if (cfName != null) {
                createTableGauge(c1, name, alias, uri, getReader(c1));
            } else { // global case
                createTableGauge(c2, name, alias, uri, getReader(c2));
            }
        }

        public void createTableCounter(String name, String uri) throws MalformedObjectNameException {
            createTableCounter(name, name, uri);
        }

        public void createTableCounter(String name, String alias, String uri) throws MalformedObjectNameException {
            register(() -> counter(compose(uri, cfName)), factory.createMetricName(name),
                    aliasFactory.createMetricName(alias));
        }

        public void createTableHistogram(String name, String uri, boolean considerZeros)
                throws MalformedObjectNameException {
            createTableHistogram(name, name, uri, considerZeros);
        }

        public void createTableHistogram(String name, String alias, String uri, boolean considerZeros)
                throws MalformedObjectNameException {
            register(() -> histogram(compose(uri, cfName), considerZeros), factory.createMetricName(name),
                    aliasFactory.createMetricName(alias));
        }

        public void createTimer(String name, String uri) throws MalformedObjectNameException {
            register(() -> timer(compose(uri, cfName)), factory.createMetricName(name));
        }
    }

    /** Metrics that only exist per-table (not in the global registry). */
    private void registerLocal(Registry registry) throws MalformedObjectNameException {
        registry.createTableGauge(long[].class, "EstimatedPartitionSizeHistogram", "EstimatedRowSizeHistogram",
                "estimated_row_size_histogram", APIClient::getEstimatedHistogramAsLongArrValue);
        registry.createTableGauge("EstimatedPartitionCount", "EstimatedRowCount", "estimated_row_count");

        registry.createTableGauge(long[].class, "EstimatedColumnCountHistogram", "estimated_column_count_histogram",
                APIClient::getEstimatedHistogramAsLongArrValue);
        registry.createTableGauge(Double.class, "KeyCacheHitRate", "key_cache_hit_rate");

        registry.createTimer("CoordinatorReadLatency", "coordinator/read");
        registry.createTimer("CoordinatorScanLatency", "coordinator/scan");
        registry.createTimer("WaitingOnFreeMemtableSpace", "waiting_on_free_memtable");

        for (LatencyMetrics l : latencyMetrics) {
            l.register(registry);
        }
    }

    /** Metrics registered both per-table and globally. */
    private static void registerCommon(Registry registry) throws MalformedObjectNameException {
        registry.createTableGauge("MemtableColumnsCount", "memtable_columns_count");
        registry.createTableGauge("MemtableOnHeapSize", "memtable_on_heap_size");
        registry.createTableGauge("MemtableOffHeapSize", "memtable_off_heap_size");
        registry.createTableGauge("MemtableLiveDataSize", "memtable_live_data_size");
        registry.createTableGauge("AllMemtablesHeapSize", "all_memtables_on_heap_size");
        registry.createTableGauge("AllMemtablesOffHeapSize", "all_memtables_off_heap_size");
        registry.createTableGauge("AllMemtablesLiveDataSize", "all_memtables_live_data_size");

        registry.createTableCounter("MemtableSwitchCount", "memtable_switch_count");

        registry.createTableHistogram("SSTablesPerReadHistogram", "sstables_per_read_histogram", true);
        registry.createTableGauge(Double.class, "CompressionRatio", "compression_ratio");

        registry.createTableCounter("PendingFlushes", "pending_flushes");

        // Per-table these are Integer, globally they aggregate as Long.
        registry.createTableGauge(Integer.class, Long.class, "PendingCompactions", "PendingCompactions",
                "pending_compactions");
        registry.createTableGauge(Integer.class, Long.class, "LiveSSTableCount", "LiveSSTableCount",
                "live_ss_table_count");

        registry.createTableCounter("LiveDiskSpaceUsed", "live_disk_space_used");
        registry.createTableCounter("TotalDiskSpaceUsed", "total_disk_space_used");
        registry.createTableGauge("MinPartitionSize", "MinRowSize", "min_row_size");
        registry.createTableGauge("MaxPartitionSize", "MaxRowSize", "max_row_size");
        registry.createTableGauge("MeanPartitionSize", "MeanRowSize", "mean_row_size");

        registry.createTableGauge("BloomFilterFalsePositives", "bloom_filter_false_positives");
        registry.createTableGauge("RecentBloomFilterFalsePositives", "recent_bloom_filter_false_positives");
        registry.createTableGauge(Double.class, "BloomFilterFalseRatio", "bloom_filter_false_ratio");
        registry.createTableGauge(Double.class, "RecentBloomFilterFalseRatio", "recent_bloom_filter_false_ratio");

        registry.createTableGauge("BloomFilterDiskSpaceUsed", "bloom_filter_disk_space_used");
        registry.createTableGauge("BloomFilterOffHeapMemoryUsed", "bloom_filter_off_heap_memory_used");
        registry.createTableGauge("IndexSummaryOffHeapMemoryUsed", "index_summary_off_heap_memory_used");
        registry.createTableGauge("CompressionMetadataOffHeapMemoryUsed", "compression_metadata_off_heap_memory_used");
        registry.createTableGauge("SpeculativeRetries", "speculative_retries");

        registry.createTableHistogram("TombstoneScannedHistogram", "tombstone_scanned_histogram", false);
        registry.createTableHistogram("LiveScannedHistogram", "live_scanned_histogram", false);
        registry.createTableHistogram("ColUpdateTimeDeltaHistogram", "col_update_time_delta_histogram", false);

        // We do not want to capture view mutation specific metrics for a view
        // They only makes sense to capture on the base table
        // TODO: views
        // if (!cfs.metadata.isView())
        // {
        // viewLockAcquireTime = createTableTimer("ViewLockAcquireTime",
        // cfs.keyspace.metric.viewLockAcquireTime);
        // viewReadTime = createTableTimer("ViewReadTime",
        // cfs.keyspace.metric.viewReadTime);
        // }

        registry.createTableGauge("SnapshotsSize", "snapshots_size");
        registry.createTableCounter("RowCacheHitOutOfRange", "row_cache_hit_out_of_range");
        registry.createTableCounter("RowCacheHit", "row_cache_hit");
        registry.createTableCounter("RowCacheMiss", "row_cache_miss");
    }

    /**
     * Generates per-table {@link ObjectName}s of the form
     * {@code org.apache.cassandra.metrics:type=<Table|ColumnFamily>,keyspace=...,scope=...,name=...}.
     */
    static class TableMetricNameFactory implements MetricNameFactory {
        private final String keyspaceName;
        private final String tableName;
        private final boolean isIndex;
        private final String type;

        public TableMetricNameFactory(String keyspaceName, String tableName, boolean isIndex, String type) {
            this.keyspaceName = keyspaceName;
            this.tableName = tableName;
            this.isIndex = isIndex;
            this.type = type;
        }

        @Override
        public ObjectName createMetricName(String metricName) throws MalformedObjectNameException {
            String groupName = TableMetrics.class.getPackage().getName();
            String type = isIndex ? "Index" + this.type : this.type;

            StringBuilder mbeanName = new StringBuilder();
            mbeanName.append(groupName).append(":");
            mbeanName.append("type=").append(type);
            mbeanName.append(",keyspace=").append(keyspaceName);
            mbeanName.append(",scope=").append(tableName);
            mbeanName.append(",name=").append(metricName);

            return new ObjectName(mbeanName.toString());
        }
    }

    /**
     * Generates the global (all-tables) {@link ObjectName}s:
     * {@code org.apache.cassandra.metrics:type=<Table|ColumnFamily>,name=...}.
     */
    static class AllTableMetricNameFactory implements MetricNameFactory {
        private final String type;

        public AllTableMetricNameFactory(String type) {
            this.type = type;
        }

        @Override
        public ObjectName createMetricName(String metricName) throws MalformedObjectNameException {
            String groupName = TableMetrics.class.getPackage().getName();
            StringBuilder mbeanName = new StringBuilder();
            mbeanName.append(groupName).append(":");
            mbeanName.append("type=" + type);
            mbeanName.append(",name=").append(metricName);
            return new ObjectName(mbeanName.toString());
        }
    }

    public enum Sampler {
        READS, WRITES
    }
}
Rework all org.apache.cassandra.metrics types to new style I.e. bind only JMX object via registry. --- .../com/scylladb/jmx/metrics/APIMetrics.java | 399 ------------ .../jmx/metrics/MetricNameFactory.java | 37 -- .../metrics/CASClientRequestMetrics.java | 34 +- .../cassandra/metrics/CacheMetrics.java | 113 +--- .../metrics/ClientRequestMetrics.java | 51 +- .../metrics/ColumnFamilyMetrics.java | 576 ------------------ .../cassandra/metrics/CommitLogMetrics.java | 83 +-- .../cassandra/metrics/CompactionMetrics.java | 58 +- .../metrics/DefaultNameFactory.java | 28 +- .../metrics/DroppedMessageMetrics.java | 37 +- .../metrics/EstimatedHistogramWrapper.java | 55 -- .../cassandra/metrics/LatencyMetrics.java | 130 +--- .../cassandra/metrics/StorageMetrics.java | 32 +- .../cassandra/metrics/StreamingMetrics.java | 123 ++-- 14 files changed, 210 insertions(+), 1546 deletions(-) delete mode 100644 src/main/java/com/scylladb/jmx/metrics/APIMetrics.java delete mode 100644 src/main/java/com/scylladb/jmx/metrics/MetricNameFactory.java delete mode 100644 src/main/java/org/apache/cassandra/metrics/ColumnFamilyMetrics.java rename src/main/java/{com/scylladb/jmx => org/apache/cassandra}/metrics/DefaultNameFactory.java (71%) delete mode 100644 src/main/java/org/apache/cassandra/metrics/EstimatedHistogramWrapper.java diff --git a/src/main/java/com/scylladb/jmx/metrics/APIMetrics.java b/src/main/java/com/scylladb/jmx/metrics/APIMetrics.java deleted file mode 100644 index b9e4fa3..0000000 --- a/src/main/java/com/scylladb/jmx/metrics/APIMetrics.java +++ /dev/null @@ -1,399 +0,0 @@ -package com.scylladb.jmx.metrics; - -/* - * Copyright 2015 Cloudius Systems - * - * Modified by Cloudius Systems - */ - -import java.util.concurrent.TimeUnit; - -import com.yammer.metrics.core.APIMetricsRegistry; -import com.yammer.metrics.core.Counter; -import com.yammer.metrics.core.Gauge; -import com.yammer.metrics.core.Histogram; -import com.yammer.metrics.core.Meter; -import 
com.yammer.metrics.core.MetricName; -import com.yammer.metrics.core.Timer; -import com.yammer.metrics.reporting.JmxReporter; -import com.yammer.metrics.core.APIMeter; - -public class APIMetrics { - private static final APIMetricsRegistry DEFAULT_REGISTRY = new APIMetricsRegistry(); - private static final Thread SHUTDOWN_HOOK = new Thread() { - public void run() { - JmxReporter.shutdownDefault(); - } - }; - - static { - JmxReporter.startDefault(DEFAULT_REGISTRY); - Runtime.getRuntime().addShutdownHook(SHUTDOWN_HOOK); - } - - private APIMetrics() { /* unused */ - } - - /** - * Given a new {@link com.yammer.metrics.core.Gauge}, registers it under the - * given class and name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param metric - * the metric - * @param - * the type of the value returned by the metric - * @return {@code metric} - */ - public static Gauge newGauge(Class klass, String name, - Gauge metric) { - return DEFAULT_REGISTRY.newGauge(klass, name, metric); - } - - /** - * Given a new {@link com.yammer.metrics.core.Gauge}, registers it under the - * given class and name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @param metric - * the metric - * @param - * the type of the value returned by the metric - * @return {@code metric} - */ - public static Gauge newGauge(Class klass, String name, - String scope, Gauge metric) { - return DEFAULT_REGISTRY.newGauge(klass, name, scope, metric); - } - - /** - * Given a new {@link com.yammer.metrics.core.Gauge}, registers it under the - * given metric name. 
- * - * @param metricName - * the name of the metric - * @param metric - * the metric - * @param - * the type of the value returned by the metric - * @return {@code metric} - */ - public static Gauge newGauge(MetricName metricName, Gauge metric) { - return DEFAULT_REGISTRY.newGauge(metricName, metric); - } - - /** - * Creates a new {@link com.yammer.metrics.core.Counter} and registers it - * under the given class and name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @return a new {@link com.yammer.metrics.core.Counter} - */ - public static Counter newCounter(String url, Class klass, String name) { - return DEFAULT_REGISTRY.newCounter(url, klass, name); - } - - /** - * Creates a new {@link com.yammer.metrics.core.Counter} and registers it - * under the given class and name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @return a new {@link com.yammer.metrics.core.Counter} - */ - public static Counter newCounter(String url, Class klass, String name, - String scope) { - return DEFAULT_REGISTRY.newCounter(url, klass, name, scope); - } - - /** - * Creates a new {@link com.yammer.metrics.core.Counter} and registers it - * under the given metric name. - * - * @param metricName - * the name of the metric - * @return a new {@link com.yammer.metrics.core.Counter} - */ - public static Counter newCounter(String url, MetricName metricName) { - return DEFAULT_REGISTRY.newCounter(url, metricName); - } - - /** - * Creates a new {@link com.yammer.metrics.core.Histogram} and registers it - * under the given class and name. 
- * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param biased - * whether or not the histogram should be biased - * @return a new {@link com.yammer.metrics.core.Histogram} - */ - public static Histogram newHistogram(String url, Class klass, - String name, boolean biased) { - return DEFAULT_REGISTRY.newHistogram(url, klass, name, biased); - } - - /** - * Creates a new {@link com.yammer.metrics.core.Histogram} and registers it - * under the given class, name, and scope. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @param biased - * whether or not the histogram should be biased - * @return a new {@link com.yammer.metrics.core.Histogram} - */ - public static Histogram newHistogram(String url, Class klass, - String name, String scope, boolean biased) { - return DEFAULT_REGISTRY.newHistogram(url, klass, name, scope, biased); - } - - /** - * Creates a new {@link com.yammer.metrics.core.Histogram} and registers it - * under the given metric name. - * - * @param metricName - * the name of the metric - * @param biased - * whether or not the histogram should be biased - * @return a new {@link com.yammer.metrics.core.Histogram} - */ - public static Histogram newHistogram(String url, MetricName metricName, - boolean biased) { - return DEFAULT_REGISTRY.newHistogram(url, metricName, biased); - } - - /** - * Creates a new non-biased {@link com.yammer.metrics.core.Histogram} and - * registers it under the given class and name. 
- * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @return a new {@link com.yammer.metrics.core.Histogram} - */ - public static Histogram newHistogram(String url, Class klass, String name) { - return DEFAULT_REGISTRY.newHistogram(url, klass, name); - } - - /** - * Creates a new non-biased {@link com.yammer.metrics.core.Histogram} and - * registers it under the given class, name, and scope. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @return a new {@link com.yammer.metrics.core.Histogram} - */ - public static Histogram newHistogram(String url, Class klass, - String name, String scope) { - return DEFAULT_REGISTRY.newHistogram(url, klass, name, scope); - } - - /** - * Creates a new non-biased {@link com.yammer.metrics.core.Histogram} and - * registers it under the given metric name. - * - * @param metricName - * the name of the metric - * @return a new {@link com.yammer.metrics.core.Histogram} - */ - public static Histogram newHistogram(String url, MetricName metricName) { - return newHistogram(url, metricName, false); - } - - /** - * Creates a new {@link com.yammer.metrics.core.Meter} and registers it - * under the given class and name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param eventType - * the plural name of the type of events the meter is measuring - * (e.g., {@code "requests"}) - * @param unit - * the rate unit of the new meter - * @return a new {@link com.yammer.metrics.core.Meter} - */ - public static APIMeter newMeter(String url, Class klass, String name, - String eventType, TimeUnit unit) { - return DEFAULT_REGISTRY.newMeter(url, klass, name, eventType, unit); - } - - /** - * Creates a new {@link com.yammer.metrics.core.Meter} and registers it - * under the given class, name, and scope. 
- * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @param eventType - * the plural name of the type of events the meter is measuring - * (e.g., {@code "requests"}) - * @param unit - * the rate unit of the new meter - * @return a new {@link com.yammer.metrics.core.Meter} - */ - public static APIMeter newMeter(String url, Class klass, String name, - String scope, String eventType, TimeUnit unit) { - return DEFAULT_REGISTRY.newMeter(url, klass, name, scope, eventType, - unit); - } - - /** - * Creates a new {@link com.yammer.metrics.core.Meter} and registers it - * under the given metric name. - * - * @param metricName - * the name of the metric - * @param eventType - * the plural name of the type of events the meter is measuring - * (e.g., {@code "requests"}) - * @param unit - * the rate unit of the new meter - * @return a new {@link com.yammer.metrics.core.Meter} - */ - public static APIMeter newMeter(String url, MetricName metricName, - String eventType, TimeUnit unit) { - return DEFAULT_REGISTRY.newMeter(url, metricName, eventType, unit); - } - - /** - * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it - * under the given class and name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param durationUnit - * the duration scale unit of the new timer - * @param rateUnit - * the rate scale unit of the new timer - * @return a new {@link com.yammer.metrics.core.APITimer} - */ - public static Timer newTimer(String url, Class klass, String name, - TimeUnit durationUnit, TimeUnit rateUnit) { - return DEFAULT_REGISTRY.newTimer(url, klass, name, durationUnit, rateUnit); - } - - /** - * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it - * under the given class and name, measuring elapsed time in milliseconds - * and invocations per second. 
- * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @return a new {@link com.yammer.metrics.core.APITimer} - */ - public static Timer newTimer(String url, Class klass, String name) { - return DEFAULT_REGISTRY.newTimer(url, klass, name); - } - - /** - * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it - * under the given class, name, and scope. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @param durationUnit - * the duration scale unit of the new timer - * @param rateUnit - * the rate scale unit of the new timer - * @return a new {@link com.yammer.metrics.core.APITimer} - */ - public static Timer newTimer(String url, Class klass, String name, String scope, - TimeUnit durationUnit, TimeUnit rateUnit) { - return DEFAULT_REGISTRY.newTimer(url, klass, name, scope, durationUnit, - rateUnit); - } - - /** - * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it - * under the given class, name, and scope, measuring elapsed time in - * milliseconds and invocations per second. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @return a new {@link com.yammer.metrics.core.APITimer} - */ - public static Timer newTimer(String url, Class klass, String name, String scope) { - return DEFAULT_REGISTRY.newTimer(url, klass, name, scope); - } - - /** - * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it - * under the given metric name. 
- * - * @param metricName - * the name of the metric - * @param durationUnit - * the duration scale unit of the new timer - * @param rateUnit - * the rate scale unit of the new timer - * @return a new {@link com.yammer.metrics.core.APITimer} - */ - public static Timer newTimer(String url, MetricName metricName, TimeUnit durationUnit, - TimeUnit rateUnit) { - return DEFAULT_REGISTRY.newTimer(url, metricName, durationUnit, rateUnit); - } - - /** - * Returns the (static) default registry. - * - * @return the metrics registry - */ - public static APIMetricsRegistry defaultRegistry() { - return DEFAULT_REGISTRY; - } - - /** - * Shuts down all thread pools for the default registry. - */ - public static void shutdown() { - DEFAULT_REGISTRY.shutdown(); - JmxReporter.shutdownDefault(); - Runtime.getRuntime().removeShutdownHook(SHUTDOWN_HOOK); - } - -} diff --git a/src/main/java/com/scylladb/jmx/metrics/MetricNameFactory.java b/src/main/java/com/scylladb/jmx/metrics/MetricNameFactory.java deleted file mode 100644 index 9114711..0000000 --- a/src/main/java/com/scylladb/jmx/metrics/MetricNameFactory.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -/* - * Copyright 2015 Cloudius Systems - * - * Modified by Cloudius Systems - */ - -package com.scylladb.jmx.metrics; - -import com.yammer.metrics.core.MetricName; - -public interface MetricNameFactory -{ - /** - * Create a qualified name from given metric name. - * - * @param metricName part of qualified name. - * @return new String with given metric name. - */ - MetricName createMetricName(String metricName); -} diff --git a/src/main/java/org/apache/cassandra/metrics/CASClientRequestMetrics.java b/src/main/java/org/apache/cassandra/metrics/CASClientRequestMetrics.java index c1c54ce..7688a99 100644 --- a/src/main/java/org/apache/cassandra/metrics/CASClientRequestMetrics.java +++ b/src/main/java/org/apache/cassandra/metrics/CASClientRequestMetrics.java @@ -25,34 +25,20 @@ package org.apache.cassandra.metrics; -import com.scylladb.jmx.metrics.APIMetrics; -import com.yammer.metrics.core.*; +import javax.management.MalformedObjectNameException; +// TODO: In StorageProxy public class CASClientRequestMetrics extends ClientRequestMetrics { - public final Histogram contention; - /* Used only for write */ - public final Counter conditionNotMet; - - public final Counter unfinishedCommit; - - public CASClientRequestMetrics(String url, String scope) { - super(url, scope); - contention = APIMetrics.newHistogram(url + "contention", - factory.createMetricName("ContentionHistogram"), true); - conditionNotMet = APIMetrics.newCounter(url + "condition_not_met", - factory.createMetricName("ConditionNotMet")); - unfinishedCommit = APIMetrics.newCounter(url + "unfinished_commit", - factory.createMetricName("UnfinishedCommit")); + public CASClientRequestMetrics(String scope, String url) { + super(scope, url); } - public void release() { - super.release(); - APIMetrics.defaultRegistry().removeMetric( - factory.createMetricName("ContentionHistogram")); - APIMetrics.defaultRegistry().removeMetric( - factory.createMetricName("ConditionNotMet")); - 
APIMetrics.defaultRegistry().removeMetric( - factory.createMetricName("UnfinishedCommit")); + @Override + public void register(MetricsRegistry registry) throws MalformedObjectNameException { + super.register(registry); + registry.register(() -> registry.histogram(uri + "/contention", true), names("ContentionHistogram")); + registry.register(() -> registry.counter(uri + "/condition_not_met"), names("ConditionNotMet")); + registry.register(() -> registry.counter(uri + "/unfinished_commit"), names("UnfinishedCommit")); } } diff --git a/src/main/java/org/apache/cassandra/metrics/CacheMetrics.java b/src/main/java/org/apache/cassandra/metrics/CacheMetrics.java index 6ff459c..d47d28f 100644 --- a/src/main/java/org/apache/cassandra/metrics/CacheMetrics.java +++ b/src/main/java/org/apache/cassandra/metrics/CacheMetrics.java @@ -23,44 +23,20 @@ */ package org.apache.cassandra.metrics; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import com.scylladb.jmx.api.APIClient; -import com.scylladb.jmx.metrics.APIMetrics; -import com.scylladb.jmx.metrics.DefaultNameFactory; -import com.scylladb.jmx.metrics.MetricNameFactory; -import com.yammer.metrics.core.Gauge; -import com.yammer.metrics.core.APIMeter; +import javax.management.MalformedObjectNameException; /** * Metrics for {@code ICache}. 
*/ -public class CacheMetrics { - /** Cache capacity in bytes */ - public final Gauge capacity; - /** Total number of cache hits */ - public final APIMeter hits; - /** Total number of cache requests */ - public final APIMeter requests; - /** cache hit rate */ - public final Gauge hitRate; - /** Total size of cache, in bytes */ - public final Gauge size; - /** Total number of cache entries */ - public final Gauge entries; +public class CacheMetrics implements Metrics { - private final AtomicLong lastRequests = new AtomicLong(0); - private final AtomicLong lastHits = new AtomicLong(0); + private final String type; + private final String url; - private APIClient c = new APIClient(); - - private String getURL(String url, String value) { - if (url == null || value == null) { - return null; - } - return "/cache_service/metrics/" + url + value; + private String compose(String value) { + return "/cache_service/metrics/" + url + "/" + value; } + /** * Create metrics for given cache. * @@ -70,70 +46,21 @@ public class CacheMetrics { * Cache to measure metrics */ public CacheMetrics(String type, final String url) { + this.type = type; + this.url = url; + } + + @Override + public void register(MetricsRegistry registry) throws MalformedObjectNameException { MetricNameFactory factory = new DefaultNameFactory("Cache", type); + registry.register(() -> registry.gauge(compose("capacity")), factory.createMetricName("Capacity")); + registry.register(() -> registry.meter(compose("hits_moving_avrage")), factory.createMetricName("Hits")); + registry.register(() -> registry.meter(compose("requests_moving_avrage")), + factory.createMetricName("Requests")); - capacity = APIMetrics.newGauge(factory.createMetricName("Capacity"), - new Gauge() { - String u = getURL(url, "/capacity"); - public Long value() { - if (u == null) { - return 0L; - } - return c.getLongValue(u); - } - }); - hits = APIMetrics.newMeter(getURL(url, "/hits_moving_avrage"), factory.createMetricName("Hits"), "hits", - 
TimeUnit.SECONDS); - requests = APIMetrics.newMeter(getURL(url, "/requests_moving_avrage"), factory.createMetricName("Requests"), - "requests", TimeUnit.SECONDS); - hitRate = APIMetrics.newGauge(factory.createMetricName("HitRate"), - new Gauge() { - String u = getURL(url, "/hit_rate"); - @Override - public Double value() { - if (u == null) { - return 0.0; - } - return c.getDoubleValue(u); - } - }); - size = APIMetrics.newGauge(factory.createMetricName("Size"), - new Gauge() { - String u = getURL(url, "/size"); - public Long value() { - if (u == null) { - return 0L; - } - return c.getLongValue(u); - } - }); - entries = APIMetrics.newGauge(factory.createMetricName("Entries"), - new Gauge() { - String u = getURL(url, "/entries"); - public Integer value() { - if (u == null) { - return 0; - } - return c.getIntValue(u); - } - }); + registry.register(() -> registry.gauge(Double.class, compose("hit_rate")), factory.createMetricName("HitRate")); + registry.register(() -> registry.gauge(compose("size")), factory.createMetricName("Size")); + registry.register(() -> registry.gauge(Integer.class, compose("entries")), factory.createMetricName("Entries")); } - - // for backward compatibility - @Deprecated - public double getRecentHitRate() { - long r = requests.count(); - long h = hits.count(); - try - { - return ((double)(h - lastHits.get())) / (r - lastRequests.get()); - } - finally - { - lastRequests.set(r); - lastHits.set(h); - } - } - } diff --git a/src/main/java/org/apache/cassandra/metrics/ClientRequestMetrics.java b/src/main/java/org/apache/cassandra/metrics/ClientRequestMetrics.java index 6fb8718..9644e3f 100644 --- a/src/main/java/org/apache/cassandra/metrics/ClientRequestMetrics.java +++ b/src/main/java/org/apache/cassandra/metrics/ClientRequestMetrics.java @@ -27,52 +27,17 @@ package org.apache.cassandra.metrics; -import java.util.concurrent.TimeUnit; - -import com.scylladb.jmx.metrics.APIMetrics; -import com.scylladb.jmx.metrics.DefaultNameFactory; -import 
com.yammer.metrics.Metrics; -import com.yammer.metrics.core.Counter; -import com.yammer.metrics.core.Meter; +import javax.management.MalformedObjectNameException; public class ClientRequestMetrics extends LatencyMetrics { - @Deprecated - public static final Counter readTimeouts = Metrics - .newCounter(DefaultNameFactory.createMetricName( - "ClientRequestMetrics", "ReadTimeouts", null)); - @Deprecated - public static final Counter writeTimeouts = Metrics - .newCounter(DefaultNameFactory.createMetricName( - "ClientRequestMetrics", "WriteTimeouts", null)); - @Deprecated - public static final Counter readUnavailables = Metrics - .newCounter(DefaultNameFactory.createMetricName( - "ClientRequestMetrics", "ReadUnavailables", null)); - @Deprecated - public static final Counter writeUnavailables = Metrics - .newCounter(DefaultNameFactory.createMetricName( - "ClientRequestMetrics", "WriteUnavailables", null)); - - public final Meter timeouts; - public final Meter unavailables; - - public ClientRequestMetrics(String url, String scope) { - super(url, "ClientRequest", scope); - - timeouts = APIMetrics.newMeter(url + "/timeouts_rates", - factory.createMetricName("Timeouts"), "timeouts", - TimeUnit.SECONDS); - unavailables = APIMetrics.newMeter(url + "/unavailables_rates", - factory.createMetricName("Unavailables"), "unavailables", - TimeUnit.SECONDS); - + public ClientRequestMetrics(String scope, String url) { + super("ClientRequest", scope, url); } - public void release() { - super.release(); - APIMetrics.defaultRegistry().removeMetric( - factory.createMetricName("Timeouts")); - APIMetrics.defaultRegistry().removeMetric( - factory.createMetricName("Unavailables")); + @Override + public void register(MetricsRegistry registry) throws MalformedObjectNameException { + super.register(registry); + registry.register(() -> registry.meter(uri + "/timeouts_rates"), names("Timeouts")); + registry.register(() -> registry.meter(uri + "/unavailables_rates"), names("Unavailables")); } } diff 
--git a/src/main/java/org/apache/cassandra/metrics/ColumnFamilyMetrics.java b/src/main/java/org/apache/cassandra/metrics/ColumnFamilyMetrics.java deleted file mode 100644 index ffd86d8..0000000 --- a/src/main/java/org/apache/cassandra/metrics/ColumnFamilyMetrics.java +++ /dev/null @@ -1,576 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* - * Copyright 2015 Cloudius Systems - * - * Modified by Cloudius Systems - */ -package org.apache.cassandra.metrics; - -import java.util.HashSet; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; - -import org.apache.cassandra.db.ColumnFamilyStore; - -import com.google.common.collect.Maps; -import com.google.common.collect.Sets; -import com.scylladb.jmx.api.APIClient; -import com.scylladb.jmx.metrics.APIMetrics; -import com.scylladb.jmx.metrics.MetricNameFactory; -import com.scylladb.jmx.utils.RecentEstimatedHistogram; -import com.yammer.metrics.Metrics; -import com.yammer.metrics.core.*; - -/** - * Metrics for {@link ColumnFamilyStore}. 
- */ -public class ColumnFamilyMetrics { - private APIClient c = new APIClient(); - /** - * Total amount of data stored in the memtable that resides on-heap, - * including column related overhead and overwritten rows. - */ - public final Gauge memtableOnHeapSize; - /** - * Total amount of data stored in the memtable that resides off-heap, - * including column related overhead and overwritten rows. - */ - public final Gauge memtableOffHeapSize; - /** - * Total amount of live data stored in the memtable, excluding any data - * structure overhead - */ - public final Gauge memtableLiveDataSize; - /** - * Total amount of data stored in the memtables (2i and pending flush - * memtables included) that resides on-heap. - */ - public final Gauge allMemtablesOnHeapSize; - /** - * Total amount of data stored in the memtables (2i and pending flush - * memtables included) that resides off-heap. - */ - public final Gauge allMemtablesOffHeapSize; - /** - * Total amount of live data stored in the memtables (2i and pending flush - * memtables included) that resides off-heap, excluding any data structure - * overhead - */ - public final Gauge allMemtablesLiveDataSize; - /** Total number of columns present in the memtable. */ - public final Gauge memtableColumnsCount; - /** Number of times flush has resulted in the memtable being switched out. */ - public final Counter memtableSwitchCount; - /** Current compression ratio for all SSTables */ - public final Gauge compressionRatio; - /** Histogram of estimated row size (in bytes). */ - public final Gauge estimatedRowSizeHistogram; - /** Approximate number of keys in table. */ - public final Gauge estimatedRowCount; - /** Histogram of estimated number of columns. 
*/ - public final Gauge estimatedColumnCountHistogram; - /** Histogram of the number of sstable data files accessed per read */ - public final ColumnFamilyHistogram sstablesPerReadHistogram; - /** (Local) read metrics */ - public final LatencyMetrics readLatency; - /** (Local) range slice metrics */ - public final LatencyMetrics rangeLatency; - /** (Local) write metrics */ - public final LatencyMetrics writeLatency; - /** Estimated number of tasks pending for this column family */ - public final Counter pendingFlushes; - /** Estimate of number of pending compactios for this CF */ - public final Gauge pendingCompactions; - /** Number of SSTables on disk for this CF */ - public final Gauge liveSSTableCount; - /** Disk space used by SSTables belonging to this CF */ - public final Counter liveDiskSpaceUsed; - /** - * Total disk space used by SSTables belonging to this CF, including - * obsolete ones waiting to be GC'd - */ - public final Counter totalDiskSpaceUsed; - /** Size of the smallest compacted row */ - public final Gauge minRowSize; - /** Size of the largest compacted row */ - public final Gauge maxRowSize; - /** Size of the smallest compacted row */ - public final Gauge meanRowSize; - /** Number of false positives in bloom filter */ - public final Gauge bloomFilterFalsePositives; - /** Number of false positives in bloom filter from last read */ - public final Gauge recentBloomFilterFalsePositives; - /** False positive ratio of bloom filter */ - public final Gauge bloomFilterFalseRatio; - /** False positive ratio of bloom filter from last read */ - public final Gauge recentBloomFilterFalseRatio; - /** Disk space used by bloom filter */ - public final Gauge bloomFilterDiskSpaceUsed; - /** Off heap memory used by bloom filter */ - public final Gauge bloomFilterOffHeapMemoryUsed; - /** Off heap memory used by index summary */ - public final Gauge indexSummaryOffHeapMemoryUsed; - /** Off heap memory used by compression meta data */ - public final Gauge 
compressionMetadataOffHeapMemoryUsed; - /** Key cache hit rate for this CF */ - public final Gauge keyCacheHitRate; - /** Tombstones scanned in queries on this CF */ - public final ColumnFamilyHistogram tombstoneScannedHistogram; - /** Live cells scanned in queries on this CF */ - public final ColumnFamilyHistogram liveScannedHistogram; - /** Column update time delta on this CF */ - public final ColumnFamilyHistogram colUpdateTimeDeltaHistogram; - /** Disk space used by snapshot files which */ - public final Gauge trueSnapshotsSize; - /** Row cache hits, but result out of range */ - public final Counter rowCacheHitOutOfRange; - /** Number of row cache hits */ - public final Counter rowCacheHit; - /** Number of row cache misses */ - public final Counter rowCacheMiss; - /** CAS Prepare metrics */ - public final LatencyMetrics casPrepare; - /** CAS Propose metrics */ - public final LatencyMetrics casPropose; - /** CAS Commit metrics */ - public final LatencyMetrics casCommit; - - public final Timer coordinatorReadLatency; - public final Timer coordinatorScanLatency; - - /** Time spent waiting for free memtable space, either on- or off-heap */ - public final Timer waitingOnFreeMemtableSpace; - - private final MetricNameFactory factory; - private static final MetricNameFactory globalNameFactory = new AllColumnFamilyMetricNameFactory(); - - public final Counter speculativeRetries; - - // for backward compatibility - @Deprecated - public final EstimatedHistogramWrapper sstablesPerRead; - // it should not be called directly - @Deprecated - protected final RecentEstimatedHistogram recentSSTablesPerRead = new RecentEstimatedHistogram(35); - private String cfName; - - public final static LatencyMetrics globalReadLatency = new LatencyMetrics( - "/column_family/metrics/read_latency", globalNameFactory, "Read"); - public final static LatencyMetrics globalWriteLatency = new LatencyMetrics( - "/column_family/metrics/write_latency", globalNameFactory, "Write"); - public final 
static LatencyMetrics globalRangeLatency = new LatencyMetrics( - "/column_family/metrics/range_latency", globalNameFactory, "Range"); - - /** - * stores metrics that will be rolled into a single global metric - */ - public final static ConcurrentMap> allColumnFamilyMetrics = Maps - .newConcurrentMap(); - - /** - * Stores all metric names created that can be used when unregistering - */ - public final static Set all = Sets.newHashSet(); - - /** - * Creates metrics for given {@link ColumnFamilyStore}. - * - * @param cfs - * ColumnFamilyStore to measure metrics - */ - public ColumnFamilyMetrics(final ColumnFamilyStore cfs) { - factory = new ColumnFamilyMetricNameFactory(cfs); - cfName = cfs.getCFName(); - memtableColumnsCount = createColumnFamilyGauge( - "/column_family/metrics/memtable_columns_count", - "MemtableColumnsCount"); - memtableOnHeapSize = createColumnFamilyGauge( - "/column_family/metrics/memtable_on_heap_size", - "MemtableOnHeapSize"); - memtableOffHeapSize = createColumnFamilyGauge( - "/column_family/metrics/memtable_off_heap_size", - "MemtableOffHeapSize"); - memtableLiveDataSize = createColumnFamilyGauge( - "/column_family/metrics/memtable_live_data_size", - "MemtableLiveDataSize"); - allMemtablesOnHeapSize = createColumnFamilyGauge( - "/column_family/metrics/all_memtables_on_heap_size", - "AllMemtablesHeapSize"); - allMemtablesOffHeapSize = createColumnFamilyGauge( - "/column_family/metrics/all_memtables_off_heap_size", - "AllMemtablesOffHeapSize"); - allMemtablesLiveDataSize = createColumnFamilyGauge( - "/column_family/metrics/all_memtables_live_data_size", - "AllMemtablesLiveDataSize"); - memtableSwitchCount = createColumnFamilyCounter( - "/column_family/metrics/memtable_switch_count", - "MemtableSwitchCount"); - estimatedRowSizeHistogram = Metrics.newGauge( - factory.createMetricName("EstimatedRowSizeHistogram"), - new Gauge() { - public long[] value() { - return 
c.getEstimatedHistogramAsLongArrValue("/column_family/metrics/estimated_row_size_histogram/" - + cfName); - } - }); - estimatedRowCount= Metrics.newGauge( - factory.createMetricName("EstimatedRowCount"), - new Gauge() { - public Long value() { - return c.getLongValue("/column_family/metrics/estimated_row_count/" - + cfName); - } - }); - - estimatedColumnCountHistogram = Metrics.newGauge( - factory.createMetricName("EstimatedColumnCountHistogram"), - new Gauge() { - public long[] value() { - return c.getEstimatedHistogramAsLongArrValue("/column_family/metrics/estimated_column_count_histogram/" - + cfName); - } - }); - sstablesPerReadHistogram = createColumnFamilyHistogram( - "/column_family/metrics/sstables_per_read_histogram", - "SSTablesPerReadHistogram"); - compressionRatio = createColumnFamilyGauge("CompressionRatio", - new Gauge() { - public Double value() { - return c.getDoubleValue("/column_family/metrics/compression_ratio/" - + cfName); - } - }, new Gauge() // global gauge - { - public Double value() { - return c.getDoubleValue("/column_family/metrics/compression_ratio/"); - } - }); - readLatency = new LatencyMetrics("/column_family/metrics/read_latency", - cfName, factory, "Read"); - writeLatency = new LatencyMetrics( - "/column_family/metrics/write_latency", cfName, factory, - "Write"); - rangeLatency = new LatencyMetrics( - "/column_family/metrics/range_latency", cfName, factory, - "Range"); - pendingFlushes = createColumnFamilyCounter( - "/column_family/metrics/pending_flushes", "PendingFlushes"); - pendingCompactions = createColumnFamilyGaugeInt( - "/column_family/metrics/pending_compactions", - "PendingCompactions"); - liveSSTableCount = createColumnFamilyGaugeInt( - "/column_family/metrics/live_ss_table_count", - "LiveSSTableCount"); - liveDiskSpaceUsed = createColumnFamilyCounter( - "/column_family/metrics/live_disk_space_used", - "LiveDiskSpaceUsed"); - totalDiskSpaceUsed = createColumnFamilyCounter( - "/column_family/metrics/total_disk_space_used", 
- "TotalDiskSpaceUsed"); - minRowSize = createColumnFamilyGauge( - "/column_family/metrics/min_row_size", "MinRowSize"); - maxRowSize = createColumnFamilyGauge( - "/column_family/metrics/max_row_size", "MaxRowSize"); - meanRowSize = createColumnFamilyGauge( - "/column_family/metrics/mean_row_size", "MeanRowSize"); - bloomFilterFalsePositives = createColumnFamilyGauge( - "/column_family/metrics/bloom_filter_false_positives", - "BloomFilterFalsePositives"); - recentBloomFilterFalsePositives = createColumnFamilyGauge( - "/column_family/metrics/recent_bloom_filter_false_positives", - "RecentBloomFilterFalsePositives"); - bloomFilterFalseRatio = createColumnFamilyGaugeDouble( - "/column_family/metrics/bloom_filter_false_ratio", - "BloomFilterFalseRatio"); - recentBloomFilterFalseRatio = createColumnFamilyGaugeDouble( - "/column_family/metrics/recent_bloom_filter_false_ratio", - "RecentBloomFilterFalseRatio"); - bloomFilterDiskSpaceUsed = createColumnFamilyGauge( - "/column_family/metrics/bloom_filter_disk_space_used", - "BloomFilterDiskSpaceUsed"); - bloomFilterOffHeapMemoryUsed = createColumnFamilyGauge( - "/column_family/metrics/bloom_filter_off_heap_memory_used", - "BloomFilterOffHeapMemoryUsed"); - indexSummaryOffHeapMemoryUsed = createColumnFamilyGauge( - "/column_family/metrics/index_summary_off_heap_memory_used", - "IndexSummaryOffHeapMemoryUsed"); - compressionMetadataOffHeapMemoryUsed = createColumnFamilyGauge( - "/column_family/metrics/compression_metadata_off_heap_memory_used", - "CompressionMetadataOffHeapMemoryUsed"); - speculativeRetries = createColumnFamilyCounter( - "/column_family/metrics/speculative_retries", - "SpeculativeRetries"); - keyCacheHitRate = Metrics.newGauge( - factory.createMetricName("KeyCacheHitRate"), - new Gauge() { - @Override - public Double value() { - return c.getDoubleValue("/column_family/metrics/key_cache_hit_rate/" - + cfName); - } - }); - tombstoneScannedHistogram = createColumnFamilyHistogram( - 
"/column_family/metrics/tombstone_scanned_histogram", - "TombstoneScannedHistogram"); - liveScannedHistogram = createColumnFamilyHistogram( - "/column_family/metrics/live_scanned_histogram", - "LiveScannedHistogram"); - colUpdateTimeDeltaHistogram = createColumnFamilyHistogram( - "/column_family/metrics/col_update_time_delta_histogram", - "ColUpdateTimeDeltaHistogram"); - coordinatorReadLatency = APIMetrics.newTimer("/column_family/metrics/coordinator/read/" + cfName, - factory.createMetricName("CoordinatorReadLatency"), - TimeUnit.MICROSECONDS, TimeUnit.SECONDS); - coordinatorScanLatency = APIMetrics.newTimer("/column_family/metrics/coordinator/scan/" + cfName, - factory.createMetricName("CoordinatorScanLatency"), - TimeUnit.MICROSECONDS, TimeUnit.SECONDS); - waitingOnFreeMemtableSpace = APIMetrics.newTimer("/column_family/metrics/waiting_on_free_memtable/" + cfName, - factory.createMetricName("WaitingOnFreeMemtableSpace"), - TimeUnit.MICROSECONDS, TimeUnit.SECONDS); - - trueSnapshotsSize = createColumnFamilyGauge( - "/column_family/metrics/snapshots_size", "SnapshotsSize"); - rowCacheHitOutOfRange = createColumnFamilyCounter( - "/column_family/metrics/row_cache_hit_out_of_range", - "RowCacheHitOutOfRange"); - rowCacheHit = createColumnFamilyCounter( - "/column_family/metrics/row_cache_hit", "RowCacheHit"); - rowCacheMiss = createColumnFamilyCounter( - "/column_family/metrics/row_cache_miss", "RowCacheMiss"); - - casPrepare = new LatencyMetrics("/column_family/metrics/cas_prepare/" - + cfName, factory, "CasPrepare"); - casPropose = new LatencyMetrics("/column_family/metrics/cas_propose/" - + cfName, factory, "CasPropose"); - casCommit = new LatencyMetrics("/column_family/metrics/cas_commit/" - + cfName, factory, "CasCommit"); - sstablesPerRead = new EstimatedHistogramWrapper("/column_family/metrics/sstables_per_read_histogram/" + cfName); - } - - /** - * Release all associated metrics. 
- */ - public void release() { - for (String name : all) { - allColumnFamilyMetrics.get(name).remove( - Metrics.defaultRegistry().allMetrics() - .get(factory.createMetricName(name))); - Metrics.defaultRegistry().removeMetric( - factory.createMetricName(name)); - } - readLatency.release(); - writeLatency.release(); - rangeLatency.release(); - Metrics.defaultRegistry().removeMetric( - factory.createMetricName("EstimatedRowSizeHistogram")); - Metrics.defaultRegistry().removeMetric( - factory.createMetricName("EstimatedColumnCountHistogram")); - Metrics.defaultRegistry().removeMetric( - factory.createMetricName("KeyCacheHitRate")); - Metrics.defaultRegistry().removeMetric( - factory.createMetricName("CoordinatorReadLatency")); - Metrics.defaultRegistry().removeMetric( - factory.createMetricName("CoordinatorScanLatency")); - Metrics.defaultRegistry().removeMetric( - factory.createMetricName("WaitingOnFreeMemtableSpace")); - } - - /** - * Create a gauge that will be part of a merged version of all column - * families. The global gauge will merge each CF gauge by adding their - * values - */ - protected Gauge createColumnFamilyGaugeDouble(final String url, - final String name) { - Gauge gauge = new Gauge() { - public Double value() { - return c.getDoubleValue(url + "/" + cfName); - } - }; - return createColumnFamilyGauge(url, name, gauge); - } - - /** - * Create a gauge that will be part of a merged version of all column - * families. The global gauge will merge each CF gauge by adding their - * values - */ - protected Gauge createColumnFamilyGauge(final String url, final String name) { - Gauge gauge = new Gauge() { - public Long value() { - return (long)c.getDoubleValue(url + "/" + cfName); - } - }; - return createColumnFamilyGauge(url, name, gauge); - } - - /** - * Create a gauge that will be part of a merged version of all column - * families. 
The global gauge will merge each CF gauge by adding their - * values - */ - protected Gauge createColumnFamilyGaugeInt(final String url, - final String name) { - Gauge gauge = new Gauge() { - public Integer value() { - return (int)c.getDoubleValue(url + "/" + cfName); - } - }; - return createColumnFamilyGauge(url, name, gauge); - } - - /** - * Create a gauge that will be part of a merged version of all column - * families. The global gauge will merge each CF gauge by adding their - * values - */ - protected Gauge createColumnFamilyGauge(final String url, - final String name, Gauge gauge) { - return createColumnFamilyGauge(name, gauge, new Gauge() { - public Long value() { - // This is an optimiztion, call once for all column families - // instead - // of iterating over all of them - return c.getLongValue(url); - } - }); - } - - /** - * Create a gauge that will be part of a merged version of all column - * families. The global gauge is defined as the globalGauge parameter - */ - protected Gauge createColumnFamilyGauge(String name, - Gauge gauge, Gauge globalGauge) { - Gauge cfGauge = APIMetrics.newGauge(factory.createMetricName(name), - gauge); - if (register(name, cfGauge)) { - Metrics.newGauge(globalNameFactory.createMetricName(name), - globalGauge); - } - return cfGauge; - } - - /** - * Creates a counter that will also have a global counter thats the sum of - * all counters across different column families - */ - protected Counter createColumnFamilyCounter(final String url, final String name) { - Counter cfCounter = APIMetrics.newCounter(url + "/" + cfName, - factory.createMetricName(name)); - if (register(name, cfCounter)) { - Metrics.newGauge(globalNameFactory.createMetricName(name), - new Gauge() { - public Long value() { - // This is an optimiztion, call once for all column - // families instead - // of iterating over all of them - return c.getLongValue(url); - } - }); - } - return cfCounter; - } - - /** - * Create a histogram-like interface that will 
register both a CF, keyspace - * and global level histogram and forward any updates to both - */ - protected ColumnFamilyHistogram createColumnFamilyHistogram(String url, - String name) { - Histogram cfHistogram = APIMetrics.newHistogram(url + "/" + cfName, - factory.createMetricName(name), true); - register(name, cfHistogram); - - // TBD add keyspace and global histograms - // keyspaceHistogram, - // Metrics.newHistogram(globalNameFactory.createMetricName(name), - // true)); - return new ColumnFamilyHistogram(cfHistogram, null, null); - } - - /** - * Registers a metric to be removed when unloading CF. - * - * @return true if first time metric with that name has been registered - */ - private boolean register(String name, Metric metric) { - boolean ret = allColumnFamilyMetrics.putIfAbsent(name, - new HashSet()) == null; - allColumnFamilyMetrics.get(name).add(metric); - all.add(name); - return ret; - } - - public long[] getRecentSSTablesPerRead() { - return recentSSTablesPerRead - .getBuckets(sstablesPerRead.getBuckets(false)); - } - - public class ColumnFamilyHistogram { - public final Histogram[] all; - public final Histogram cf; - - private ColumnFamilyHistogram(Histogram cf, Histogram keyspace, - Histogram global) { - this.cf = cf; - this.all = new Histogram[] { cf, keyspace, global }; - } - } - - class ColumnFamilyMetricNameFactory implements MetricNameFactory { - private final String keyspaceName; - private final String columnFamilyName; - private final boolean isIndex; - - ColumnFamilyMetricNameFactory(ColumnFamilyStore cfs) { - this.keyspaceName = cfs.getKeyspace(); - this.columnFamilyName = cfs.getColumnFamilyName(); - isIndex = cfs.isIndex(); - } - - public MetricName createMetricName(String metricName) { - String groupName = ColumnFamilyMetrics.class.getPackage().getName(); - String type = isIndex ? 
"IndexColumnFamily" : "ColumnFamily"; - - StringBuilder mbeanName = new StringBuilder(); - mbeanName.append(groupName).append(":"); - mbeanName.append("type=").append(type); - mbeanName.append(",keyspace=").append(keyspaceName); - mbeanName.append(",scope=").append(columnFamilyName); - mbeanName.append(",name=").append(metricName); - return new MetricName(groupName, type, metricName, keyspaceName - + "." + columnFamilyName, mbeanName.toString()); - } - } - - static class AllColumnFamilyMetricNameFactory implements MetricNameFactory { - public MetricName createMetricName(String metricName) { - String groupName = ColumnFamilyMetrics.class.getPackage().getName(); - StringBuilder mbeanName = new StringBuilder(); - mbeanName.append(groupName).append(":"); - mbeanName.append("type=ColumnFamily"); - mbeanName.append(",name=").append(metricName); - return new MetricName(groupName, "ColumnFamily", metricName, "all", - mbeanName.toString()); - } - } -} diff --git a/src/main/java/org/apache/cassandra/metrics/CommitLogMetrics.java b/src/main/java/org/apache/cassandra/metrics/CommitLogMetrics.java index e3a1aa9..9a7be67 100644 --- a/src/main/java/org/apache/cassandra/metrics/CommitLogMetrics.java +++ b/src/main/java/org/apache/cassandra/metrics/CommitLogMetrics.java @@ -23,65 +23,38 @@ */ package org.apache.cassandra.metrics; -import com.scylladb.jmx.api.APIClient; -import com.scylladb.jmx.metrics.APIMetrics; -import com.scylladb.jmx.metrics.DefaultNameFactory; -import com.scylladb.jmx.metrics.MetricNameFactory; -import com.yammer.metrics.core.Gauge; -import com.yammer.metrics.core.Timer; - -import java.util.concurrent.TimeUnit; +import javax.management.MalformedObjectNameException; /** * Metrics for commit log */ -public class CommitLogMetrics { - public static final MetricNameFactory factory = new DefaultNameFactory( - "CommitLog"); - private APIClient c = new APIClient(); - - /** Number of completed tasks */ - public final Gauge completedTasks; - /** Number of pending tasks 
*/ - public final Gauge pendingTasks; - /** Current size used by all the commit log segments */ - public final Gauge totalCommitLogSize; - /** - * Time spent waiting for a CLS to be allocated - under normal conditions - * this should be zero - */ - public final Timer waitingOnSegmentAllocation; - /** - * The time spent waiting on CL sync; for Periodic this is only occurs when - * the sync is lagging its sync interval - */ - public final Timer waitingOnCommit; - +public class CommitLogMetrics implements Metrics { public CommitLogMetrics() { - completedTasks = APIMetrics.newGauge( - factory.createMetricName("CompletedTasks"), new Gauge() { - public Long value() { - return c.getLongValue("/commitlog/metrics/completed_tasks"); - } - }); - pendingTasks = APIMetrics.newGauge( - factory.createMetricName("PendingTasks"), new Gauge() { - public Long value() { - return c.getLongValue("/commitlog/metrics/pending_tasks"); - } - }); - totalCommitLogSize = APIMetrics.newGauge( - factory.createMetricName("TotalCommitLogSize"), - new Gauge() { - public Long value() { - return c.getLongValue("/commitlog/metrics/total_commit_log_size"); - } - }); - waitingOnSegmentAllocation = APIMetrics.newTimer("/commit_log/metrics/waiting_on_segment_allocation", - factory.createMetricName("WaitingOnSegmentAllocation"), - TimeUnit.MICROSECONDS, TimeUnit.SECONDS); - waitingOnCommit = APIMetrics.newTimer("/commit_log/metrics/waiting_on_commit", - factory.createMetricName("WaitingOnCommit"), - TimeUnit.MICROSECONDS, TimeUnit.SECONDS); + } + + @Override + public void register(MetricsRegistry registry) throws MalformedObjectNameException { + MetricNameFactory factory = new DefaultNameFactory("CommitLog"); + /** Number of completed tasks */ + registry.register(() -> registry.gauge("/commitlog/metrics/completed_tasks"), + factory.createMetricName("CompletedTasks")); + /** Number of pending tasks */ + registry.register(() -> registry.gauge("/commitlog/metrics/pending_tasks"), + 
factory.createMetricName("PendingTasks")); + /** Current size used by all the commit log segments */ + registry.register(() -> registry.gauge("/commitlog/metrics/total_commit_log_size"), + factory.createMetricName("TotalCommitLogSize")); + /** + * Time spent waiting for a CLS to be allocated - under normal + * conditions this should be zero + */ + registry.register(() -> registry.timer("/commitlog/metrics/waiting_on_segment_allocation"), + factory.createMetricName("WaitingOnSegmentAllocation")); + /** + * The time spent waiting on CL sync; for Periodic this is only occurs + * when the sync is lagging its sync interval + */ + registry.register(() -> registry.timer("/commitlog/metrics/waiting_on_commit"), + factory.createMetricName("WaitingOnCommit")); } } diff --git a/src/main/java/org/apache/cassandra/metrics/CompactionMetrics.java b/src/main/java/org/apache/cassandra/metrics/CompactionMetrics.java index d996ee2..afd53d3 100644 --- a/src/main/java/org/apache/cassandra/metrics/CompactionMetrics.java +++ b/src/main/java/org/apache/cassandra/metrics/CompactionMetrics.java @@ -23,52 +23,30 @@ */ package org.apache.cassandra.metrics; -import java.util.concurrent.TimeUnit; - -import com.scylladb.jmx.api.APIClient; -import com.scylladb.jmx.metrics.APIMetrics; -import com.scylladb.jmx.metrics.DefaultNameFactory; -import com.scylladb.jmx.metrics.MetricNameFactory; -import com.yammer.metrics.core.Counter; -import com.yammer.metrics.core.Gauge; -import com.yammer.metrics.core.APIMeter; +import javax.management.MalformedObjectNameException; /** * Metrics for compaction. 
*/ -public class CompactionMetrics { - public static final MetricNameFactory factory = new DefaultNameFactory( - "Compaction"); - private APIClient c = new APIClient(); - /** Estimated number of compactions remaining to perform */ - public final Gauge pendingTasks; - /** Number of completed compactions since server [re]start */ - public final Gauge completedTasks; - /** Total number of compactions since server [re]start */ - public final APIMeter totalCompactionsCompleted; - /** Total number of bytes compacted since server [re]start */ - public final Counter bytesCompacted; - +public class CompactionMetrics implements Metrics { public CompactionMetrics() { + } - pendingTasks = APIMetrics.newGauge( - factory.createMetricName("PendingTasks"), new Gauge() { - public Integer value() { - return c.getIntValue("/compaction_manager/metrics/pending_tasks"); - } - }); - completedTasks = APIMetrics.newGauge( - factory.createMetricName("CompletedTasks"), new Gauge() { - public Long value() { - return c.getLongValue("/compaction_manager/metrics/completed_tasks"); - } - }); - totalCompactionsCompleted = APIMetrics.newMeter( - "/compaction_manager/metrics/total_compactions_completed", - factory.createMetricName("TotalCompactionsCompleted"), - "compaction completed", TimeUnit.SECONDS); - bytesCompacted = APIMetrics.newCounter( - "/compaction_manager/metrics/bytes_compacted", + @Override + public void register(MetricsRegistry registry) throws MalformedObjectNameException { + MetricNameFactory factory = new DefaultNameFactory("Compaction"); + /** Estimated number of compactions remaining to perform */ + registry.register(() -> registry.gauge(Integer.class, "/compaction_manager/metrics/pending_tasks"), + factory.createMetricName("PendingTasks")); + /** Number of completed compactions since server [re]start */ + registry.register(() -> registry.gauge("/compaction_manager/metrics/completed_tasks"), + factory.createMetricName("CompletedTasks")); + /** Total number of compactions since 
server [re]start */ + registry.register(() -> registry.meter("/compaction_manager/metrics/total_compactions_completed"), + factory.createMetricName("TotalCompactionsCompleted")); + /** Total number of bytes compacted since server [re]start */ + registry.register(() -> registry.meter("/compaction_manager/metrics/bytes_compacted"), factory.createMetricName("BytesCompacted")); } + } diff --git a/src/main/java/com/scylladb/jmx/metrics/DefaultNameFactory.java b/src/main/java/org/apache/cassandra/metrics/DefaultNameFactory.java similarity index 71% rename from src/main/java/com/scylladb/jmx/metrics/DefaultNameFactory.java rename to src/main/java/org/apache/cassandra/metrics/DefaultNameFactory.java index 1ba99d8..654a1a1 100644 --- a/src/main/java/com/scylladb/jmx/metrics/DefaultNameFactory.java +++ b/src/main/java/org/apache/cassandra/metrics/DefaultNameFactory.java @@ -15,15 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.cassandra.metrics; -/* - * Copyright 2015 Cloudius Systems - * - * Modified by Cloudius Systems - */ -package com.scylladb.jmx.metrics; - -import com.yammer.metrics.core.MetricName; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; /** * MetricNameFactory that generates default MetricName of metrics. 
@@ -43,19 +38,14 @@ public class DefaultNameFactory implements MetricNameFactory { this.scope = scope; } - public MetricName createMetricName(String metricName) { + @Override + public ObjectName createMetricName(String metricName) throws MalformedObjectNameException { return createMetricName(type, metricName, scope); } - public static MetricName createMetricName(String type, String metricName, - String scope) { - return new MetricName(GROUP_NAME, type, metricName, scope, - createDefaultMBeanName(type, metricName, scope)); - } - - protected static String createDefaultMBeanName(String type, String name, - String scope) { - final StringBuilder nameBuilder = new StringBuilder(); + public static ObjectName createMetricName(String type, String name, String scope) + throws MalformedObjectNameException { + StringBuilder nameBuilder = new StringBuilder(); nameBuilder.append(GROUP_NAME); nameBuilder.append(":type="); nameBuilder.append(type); @@ -67,6 +57,6 @@ public class DefaultNameFactory implements MetricNameFactory { nameBuilder.append(",name="); nameBuilder.append(name); } - return nameBuilder.toString(); + return new ObjectName(nameBuilder.toString()); } } diff --git a/src/main/java/org/apache/cassandra/metrics/DroppedMessageMetrics.java b/src/main/java/org/apache/cassandra/metrics/DroppedMessageMetrics.java index fe9140a..e67e0c1 100644 --- a/src/main/java/org/apache/cassandra/metrics/DroppedMessageMetrics.java +++ b/src/main/java/org/apache/cassandra/metrics/DroppedMessageMetrics.java @@ -24,42 +24,27 @@ package org.apache.cassandra.metrics; -import java.util.concurrent.TimeUnit; +import javax.management.MalformedObjectNameException; import org.apache.cassandra.net.MessagingService; -import com.scylladb.jmx.metrics.APIMetrics; -import com.scylladb.jmx.metrics.DefaultNameFactory; -import com.scylladb.jmx.metrics.MetricNameFactory; -import com.yammer.metrics.core.APIMeter; - /** * Metrics for dropped messages by verb. 
*/ -public class DroppedMessageMetrics { - /** Number of dropped messages */ - public final APIMeter dropped; - - private long lastDropped = 0; +public class DroppedMessageMetrics implements Metrics { + private final MessagingService.Verb verb; public DroppedMessageMetrics(MessagingService.Verb verb) { - MetricNameFactory factory = new DefaultNameFactory("DroppedMessage", - verb.toString()); - dropped = (APIMeter) APIMetrics.newMeter(null, - factory.createMetricName("Dropped"), "dropped", - TimeUnit.SECONDS); - dropped.stop(); + this.verb = verb; } - @Deprecated - public int getRecentlyDropped() { - long currentDropped = dropped.count(); - long recentlyDropped = currentDropped - lastDropped; - lastDropped = currentDropped; - return (int) recentlyDropped; - } + @Override + public void register(MetricsRegistry registry) throws MalformedObjectNameException { + MetricNameFactory factory = new DefaultNameFactory("DroppedMessage", verb.toString()); + /** Number of dropped messages */ + // TODO: this API url does not exist. Add meter calls for verbs. + registry.register(() -> registry.meter("/messaging_service/messages/dropped/" + verb), + factory.createMetricName("Dropped")); - public APIMeter getMeter() { - return dropped; } } diff --git a/src/main/java/org/apache/cassandra/metrics/EstimatedHistogramWrapper.java b/src/main/java/org/apache/cassandra/metrics/EstimatedHistogramWrapper.java deleted file mode 100644 index 3b511fb..0000000 --- a/src/main/java/org/apache/cassandra/metrics/EstimatedHistogramWrapper.java +++ /dev/null @@ -1,55 +0,0 @@ -package org.apache.cassandra.metrics; -/* - * Copyright (C) 2015 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -import javax.ws.rs.core.MultivaluedMap; - -import com.scylladb.jmx.api.APIClient; -import com.scylladb.jmx.utils.EstimatedHistogram; - -public class EstimatedHistogramWrapper { - private APIClient c = new APIClient(); - private String url; - private MultivaluedMap queryParams; - private static final int DURATION = 50; - private int duration; - public EstimatedHistogramWrapper(String url, MultivaluedMap queryParams, int duration) { - this.url = url; - this.queryParams = queryParams; - this.duration = duration; - - } - public EstimatedHistogramWrapper(String url) { - this(url, null, DURATION); - - } - public EstimatedHistogramWrapper(String url, int duration) { - this(url, null, duration); - - } - public EstimatedHistogram get() { - return c.getEstimatedHistogram(url, queryParams, duration); - } - - public long[] getBuckets(boolean reset) { - return get().getBuckets(reset); - } -} diff --git a/src/main/java/org/apache/cassandra/metrics/LatencyMetrics.java b/src/main/java/org/apache/cassandra/metrics/LatencyMetrics.java index 1f4a9b4..4346f6a 100644 --- a/src/main/java/org/apache/cassandra/metrics/LatencyMetrics.java +++ b/src/main/java/org/apache/cassandra/metrics/LatencyMetrics.java @@ -23,41 +23,19 @@ */ package org.apache.cassandra.metrics; -import java.util.List; -import java.util.concurrent.TimeUnit; +import java.util.Arrays; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.Lists; -import com.scylladb.jmx.metrics.APIMetrics; -import com.scylladb.jmx.metrics.DefaultNameFactory; -import com.scylladb.jmx.metrics.MetricNameFactory; -import 
com.scylladb.jmx.utils.RecentEstimatedHistogram; -import com.yammer.metrics.core.Counter; -import com.yammer.metrics.core.Timer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; /** * Metrics about latencies */ -public class LatencyMetrics { - /** Latency */ - public final Timer latency; - /** Total latency in micro sec */ - public final Counter totalLatency; - - /** parent metrics to replicate any updates to **/ - private List parents = Lists.newArrayList(); - - protected final MetricNameFactory factory; +public class LatencyMetrics implements Metrics { + protected final MetricNameFactory[] factories; protected final String namePrefix; - - @Deprecated public EstimatedHistogramWrapper totalLatencyHistogram; - /* - * It should not be called directly, use the getRecentLatencyHistogram - */ - @Deprecated protected final RecentEstimatedHistogram recentLatencyHistogram = new RecentEstimatedHistogram(); - - protected long lastLatency; - protected long lastOpCount; + protected final String uri; + protected final String param; /** * Create LatencyMetrics with given group, type, and scope. Name prefix for @@ -68,8 +46,8 @@ public class LatencyMetrics { * @param scope * Scope */ - public LatencyMetrics(String url, String type, String scope) { - this(url, type, "", scope); + public LatencyMetrics(String type, String scope, String uri) { + this(type, "", scope, uri, null); } /** @@ -83,83 +61,35 @@ public class LatencyMetrics { * @param scope * Scope of metrics */ - public LatencyMetrics(String url, String type, String namePrefix, - String scope) { - this(url, new DefaultNameFactory(type, scope), namePrefix); + public LatencyMetrics(String type, String namePrefix, String scope, String uri, String param) { + this(namePrefix, uri, param, new DefaultNameFactory(type, scope)); } - /** - * Create LatencyMetrics with given group, type, prefix to append to each - * metric name, and scope. 
- * - * @param factory - * MetricName factory to use - * @param namePrefix - * Prefix to append to each metric name - */ - public LatencyMetrics(String url, MetricNameFactory factory, - String namePrefix) { - this(url, null, factory, namePrefix); + public LatencyMetrics(String namePrefix, String uri, MetricNameFactory... factories) { + this(namePrefix, uri, null, factories); } - public LatencyMetrics(String url, String paramName, - MetricNameFactory factory, String namePrefix) { - this.factory = factory; + public LatencyMetrics(String namePrefix, String uri, String param, MetricNameFactory... factories) { + this.factories = factories; this.namePrefix = namePrefix; - - paramName = (paramName == null)? "" : "/" + paramName; - latency = APIMetrics.newTimer(url + "/moving_average_histogram" + paramName, - factory.createMetricName(namePrefix + "Latency"), - TimeUnit.MICROSECONDS, TimeUnit.SECONDS); - totalLatency = APIMetrics.newCounter(url + paramName, - factory.createMetricName(namePrefix + "TotalLatency")); - totalLatencyHistogram = new EstimatedHistogramWrapper(url + "/estimated_histogram" + paramName); + this.uri = uri; + this.param = param; } - /** - * Create LatencyMetrics with given group, type, prefix to append to each - * metric name, and scope. Any updates to this will also run on parent - * - * @param factory - * MetricName factory to use - * @param namePrefix - * Prefix to append to each metric name - * @param parents - * any amount of parents to replicate updates to - */ - public LatencyMetrics(String url, MetricNameFactory factory, - String namePrefix, LatencyMetrics... parents) { - this(url, factory, namePrefix); - this.parents.addAll(ImmutableList.copyOf(parents)); + protected ObjectName[] names(String suffix) throws MalformedObjectNameException { + return Arrays.stream(factories).map(f -> { + try { + return f.createMetricName(namePrefix + suffix); + } catch (MalformedObjectNameException e) { + throw new RuntimeException(e); // dung... 
+ } + }).toArray(size -> new ObjectName[size]); } - /** takes nanoseconds **/ - public void addNano(long nanos) { - // the object is only updated from the API - } - - public void release() { - APIMetrics.defaultRegistry() - .removeMetric(factory.createMetricName(namePrefix + "Latency")); - APIMetrics.defaultRegistry().removeMetric( - factory.createMetricName(namePrefix + "TotalLatency")); - } - - @Deprecated - public synchronized double getRecentLatency() { - long ops = latency.count(); - long n = totalLatency.count(); - if (ops == lastOpCount) - return 0; - try { - return ((double) n - lastLatency) / (ops - lastOpCount); - } finally { - lastLatency = n; - lastOpCount = ops; - } - } - - public long[] getRecentLatencyHistogram() { - return recentLatencyHistogram.getBuckets(totalLatencyHistogram.getBuckets(false)); + @Override + public void register(MetricsRegistry registry) throws MalformedObjectNameException { + String paramName = (param == null) ? "" : "/" + param; + registry.register(() -> registry.timer(uri + "/moving_average_histogram" + paramName), names("Latency")); + registry.register(() -> registry.counter(uri + paramName), names("TotalLatency")); } } diff --git a/src/main/java/org/apache/cassandra/metrics/StorageMetrics.java b/src/main/java/org/apache/cassandra/metrics/StorageMetrics.java index aa87e99..d717412 100644 --- a/src/main/java/org/apache/cassandra/metrics/StorageMetrics.java +++ b/src/main/java/org/apache/cassandra/metrics/StorageMetrics.java @@ -23,27 +23,21 @@ */ package org.apache.cassandra.metrics; -import com.scylladb.jmx.metrics.APIMetrics; -import com.scylladb.jmx.metrics.DefaultNameFactory; -import com.scylladb.jmx.metrics.MetricNameFactory; -import com.yammer.metrics.core.Counter; +import javax.management.MalformedObjectNameException; /** * Metrics related to Storage. 
*/ -public class StorageMetrics { - private static final MetricNameFactory factory = new DefaultNameFactory( - "Storage"); - - public static final Counter load = APIMetrics.newCounter( - "/storage_service/metrics/load", factory.createMetricName("Load")); - public static final Counter exceptions = APIMetrics.newCounter( - "/storage_service/metrics/exceptions", - factory.createMetricName("Exceptions")); - public static final Counter totalHintsInProgress = APIMetrics.newCounter( - "/storage_service/metrics/hints_in_progress", - factory.createMetricName("TotalHintsInProgress")); - public static final Counter totalHints = APIMetrics.newCounter( - "/storage_service/metrics/total_hints", - factory.createMetricName("TotalHints")); +public class StorageMetrics implements Metrics { + @Override + public void register(MetricsRegistry registry) throws MalformedObjectNameException { + MetricNameFactory factory = new DefaultNameFactory("Storage"); + registry.register(() -> registry.counter("/storage_service/metrics/load"), factory.createMetricName("Load")); + registry.register(() -> registry.counter("/storage_service/metrics/exceptions"), + factory.createMetricName("Exceptions")); + registry.register(() -> registry.counter("/storage_service/metrics/hints_in_progress"), + factory.createMetricName("TotalHintsInProgress")); + registry.register(() -> registry.counter("/storage_service/metrics/total_hints"), + factory.createMetricName("TotalHints")); + } } diff --git a/src/main/java/org/apache/cassandra/metrics/StreamingMetrics.java b/src/main/java/org/apache/cassandra/metrics/StreamingMetrics.java index ce389ce..b2eb35f 100644 --- a/src/main/java/org/apache/cassandra/metrics/StreamingMetrics.java +++ b/src/main/java/org/apache/cassandra/metrics/StreamingMetrics.java @@ -23,84 +23,87 @@ */ package org.apache.cassandra.metrics; +import static java.util.Arrays.asList; +import static java.util.Collections.emptySet; +import static 
org.apache.cassandra.metrics.DefaultNameFactory.createMetricName; + import java.net.InetAddress; -import java.util.HashMap; +import java.net.UnknownHostException; import java.util.HashSet; -import java.util.Map; import java.util.Set; -import java.util.Timer; -import java.util.TimerTask; import javax.json.JsonArray; +import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; import com.scylladb.jmx.api.APIClient; -import com.scylladb.jmx.metrics.APIMetrics; -import com.scylladb.jmx.metrics.DefaultNameFactory; -import com.scylladb.jmx.metrics.MetricNameFactory; -import com.yammer.metrics.core.Counter; +import com.scylladb.jmx.metrics.APIMBean; /** * Metrics for streaming. */ -public class StreamingMetrics -{ +public class StreamingMetrics { public static final String TYPE_NAME = "Streaming"; - private static final Map instances = new HashMap(); - static final int INTERVAL = 1000; //update every 1second - private static Timer timer = new Timer("Streaming Metrics"); + private static final HashSet globalNames; - public static final Counter activeStreamsOutbound = APIMetrics.newCounter("/stream_manager/metrics/outbound", DefaultNameFactory.createMetricName(TYPE_NAME, "ActiveOutboundStreams", null)); - public static final Counter totalIncomingBytes = APIMetrics.newCounter("/stream_manager/metrics/incoming", DefaultNameFactory.createMetricName(TYPE_NAME, "TotalIncomingBytes", null)); - public static final Counter totalOutgoingBytes = APIMetrics.newCounter("/stream_manager/metrics/outgoing", DefaultNameFactory.createMetricName(TYPE_NAME, "TotalOutgoingBytes", null)); - public final Counter incomingBytes; - public final Counter outgoingBytes; - private static APIClient s_c = new APIClient(); - - public static void register_mbeans() { - TimerTask taskToExecute = new CheckRegistration(); - timer.scheduleAtFixedRate(taskToExecute, 100, INTERVAL); - } - - public StreamingMetrics(final InetAddress peer) - { - 
MetricNameFactory factory = new DefaultNameFactory("Streaming", peer.getHostAddress().replaceAll(":", ".")); - incomingBytes = APIMetrics.newCounter("/stream_manager/metrics/incoming/" + peer,factory.createMetricName("IncomingBytes")); - outgoingBytes= APIMetrics.newCounter("/stream_manager/metrics/outgoing/" + peer, factory.createMetricName("OutgoingBytes")); - } - - public static boolean checkRegistration() { + static { try { - JsonArray streams = s_c.getJsonArray("/stream_manager/"); - Set all = new HashSet(); - for (int i = 0; i < streams.size(); i ++) { - JsonArray sessions = streams.getJsonObject(i).getJsonArray("sessions"); - for (int j = 0; j < sessions.size(); j++) { - String name = sessions.getJsonObject(j).getString("peer"); - if (!instances.containsKey(name)) { - StreamingMetrics metrics = new StreamingMetrics(InetAddress.getByName(name)); - instances.put(name, metrics); - } - all.add(name); - } - } - //removing deleted stream - for (String n : instances.keySet()) { - if (! all.contains(n)) { - instances.remove(n); - } - } - } catch (Exception e) { - // ignoring exceptions, will retry on the next interval - return false; + globalNames = new HashSet(asList(createMetricName(TYPE_NAME, "ActiveOutboundStreams", null), + createMetricName(TYPE_NAME, "TotalIncomingBytes", null), + createMetricName(TYPE_NAME, "TotalOutgoingBytes", null))); + } catch (MalformedObjectNameException e) { + throw new Error(e); } - return true; + }; + + private StreamingMetrics() { } - private static final class CheckRegistration extends TimerTask { - @Override - public void run() { - checkRegistration(); + private static boolean isStreamingName(ObjectName n) { + return TYPE_NAME.equals(n.getKeyProperty("type")); + } + + public static void unregister(APIClient client, MBeanServer server) throws MalformedObjectNameException { + APIMBean.checkRegistration(server, emptySet(), StreamingMetrics::isStreamingName, (n) -> null); + } + + public static boolean checkRegistration(APIClient 
client, MBeanServer server) + throws MalformedObjectNameException, UnknownHostException { + + Set all = new HashSet(globalNames); + JsonArray streams = client.getJsonArray("/stream_manager/"); + for (int i = 0; i < streams.size(); i++) { + JsonArray sessions = streams.getJsonObject(i).getJsonArray("sessions"); + for (int j = 0; j < sessions.size(); j++) { + String peer = sessions.getJsonObject(j).getString("peer"); + String scope = InetAddress.getByName(peer).getHostAddress().replaceAll(":", "."); + all.add(createMetricName(TYPE_NAME, "IncomingBytes", scope)); + all.add(createMetricName(TYPE_NAME, "OutgoingBytes", scope)); + } } + + MetricsRegistry registry = new MetricsRegistry(client, server); + return APIMBean.checkRegistration(server, all, StreamingMetrics::isStreamingName, n -> { + String scope = n.getKeyProperty("scope"); + String name = n.getKeyProperty("name"); + + String url = null; + if ("ActiveOutboundStreams".equals(name)) { + url = "/stream_manager/metrics/outbound"; + } else if ("IncomingBytes".equals(name) || "TotalIncomingBytes".equals(name)) { + url = "/stream_manager/metrics/incoming"; + } else if ("OutgoingBytes".equals(name) || "TotalOutgoingBytes".equals(name)) { + url = "/stream_manager/metrics/outgoing"; + } + if (url == null) { + throw new IllegalArgumentException(); + } + if (scope != null) { + url = url + "/" + scope; + } + return registry.counter(url); + }); } } From 781821ac9e5c4f286c95bb7e949fe73623c26a9b Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:01:35 +0200 Subject: [PATCH 16/32] Make APIMBean name derivation check interface fields as well. 
--- src/main/java/com/scylladb/jmx/metrics/APIMBean.java | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/main/java/com/scylladb/jmx/metrics/APIMBean.java b/src/main/java/com/scylladb/jmx/metrics/APIMBean.java index 23d5d3c..4c57171 100644 --- a/src/main/java/com/scylladb/jmx/metrics/APIMBean.java +++ b/src/main/java/com/scylladb/jmx/metrics/APIMBean.java @@ -133,6 +133,18 @@ public class APIMBean implements MBeanRegistration { } catch (Throwable t) { } } + if (mbeanName == null) { + for (Class c : getClass().getInterfaces()) { + Field f; + try { + f = c.getDeclaredField("OBJECT_NAME"); + f.setAccessible(true); + mbeanName = (String) f.get(null); + break; + } catch (Throwable t) { + } + } + } if (mbeanName == null) { String name = getClass().getName(); int i = name.lastIndexOf('.'); From 4b83a9388e23c680470a7fda3e451c4d44b7e09d Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:03:17 +0200 Subject: [PATCH 17/32] Make APIMBeanServer simply wrap actual mbeanserver --- .../com/scylladb/jmx/utils/APIBuilder.java | 22 +- .../scylladb/jmx/utils/APIMBeanServer.java | 370 +++++++++++++----- 2 files changed, 272 insertions(+), 120 deletions(-) diff --git a/src/main/java/com/scylladb/jmx/utils/APIBuilder.java b/src/main/java/com/scylladb/jmx/utils/APIBuilder.java index 941a2bc..b7fb775 100644 --- a/src/main/java/com/scylladb/jmx/utils/APIBuilder.java +++ b/src/main/java/com/scylladb/jmx/utils/APIBuilder.java @@ -3,6 +3,8 @@ package com.scylladb.jmx.utils; * Copyright 2016 ScyllaDB */ +import static com.scylladb.jmx.main.Main.client; + /* * This file is part of Scylla. 
* @@ -21,21 +23,13 @@ package com.scylladb.jmx.utils; */ import javax.management.MBeanServer; +import javax.management.MBeanServerBuilder; import javax.management.MBeanServerDelegate; -import mx4j.server.ChainedMBeanServerBuilder; - -public class APIBuilder extends ChainedMBeanServerBuilder { - public APIBuilder() { - super(new mx4j.server.MX4JMBeanServerBuilder()); - } - - public MBeanServer newMBeanServer(String defaultDomain, MBeanServer outer, - MBeanServerDelegate delegate) { - APIMBeanServer extern = new APIMBeanServer(); - MBeanServer nested = getMBeanServerBuilder().newMBeanServer( - defaultDomain, outer == null ? extern : outer, delegate); - extern.setMBeanServer(nested); - return extern; +public class APIBuilder extends MBeanServerBuilder { + @Override + public MBeanServer newMBeanServer(String defaultDomain, MBeanServer outer, MBeanServerDelegate delegate) { + MBeanServer nested = super.newMBeanServer(defaultDomain, outer, delegate); + return new APIMBeanServer(client, nested); } } \ No newline at end of file diff --git a/src/main/java/com/scylladb/jmx/utils/APIMBeanServer.java b/src/main/java/com/scylladb/jmx/utils/APIMBeanServer.java index e3ef0a7..6f2f8f4 100644 --- a/src/main/java/com/scylladb/jmx/utils/APIMBeanServer.java +++ b/src/main/java/com/scylladb/jmx/utils/APIMBeanServer.java @@ -1,132 +1,290 @@ package com.scylladb.jmx.utils; -/** - * Copyright 2016 ScyllaDB - */ -/* -* This file is part of Scylla. -* -* Scylla is free software: you can redistribute it and/or modify -* it under the terms of the GNU Affero General Public License as published by -* the Free Software Foundation, either version 3 of the License, or -* (at your option) any later version. -* -* Scylla is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -* GNU General Public License for more details. 
-* -* You should have received a copy of the GNU General Public License -* along with Scylla. If not, see . -*/ -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; -import java.util.HashSet; -import java.util.Hashtable; -import java.util.Iterator; -import java.util.Map; +import java.io.ObjectInputStream; +import java.net.UnknownHostException; import java.util.Set; +import java.util.logging.Logger; +import java.util.regex.Pattern; +import javax.management.Attribute; +import javax.management.AttributeList; +import javax.management.AttributeNotFoundException; +import javax.management.InstanceAlreadyExistsException; +import javax.management.InstanceNotFoundException; +import javax.management.IntrospectionException; +import javax.management.InvalidAttributeValueException; +import javax.management.ListenerNotFoundException; +import javax.management.MBeanException; +import javax.management.MBeanInfo; +import javax.management.MBeanRegistrationException; import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; +import javax.management.NotCompliantMBeanException; +import javax.management.NotificationFilter; +import javax.management.NotificationListener; +import javax.management.ObjectInstance; import javax.management.ObjectName; +import javax.management.OperationsException; import javax.management.QueryExp; +import javax.management.ReflectionException; +import javax.management.loading.ClassLoaderRepository; + import org.apache.cassandra.db.ColumnFamilyStore; import org.apache.cassandra.metrics.StreamingMetrics; -import mx4j.server.ChainedMBeanServer; -import mx4j.server.MX4JMBeanServer; -import mx4j.util.Utils; +import com.scylladb.jmx.api.APIClient; -public class APIMBeanServer extends ChainedMBeanServer { - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(APIMBeanServer.class.getName()); +public class APIMBeanServer implements 
MBeanServer { + @SuppressWarnings("unused") + private static final Logger logger = Logger.getLogger(APIMBeanServer.class.getName()); - public static void log(String str) { - logger.finest(str); + private final APIClient client; + private final MBeanServer server; + + public APIMBeanServer(APIClient client, MBeanServer server) { + this.client = client; + this.server = server; } - public void setMBeanServer(MBeanServer server) { - if (server != null) { - try { - Field f = server.getClass().getDeclaredField("introspector"); - f.setAccessible(true); - f.set(server, new APIMBeanIntrospector()); - } catch (Exception e) { - logger.warning( - "Failed setting new interceptor" + e.getMessage()); - } - } - super.setMBeanServer(server); + @Override + public ObjectInstance createMBean(String className, ObjectName name) throws ReflectionException, + InstanceAlreadyExistsException, MBeanRegistrationException, MBeanException, NotCompliantMBeanException { + return server.createMBean(className, name); } - public ObjectName apiNormalizeObjectName(ObjectName name) { - try { - Class[] cArg = new Class[1]; - cArg[0] = ObjectName.class; - Method met = MX4JMBeanServer.class - .getDeclaredMethod("normalizeObjectName", cArg); - met.setAccessible(true); - return (ObjectName) met.invoke((MX4JMBeanServer) getMBeanServer(), - name); - } catch (NoSuchMethodException | SecurityException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) { - // TODO Auto-generated catch block - return null; - } + @Override + public ObjectInstance createMBean(String className, ObjectName name, ObjectName loaderName) + throws ReflectionException, InstanceAlreadyExistsException, MBeanRegistrationException, MBeanException, + NotCompliantMBeanException, InstanceNotFoundException { + return server.createMBean(className, name, loaderName); + } + + @Override + public ObjectInstance createMBean(String className, ObjectName name, Object[] params, String[] signature) + throws 
ReflectionException, InstanceAlreadyExistsException, MBeanRegistrationException, MBeanException, + NotCompliantMBeanException { + return server.createMBean(className, name, params, signature); + } + + @Override + public ObjectInstance createMBean(String className, ObjectName name, ObjectName loaderName, Object[] params, + String[] signature) throws ReflectionException, InstanceAlreadyExistsException, MBeanRegistrationException, + MBeanException, NotCompliantMBeanException, InstanceNotFoundException { + return server.createMBean(className, name, loaderName, params, signature); + } + + @Override + public ObjectInstance registerMBean(Object object, ObjectName name) + throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException { + return server.registerMBean(object, name); + } + + @Override + public void unregisterMBean(ObjectName name) throws InstanceNotFoundException, MBeanRegistrationException { + server.unregisterMBean(name); + } + + @Override + public ObjectInstance getObjectInstance(ObjectName name) throws InstanceNotFoundException { + checkRegistrations(name); + return server.getObjectInstance(name); } @Override public Set queryNames(ObjectName name, QueryExp query) { - if (name == null) { - return super.queryNames(name, query); + checkRegistrations(name); + return server.queryNames(name, query); + } + + @Override + public Set queryMBeans(ObjectName name, QueryExp query) { + checkRegistrations(name); + return server.queryMBeans(name, query); + } + + @Override + public boolean isRegistered(ObjectName name) { + checkRegistrations(name); + return server.isRegistered(name); + } + + @Override + public Integer getMBeanCount() { + return server.getMBeanCount(); + } + + @Override + public Object getAttribute(ObjectName name, String attribute) + throws MBeanException, AttributeNotFoundException, InstanceNotFoundException, ReflectionException { + checkRegistrations(name); + return server.getAttribute(name, attribute); + } + + @Override + 
public AttributeList getAttributes(ObjectName name, String[] attributes) + throws InstanceNotFoundException, ReflectionException { + checkRegistrations(name); + return server.getAttributes(name, attributes); + } + + @Override + public void setAttribute(ObjectName name, Attribute attribute) throws InstanceNotFoundException, + AttributeNotFoundException, InvalidAttributeValueException, MBeanException, ReflectionException { + checkRegistrations(name); + server.setAttribute(name, attribute); + } + + @Override + public AttributeList setAttributes(ObjectName name, AttributeList attributes) + throws InstanceNotFoundException, ReflectionException { + checkRegistrations(name); + return server.setAttributes(name, attributes); + } + + @Override + public Object invoke(ObjectName name, String operationName, Object[] params, String[] signature) + throws InstanceNotFoundException, MBeanException, ReflectionException { + checkRegistrations(name); + return server.invoke(name, operationName, params, signature); + } + + @Override + public String getDefaultDomain() { + return server.getDefaultDomain(); + } + + @Override + public String[] getDomains() { + return server.getDomains(); + } + + @Override + public void addNotificationListener(ObjectName name, NotificationListener listener, NotificationFilter filter, + Object handback) throws InstanceNotFoundException { + server.addNotificationListener(name, listener, filter, handback); + } + + @Override + public void addNotificationListener(ObjectName name, ObjectName listener, NotificationFilter filter, + Object handback) throws InstanceNotFoundException { + server.addNotificationListener(name, listener, filter, handback); + } + + @Override + public void removeNotificationListener(ObjectName name, ObjectName listener) + throws InstanceNotFoundException, ListenerNotFoundException { + server.removeNotificationListener(name, listener); + } + + @Override + public void removeNotificationListener(ObjectName name, ObjectName listener, 
NotificationFilter filter, + Object handback) throws InstanceNotFoundException, ListenerNotFoundException { + server.removeNotificationListener(name, listener, filter, handback); + } + + @Override + public void removeNotificationListener(ObjectName name, NotificationListener listener) + throws InstanceNotFoundException, ListenerNotFoundException { + server.removeNotificationListener(name, listener); + } + + @Override + public void removeNotificationListener(ObjectName name, NotificationListener listener, NotificationFilter filter, + Object handback) throws InstanceNotFoundException, ListenerNotFoundException { + server.removeNotificationListener(name, listener, filter, handback); + } + + @Override + public MBeanInfo getMBeanInfo(ObjectName name) + throws InstanceNotFoundException, IntrospectionException, ReflectionException { + checkRegistrations(name); + return server.getMBeanInfo(name); + } + + @Override + public boolean isInstanceOf(ObjectName name, String className) throws InstanceNotFoundException { + return server.isInstanceOf(name, className); + } + + @Override + public Object instantiate(String className) throws ReflectionException, MBeanException { + return server.instantiate(className); + } + + @Override + public Object instantiate(String className, ObjectName loaderName) + throws ReflectionException, MBeanException, InstanceNotFoundException { + return server.instantiate(className, loaderName); + } + + @Override + public Object instantiate(String className, Object[] params, String[] signature) + throws ReflectionException, MBeanException { + return server.instantiate(className, params, signature); + } + + @Override + public Object instantiate(String className, ObjectName loaderName, Object[] params, String[] signature) + throws ReflectionException, MBeanException, InstanceNotFoundException { + return server.instantiate(className, loaderName, params, signature); + } + + @Override + @Deprecated + public ObjectInputStream deserialize(ObjectName name, byte[] 
data) + throws InstanceNotFoundException, OperationsException { + return server.deserialize(name, data); + } + + @Override + @Deprecated + public ObjectInputStream deserialize(String className, byte[] data) + throws OperationsException, ReflectionException { + return server.deserialize(className, data); + } + + @Override + @Deprecated + public ObjectInputStream deserialize(String className, ObjectName loaderName, byte[] data) + throws InstanceNotFoundException, OperationsException, ReflectionException { + return server.deserialize(className, loaderName, data); + } + + @Override + public ClassLoader getClassLoaderFor(ObjectName mbeanName) throws InstanceNotFoundException { + return server.getClassLoaderFor(mbeanName); + } + + @Override + public ClassLoader getClassLoader(ObjectName loaderName) throws InstanceNotFoundException { + return server.getClassLoader(loaderName); + } + + @Override + public ClassLoaderRepository getClassLoaderRepository() { + return server.getClassLoaderRepository(); + } + + static final Pattern tables = Pattern.compile("^(ColumnFamil(ies|y)|(Index)?Tables?)$"); + + private boolean checkRegistrations(ObjectName name) { + if (name != null && server.isRegistered(name)) { + return false; } - if (name.getCanonicalKeyPropertyListString() - .contains("ColumnFamilies")) { - ColumnFamilyStore.checkRegistration(); - } else if (name.getCanonicalKeyPropertyListString() - .contains("Stream")) { - StreamingMetrics.checkRegistration(); - } - ObjectName no = apiNormalizeObjectName(name); - Hashtable patternProps = no.getKeyPropertyList(); - boolean paternFound = false; - for (Iterator j = patternProps.entrySet().iterator(); j.hasNext();) { - Map.Entry entry = (Map.Entry) j.next(); - String patternValue = (String) entry.getValue(); - if (patternValue.contains("*")) { - paternFound = true; - break; + + boolean result = false; + + try { + String type = name != null ? 
name.getKeyProperty("type") : null; + if (type == null || tables.matcher(type).matches()) { + result |= ColumnFamilyStore.checkRegistration(client, server); } - } - if (paternFound) { - Set res = new HashSet(); - for (ObjectName q : (Set) super.queryNames(null,query)) { - if (Utils.wildcardMatch(name.getDomain(), q.getDomain())) { - Hashtable props = q.getKeyPropertyList(); - boolean found = true; - for (Iterator j = patternProps.entrySet().iterator(); j - .hasNext();) { - Map.Entry entry = (Map.Entry) j.next(); - String patternKey = (String) entry.getKey(); - String patternValue = (String) entry.getValue(); - if (props.containsKey(patternKey)) { - if (!Utils.wildcardMatch(patternValue, - props.get(patternKey).toString())) { - found = false; - break; - } - } else { - found = false; - break; - } - } - if (found) { - res.add(q); - } - } + if (type == null || StreamingMetrics.TYPE_NAME.equals(type)) { + result |= StreamingMetrics.checkRegistration(client, server); } - return res; + } catch (MalformedObjectNameException | UnknownHostException e) { + // TODO: log } - return super.queryNames(name, query); + return result; } } \ No newline at end of file From e55863e37532bf5fef1bb04007dced24b1879e28 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:05:05 +0200 Subject: [PATCH 18/32] Rework ColumnFamilyStore --- .../cassandra/db/ColumnFamilyStore.java | 509 ++---------------- 1 file changed, 59 insertions(+), 450 deletions(-) diff --git a/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java index 0bee7af..7b5f2df 100644 --- a/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ b/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -24,75 +24,61 @@ package org.apache.cassandra.db; import static java.lang.String.valueOf; +import static java.util.Arrays.asList; import static javax.json.Json.createObjectBuilder; import static javax.json.Json.createReader; import 
static javax.ws.rs.core.MediaType.APPLICATION_JSON; import java.io.StringReader; -import java.lang.management.ManagementFactory; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; -import java.util.Timer; -import java.util.TimerTask; import java.util.concurrent.ExecutionException; +import java.util.logging.Logger; import javax.json.JsonArray; import javax.json.JsonObject; import javax.json.JsonObjectBuilder; import javax.json.JsonValue; import javax.management.MBeanServer; +import javax.management.MalformedObjectNameException; import javax.management.ObjectName; import javax.management.openmbean.CompositeData; import javax.management.openmbean.OpenDataException; import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; -import org.apache.cassandra.metrics.ColumnFamilyMetrics; +import org.apache.cassandra.metrics.TableMetrics; import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.MetricsMBean; -public class ColumnFamilyStore implements ColumnFamilyStoreMBean { - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(ColumnFamilyStore.class.getName()); - private APIClient c = new APIClient(); +public class ColumnFamilyStore extends MetricsMBean implements ColumnFamilyStoreMBean { + private static final Logger logger = Logger.getLogger(ColumnFamilyStore.class.getName()); @SuppressWarnings("unused") - private String type; - private String keyspace; - private String name; - private String mbeanName; - private static APIClient s_c = new APIClient(); - static final int INTERVAL = 1000; // update every 1second - public final ColumnFamilyMetrics metric; + private final String type; + private final String keyspace; + private final String name; - private static Map cf = new HashMap(); - private static Timer timer = new Timer("Column Family"); + public static final Set TYPE_NAMES = new 
HashSet<>(asList("ColumnFamilies", "IndexTables", "Tables")); public void log(String str) { logger.finest(str); } - public static void register_mbeans() { - TimerTask taskToExecute = new CheckRegistration(); - timer.schedule(taskToExecute, 100, INTERVAL); - } - - public ColumnFamilyStore(String type, String keyspace, String name) { + public ColumnFamilyStore(APIClient client, String type, String keyspace, String name) { + super(client, + new TableMetrics(keyspace, name, false /* hardcoded for now */)); this.type = type; this.keyspace = keyspace; this.name = name; - mbeanName = getName(type, keyspace, name); - try { - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - ObjectName nameObj = new ObjectName(mbeanName); - mbs.registerMBean(this, nameObj); - } catch (Exception e) { - throw new RuntimeException(e); - } - metric = new ColumnFamilyMetrics(this); + } + + public ColumnFamilyStore(APIClient client, ObjectName name) { + this(client, name.getKeyProperty("type"), name.getKeyProperty("keyspace"), name.getKeyProperty("columnfamily")); } /** true if this CFS contains secondary index data */ @@ -112,59 +98,19 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { return keyspace + ":" + name; } - private static String getName(String type, String keyspace, String name) { - return "org.apache.cassandra.db:type=" + type + ",keyspace=" + keyspace - + ",columnfamily=" + name; + private static ObjectName getName(String type, String keyspace, String name) throws MalformedObjectNameException { + return new ObjectName( + "org.apache.cassandra.db:type=" + type + ",keyspace=" + keyspace + ",columnfamily=" + name); } - public static boolean checkRegistration() { - try { - JsonArray mbeans = s_c.getJsonArray("/column_family/"); - Set all_cf = new HashSet(); - for (int i = 0; i < mbeans.size(); i++) { - JsonObject mbean = mbeans.getJsonObject(i); - String name = getName(mbean.getString("type"), - mbean.getString("ks"), mbean.getString("cf")); - if 
(!cf.containsKey(name)) { - ColumnFamilyStore cfs = new ColumnFamilyStore( - mbean.getString("type"), mbean.getString("ks"), - mbean.getString("cf")); - cf.put(name, cfs); - } - all_cf.add(name); - } - // removing deleted column family - for (String n : cf.keySet()) { - if (!all_cf.contains(n)) { - cf.remove(n); - } - } - } catch (IllegalStateException e) { - return false; - } - return true; - } - - private static final class CheckRegistration extends TimerTask { - private int missed_response = 0; - // After MAX_RETRY retry we assume the API is not available - // and the jmx will shutdown - private static final int MAX_RETRY = 30; - @Override - public void run() { - try { - if (checkRegistration()) { - missed_response = 0; - } else { - if (missed_response++ > MAX_RETRY) { - System.err.println("API is not available, JMX is shuting down"); - System.exit(-1); - } - } - } catch (Exception e) { - // ignoring exceptions, will retry on the next interval - } + public static boolean checkRegistration(APIClient client, MBeanServer server) throws MalformedObjectNameException { + JsonArray mbeans = client.getJsonArray("/column_family/"); + Set all = new HashSet(); + for (int i = 0; i < mbeans.size(); i++) { + JsonObject mbean = mbeans.getJsonObject(i); + all.add(getName(mbean.getString("type"), mbean.getString("ks"), mbean.getString("cf"))); } + return checkRegistration(server, all, n -> TYPE_NAMES.contains(n.getKeyProperty("type")), n -> new ColumnFamilyStore(client, n)); } /** @@ -176,316 +122,12 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { return name; } - /** - * Returns the total amount of data stored in the memtable, including column - * related overhead. - * - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableOnHeapSize - * @return The size in bytes. 
- * @deprecated - */ - @Deprecated - public long getMemtableDataSize() { - log(" getMemtableDataSize()"); - return c.getLongValue("/column_family/metrics/memtable_on_heap_size/" + getCFName()); - } - - /** - * Returns the total number of columns present in the memtable. - * - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableColumnsCount - * @return The number of columns. - */ - @Deprecated - public long getMemtableColumnsCount() { - log(" getMemtableColumnsCount()"); - return metric.memtableColumnsCount.value(); - } - - /** - * Returns the number of times that a flush has resulted in the memtable - * being switched out. - * - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableSwitchCount - * @return the number of memtable switches - */ - @Deprecated - public int getMemtableSwitchCount() { - log(" getMemtableSwitchCount()"); - return c.getIntValue("/column_family/metrics/memtable_switch_count/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentSSTablesPerRead - * @return a histogram of the number of sstable data files accessed per - * read: reading this property resets it - */ - @Deprecated - public long[] getRecentSSTablesPerReadHistogram() { - log(" getRecentSSTablesPerReadHistogram()"); - return metric.getRecentSSTablesPerRead(); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#sstablesPerReadHistogram - * @return a histogram of the number of sstable data files accessed per read - */ - @Deprecated - public long[] getSSTablesPerReadHistogram() { - log(" getSSTablesPerReadHistogram()"); - return metric.sstablesPerRead.getBuckets(false); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency - * @return the number of read operations on this column family - */ - @Deprecated - public long getReadCount() { - log(" getReadCount()"); - return c.getIntValue("/column_family/metrics/read/" + getCFName()); - } - - /** - * @see 
org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency - * @return total read latency (divide by getReadCount() for average) - */ - @Deprecated - public long getTotalReadLatencyMicros() { - log(" getTotalReadLatencyMicros()"); - return c.getLongValue("/column_family/metrics/read_latency/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency - * @return an array representing the latency histogram - */ - @Deprecated - public long[] getLifetimeReadLatencyHistogramMicros() { - log(" getLifetimeReadLatencyHistogramMicros()"); - return metric.readLatency.totalLatencyHistogram.getBuckets(false); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency - * @return an array representing the latency histogram - */ - @Deprecated - public long[] getRecentReadLatencyHistogramMicros() { - log(" getRecentReadLatencyHistogramMicros()"); - return metric.readLatency.getRecentLatencyHistogram(); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency - * @return average latency per read operation since the last call - */ - @Deprecated - public double getRecentReadLatencyMicros() { - log(" getRecentReadLatencyMicros()"); - return metric.readLatency.getRecentLatency(); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency - * @return the number of write operations on this column family - */ - @Deprecated - public long getWriteCount() { - log(" getWriteCount()"); - return c.getLongValue("/column_family/metrics/write/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency - * @return total write latency (divide by getReadCount() for average) - */ - @Deprecated - public long getTotalWriteLatencyMicros() { - log(" getTotalWriteLatencyMicros()"); - return c.getLongValue("/column_family/metrics/write_latency/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency - * @return 
an array representing the latency histogram - */ - @Deprecated - public long[] getLifetimeWriteLatencyHistogramMicros() { - log(" getLifetimeWriteLatencyHistogramMicros()"); - return metric.writeLatency.totalLatencyHistogram.getBuckets(false); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency - * @return an array representing the latency histogram - */ - @Deprecated - public long[] getRecentWriteLatencyHistogramMicros() { - log(" getRecentWriteLatencyHistogramMicros()"); - return metric.writeLatency.getRecentLatencyHistogram(); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency - * @return average latency per write operation since the last call - */ - @Deprecated - public double getRecentWriteLatencyMicros() { - log(" getRecentWriteLatencyMicros()"); - return metric.writeLatency.getRecentLatency(); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#pendingFlushes - * @return the estimated number of tasks pending for this column family - */ - @Deprecated - public int getPendingTasks() { - log(" getPendingTasks()"); - return c.getIntValue("/column_family/metrics/pending_flushes/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveSSTableCount - * @return the number of SSTables on disk for this CF - */ - @Deprecated - public int getLiveSSTableCount() { - log(" getLiveSSTableCount()"); - return c.getIntValue("/column_family/metrics/live_ss_table_count/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveDiskSpaceUsed - * @return disk space used by SSTables belonging to this CF - */ - @Deprecated - public long getLiveDiskSpaceUsed() { - log(" getLiveDiskSpaceUsed()"); - return c.getLongValue("/column_family/metrics/live_disk_space_used/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#totalDiskSpaceUsed - * @return total disk space used by SSTables belonging to this CF, 
including - * obsolete ones waiting to be GC'd - */ - @Deprecated - public long getTotalDiskSpaceUsed() { - log(" getTotalDiskSpaceUsed()"); - return c.getLongValue("/column_family/metrics/total_disk_space_used/" + getCFName()); - } - /** * force a major compaction of this column family */ - public void forceMajorCompaction() - throws ExecutionException, InterruptedException { + public void forceMajorCompaction() throws ExecutionException, InterruptedException { log(" forceMajorCompaction() throws ExecutionException, InterruptedException"); - c.post("column_family/major_compaction/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#minRowSize - * @return the size of the smallest compacted row - */ - @Deprecated - public long getMinRowSize() { - log(" getMinRowSize()"); - return c.getLongValue("/column_family/metrics/min_row_size/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#maxRowSize - * @return the size of the largest compacted row - */ - @Deprecated - public long getMaxRowSize() { - log(" getMaxRowSize()"); - return c.getLongValue("/column_family/metrics/max_row_size/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#meanRowSize - * @return the average row size across all the sstables - */ - @Deprecated - public long getMeanRowSize() { - log(" getMeanRowSize()"); - return c.getLongValue("/column_family/metrics/mean_row_size/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalsePositives - */ - @Deprecated - public long getBloomFilterFalsePositives() { - log(" getBloomFilterFalsePositives()"); - return c.getLongValue("/column_family/metrics/bloom_filter_false_positives/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalsePositives - */ - @Deprecated - public long getRecentBloomFilterFalsePositives() { - log(" 
getRecentBloomFilterFalsePositives()"); - return c.getLongValue("/column_family/metrics/recent_bloom_filter_false_positives/" +getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalseRatio - */ - @Deprecated - public double getBloomFilterFalseRatio() { - log(" getBloomFilterFalseRatio()"); - return c.getDoubleValue("/column_family/metrics/bloom_filter_false_ratio/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalseRatio - */ - @Deprecated - public double getRecentBloomFilterFalseRatio() { - log(" getRecentBloomFilterFalseRatio()"); - return c.getDoubleValue("/column_family/metrics/recent_bloom_filter_false_ratio/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterDiskSpaceUsed - */ - @Deprecated - public long getBloomFilterDiskSpaceUsed() { - log(" getBloomFilterDiskSpaceUsed()"); - return c.getLongValue("/column_family/metrics/bloom_filter_disk_space_used/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterOffHeapMemoryUsed - */ - @Deprecated - public long getBloomFilterOffHeapMemoryUsed() { - log(" getBloomFilterOffHeapMemoryUsed()"); - return c.getLongValue("/column_family/metrics/bloom_filter_off_heap_memory_used/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#indexSummaryOffHeapMemoryUsed - */ - @Deprecated - public long getIndexSummaryOffHeapMemoryUsed() { - log(" getIndexSummaryOffHeapMemoryUsed()"); - return c.getLongValue("/column_family/metrics/index_summary_off_heap_memory_used/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionMetadataOffHeapMemoryUsed - */ - @Deprecated - public long getCompressionMetadataOffHeapMemoryUsed() { - log(" getCompressionMetadataOffHeapMemoryUsed()"); - return c.getLongValue("/column_family/metrics/compression_metadata_off_heap_memory_used/" + 
getCFName()); + client.post("column_family/major_compaction/" + getCFName()); } /** @@ -494,7 +136,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { @Override public int getMinimumCompactionThreshold() { log(" getMinimumCompactionThreshold()"); - return c.getIntValue("column_family/minimum_compaction/" + getCFName()); + return client.getIntValue("column_family/minimum_compaction/" + getCFName()); } /** @@ -505,7 +147,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { log(" setMinimumCompactionThreshold(int threshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("value", Integer.toString(threshold)); - c.post("column_family/minimum_compaction/" + getCFName(), queryParams); + client.post("column_family/minimum_compaction/" + getCFName(), queryParams); } /** @@ -514,7 +156,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { @Override public int getMaximumCompactionThreshold() { log(" getMaximumCompactionThreshold()"); - return c.getIntValue("column_family/maximum_compaction/" + getCFName()); + return client.getIntValue("column_family/maximum_compaction/" + getCFName()); } /** @@ -527,7 +169,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("minimum", Integer.toString(minThreshold)); queryParams.add("maximum", Integer.toString(maxThreshold)); - c.post("column_family/compaction" + getCFName(), queryParams); + client.post("column_family/compaction" + getCFName(), queryParams); } /** @@ -538,7 +180,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { log(" setMaximumCompactionThreshold(int threshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("value", Integer.toString(threshold)); - c.post("column_family/maximum_compaction/" + getCFName(), queryParams); + client.post("column_family/maximum_compaction/" + getCFName(), queryParams); } /** @@ 
-551,7 +193,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { log(" setCompactionStrategyClass(String className)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("class_name", className); - c.post("column_family/compaction_strategy/" + getCFName(), queryParams); + client.post("column_family/compaction_strategy/" + getCFName(), queryParams); } /** @@ -559,8 +201,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { */ public String getCompactionStrategyClass() { log(" getCompactionStrategyClass()"); - return c.getStringValue( - "column_family/compaction_strategy/" + getCFName()); + return client.getStringValue("column_family/compaction_strategy/" + getCFName()); } /** @@ -569,8 +210,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { @Override public Map getCompressionParameters() { log(" getCompressionParameters()"); - return c.getMapStrValue( - "column_family/compression_parameters/" + getCFName()); + return client.getMapStrValue("column_family/compression_parameters/" + getCFName()); } /** @@ -584,8 +224,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { log(" setCompressionParameters(Map opts)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("opts", APIClient.mapToString(opts)); - c.post("column_family/compression_parameters/" + getCFName(), - queryParams); + client.post("column_family/compression_parameters/" + getCFName(), queryParams); } /** @@ -596,60 +235,33 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { log(" setCrcCheckChance(double crcCheckChance)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("check_chance", Double.toString(crcCheckChance)); - c.post("column_family/crc_check_chance/" + getCFName(), queryParams); + client.post("column_family/crc_check_chance/" + getCFName(), queryParams); } @Override public boolean isAutoCompactionDisabled() { log(" 
isAutoCompactionDisabled()"); - return c.getBooleanValue("column_family/autocompaction/" + getCFName()); + return client.getBooleanValue("column_family/autocompaction/" + getCFName()); } /** Number of tombstoned cells retreived during the last slicequery */ @Deprecated public double getTombstonesPerSlice() { log(" getTombstonesPerSlice()"); - return c.getDoubleValue(""); + return client.getDoubleValue(""); } /** Number of live cells retreived during the last slicequery */ @Deprecated public double getLiveCellsPerSlice() { log(" getLiveCellsPerSlice()"); - return c.getDoubleValue(""); + return client.getDoubleValue(""); } @Override public long estimateKeys() { log(" estimateKeys()"); - return c.getLongValue("column_family/estimate_keys/" + getCFName()); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedRowSizeHistogram - */ - @Deprecated - public long[] getEstimatedRowSizeHistogram() { - log(" getEstimatedRowSizeHistogram()"); - return metric.estimatedRowSizeHistogram.value(); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedColumnCountHistogram - */ - @Deprecated - public long[] getEstimatedColumnCountHistogram() { - log(" getEstimatedColumnCountHistogram()"); - return metric.estimatedColumnCountHistogram.value(); - } - - /** - * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionRatio - */ - @Deprecated - public double getCompressionRatio() { - log(" getCompressionRatio()"); - return c.getDoubleValue("/column_family/metrics/compression_ratio/" + getCFName()); + return client.getLongValue("column_family/estimate_keys/" + getCFName()); } /** @@ -660,7 +272,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { @Override public List getBuiltIndexes() { log(" getBuiltIndexes()"); - return c.getListStrValue("column_family/built_indexes/" + getCFName()); + return client.getListStrValue("column_family/built_indexes/" + getCFName()); } /** @@ -674,8 +286,7 @@ public class 
ColumnFamilyStore implements ColumnFamilyStoreMBean { log(" getSSTablesForKey(String key)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("key", key); - return c.getListStrValue("column_family/sstables/by_key/" + getCFName(), - queryParams); + return client.getListStrValue("column_family/sstables/by_key/" + getCFName(), queryParams); } /** @@ -685,7 +296,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { @Override public void loadNewSSTables() { log(" loadNewSSTables()"); - c.post("column_family/sstable/" + getCFName()); + client.post("column_family/sstable/" + getCFName()); } /** @@ -695,7 +306,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { @Override public int getUnleveledSSTables() { log(" getUnleveledSSTables()"); - return c.getIntValue("column_family/sstables/unleveled/" + getCFName()); + return client.getIntValue("column_family/sstables/unleveled/" + getCFName()); } /** @@ -706,8 +317,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { @Override public int[] getSSTableCountPerLevel() { log(" getSSTableCountPerLevel()"); - int[] res = c.getIntArrValue( - "column_family/sstables/per_level/" + getCFName()); + int[] res = client.getIntArrValue("column_family/sstables/per_level/" + getCFName()); if (res.length == 0) { // no sstable count // should return null @@ -725,7 +335,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { @Override public double getDroppableTombstoneRatio() { log(" getDroppableTombstoneRatio()"); - return c.getDoubleValue("column_family/droppable_ratio/" + getCFName()); + return client.getDoubleValue("column_family/droppable_ratio/" + getCFName()); } /** @@ -735,13 +345,13 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { @Override public long trueSnapshotsSize() { log(" trueSnapshotsSize()"); - return c.getLongValue("column_family/metrics/snapshots_size/" + getCFName()); + return 
client.getLongValue("column_family/metrics/snapshots_size/" + getCFName()); } public String getKeyspace() { return keyspace; } - + @Override public String getTableName() { log(" getTableName()"); @@ -752,20 +362,20 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { public void forceMajorCompaction(boolean splitOutput) throws ExecutionException, InterruptedException { log(" forceMajorCompaction(boolean) throws ExecutionException, InterruptedException"); MultivaluedMap queryParams = new MultivaluedHashMap(); - queryParams.putSingle("value", valueOf(splitOutput)); - c.post("column_family/major_compaction/" + getCFName(), queryParams); + queryParams.putSingle("value", valueOf(splitOutput)); + client.post("column_family/major_compaction/" + getCFName(), queryParams); } @Override public void setCompactionParametersJson(String options) { log(" setCompactionParametersJson"); - c.post("column_family/compaction_parameters/" + getCFName(), null, options, APPLICATION_JSON); + client.post("column_family/compaction_parameters/" + getCFName(), null, options, APPLICATION_JSON); } @Override public String getCompactionParametersJson() { log(" getCompactionParametersJson"); - return c.getStringValue("column_family/compaction_parameters/" + getCFName()); + return client.getStringValue("column_family/compaction_parameters/" + getCFName()); } @Override @@ -782,7 +392,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { String s = getCompactionParametersJson(); JsonObject o = createReader(new StringReader(s)).readObject(); HashMap res = new HashMap<>(); - for (Entry e : o.entrySet()) { + for (Entry e : o.entrySet()) { res.put(e.getKey(), e.getValue().toString()); } return res; @@ -797,7 +407,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { @Override public void compactionDiskSpaceCheck(boolean enable) { - // TODO Auto-generated method stub + // TODO Auto-generated method stub log(" compactionDiskSpaceCheck()"); } @@ -809,8 
+419,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean { } @Override - public CompositeData finishLocalSampling(String sampler, int count) - throws OpenDataException { + public CompositeData finishLocalSampling(String sampler, int count) throws OpenDataException { // TODO Auto-generated method stub log(" finishLocalSampling()"); return null; From 1470b37193abf9ce28c7f75a8713c47fa5907d0e Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:05:15 +0200 Subject: [PATCH 19/32] Rework CommitLog --- .../cassandra/db/commitlog/CommitLog.java | 48 +++++++------------ 1 file changed, 16 insertions(+), 32 deletions(-) diff --git a/src/main/java/org/apache/cassandra/db/commitlog/CommitLog.java b/src/main/java/org/apache/cassandra/db/commitlog/CommitLog.java index e943351..bc041ef 100644 --- a/src/main/java/org/apache/cassandra/db/commitlog/CommitLog.java +++ b/src/main/java/org/apache/cassandra/db/commitlog/CommitLog.java @@ -23,7 +23,6 @@ package org.apache.cassandra.db.commitlog; import java.io.IOException; -import java.lang.management.ManagementFactory; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -31,48 +30,31 @@ import java.util.List; import java.util.Map; import java.util.Set; -import javax.management.MBeanServer; -import javax.management.ObjectName; - import org.apache.cassandra.metrics.CommitLogMetrics; import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.MetricsMBean; /* * Commit Log tracks every write operation into the system. The aim of the commit log is to be able to * successfully recover data that was not stored to disk via the Memtable. 
*/ -public class CommitLog implements CommitLogMBean { - - CommitLogMetrics metrics = new CommitLogMetrics(); +public class CommitLog extends MetricsMBean implements CommitLogMBean { private static final java.util.logging.Logger logger = java.util.logging.Logger .getLogger(CommitLog.class.getName()); - private APIClient c = new APIClient(); - public void log(String str) { logger.finest(str); } - private static final CommitLog instance = new CommitLog(); - - public static CommitLog getInstance() { - return instance; - } - - private CommitLog() { - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - try { - mbs.registerMBean(this, - new ObjectName("org.apache.cassandra.db:type=Commitlog")); - } catch (Exception e) { - throw new RuntimeException(e); - } + public CommitLog(APIClient client) { + super("org.apache.cassandra.db:type=Commitlog", client, new CommitLogMetrics()); } /** * Recover a single file. */ + @Override public void recover(String path) throws IOException { log(" recover(String path) throws IOException"); } @@ -81,9 +63,10 @@ public class CommitLog implements CommitLogMBean { * @return file names (not full paths) of active commit log segments * (segments containing unflushed data) */ + @Override public List getActiveSegmentNames() { log(" getActiveSegmentNames()"); - List lst = c.getListStrValue("/commitlog/segments/active"); + List lst = client.getListStrValue("/commitlog/segments/active"); Set set = new HashSet(); for (String l : lst) { String name = l.substring(l.lastIndexOf("/") + 1, l.length()); @@ -96,9 +79,10 @@ public class CommitLog implements CommitLogMBean { * @return Files which are pending for archival attempt. Does NOT include * failed archive attempts. 
*/ + @Override public List getArchivingSegmentNames() { log(" getArchivingSegmentNames()"); - List lst = c.getListStrValue("/commitlog/segments/archiving"); + List lst = client.getListStrValue("/commitlog/segments/archiving"); Set set = new HashSet(); for (String l : lst) { String name = l.substring(l.lastIndexOf("/") + 1, l.length()); @@ -111,46 +95,46 @@ public class CommitLog implements CommitLogMBean { public String getArchiveCommand() { // TODO Auto-generated method stub log(" getArchiveCommand()"); - return c.getStringValue(""); + return client.getStringValue(""); } @Override public String getRestoreCommand() { // TODO Auto-generated method stub log(" getRestoreCommand()"); - return c.getStringValue(""); + return client.getStringValue(""); } @Override public String getRestoreDirectories() { // TODO Auto-generated method stub log(" getRestoreDirectories()"); - return c.getStringValue(""); + return client.getStringValue(""); } @Override public long getRestorePointInTime() { // TODO Auto-generated method stub log(" getRestorePointInTime()"); - return c.getLongValue(""); + return client.getLongValue(""); } @Override public String getRestorePrecision() { // TODO Auto-generated method stub log(" getRestorePrecision()"); - return c.getStringValue(""); + return client.getStringValue(""); } @Override public long getActiveContentSize() { - // scylla does not compress commit log, so this is equivalent + // scylla does not compress commit log, so this is equivalent return getActiveOnDiskSize(); } @Override public long getActiveOnDiskSize() { - return c.getLongValue("/commitlog/metrics/total_commit_log_size"); + return client.getLongValue("/commitlog/metrics/total_commit_log_size"); } @Override From e49b4ef32240d4538efc26edf4f276fbbd96e9e7 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:05:27 +0200 Subject: [PATCH 20/32] Rework CompactionManager --- .../db/compaction/CompactionManager.java | 53 +++++++------------ 1 file changed, 20 insertions(+), 33 
deletions(-) diff --git a/src/main/java/org/apache/cassandra/db/compaction/CompactionManager.java b/src/main/java/org/apache/cassandra/db/compaction/CompactionManager.java index 7df536e..9542533 100644 --- a/src/main/java/org/apache/cassandra/db/compaction/CompactionManager.java +++ b/src/main/java/org/apache/cassandra/db/compaction/CompactionManager.java @@ -17,16 +17,14 @@ */ package org.apache.cassandra.db.compaction; -import java.lang.management.ManagementFactory; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.logging.Logger; import javax.json.JsonArray; import javax.json.JsonObject; -import javax.management.MBeanServer; -import javax.management.ObjectName; import javax.management.openmbean.OpenDataException; import javax.management.openmbean.TabularData; import javax.ws.rs.core.MultivaluedHashMap; @@ -35,6 +33,7 @@ import javax.ws.rs.core.MultivaluedMap; import org.apache.cassandra.metrics.CompactionMetrics; import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.MetricsMBean; /** * A singleton which manages a private executor of ongoing compactions. 
@@ -49,30 +48,16 @@ import com.scylladb.jmx.api.APIClient; * * Modified by Cloudius Systems */ -public class CompactionManager implements CompactionManagerMBean { +public class CompactionManager extends MetricsMBean implements CompactionManagerMBean { public static final String MBEAN_OBJECT_NAME = "org.apache.cassandra.db:type=CompactionManager"; - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(CompactionManager.class.getName()); - public static final CompactionManager instance; - private APIClient c = new APIClient(); - CompactionMetrics metrics = new CompactionMetrics(); + private static final Logger logger = Logger.getLogger(CompactionManager.class.getName()); public void log(String str) { logger.finest(str); } - static { - instance = new CompactionManager(); - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - try { - mbs.registerMBean(instance, new ObjectName(MBEAN_OBJECT_NAME)); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - public static CompactionManager getInstance() { - return instance; + public CompactionManager(APIClient client) { + super(MBEAN_OBJECT_NAME, client, new CompactionMetrics()); } /** List of running compaction objects. 
*/ @@ -80,7 +65,7 @@ public class CompactionManager implements CompactionManagerMBean { public List> getCompactions() { log(" getCompactions()"); List> results = new ArrayList>(); - JsonArray compactions = c.getJsonArray("compaction_manager/compactions"); + JsonArray compactions = client.getJsonArray("compaction_manager/compactions"); for (int i = 0; i < compactions.size(); i++) { JsonObject compaction = compactions.getJsonObject(i); Map result = new HashMap(); @@ -99,7 +84,7 @@ public class CompactionManager implements CompactionManagerMBean { @Override public List getCompactionSummary() { log(" getCompactionSummary()"); - return c.getListStrValue("compaction_manager/compaction_summary"); + return client.getListStrValue("compaction_manager/compaction_summary"); } /** compaction history **/ @@ -107,7 +92,7 @@ public class CompactionManager implements CompactionManagerMBean { public TabularData getCompactionHistory() { log(" getCompactionHistory()"); try { - return CompactionHistoryTabularData.from(c.getJsonArray("/compaction_manager/compaction_history")); + return CompactionHistoryTabularData.from(client.getJsonArray("/compaction_manager/compaction_history")); } catch (OpenDataException e) { return null; } @@ -129,7 +114,7 @@ public class CompactionManager implements CompactionManagerMBean { log(" forceUserDefinedCompaction(String dataFiles)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("dataFiles", dataFiles); - c.post("compaction_manager/force_user_defined_compaction", queryParams); + client.post("compaction_manager/force_user_defined_compaction", queryParams); } /** @@ -144,7 +129,7 @@ public class CompactionManager implements CompactionManagerMBean { log(" stopCompaction(String type)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("type", type); - c.post("compaction_manager/stop_compaction", queryParams); + client.post("compaction_manager/stop_compaction", queryParams); } /** @@ -153,7 +138,7 @@ public class 
CompactionManager implements CompactionManagerMBean { @Override public int getCoreCompactorThreads() { log(" getCoreCompactorThreads()"); - return c.getIntValue(""); + return client.getIntValue(""); } /** @@ -173,7 +158,7 @@ public class CompactionManager implements CompactionManagerMBean { @Override public int getMaximumCompactorThreads() { log(" getMaximumCompactorThreads()"); - return c.getIntValue(""); + return client.getIntValue(""); } /** @@ -193,7 +178,7 @@ public class CompactionManager implements CompactionManagerMBean { @Override public int getCoreValidationThreads() { log(" getCoreValidationThreads()"); - return c.getIntValue(""); + return client.getIntValue(""); } /** @@ -213,7 +198,7 @@ public class CompactionManager implements CompactionManagerMBean { @Override public int getMaximumValidatorThreads() { log(" getMaximumValidatorThreads()"); - return c.getIntValue(""); + return client.getIntValue(""); } /** @@ -229,9 +214,11 @@ public class CompactionManager implements CompactionManagerMBean { @Override public void stopCompactionById(String compactionId) { - // scylla does not have neither compaction ids nor the file described in: - // "Ids can be found in the transaction log files whose name starts with compaction_, located in the table transactions folder" - // (nodetool) + // scylla does not have neither compaction ids nor the file described + // in: + // "Ids can be found in the transaction log files whose name starts with + // compaction_, located in the table transactions folder" + // (nodetool) // TODO: throw? 
log(" stopCompactionById"); } From 80762eb60a92b84331ceca73e9fe0e706e3e64e7 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:05:47 +0200 Subject: [PATCH 21/32] Rework gms beans --- .../apache/cassandra/gms/FailureDetector.java | 63 +++++++------------ .../org/apache/cassandra/gms/Gossiper.java | 45 ++++--------- 2 files changed, 37 insertions(+), 71 deletions(-) diff --git a/src/main/java/org/apache/cassandra/gms/FailureDetector.java b/src/main/java/org/apache/cassandra/gms/FailureDetector.java index 3762100..9d6520c 100644 --- a/src/main/java/org/apache/cassandra/gms/FailureDetector.java +++ b/src/main/java/org/apache/cassandra/gms/FailureDetector.java @@ -24,7 +24,6 @@ package org.apache.cassandra.gms; -import java.lang.management.ManagementFactory; import java.net.UnknownHostException; import java.util.HashMap; import java.util.Map; @@ -32,8 +31,6 @@ import java.util.Map; import javax.json.JsonArray; import javax.json.JsonObject; import javax.json.JsonValue; -import javax.management.MBeanServer; -import javax.management.ObjectName; import javax.management.openmbean.CompositeData; import javax.management.openmbean.CompositeDataSupport; import javax.management.openmbean.CompositeType; @@ -45,34 +42,21 @@ import javax.management.openmbean.TabularDataSupport; import javax.management.openmbean.TabularType; import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.APIMBean; -public class FailureDetector implements FailureDetectorMBean { +public class FailureDetector extends APIMBean implements FailureDetectorMBean { public static final String MBEAN_NAME = "org.apache.cassandra.net:type=FailureDetector"; private static final java.util.logging.Logger logger = java.util.logging.Logger .getLogger(FailureDetector.class.getName()); - private APIClient c = new APIClient(); + public FailureDetector(APIClient c) { + super(c); + } public void log(String str) { logger.finest(str); } - private static final FailureDetector instance = new 
FailureDetector(); - - public static FailureDetector getInstance() { - return instance; - } - - private FailureDetector() { - // Register this instance with JMX - try { - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - mbs.registerMBean(this, new ObjectName(MBEAN_NAME)); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - @Override public void dumpInterArrivalTimes() { log(" dumpInterArrivalTimes()"); @@ -86,7 +70,7 @@ public class FailureDetector implements FailureDetectorMBean { @Override public double getPhiConvictThreshold() { log(" getPhiConvictThreshold()"); - return c.getDoubleValue("/failure_detector/phi"); + return client.getDoubleValue("/failure_detector/phi"); } @Override @@ -94,29 +78,27 @@ public class FailureDetector implements FailureDetectorMBean { log(" getAllEndpointStates()"); StringBuilder sb = new StringBuilder(); - for (Map.Entry entry : getEndpointStateMap().entrySet()) - { + for (Map.Entry entry : getEndpointStateMap().entrySet()) { sb.append('/').append(entry.getKey()).append("\n"); appendEndpointState(sb, entry.getValue()); } return sb.toString(); } - private void appendEndpointState(StringBuilder sb, EndpointState endpointState) - { + private void appendEndpointState(StringBuilder sb, EndpointState endpointState) { sb.append(" generation:").append(endpointState.getHeartBeatState().getGeneration()).append("\n"); sb.append(" heartbeat:").append(endpointState.getHeartBeatState().getHeartBeatVersion()).append("\n"); - for (Map.Entry state : endpointState.applicationState.entrySet()) - { - if (state.getKey() == ApplicationState.TOKENS) + for (Map.Entry state : endpointState.applicationState.entrySet()) { + if (state.getKey() == ApplicationState.TOKENS) { continue; + } sb.append(" ").append(state.getKey()).append(":").append(state.getValue()).append("\n"); } } public Map getEndpointStateMap() { Map res = new HashMap(); - JsonArray arr = c.getJsonArray("/failure_detector/endpoints"); + JsonArray arr = 
client.getJsonArray("/failure_detector/endpoints"); for (int i = 0; i < arr.size(); i++) { JsonObject obj = arr.getJsonObject(i); EndpointState ep = new EndpointState(new HeartBeatState(obj.getInt("generation"), obj.getInt("version"))); @@ -135,31 +117,34 @@ public class FailureDetector implements FailureDetectorMBean { @Override public String getEndpointState(String address) throws UnknownHostException { log(" getEndpointState(String address) throws UnknownHostException"); - return c.getStringValue("/failure_detector/endpoints/states/" + address); + return client.getStringValue("/failure_detector/endpoints/states/" + address); } @Override public Map getSimpleStates() { log(" getSimpleStates()"); - return c.getMapStrValue("/failure_detector/simple_states"); + return client.getMapStrValue("/failure_detector/simple_states"); } @Override public int getDownEndpointCount() { log(" getDownEndpointCount()"); - return c.getIntValue("/failure_detector/count/endpoint/down"); + return client.getIntValue("/failure_detector/count/endpoint/down"); } @Override public int getUpEndpointCount() { log(" getUpEndpointCount()"); - return c.getIntValue("/failure_detector/count/endpoint/up"); + return client.getIntValue("/failure_detector/count/endpoint/up"); } // From origin: - // this is useless except to provide backwards compatibility in phi_convict_threshold, - // because everyone seems pretty accustomed to the default of 8, and users who have - // already tuned their phi_convict_threshold for their own environments won't need to + // this is useless except to provide backwards compatibility in + // phi_convict_threshold, + // because everyone seems pretty accustomed to the default of 8, and users + // who have + // already tuned their phi_convict_threshold for their own environments + // won't need to // change. private final double PHI_FACTOR = 1.0 / Math.log(10.0); // 0.434... 
@@ -170,7 +155,7 @@ public class FailureDetector implements FailureDetectorMBean { new OpenType[] { SimpleType.STRING, SimpleType.DOUBLE }); final TabularDataSupport results = new TabularDataSupport( new TabularType("PhiList", "PhiList", ct, new String[] { "Endpoint" })); - final JsonArray arr = c.getJsonArray("/failure_detector/endpoint_phi_values"); + final JsonArray arr = client.getJsonArray("/failure_detector/endpoint_phi_values"); for (JsonValue v : arr) { JsonObject o = (JsonObject) v; @@ -187,5 +172,5 @@ public class FailureDetector implements FailureDetectorMBean { } return results; - } + } } diff --git a/src/main/java/org/apache/cassandra/gms/Gossiper.java b/src/main/java/org/apache/cassandra/gms/Gossiper.java index b1c81e7..b7963a6 100644 --- a/src/main/java/org/apache/cassandra/gms/Gossiper.java +++ b/src/main/java/org/apache/cassandra/gms/Gossiper.java @@ -23,15 +23,14 @@ */ package org.apache.cassandra.gms; -import java.lang.management.ManagementFactory; import java.net.UnknownHostException; +import java.util.logging.Logger; -import javax.management.MBeanServer; -import javax.management.ObjectName; import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.APIMBean; /** * This module is responsible for Gossiping information for the local endpoint. @@ -48,61 +47,43 @@ import com.scylladb.jmx.api.APIClient; * node as down in the Failure Detector. 
*/ -public class Gossiper implements GossiperMBean { +public class Gossiper extends APIMBean implements GossiperMBean { public static final String MBEAN_NAME = "org.apache.cassandra.net:type=Gossiper"; - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(Gossiper.class.getName()); + private static final Logger logger = Logger.getLogger(Gossiper.class.getName()); - private APIClient c = new APIClient(); + public Gossiper(APIClient c) { + super(c); + } public void log(String str) { logger.finest(str); } - private static final Gossiper instance = new Gossiper(); - - public static Gossiper getInstance() { - return instance; - } - - private Gossiper() { - - // Register this instance with JMX - try { - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - mbs.registerMBean(this, new ObjectName(MBEAN_NAME)); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - @Override public long getEndpointDowntime(String address) throws UnknownHostException { log(" getEndpointDowntime(String address) throws UnknownHostException"); - return c.getLongValue("gossiper/downtime/" + address); + return client.getLongValue("gossiper/downtime/" + address); } @Override - public int getCurrentGenerationNumber(String address) - throws UnknownHostException { + public int getCurrentGenerationNumber(String address) throws UnknownHostException { log(" getCurrentGenerationNumber(String address) throws UnknownHostException"); - return c.getIntValue("gossiper/generation_number/" + address); + return client.getIntValue("gossiper/generation_number/" + address); } @Override - public void unsafeAssassinateEndpoint(String address) - throws UnknownHostException { + public void unsafeAssassinateEndpoint(String address) throws UnknownHostException { log(" unsafeAssassinateEndpoint(String address) throws UnknownHostException"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("unsafe", "True"); - 
c.post("gossiper/assassinate/" + address, queryParams); + client.post("gossiper/assassinate/" + address, queryParams); } @Override public void assassinateEndpoint(String address) throws UnknownHostException { log(" assassinateEndpoint(String address) throws UnknownHostException"); - c.post("gossiper/assassinate/" + address, null); + client.post("gossiper/assassinate/" + address, null); } } From 21a343d00340965df29dc090da674ad324f438e4 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:06:15 +0200 Subject: [PATCH 22/32] Rework EnpointSnitchInfo --- .../cassandra/locator/EndpointSnitchInfo.java | 41 ++++++------------- 1 file changed, 13 insertions(+), 28 deletions(-) diff --git a/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java b/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java index 0cf82bc..6fb485d 100644 --- a/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java +++ b/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java @@ -17,43 +17,28 @@ */ package org.apache.cassandra.locator; -import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.UnknownHostException; +import java.util.logging.Logger; -import javax.management.MBeanServer; -import javax.management.ObjectName; import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.APIMBean; -public class EndpointSnitchInfo implements EndpointSnitchInfoMBean { - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(EndpointSnitchInfo.class.getName()); +public class EndpointSnitchInfo extends APIMBean implements EndpointSnitchInfoMBean { + public static final String MBEAN_NAME = "org.apache.cassandra.db:type=EndpointSnitchInfo"; + private static final Logger logger = Logger.getLogger(EndpointSnitchInfo.class.getName()); - private APIClient c = new APIClient(); + public 
EndpointSnitchInfo(APIClient c) { + super(c); + } public void log(String str) { logger.finest(str); } - private static final EndpointSnitchInfo instance = new EndpointSnitchInfo(); - - public static EndpointSnitchInfo getInstance() { - return instance; - } - - private EndpointSnitchInfo() { - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - try { - mbs.registerMBean(this, new ObjectName( - "org.apache.cassandra.db:type=EndpointSnitchInfo")); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - /** * Provides the Rack name depending on the respective snitch used, given the * host name/ip @@ -69,7 +54,7 @@ public class EndpointSnitchInfo implements EndpointSnitchInfoMBean { host = InetAddress.getLoopbackAddress().getHostAddress(); } queryParams.add("host", host); - return c.getStringValue("/snitch/rack", queryParams, 10000); + return client.getStringValue("/snitch/rack", queryParams, 10000); } /** @@ -87,7 +72,7 @@ public class EndpointSnitchInfo implements EndpointSnitchInfoMBean { host = InetAddress.getLoopbackAddress().getHostAddress(); } queryParams.add("host", host); - return c.getStringValue("/snitch/datacenter", queryParams, 10000); + return client.getStringValue("/snitch/datacenter", queryParams, 10000); } /** @@ -98,16 +83,16 @@ public class EndpointSnitchInfo implements EndpointSnitchInfoMBean { @Override public String getSnitchName() { log(" getSnitchName()"); - return c.getStringValue("/snitch/name"); + return client.getStringValue("/snitch/name"); } @Override public String getRack() { - return c.getStringValue("/snitch/rack", null, 10000); + return client.getStringValue("/snitch/rack", null, 10000); } @Override public String getDatacenter() { - return c.getStringValue("/snitch/datacenter", null, 10000); + return client.getStringValue("/snitch/datacenter", null, 10000); } } From 3fe9cfc232ea78ed6121ae2e618216c1d4b8550a Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Mon, 24 Oct 2016 08:33:19 +0000 Subject: [PATCH 23/32] 
EndpointSnitchInfo: Fix getRack/DC host handling I.e. our localhost might be (and probably is) different from scyllas "fb::broadcast", and if not, try to get numerical asap. --- .../cassandra/locator/EndpointSnitchInfo.java | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java b/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java index 6fb485d..8f5d2f0 100644 --- a/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java +++ b/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfo.java @@ -17,6 +17,8 @@ */ package org.apache.cassandra.locator; +import static java.util.Collections.singletonMap; + import java.net.InetAddress; import java.net.UnknownHostException; import java.util.logging.Logger; @@ -49,11 +51,8 @@ public class EndpointSnitchInfo extends APIMBean implements EndpointSnitchInfoMB @Override public String getRack(String host) throws UnknownHostException { log("getRack(String host) throws UnknownHostException"); - MultivaluedMap queryParams = new MultivaluedHashMap(); - if (host == null) { - host = InetAddress.getLoopbackAddress().getHostAddress(); - } - queryParams.add("host", host); + MultivaluedMap queryParams = host != null ? new MultivaluedHashMap( + singletonMap("host", InetAddress.getByName(host).getHostAddress())) : null; return client.getStringValue("/snitch/rack", queryParams, 10000); } @@ -67,11 +66,8 @@ public class EndpointSnitchInfo extends APIMBean implements EndpointSnitchInfoMB @Override public String getDatacenter(String host) throws UnknownHostException { log(" getDatacenter(String host) throws UnknownHostException"); - MultivaluedMap queryParams = new MultivaluedHashMap(); - if (host == null) { - host = InetAddress.getLoopbackAddress().getHostAddress(); - } - queryParams.add("host", host); + MultivaluedMap queryParams = host != null ? 
new MultivaluedHashMap( + singletonMap("host", InetAddress.getByName(host).getHostAddress())) : null; return client.getStringValue("/snitch/datacenter", queryParams, 10000); } From fec8b44942473a446aa9c29c1c6debde0e9930e4 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:06:33 +0200 Subject: [PATCH 24/32] Rework MessagingService --- .../cassandra/net/MessagingService.java | 154 +++++++----------- 1 file changed, 58 insertions(+), 96 deletions(-) diff --git a/src/main/java/org/apache/cassandra/net/MessagingService.java b/src/main/java/org/apache/cassandra/net/MessagingService.java index a663030..4bbae71 100644 --- a/src/main/java/org/apache/cassandra/net/MessagingService.java +++ b/src/main/java/org/apache/cassandra/net/MessagingService.java @@ -24,105 +24,60 @@ package org.apache.cassandra.net; import static java.util.Collections.emptyMap; -import java.lang.management.ManagementFactory; import java.net.UnknownHostException; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import java.util.stream.Stream; import javax.json.JsonArray; import javax.json.JsonObject; -import javax.management.MBeanServer; -import javax.management.ObjectName; import org.apache.cassandra.metrics.DroppedMessageMetrics; import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.MetricsMBean; -public final class MessagingService implements MessagingServiceMBean { - static final int INTERVAL = 1000; // update every 1second +public final class MessagingService extends MetricsMBean implements MessagingServiceMBean { public static final String MBEAN_NAME = "org.apache.cassandra.net:type=MessagingService"; - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(MessagingService.class.getName()); - Map dropped; - private APIClient c = new APIClient(); - Map resent_timeout = new HashMap(); - private final ObjectName jmxObjectName; + 
private static final Logger logger = Logger.getLogger(MessagingService.class.getName()); + + private Map resentTimeouts = new HashMap(); private long recentTimeoutCount; /* All verb handler identifiers */ - public enum Verb - { - MUTATION, - @Deprecated BINARY, - READ_REPAIR, - READ, - REQUEST_RESPONSE, // client-initiated reads and writes - @Deprecated STREAM_INITIATE, - @Deprecated STREAM_INITIATE_DONE, - @Deprecated STREAM_REPLY, - @Deprecated STREAM_REQUEST, - RANGE_SLICE, - @Deprecated BOOTSTRAP_TOKEN, - @Deprecated TREE_REQUEST, - @Deprecated TREE_RESPONSE, - @Deprecated JOIN, - GOSSIP_DIGEST_SYN, - GOSSIP_DIGEST_ACK, - GOSSIP_DIGEST_ACK2, - @Deprecated DEFINITIONS_ANNOUNCE, - DEFINITIONS_UPDATE, - TRUNCATE, - SCHEMA_CHECK, - @Deprecated INDEX_SCAN, - REPLICATION_FINISHED, - INTERNAL_RESPONSE, // responses to internal calls - COUNTER_MUTATION, - @Deprecated STREAMING_REPAIR_REQUEST, - @Deprecated STREAMING_REPAIR_RESPONSE, - SNAPSHOT, // Similar to nt snapshot - MIGRATION_REQUEST, - GOSSIP_SHUTDOWN, - _TRACE, // dummy verb so we can use MS.droppedMessages - ECHO, - REPAIR_MESSAGE, - // use as padding for backwards compatability where a previous version needs to validate a verb from the future. 
- PAXOS_PREPARE, - PAXOS_PROPOSE, - PAXOS_COMMIT, - PAGED_RANGE, + public enum Verb { + MUTATION, @Deprecated BINARY, READ_REPAIR, READ, REQUEST_RESPONSE, // client-initiated + // reads + // and + // writes + @Deprecated STREAM_INITIATE, @Deprecated STREAM_INITIATE_DONE, @Deprecated STREAM_REPLY, @Deprecated STREAM_REQUEST, RANGE_SLICE, @Deprecated BOOTSTRAP_TOKEN, @Deprecated TREE_REQUEST, @Deprecated TREE_RESPONSE, @Deprecated JOIN, GOSSIP_DIGEST_SYN, GOSSIP_DIGEST_ACK, GOSSIP_DIGEST_ACK2, @Deprecated DEFINITIONS_ANNOUNCE, DEFINITIONS_UPDATE, TRUNCATE, SCHEMA_CHECK, @Deprecated INDEX_SCAN, REPLICATION_FINISHED, INTERNAL_RESPONSE, // responses + // to + // internal + // calls + COUNTER_MUTATION, @Deprecated STREAMING_REPAIR_REQUEST, @Deprecated STREAMING_REPAIR_RESPONSE, SNAPSHOT, // Similar + // to + // nt + // snapshot + MIGRATION_REQUEST, GOSSIP_SHUTDOWN, _TRACE, // dummy verb so we can use + // MS.droppedMessages + ECHO, REPAIR_MESSAGE, + // use as padding for backwards compatability where a previous version + // needs to validate a verb from the future. 
+ PAXOS_PREPARE, PAXOS_PROPOSE, PAXOS_COMMIT, PAGED_RANGE, // remember to add new verbs at the end, since we serialize by ordinal - UNUSED_1, - UNUSED_2, - UNUSED_3, - ; + UNUSED_1, UNUSED_2, UNUSED_3,; } public void log(String str) { logger.finest(str); } - public MessagingService() { - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - try { - jmxObjectName = new ObjectName(MBEAN_NAME); - mbs.registerMBean(this, jmxObjectName); - dropped = new HashMap(); - for (Verb v : Verb.values()) { - dropped.put(v.name(), new DroppedMessageMetrics(v)); - } - } catch (Exception e) { - throw new RuntimeException(e); - } - } - - static MessagingService instance; - - public static MessagingService getInstance() { - if (instance == null) { - instance = new MessagingService(); - } - return instance; + public MessagingService(APIClient client) { + super(MBEAN_NAME, client, + Stream.of(Verb.values()).map(v -> new DroppedMessageMetrics(v)).collect(Collectors.toList())); } /** @@ -131,7 +86,7 @@ public final class MessagingService implements MessagingServiceMBean { @Override public Map getCommandPendingTasks() { log(" getCommandPendingTasks()"); - return c.getMapStringIntegerValue("/messaging_service/messages/pending"); + return client.getMapStringIntegerValue("/messaging_service/messages/pending"); } /** @@ -140,8 +95,7 @@ public final class MessagingService implements MessagingServiceMBean { @Override public Map getCommandCompletedTasks() { log("getCommandCompletedTasks()"); - Map res = c - .getListMapStringLongValue("/messaging_service/messages/sent"); + Map res = client.getListMapStringLongValue("/messaging_service/messages/sent"); return res; } @@ -151,7 +105,7 @@ public final class MessagingService implements MessagingServiceMBean { @Override public Map getCommandDroppedTasks() { log(" getCommandDroppedTasks()"); - return c.getMapStringLongValue("/messaging_service/messages/dropped"); + return client.getMapStringLongValue("/messaging_service/messages/dropped"); } 
/** @@ -160,7 +114,7 @@ public final class MessagingService implements MessagingServiceMBean { @Override public Map getResponsePendingTasks() { log(" getResponsePendingTasks()"); - return c.getMapStringIntegerValue("/messaging_service/messages/respond_pending"); + return client.getMapStringIntegerValue("/messaging_service/messages/respond_pending"); } /** @@ -169,7 +123,7 @@ public final class MessagingService implements MessagingServiceMBean { @Override public Map getResponseCompletedTasks() { log(" getResponseCompletedTasks()"); - return c.getMapStringLongValue("/messaging_service/messages/respond_completed"); + return client.getMapStringLongValue("/messaging_service/messages/respond_completed"); } /** @@ -179,7 +133,7 @@ public final class MessagingService implements MessagingServiceMBean { public Map getDroppedMessages() { log(" getDroppedMessages()"); Map res = new HashMap(); - JsonArray arr = c.getJsonArray("/messaging_service/messages/dropped_by_ver"); + JsonArray arr = client.getJsonArray("/messaging_service/messages/dropped_by_ver"); for (int i = 0; i < arr.size(); i++) { JsonObject obj = arr.getJsonObject(i); res.put(obj.getString("verb"), obj.getInt("count")); @@ -187,17 +141,26 @@ public final class MessagingService implements MessagingServiceMBean { return res; } + private Map recent; + /** * dropped message counts since last called */ - @SuppressWarnings("deprecation") @Override public Map getRecentlyDroppedMessages() { log(" getRecentlyDroppedMessages()"); - Map map = new HashMap(); - for (Map.Entry entry : dropped.entrySet()) - map.put(entry.getKey(), entry.getValue().getRecentlyDropped()); - return map; + + Map dropped = getDroppedMessages(), result = new HashMap<>(dropped), old = recent; + + recent = dropped; + + if (old != null) { + for (Map.Entry e : old.entrySet()) { + result.put(e.getKey(), result.get(e.getKey()) - e.getValue()); + } + } + + return result; } /** @@ -220,7 +183,7 @@ public final class MessagingService implements 
MessagingServiceMBean { @Override public Map getTimeoutsPerHost() { log(" getTimeoutsPerHost()"); - return c.getMapStringLongValue("/messaging_service/messages/timeout"); + return client.getMapStringLongValue("/messaging_service/messages/timeout"); } /** @@ -243,12 +206,11 @@ public final class MessagingService implements MessagingServiceMBean { log(" getRecentTimeoutsPerHost()"); Map timeouts = getTimeoutsPerHost(); Map result = new HashMap(); - for ( Entry e : timeouts.entrySet()) { - long res = e.getValue().longValue() - - ((resent_timeout.containsKey(e.getKey()))? (resent_timeout.get(e.getKey())).longValue() - : 0); - resent_timeout.put(e.getKey(), e.getValue()); - result.put(e.getKey(),res); + for (Entry e : timeouts.entrySet()) { + long res = e.getValue().longValue() + - ((resentTimeouts.containsKey(e.getKey())) ? (resentTimeouts.get(e.getKey())).longValue() : 0); + resentTimeouts.put(e.getKey(), e.getValue()); + result.put(e.getKey(), res); } return result; } @@ -256,7 +218,7 @@ public final class MessagingService implements MessagingServiceMBean { @Override public int getVersion(String address) throws UnknownHostException { log(" getVersion(String address) throws UnknownHostException"); - return c.getIntValue(""); + return client.getIntValue(""); } @Override From 4ec7d58249c011ee6b3811f8a23332fc4c2ab381 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:07:03 +0200 Subject: [PATCH 25/32] Rework service.* beans --- .../cassandra/service/CacheService.java | 73 +-- .../apache/cassandra/service/GCInspector.java | 257 +------- .../cassandra/service/StorageProxy.java | 108 ++-- .../cassandra/service/StorageService.java | 591 ++++++++---------- 4 files changed, 356 insertions(+), 673 deletions(-) diff --git a/src/main/java/org/apache/cassandra/service/CacheService.java b/src/main/java/org/apache/cassandra/service/CacheService.java index ceea0ac..8c1cb06 100644 --- a/src/main/java/org/apache/cassandra/service/CacheService.java +++ 
b/src/main/java/org/apache/cassandra/service/CacheService.java @@ -24,22 +24,19 @@ package org.apache.cassandra.service; -import java.lang.management.ManagementFactory; import java.util.concurrent.ExecutionException; +import java.util.logging.Logger; -import javax.management.MBeanServer; -import javax.management.ObjectName; import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; import org.apache.cassandra.metrics.CacheMetrics; import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.MetricsMBean; -public class CacheService implements CacheServiceMBean { - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(CacheService.class.getName()); - private APIClient c = new APIClient(); +public class CacheService extends MetricsMBean implements CacheServiceMBean { + private static final Logger logger = Logger.getLogger(CacheService.class.getName()); public void log(String str) { logger.finest(str); @@ -47,33 +44,15 @@ public class CacheService implements CacheServiceMBean { public static final String MBEAN_NAME = "org.apache.cassandra.db:type=Caches"; - public final CacheMetrics keyCache; - public final CacheMetrics rowCache; - public final CacheMetrics counterCache; - public final static CacheService instance = new CacheService(); - - public static CacheService getInstance() { - return instance; - } - - private CacheService() { - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - - try { - mbs.registerMBean(this, new ObjectName(MBEAN_NAME)); - } catch (Exception e) { - throw new RuntimeException(e); - } - - keyCache = new CacheMetrics("KeyCache", null); - rowCache = new CacheMetrics("RowCache", "row"); - counterCache = new CacheMetrics("CounterCache", null); + public CacheService(APIClient client) { + super(MBEAN_NAME, client, new CacheMetrics("KeyCache", "key"), new CacheMetrics("RowCache", "row"), + new CacheMetrics("CounterCache", "counter")); } @Override public int 
getRowCacheSavePeriodInSeconds() { log(" getRowCacheSavePeriodInSeconds()"); - return c.getIntValue("cache_service/row_cache_save_period"); + return client.getIntValue("cache_service/row_cache_save_period"); } @Override @@ -81,13 +60,13 @@ public class CacheService implements CacheServiceMBean { log(" setRowCacheSavePeriodInSeconds(int rcspis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("period", Integer.toString(rcspis)); - c.post("cache_service/row_cache_save_period", queryParams); + client.post("cache_service/row_cache_save_period", queryParams); } @Override public int getKeyCacheSavePeriodInSeconds() { log(" getKeyCacheSavePeriodInSeconds()"); - return c.getIntValue("cache_service/key_cache_save_period"); + return client.getIntValue("cache_service/key_cache_save_period"); } @Override @@ -95,13 +74,13 @@ public class CacheService implements CacheServiceMBean { log(" setKeyCacheSavePeriodInSeconds(int kcspis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("period", Integer.toString(kcspis)); - c.post("cache_service/key_cache_save_period", queryParams); + client.post("cache_service/key_cache_save_period", queryParams); } @Override public int getCounterCacheSavePeriodInSeconds() { log(" getCounterCacheSavePeriodInSeconds()"); - return c.getIntValue("cache_service/counter_cache_save_period"); + return client.getIntValue("cache_service/counter_cache_save_period"); } @Override @@ -109,13 +88,13 @@ public class CacheService implements CacheServiceMBean { log(" setCounterCacheSavePeriodInSeconds(int ccspis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("ccspis", Integer.toString(ccspis)); - c.post("cache_service/counter_cache_save_period", queryParams); + client.post("cache_service/counter_cache_save_period", queryParams); } @Override public int getRowCacheKeysToSave() { log(" getRowCacheKeysToSave()"); - return c.getIntValue("cache_service/row_cache_keys_to_save"); + return 
client.getIntValue("cache_service/row_cache_keys_to_save"); } @Override @@ -123,13 +102,13 @@ public class CacheService implements CacheServiceMBean { log(" setRowCacheKeysToSave(int rckts)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("rckts", Integer.toString(rckts)); - c.post("cache_service/row_cache_keys_to_save", queryParams); + client.post("cache_service/row_cache_keys_to_save", queryParams); } @Override public int getKeyCacheKeysToSave() { log(" getKeyCacheKeysToSave()"); - return c.getIntValue("cache_service/key_cache_keys_to_save"); + return client.getIntValue("cache_service/key_cache_keys_to_save"); } @Override @@ -137,13 +116,13 @@ public class CacheService implements CacheServiceMBean { log(" setKeyCacheKeysToSave(int kckts)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("kckts", Integer.toString(kckts)); - c.post("cache_service/key_cache_keys_to_save", queryParams); + client.post("cache_service/key_cache_keys_to_save", queryParams); } @Override public int getCounterCacheKeysToSave() { log(" getCounterCacheKeysToSave()"); - return c.getIntValue("cache_service/counter_cache_keys_to_save"); + return client.getIntValue("cache_service/counter_cache_keys_to_save"); } @Override @@ -151,7 +130,7 @@ public class CacheService implements CacheServiceMBean { log(" setCounterCacheKeysToSave(int cckts)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("cckts", Integer.toString(cckts)); - c.post("cache_service/counter_cache_keys_to_save", queryParams); + client.post("cache_service/counter_cache_keys_to_save", queryParams); } /** @@ -160,7 +139,7 @@ public class CacheService implements CacheServiceMBean { @Override public void invalidateKeyCache() { log(" invalidateKeyCache()"); - c.post("cache_service/invalidate_key_cache"); + client.post("cache_service/invalidate_key_cache"); } /** @@ -169,13 +148,13 @@ public class CacheService implements CacheServiceMBean { @Override public void 
invalidateRowCache() { log(" invalidateRowCache()"); - c.post("cache_service/invalidate_row_cache"); + client.post("cache_service/invalidate_row_cache"); } @Override public void invalidateCounterCache() { log(" invalidateCounterCache()"); - c.post("cache_service/invalidate_counter_cache"); + client.post("cache_service/invalidate_counter_cache"); } @Override @@ -183,7 +162,7 @@ public class CacheService implements CacheServiceMBean { log(" setRowCacheCapacityInMB(long capacity)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("capacity", Long.toString(capacity)); - c.post("cache_service/row_cache_capacity", queryParams); + client.post("cache_service/row_cache_capacity", queryParams); } @Override @@ -191,7 +170,7 @@ public class CacheService implements CacheServiceMBean { log(" setKeyCacheCapacityInMB(long capacity)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("capacity", Long.toString(capacity)); - c.post("cache_service/key_cache_capacity", queryParams); + client.post("cache_service/key_cache_capacity", queryParams); } @Override @@ -199,7 +178,7 @@ public class CacheService implements CacheServiceMBean { log(" setCounterCacheCapacityInMB(long capacity)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("capacity", Long.toString(capacity)); - c.post("cache_service/counter_cache_capacity_in_mb", queryParams); + client.post("cache_service/counter_cache_capacity_in_mb", queryParams); } /** @@ -216,6 +195,6 @@ public class CacheService implements CacheServiceMBean { @Override public void saveCaches() throws ExecutionException, InterruptedException { log(" saveCaches() throws ExecutionException, InterruptedException"); - c.post("cache_service/save_caches"); + client.post("cache_service/save_caches"); } } diff --git a/src/main/java/org/apache/cassandra/service/GCInspector.java b/src/main/java/org/apache/cassandra/service/GCInspector.java index 9b50316..de943c2 100644 --- 
a/src/main/java/org/apache/cassandra/service/GCInspector.java +++ b/src/main/java/org/apache/cassandra/service/GCInspector.java @@ -24,259 +24,18 @@ package org.apache.cassandra.service; -import java.lang.management.GarbageCollectorMXBean; -import java.lang.management.ManagementFactory; -import java.lang.management.MemoryUsage; -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicReference; +import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.APIMBean; -import javax.management.MBeanServer; -import javax.management.Notification; -import javax.management.NotificationListener; -import javax.management.ObjectName; -import javax.management.openmbean.CompositeData; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.sun.management.GarbageCollectionNotificationInfo; -import com.sun.management.GcInfo; - -@SuppressWarnings("restriction") -public class GCInspector implements NotificationListener, GCInspectorMXBean -{ +public class GCInspector extends APIMBean implements GCInspectorMXBean { public static final String MBEAN_NAME = "org.apache.cassandra.service:type=GCInspector"; - private static final Logger logger = LoggerFactory.getLogger(GCInspector.class); - final static long MIN_LOG_DURATION = 200; - final static long MIN_LOG_DURATION_TPSTATS = 1000; - static final class State - { - final double maxRealTimeElapsed; - final double totalRealTimeElapsed; - final double sumSquaresRealTimeElapsed; - final double totalBytesReclaimed; - final double count; - final long startNanos; - - State(double extraElapsed, double extraBytes, State prev) - { - this.totalRealTimeElapsed = prev.totalRealTimeElapsed + extraElapsed; - this.totalBytesReclaimed = prev.totalBytesReclaimed + extraBytes; - this.sumSquaresRealTimeElapsed = prev.sumSquaresRealTimeElapsed + (extraElapsed * extraElapsed); - this.startNanos = prev.startNanos; - this.count = 
prev.count + 1; - this.maxRealTimeElapsed = Math.max(prev.maxRealTimeElapsed, extraElapsed); - } - - State() - { - count = maxRealTimeElapsed = sumSquaresRealTimeElapsed = totalRealTimeElapsed = totalBytesReclaimed = 0; - startNanos = System.nanoTime(); - } + public GCInspector(APIClient client) { + super(client); } - static final class GCState - { - final GarbageCollectorMXBean gcBean; - final boolean assumeGCIsPartiallyConcurrent; - final boolean assumeGCIsOldGen; - private String[] keys; - long lastGcTotalDuration = 0; - - - GCState(GarbageCollectorMXBean gcBean, boolean assumeGCIsPartiallyConcurrent, boolean assumeGCIsOldGen) - { - this.gcBean = gcBean; - this.assumeGCIsPartiallyConcurrent = assumeGCIsPartiallyConcurrent; - this.assumeGCIsOldGen = assumeGCIsOldGen; - } - - String[] keys(GarbageCollectionNotificationInfo info) - { - if (keys != null) - return keys; - - keys = info.getGcInfo().getMemoryUsageBeforeGc().keySet().toArray(new String[0]); - Arrays.sort(keys); - - return keys; - } - } - - final AtomicReference state = new AtomicReference<>(new State()); - - final Map gcStates = new HashMap<>(); - - public GCInspector() - { - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - - try - { - ObjectName gcName = new ObjectName(ManagementFactory.GARBAGE_COLLECTOR_MXBEAN_DOMAIN_TYPE + ",*"); - for (ObjectName name : mbs.queryNames(gcName, null)) - { - GarbageCollectorMXBean gc = ManagementFactory.newPlatformMXBeanProxy(mbs, name.getCanonicalName(), GarbageCollectorMXBean.class); - gcStates.put(gc.getName(), new GCState(gc, assumeGCIsPartiallyConcurrent(gc), assumeGCIsOldGen(gc))); - } - - mbs.registerMBean(this, new ObjectName(MBEAN_NAME)); - } - catch (Exception e) - { - throw new RuntimeException(e); - } - } - - public static void register() throws Exception - { - GCInspector inspector = new GCInspector(); - MBeanServer server = ManagementFactory.getPlatformMBeanServer(); - ObjectName gcName = new 
ObjectName(ManagementFactory.GARBAGE_COLLECTOR_MXBEAN_DOMAIN_TYPE + ",*"); - for (ObjectName name : server.queryNames(gcName, null)) - { - server.addNotificationListener(name, inspector, null, null); - } - } - - /* - * Assume that a GC type is at least partially concurrent and so a side channel method - * should be used to calculate application stopped time due to the GC. - * - * If the GC isn't recognized then assume that is concurrent and we need to do our own calculation - * via the the side channel. - */ - private static boolean assumeGCIsPartiallyConcurrent(GarbageCollectorMXBean gc) - { - switch (gc.getName()) - { - //First two are from the serial collector - case "Copy": - case "MarkSweepCompact": - //Parallel collector - case "PS MarkSweep": - case "PS Scavenge": - case "G1 Young Generation": - //CMS young generation collector - case "ParNew": - return false; - case "ConcurrentMarkSweep": - case "G1 Old Generation": - return true; - default: - //Assume possibly concurrent if unsure - return true; - } - } - - /* - * Assume that a GC type is an old generation collection so SSTableDeletingTask.rescheduleFailedTasks() - * should be invoked. 
- * - * Defaults to not invoking SSTableDeletingTask.rescheduleFailedTasks() on unrecognized GC names - */ - private static boolean assumeGCIsOldGen(GarbageCollectorMXBean gc) - { - switch (gc.getName()) - { - case "Copy": - case "PS Scavenge": - case "G1 Young Generation": - case "ParNew": - return false; - case "MarkSweepCompact": - case "PS MarkSweep": - case "ConcurrentMarkSweep": - case "G1 Old Generation": - return true; - default: - //Assume not old gen otherwise, don't call - //SSTableDeletingTask.rescheduleFailedTasks() - return false; - } - } - - public void handleNotification(final Notification notification, final Object handback) - { - String type = notification.getType(); - if (type.equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) - { - // retrieve the garbage collection notification information - CompositeData cd = (CompositeData) notification.getUserData(); - GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from(cd); - String gcName = info.getGcName(); - GcInfo gcInfo = info.getGcInfo(); - - long duration = gcInfo.getDuration(); - - /* - * The duration supplied in the notification info includes more than just - * application stopped time for concurrent GCs. Try and do a better job coming up with a good stopped time - * value by asking for and tracking cumulative time spent blocked in GC. - */ - GCState gcState = gcStates.get(gcName); - if (gcState.assumeGCIsPartiallyConcurrent) - { - long previousTotal = gcState.lastGcTotalDuration; - long total = gcState.gcBean.getCollectionTime(); - gcState.lastGcTotalDuration = total; - duration = total - previousTotal; // may be zero for a really fast collection - } - - StringBuilder sb = new StringBuilder(); - sb.append(info.getGcName()).append(" GC in ").append(duration).append("ms. 
"); - long bytes = 0; - Map beforeMemoryUsage = gcInfo.getMemoryUsageBeforeGc(); - Map afterMemoryUsage = gcInfo.getMemoryUsageAfterGc(); - for (String key : gcState.keys(info)) - { - MemoryUsage before = beforeMemoryUsage.get(key); - MemoryUsage after = afterMemoryUsage.get(key); - if (after != null && after.getUsed() != before.getUsed()) - { - sb.append(key).append(": ").append(before.getUsed()); - sb.append(" -> "); - sb.append(after.getUsed()); - if (!key.equals(gcState.keys[gcState.keys.length - 1])) - sb.append("; "); - bytes += before.getUsed() - after.getUsed(); - } - } - - while (true) - { - State prev = state.get(); - if (state.compareAndSet(prev, new State(duration, bytes, prev))) - break; - } - - String st = sb.toString(); - if (duration > MIN_LOG_DURATION) - logger.trace(st); - else if (logger.isDebugEnabled()) - logger.debug(st); - } - } - - public State getTotalSinceLastCheck() - { - return state.getAndSet(new State()); - } - - public double[] getAndResetStats() - { - State state = getTotalSinceLastCheck(); - double[] r = new double[6]; - r[0] = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - state.startNanos); - r[1] = state.maxRealTimeElapsed; - r[2] = state.totalRealTimeElapsed; - r[3] = state.sumSquaresRealTimeElapsed; - r[4] = state.totalBytesReclaimed; - r[5] = state.count; - return r; + @Override + public double[] getAndResetStats() { + return new double[6]; } } diff --git a/src/main/java/org/apache/cassandra/service/StorageProxy.java b/src/main/java/org/apache/cassandra/service/StorageProxy.java index 04e41e2..8a140c7 100644 --- a/src/main/java/org/apache/cassandra/service/StorageProxy.java +++ b/src/main/java/org/apache/cassandra/service/StorageProxy.java @@ -25,67 +25,54 @@ package org.apache.cassandra.service; import static java.util.Collections.emptySet; -import java.lang.management.ManagementFactory; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.logging.Logger; -import 
javax.management.MBeanServer; -import javax.management.ObjectName; import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; +import org.apache.cassandra.metrics.CASClientRequestMetrics; +import org.apache.cassandra.metrics.ClientRequestMetrics; + import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.MetricsMBean; -public class StorageProxy implements StorageProxyMBean { +public class StorageProxy extends MetricsMBean implements StorageProxyMBean { public static final String MBEAN_NAME = "org.apache.cassandra.db:type=StorageProxy"; - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(StorageProxy.class.getName()); - - private APIClient c = new APIClient(); + private static final Logger logger = Logger.getLogger(StorageProxy.class.getName()); public void log(String str) { logger.finest(str); } - private static final StorageProxy instance = new StorageProxy(); - - public static StorageProxy getInstance() { - return instance; - } - public static final String UNREACHABLE = "UNREACHABLE"; - private StorageProxy() { - } - - static { - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - try { - mbs.registerMBean(instance, new ObjectName(MBEAN_NAME)); - } catch (Exception e) { - throw new RuntimeException(e); - } - + public StorageProxy(APIClient client) { + super(MBEAN_NAME, client, new ClientRequestMetrics("Read", "storage_proxy/metrics/read"), + new ClientRequestMetrics("RangeSlice", "/storage_proxy/metrics/range"), + new ClientRequestMetrics("Write", "storage_proxy/metrics/write"), + new CASClientRequestMetrics("CASWrite", "storage_proxy/metrics/cas_write"), + new CASClientRequestMetrics("CASRead", "storage_proxy/metrics/cas_read")); } @Override public long getTotalHints() { log(" getTotalHints()"); - return c.getLongValue("storage_proxy/total_hints"); + return client.getLongValue("storage_proxy/total_hints"); } @Override public boolean getHintedHandoffEnabled() { log(" 
getHintedHandoffEnabled()"); - return c.getBooleanValue("storage_proxy/hinted_handoff_enabled"); + return client.getBooleanValue("storage_proxy/hinted_handoff_enabled"); } @Override public Set getHintedHandoffEnabledByDC() { log(" getHintedHandoffEnabledByDC()"); - return c.getSetStringValue( - "storage_proxy/hinted_handoff_enabled_by_dc"); + return client.getSetStringValue("storage_proxy/hinted_handoff_enabled_by_dc"); } @Override @@ -93,7 +80,7 @@ public class StorageProxy implements StorageProxyMBean { log(" setHintedHandoffEnabled(boolean b)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("enable", Boolean.toString(b)); - c.post("storage_proxy/hinted_handoff_enabled", queryParams); + client.post("storage_proxy/hinted_handoff_enabled", queryParams); } @Override @@ -101,13 +88,13 @@ public class StorageProxy implements StorageProxyMBean { log(" setHintedHandoffEnabledByDCList(String dcs)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("dcs", dcs); - c.post("storage_proxy/hinted_handoff_enabled_by_dc_list"); + client.post("storage_proxy/hinted_handoff_enabled_by_dc_list"); } @Override public int getMaxHintWindow() { log(" getMaxHintWindow()"); - return c.getIntValue("storage_proxy/max_hint_window"); + return client.getIntValue("storage_proxy/max_hint_window"); } @Override @@ -115,13 +102,13 @@ public class StorageProxy implements StorageProxyMBean { log(" setMaxHintWindow(int ms)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("ms", Integer.toString(ms)); - c.post("storage_proxy/max_hint_window", queryParams); + client.post("storage_proxy/max_hint_window", queryParams); } @Override public int getMaxHintsInProgress() { log(" getMaxHintsInProgress()"); - return c.getIntValue("storage_proxy/max_hints_in_progress"); + return client.getIntValue("storage_proxy/max_hints_in_progress"); } @Override @@ -129,19 +116,19 @@ public class StorageProxy implements StorageProxyMBean { log(" 
setMaxHintsInProgress(int qs)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("qs", Integer.toString(qs)); - c.post("storage_proxy/max_hints_in_progress", queryParams); + client.post("storage_proxy/max_hints_in_progress", queryParams); } @Override public int getHintsInProgress() { log(" getHintsInProgress()"); - return c.getIntValue("storage_proxy/hints_in_progress"); + return client.getIntValue("storage_proxy/hints_in_progress"); } @Override public Long getRpcTimeout() { log(" getRpcTimeout()"); - return c.getLongValue("storage_proxy/rpc_timeout"); + return client.getLongValue("storage_proxy/rpc_timeout"); } @Override @@ -149,13 +136,13 @@ public class StorageProxy implements StorageProxyMBean { log(" setRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("timeout", Long.toString(timeoutInMillis)); - c.post("storage_proxy/rpc_timeout", queryParams); + client.post("storage_proxy/rpc_timeout", queryParams); } @Override public Long getReadRpcTimeout() { log(" getReadRpcTimeout()"); - return c.getLongValue("storage_proxy/read_rpc_timeout"); + return client.getLongValue("storage_proxy/read_rpc_timeout"); } @Override @@ -163,13 +150,13 @@ public class StorageProxy implements StorageProxyMBean { log(" setReadRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("timeout", Long.toString(timeoutInMillis)); - c.post("storage_proxy/read_rpc_timeout", queryParams); + client.post("storage_proxy/read_rpc_timeout", queryParams); } @Override public Long getWriteRpcTimeout() { log(" getWriteRpcTimeout()"); - return c.getLongValue("storage_proxy/write_rpc_timeout"); + return client.getLongValue("storage_proxy/write_rpc_timeout"); } @Override @@ -177,13 +164,13 @@ public class StorageProxy implements StorageProxyMBean { log(" setWriteRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("timeout", 
Long.toString(timeoutInMillis)); - c.post("storage_proxy/write_rpc_timeout", queryParams); + client.post("storage_proxy/write_rpc_timeout", queryParams); } @Override public Long getCounterWriteRpcTimeout() { log(" getCounterWriteRpcTimeout()"); - return c.getLongValue("storage_proxy/counter_write_rpc_timeout"); + return client.getLongValue("storage_proxy/counter_write_rpc_timeout"); } @Override @@ -191,13 +178,13 @@ public class StorageProxy implements StorageProxyMBean { log(" setCounterWriteRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("timeout", Long.toString(timeoutInMillis)); - c.post("storage_proxy/counter_write_rpc_timeout", queryParams); + client.post("storage_proxy/counter_write_rpc_timeout", queryParams); } @Override public Long getCasContentionTimeout() { log(" getCasContentionTimeout()"); - return c.getLongValue("storage_proxy/cas_contention_timeout"); + return client.getLongValue("storage_proxy/cas_contention_timeout"); } @Override @@ -205,13 +192,13 @@ public class StorageProxy implements StorageProxyMBean { log(" setCasContentionTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("timeout", Long.toString(timeoutInMillis)); - c.post("storage_proxy/cas_contention_timeout", queryParams); + client.post("storage_proxy/cas_contention_timeout", queryParams); } @Override public Long getRangeRpcTimeout() { log(" getRangeRpcTimeout()"); - return c.getLongValue("storage_proxy/range_rpc_timeout"); + return client.getLongValue("storage_proxy/range_rpc_timeout"); } @Override @@ -219,13 +206,13 @@ public class StorageProxy implements StorageProxyMBean { log(" setRangeRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("timeout", Long.toString(timeoutInMillis)); - c.post("storage_proxy/range_rpc_timeout", queryParams); + client.post("storage_proxy/range_rpc_timeout", queryParams); } @Override public 
Long getTruncateRpcTimeout() { log(" getTruncateRpcTimeout()"); - return c.getLongValue("storage_proxy/truncate_rpc_timeout"); + return client.getLongValue("storage_proxy/truncate_rpc_timeout"); } @Override @@ -233,43 +220,42 @@ public class StorageProxy implements StorageProxyMBean { log(" setTruncateRpcTimeout(Long timeoutInMillis)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("timeout", Long.toString(timeoutInMillis)); - c.post("storage_proxy/truncate_rpc_timeout", queryParams); + client.post("storage_proxy/truncate_rpc_timeout", queryParams); } @Override public void reloadTriggerClasses() { log(" reloadTriggerClasses()"); - c.post("storage_proxy/reload_trigger_classes"); + client.post("storage_proxy/reload_trigger_classes"); } @Override public long getReadRepairAttempted() { log(" getReadRepairAttempted()"); - return c.getLongValue("storage_proxy/read_repair_attempted"); + return client.getLongValue("storage_proxy/read_repair_attempted"); } @Override public long getReadRepairRepairedBlocking() { log(" getReadRepairRepairedBlocking()"); - return c.getLongValue("storage_proxy/read_repair_repaired_blocking"); + return client.getLongValue("storage_proxy/read_repair_repaired_blocking"); } @Override public long getReadRepairRepairedBackground() { log(" getReadRepairRepairedBackground()"); - return c.getLongValue("storage_proxy/read_repair_repaired_background"); + return client.getLongValue("storage_proxy/read_repair_repaired_background"); } /** Returns each live node's schema version */ @Override public Map> getSchemaVersions() { log(" getSchemaVersions()"); - return c.getMapStringListStrValue("storage_proxy/schema_versions"); + return client.getMapStringListStrValue("storage_proxy/schema_versions"); } @Override - public void setNativeTransportMaxConcurrentConnections( - Long nativeTransportMaxConcurrentConnections) { + public void setNativeTransportMaxConcurrentConnections(Long nativeTransportMaxConcurrentConnections) { // TODO 
Auto-generated method stub log(" setNativeTransportMaxConcurrentConnections()"); @@ -279,21 +265,21 @@ public class StorageProxy implements StorageProxyMBean { public Long getNativeTransportMaxConcurrentConnections() { // TODO Auto-generated method stub log(" getNativeTransportMaxConcurrentConnections()"); - return c.getLongValue(""); + return client.getLongValue(""); } @Override public void enableHintsForDC(String dc) { - // TODO if/when scylla uses hints + // TODO if/when scylla uses hints log(" enableHintsForDC()"); } @Override public void disableHintsForDC(String dc) { - // TODO if/when scylla uses hints + // TODO if/when scylla uses hints log(" disableHintsForDC()"); } - + @Override public Set getHintedHandoffDisabledDCs() { // TODO if/when scylla uses hints diff --git a/src/main/java/org/apache/cassandra/service/StorageService.java b/src/main/java/org/apache/cassandra/service/StorageService.java index a10880c..7b7aa7e 100644 --- a/src/main/java/org/apache/cassandra/service/StorageService.java +++ b/src/main/java/org/apache/cassandra/service/StorageService.java @@ -23,7 +23,6 @@ package org.apache.cassandra.service; import java.io.IOException; -import java.lang.management.ManagementFactory; import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; @@ -42,23 +41,27 @@ import java.util.TimerTask; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Logger; import javax.json.JsonArray; import javax.json.JsonObject; -import javax.management.MBeanServer; +import javax.management.ListenerNotFoundException; +import javax.management.MBeanNotificationInfo; import javax.management.Notification; +import javax.management.NotificationBroadcaster; import javax.management.NotificationBroadcasterSupport; -import javax.management.ObjectName; +import javax.management.NotificationFilter; +import 
javax.management.NotificationListener; import javax.management.openmbean.TabularData; import javax.ws.rs.core.MultivaluedHashMap; import javax.ws.rs.core.MultivaluedMap; import org.apache.cassandra.metrics.StorageMetrics; import org.apache.cassandra.repair.RepairParallelism; -import org.apache.cassandra.streaming.StreamManager; import com.google.common.base.Joiner; import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.MetricsMBean; import com.scylladb.jmx.utils.FileUtils; /** @@ -66,42 +69,46 @@ import com.scylladb.jmx.utils.FileUtils; * space. This token gets gossiped around. This class will also maintain * histograms of the load information of other nodes in the cluster. */ -public class StorageService extends NotificationBroadcasterSupport - implements StorageServiceMBean { - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(StorageService.class.getName()); +public class StorageService extends MetricsMBean implements StorageServiceMBean, NotificationBroadcaster { + private static final Logger logger = Logger.getLogger(StorageService.class.getName()); + private static final Timer timer = new Timer("Storage Service Repair", true); - private APIClient c = new APIClient(); - private static Timer timer = new Timer("Storage Service Repair"); - @SuppressWarnings("unused") - private StorageMetrics metrics = new StorageMetrics(); + private final NotificationBroadcasterSupport notificationBroadcasterSupport = new NotificationBroadcasterSupport(); - public static final StorageService instance = new StorageService(); - - public static StorageService getInstance() { - return instance; + @Override + public void addNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback) { + notificationBroadcasterSupport.addNotificationListener(listener, filter, handback); } - public static enum RepairStatus - { + @Override + public void removeNotificationListener(NotificationListener listener) 
throws ListenerNotFoundException { + notificationBroadcasterSupport.removeNotificationListener(listener); + } + + @Override + public void removeNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback) + throws ListenerNotFoundException { + notificationBroadcasterSupport.removeNotificationListener(listener, filter, handback); + } + + @Override + public MBeanNotificationInfo[] getNotificationInfo() { + return notificationBroadcasterSupport.getNotificationInfo(); + } + + public void sendNotification(Notification notification) { + notificationBroadcasterSupport.sendNotification(notification); + } + + public static enum RepairStatus { STARTED, SESSION_SUCCESS, SESSION_FAILED, FINISHED } /* JMX notification serial number counter */ private final AtomicLong notificationSerialNumber = new AtomicLong(); - private final ObjectName jmxObjectName; - - public StorageService() { - MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); - try { - jmxObjectName = new ObjectName( - "org.apache.cassandra.db:type=StorageService"); - mbs.registerMBean(this, jmxObjectName); - mbs.registerMBean(StreamManager.getInstance(), new ObjectName(StreamManager.OBJECT_NAME)); - } catch (Exception e) { - throw new RuntimeException(e); - } + public StorageService(APIClient client) { + super("org.apache.cassandra.db:type=StorageService", client, new StorageMetrics()); } @@ -118,7 +125,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public List getLiveNodes() { log(" getLiveNodes()"); - return c.getListStrValue("/gossiper/endpoint/live"); + return client.getListStrValue("/gossiper/endpoint/live"); } /** @@ -130,7 +137,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public List getUnreachableNodes() { log(" getUnreachableNodes()"); - return c.getListStrValue("/gossiper/endpoint/down"); + return client.getListStrValue("/gossiper/endpoint/down"); } /** @@ -141,7 +148,7 @@ public class 
StorageService extends NotificationBroadcasterSupport @Override public List getJoiningNodes() { log(" getJoiningNodes()"); - return c.getListStrValue("/storage_service/nodes/joining"); + return client.getListStrValue("/storage_service/nodes/joining"); } /** @@ -152,7 +159,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public List getLeavingNodes() { log(" getLeavingNodes()"); - return c.getListStrValue("/storage_service/nodes/leaving"); + return client.getListStrValue("/storage_service/nodes/leaving"); } /** @@ -163,7 +170,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public List getMovingNodes() { log(" getMovingNodes()"); - return c.getListStrValue("/storage_service/nodes/moving"); + return client.getListStrValue("/storage_service/nodes/moving"); } /** @@ -193,7 +200,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public List getTokens(String endpoint) throws UnknownHostException { log(" getTokens(String endpoint) throws UnknownHostException"); - return c.getListStrValue("/storage_service/tokens/" + endpoint); + return client.getListStrValue("/storage_service/tokens/" + endpoint); } /** @@ -204,7 +211,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public String getReleaseVersion() { log(" getReleaseVersion()"); - return c.getStringValue("/storage_service/release_version"); + return client.getStringValue("/storage_service/release_version"); } /** @@ -215,7 +222,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public String getSchemaVersion() { log(" getSchemaVersion()"); - return c.getStringValue("/storage_service/schema_version"); + return client.getStringValue("/storage_service/schema_version"); } /** @@ -226,7 +233,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public String[] getAllDataFileLocations() { log(" getAllDataFileLocations()"); - return 
c.getStringArrValue("/storage_service/data_file/locations"); + return client.getStringArrValue("/storage_service/data_file/locations"); } /** @@ -237,7 +244,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public String getCommitLogLocation() { log(" getCommitLogLocation()"); - return c.getStringValue("/storage_service/commitlog"); + return client.getStringValue("/storage_service/commitlog"); } /** @@ -248,7 +255,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public String getSavedCachesLocation() { log(" getSavedCachesLocation()"); - return c.getStringValue("/storage_service/saved_caches/location"); + return client.getStringValue("/storage_service/saved_caches/location"); } /** @@ -258,10 +265,9 @@ public class StorageService extends NotificationBroadcasterSupport * @return mapping of ranges to end points */ @Override - public Map, List> getRangeToEndpointMap( - String keyspace) { + public Map, List> getRangeToEndpointMap(String keyspace) { log(" getRangeToEndpointMap(String keyspace)"); - return c.getMapListStrValue("/storage_service/range/" + keyspace); + return client.getMapListStrValue("/storage_service/range/" + keyspace); } /** @@ -271,13 +277,11 @@ public class StorageService extends NotificationBroadcasterSupport * @return mapping of ranges to rpc addresses */ @Override - public Map, List> getRangeToRpcaddressMap( - String keyspace) { + public Map, List> getRangeToRpcaddressMap(String keyspace) { log(" getRangeToRpcaddressMap(String keyspace)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("rpc", "true"); - return c.getMapListStrValue("/storage_service/range/" + keyspace, - queryParams); + return client.getMapListStrValue("/storage_service/range/" + keyspace, queryParams); } /** @@ -293,7 +297,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public List describeRingJMX(String keyspace) throws IOException { log(" 
describeRingJMX(String keyspace) throws IOException"); - JsonArray arr = c.getJsonArray("/storage_service/describe_ring/" + keyspace); + JsonArray arr = client.getJsonArray("/storage_service/describe_ring/" + keyspace); List res = new ArrayList(); for (int i = 0; i < arr.size(); i++) { @@ -352,11 +356,9 @@ public class StorageService extends NotificationBroadcasterSupport * @return a map of pending ranges to endpoints */ @Override - public Map, List> getPendingRangeToEndpointMap( - String keyspace) { + public Map, List> getPendingRangeToEndpointMap(String keyspace) { log(" getPendingRangeToEndpointMap(String keyspace)"); - return c.getMapListStrValue( - "/storage_service/pending_range/" + keyspace); + return client.getMapListStrValue("/storage_service/pending_range/" + keyspace); } /** @@ -367,13 +369,13 @@ public class StorageService extends NotificationBroadcasterSupport @Override public Map getTokenToEndpointMap() { log(" getTokenToEndpointMap()"); - Map mapInetAddress = c.getMapStrValue("/storage_service/tokens_endpoint"); - // in order to preserve tokens in ascending order, we use LinkedHashMap here + Map mapInetAddress = client.getMapStrValue("/storage_service/tokens_endpoint"); + // in order to preserve tokens in ascending order, we use LinkedHashMap + // here Map mapString = new LinkedHashMap<>(mapInetAddress.size()); List tokens = new ArrayList<>(mapInetAddress.keySet()); Collections.sort(tokens); - for (String token : tokens) - { + for (String token : tokens) { mapString.put(token, mapInetAddress.get(token)); } return mapString; @@ -383,7 +385,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public String getLocalHostId() { log(" getLocalHostId()"); - return c.getStringValue("/storage_service/hostid/local"); + return client.getStringValue("/storage_service/hostid/local"); } public String getLocalBroadCastingAddress() { @@ -393,17 +395,18 @@ public class StorageService extends NotificationBroadcasterSupport // we will use 
the getHostIdToAddressMap with the hostid return getHostIdToAddressMap().get(getLocalHostId()); } + /** Retrieve the mapping of endpoint to host ID */ @Override public Map getHostIdMap() { log(" getHostIdMap()"); - return c.getMapStrValue("/storage_service/host_id"); + return client.getMapStrValue("/storage_service/host_id"); } /** Retrieve the mapping of endpoint to host ID */ public Map getHostIdToAddressMap() { log(" getHostIdToAddressMap()"); - return c.getReverseMapStrValue("/storage_service/host_id"); + return client.getReverseMapStrValue("/storage_service/host_id"); } /** @@ -414,7 +417,7 @@ public class StorageService extends NotificationBroadcasterSupport @Deprecated public double getLoad() { log(" getLoad()"); - return c.getDoubleValue("/storage_service/load"); + return client.getDoubleValue("/storage_service/load"); } /** Human-readable load value */ @@ -430,8 +433,7 @@ public class StorageService extends NotificationBroadcasterSupport log(" getLoadMap()"); Map load = getLoadMapAsDouble(); Map map = new HashMap<>(); - for (Map.Entry entry : load.entrySet()) - { + for (Map.Entry entry : load.entrySet()) { map.put(entry.getKey(), FileUtils.stringifyFileSize(entry.getValue())); } return map; @@ -439,7 +441,7 @@ public class StorageService extends NotificationBroadcasterSupport public Map getLoadMapAsDouble() { log(" getLoadMapAsDouble()"); - return c.getMapStringDouble("/storage_service/load_map"); + return client.getMapStringDouble("/storage_service/load_map"); } /** @@ -450,7 +452,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public int getCurrentGenerationNumber() { log(" getCurrentGenerationNumber()"); - return c.getIntValue("/storage_service/generation_number"); + return client.getIntValue("/storage_service/generation_number"); } /** @@ -466,22 +468,18 @@ public class StorageService extends NotificationBroadcasterSupport * the endpoint responsible for this key */ @Override - public List getNaturalEndpoints(String 
keyspaceName, String cf, - String key) { + public List getNaturalEndpoints(String keyspaceName, String cf, String key) { log(" getNaturalEndpoints(String keyspaceName, String cf, String key)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("cf", cf); queryParams.add("key", key); - return c.getListInetAddressValue( - "/storage_service/natural_endpoints/" + keyspaceName, - queryParams); + return client.getListInetAddressValue("/storage_service/natural_endpoints/" + keyspaceName, queryParams); } @Override - public List getNaturalEndpoints(String keyspaceName, - ByteBuffer key) { + public List getNaturalEndpoints(String keyspaceName, ByteBuffer key) { log(" getNaturalEndpoints(String keyspaceName, ByteBuffer key)"); - return c.getListInetAddressValue(""); + return client.getListInetAddressValue(""); } /** @@ -494,14 +492,12 @@ public class StorageService extends NotificationBroadcasterSupport * the name of the keyspaces to snapshot; empty means "all." */ @Override - public void takeSnapshot(String tag, String... keyspaceNames) - throws IOException { + public void takeSnapshot(String tag, String... keyspaceNames) throws IOException { log(" takeSnapshot(String tag, String... 
keyspaceNames) throws IOException"); MultivaluedMap queryParams = new MultivaluedHashMap(); APIClient.set_query_param(queryParams, "tag", tag); - APIClient.set_query_param(queryParams, "kn", - APIClient.join(keyspaceNames)); - c.post("/storage_service/snapshots", queryParams); + APIClient.set_query_param(queryParams, "kn", APIClient.join(keyspaceNames)); + client.post("/storage_service/snapshots", queryParams); } /** @@ -516,20 +512,22 @@ public class StorageService extends NotificationBroadcasterSupport * the tag given to the snapshot; may not be null or empty */ @Override - public void takeColumnFamilySnapshot(String keyspaceName, - String columnFamilyName, String tag) throws IOException { + public void takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException { log(" takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException"); MultivaluedMap queryParams = new MultivaluedHashMap(); - if (keyspaceName == null) + if (keyspaceName == null) { throw new IOException("You must supply a keyspace name"); - if (columnFamilyName == null) + } + if (columnFamilyName == null) { throw new IOException("You must supply a table name"); - if (tag == null || tag.equals("")) + } + if (tag == null || tag.equals("")) { throw new IOException("You must supply a snapshot name."); + } queryParams.add("tag", tag); queryParams.add("kn", keyspaceName); queryParams.add("cf", columnFamilyName); - c.post("/storage_service/snapshots", queryParams); + client.post("/storage_service/snapshots", queryParams); } /** @@ -537,14 +535,12 @@ public class StorageService extends NotificationBroadcasterSupport * tag is specified we will remove all snapshots. */ @Override - public void clearSnapshot(String tag, String... keyspaceNames) - throws IOException { + public void clearSnapshot(String tag, String... keyspaceNames) throws IOException { log(" clearSnapshot(String tag, String... 
keyspaceNames) throws IOException"); MultivaluedMap queryParams = new MultivaluedHashMap(); APIClient.set_query_param(queryParams, "tag", tag); - APIClient.set_query_param(queryParams, "kn", - APIClient.join(keyspaceNames)); - c.delete("/storage_service/snapshots", queryParams); + APIClient.set_query_param(queryParams, "kn", APIClient.join(keyspaceNames)); + client.delete("/storage_service/snapshots", queryParams); } /** @@ -555,12 +551,11 @@ public class StorageService extends NotificationBroadcasterSupport @Override public Map getSnapshotDetails() { log(" getSnapshotDetails()"); - return c.getMapStringSnapshotTabularDataValue( - "/storage_service/snapshots", null); + return client.getMapStringSnapshotTabularDataValue("/storage_service/snapshots", null); } public Map>> getSnapshotKeyspaceColumnFamily() { - JsonArray arr = c.getJsonArray("/storage_service/snapshots"); + JsonArray arr = client.getJsonArray("/storage_service/snapshots"); Map>> res = new HashMap>>(); for (int i = 0; i < arr.size(); i++) { JsonObject obj = arr.getJsonObject(i); @@ -588,36 +583,30 @@ public class StorageService extends NotificationBroadcasterSupport @Override public long trueSnapshotsSize() { log(" trueSnapshotsSize()"); - return c.getLongValue("/storage_service/snapshots/size/true"); + return client.getLongValue("/storage_service/snapshots/size/true"); } /** * Forces major compaction of a single keyspace */ - public void forceKeyspaceCompaction(String keyspaceName, - String... columnFamilies) throws IOException, ExecutionException, - InterruptedException { + public void forceKeyspaceCompaction(String keyspaceName, String... columnFamilies) + throws IOException, ExecutionException, InterruptedException { log(" forceKeyspaceCompaction(String keyspaceName, String... 
columnFamilies) throws IOException, ExecutionException, InterruptedException"); MultivaluedMap queryParams = new MultivaluedHashMap(); - APIClient.set_query_param(queryParams, "cf", - APIClient.join(columnFamilies)); - c.post("/storage_service/keyspace_compaction/" + keyspaceName, - queryParams); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + client.post("/storage_service/keyspace_compaction/" + keyspaceName, queryParams); } /** * Trigger a cleanup of keys on a single keyspace */ @Override - public int forceKeyspaceCleanup(String keyspaceName, - String... columnFamilies) throws IOException, ExecutionException, - InterruptedException { + public int forceKeyspaceCleanup(String keyspaceName, String... columnFamilies) + throws IOException, ExecutionException, InterruptedException { log(" forceKeyspaceCleanup(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException"); MultivaluedMap queryParams = new MultivaluedHashMap(); - APIClient.set_query_param(queryParams, "cf", - APIClient.join(columnFamilies)); - return c.postInt("/storage_service/keyspace_cleanup/" + keyspaceName, - queryParams); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + return client.postInt("/storage_service/keyspace_cleanup/" + keyspaceName, queryParams); } /** @@ -628,27 +617,21 @@ public class StorageService extends NotificationBroadcasterSupport * Scrubbed CFs will be snapshotted first, if disableSnapshot is false */ @Override - public int scrub(boolean disableSnapshot, boolean skipCorrupted, - String keyspaceName, String... columnFamilies) throws IOException, - ExecutionException, InterruptedException { + public int scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... columnFamilies) + throws IOException, ExecutionException, InterruptedException { log(" scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... 
columnFamilies) throws IOException, ExecutionException, InterruptedException"); return scrub(disableSnapshot, skipCorrupted, true, keyspaceName, columnFamilies); } @Override - public int scrub(boolean disableSnapshot, boolean skipCorrupted, - boolean checkData, String keyspaceName, String... columnFamilies) - throws IOException, ExecutionException, - InterruptedException { + public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, String keyspaceName, + String... columnFamilies) throws IOException, ExecutionException, InterruptedException { log(" scrub(boolean disableSnapshot, boolean skipCorrupted, bool checkData, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException"); MultivaluedMap queryParams = new MultivaluedHashMap(); - APIClient.set_bool_query_param(queryParams, "disable_snapshot", - disableSnapshot); - APIClient.set_bool_query_param(queryParams, "skip_corrupted", - skipCorrupted); - APIClient.set_query_param(queryParams, "cf", - APIClient.join(columnFamilies)); - return c.getIntValue("/storage_service/keyspace_scrub/" + keyspaceName); + APIClient.set_bool_query_param(queryParams, "disable_snapshot", disableSnapshot); + APIClient.set_bool_query_param(queryParams, "skip_corrupted", skipCorrupted); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + return client.getIntValue("/storage_service/keyspace_scrub/" + keyspaceName); } /** @@ -656,18 +639,13 @@ public class StorageService extends NotificationBroadcasterSupport * bad rows and do not snapshot sstables first. */ @Override - public int upgradeSSTables(String keyspaceName, - boolean excludeCurrentVersion, String... columnFamilies) - throws IOException, ExecutionException, - InterruptedException { + public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... 
columnFamilies) + throws IOException, ExecutionException, InterruptedException { log(" upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... columnFamilies) throws IOException, ExecutionException, InterruptedException"); MultivaluedMap queryParams = new MultivaluedHashMap(); - APIClient.set_bool_query_param(queryParams, "exclude_current_version", - excludeCurrentVersion); - APIClient.set_query_param(queryParams, "cf", - APIClient.join(columnFamilies)); - return c.getIntValue( - "/storage_service/keyspace_upgrade_sstables/" + keyspaceName); + APIClient.set_bool_query_param(queryParams, "exclude_current_version", excludeCurrentVersion); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + return client.getIntValue("/storage_service/keyspace_upgrade_sstables/" + keyspaceName); } /** @@ -679,23 +657,22 @@ public class StorageService extends NotificationBroadcasterSupport * @throws IOException */ @Override - public void forceKeyspaceFlush(String keyspaceName, - String... columnFamilies) throws IOException, ExecutionException, - InterruptedException { + public void forceKeyspaceFlush(String keyspaceName, String... columnFamilies) + throws IOException, ExecutionException, InterruptedException { log(" forceKeyspaceFlush(String keyspaceName, String... 
columnFamilies) throws IOException, ExecutionException, InterruptedException"); MultivaluedMap queryParams = new MultivaluedHashMap(); - APIClient.set_query_param(queryParams, "cf", - APIClient.join(columnFamilies)); - c.post("/storage_service/keyspace_flush/" + keyspaceName, queryParams); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + client.post("/storage_service/keyspace_flush/" + keyspaceName, queryParams); } - class CheckRepair extends TimerTask { - private APIClient c = new APIClient(); - int id; - String keyspace; - String message; - MultivaluedMap queryParams = new MultivaluedHashMap(); - int cmd; + private class CheckRepair extends TimerTask { + @SuppressWarnings("unused") + private int id; + private String keyspace; + private String message; + private MultivaluedMap queryParams = new MultivaluedHashMap(); + private int cmd; + public CheckRepair(int id, String keyspace) { this.id = id; this.keyspace = keyspace; @@ -704,15 +681,17 @@ public class StorageService extends NotificationBroadcasterSupport // The returned id is the command number this.cmd = id; } + @Override public void run() { - String status = c.getStringValue("/storage_service/repair_async/" + keyspace, queryParams); + String status = client.getStringValue("/storage_service/repair_async/" + keyspace, queryParams); if (!status.equals("RUNNING")) { cancel(); if (!status.equals("SUCCESSFUL")) { - sendNotification("repair", message + "failed", new int[]{cmd, RepairStatus.SESSION_FAILED.ordinal()}); + sendNotification("repair", message + "failed", + new int[] { cmd, RepairStatus.SESSION_FAILED.ordinal() }); } - sendNotification("repair", message + "finished", new int[]{cmd, RepairStatus.FINISHED.ordinal()}); + sendNotification("repair", message + "finished", new int[] { cmd, RepairStatus.FINISHED.ordinal() }); } } @@ -721,24 +700,25 @@ public class StorageService extends NotificationBroadcasterSupport /** * Sends JMX notification to subscribers. 
* - * @param type Message type - * @param message Message itself - * @param userObject Arbitrary object to attach to notification + * @param type + * Message type + * @param message + * Message itself + * @param userObject + * Arbitrary object to attach to notification */ - public void sendNotification(String type, String message, Object userObject) - { - Notification jmxNotification = new Notification(type, jmxObjectName, notificationSerialNumber.incrementAndGet(), message); + public void sendNotification(String type, String message, Object userObject) { + Notification jmxNotification = new Notification(type, getBoundName(), + notificationSerialNumber.incrementAndGet(), message); jmxNotification.setUserData(userObject); sendNotification(jmxNotification); } - public String getRepairMessage(final int cmd, - final String keyspace, - final int ranges_size, - final RepairParallelism parallelismDegree, - final boolean fullRepair) { - return String.format("Starting repair command #%d, repairing %d ranges for keyspace %s (parallelism=%s, full=%b)", - cmd, ranges_size, keyspace, parallelismDegree, fullRepair); + public String getRepairMessage(final int cmd, final String keyspace, final int ranges_size, + final RepairParallelism parallelismDegree, final boolean fullRepair) { + return String.format( + "Starting repair command #%d, repairing %d ranges for keyspace %s (parallelism=%s, full=%b)", cmd, + ranges_size, keyspace, parallelismDegree, fullRepair); } /** @@ -747,7 +727,7 @@ public class StorageService extends NotificationBroadcasterSupport */ public int waitAndNotifyRepair(int cmd, String keyspace, String message) { logger.finest(message); - sendNotification("repair", message, new int[]{cmd, RepairStatus.STARTED.ordinal()}); + sendNotification("repair", message, new int[] { cmd, RepairStatus.STARTED.ordinal() }); TimerTask taskToExecute = new CheckRepair(cmd, keyspace); timer.schedule(taskToExecute, 100, 1000); return cmd; @@ -773,16 +753,15 @@ public class 
StorageService extends NotificationBroadcasterSupport APIClient.set_query_param(queryParams, op, options.get(op)); } - int cmd = c.postInt("/storage_service/repair_async/" + keyspace, queryParams); + int cmd = client.postInt("/storage_service/repair_async/" + keyspace, queryParams); waitAndNotifyRepair(cmd, keyspace, getRepairMessage(cmd, keyspace, 1, RepairParallelism.SEQUENTIAL, true)); return cmd; } @Override - public int forceRepairAsync(String keyspace, boolean isSequential, - Collection dataCenters, Collection hosts, - boolean primaryRange, boolean repairedAt, String... columnFamilies) - throws IOException { + public int forceRepairAsync(String keyspace, boolean isSequential, Collection dataCenters, + Collection hosts, boolean primaryRange, boolean repairedAt, String... columnFamilies) + throws IOException { log(" forceRepairAsync(String keyspace, boolean isSequential, Collection dataCenters, Collection hosts, boolean primaryRange, boolean repairedAt, String... columnFamilies) throws IOException"); Map options = new HashMap(); return repairAsync(keyspace, options); @@ -794,18 +773,16 @@ public class StorageService extends NotificationBroadcasterSupport } @Override - public int forceRepairRangeAsync(String beginToken, String endToken, - String keyspaceName, boolean isSequential, - Collection dataCenters, Collection hosts, - boolean repairedAt, String... columnFamilies) throws IOException { + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, + Collection dataCenters, Collection hosts, boolean repairedAt, String... columnFamilies) + throws IOException { log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection dataCenters, Collection hosts, boolean repairedAt, String... 
columnFamilies) throws IOException"); - return c.getIntValue(""); + return client.getIntValue(""); } @Override - public int forceRepairAsync(String keyspace, boolean isSequential, - boolean isLocal, boolean primaryRange, boolean fullRepair, - String... columnFamilies) { + public int forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, + boolean fullRepair, String... columnFamilies) { log(" forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, String... columnFamilies)"); Map options = new HashMap(); return repairAsync(keyspace, options); @@ -813,17 +790,16 @@ public class StorageService extends NotificationBroadcasterSupport @Override @Deprecated - public int forceRepairRangeAsync(String beginToken, String endToken, - String keyspaceName, boolean isSequential, boolean isLocal, - boolean repairedAt, String... columnFamilies) { + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, + boolean isLocal, boolean repairedAt, String... columnFamilies) { log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, boolean isLocal, boolean repairedAt, String... 
columnFamilies)"); - return c.getIntValue(""); + return client.getIntValue(""); } @Override public void forceTerminateAllRepairSessions() { log(" forceTerminateAllRepairSessions()"); - c.post("/storage_service/force_terminate"); + client.post("/storage_service/force_terminate"); } /** @@ -832,7 +808,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public void decommission() throws InterruptedException { log(" decommission() throws InterruptedException"); - c.post("/storage_service/decommission"); + client.post("/storage_service/decommission"); } /** @@ -845,20 +821,22 @@ public class StorageService extends NotificationBroadcasterSupport log(" move(String newToken) throws IOException"); MultivaluedMap queryParams = new MultivaluedHashMap(); APIClient.set_query_param(queryParams, "new_token", newToken); - c.post("/storage_service/move", queryParams); + client.post("/storage_service/move", queryParams); } /** * removeToken removes token (and all data associated with enpoint that had * it) from the ring - * @param hostIdString the host id to remove + * + * @param hostIdString + * the host id to remove */ @Override public void removeNode(String hostIdString) { log(" removeNode(String token)"); MultivaluedMap queryParams = new MultivaluedHashMap(); APIClient.set_query_param(queryParams, "host_id", hostIdString); - c.post("/storage_service/remove_node", queryParams); + client.post("/storage_service/remove_node", queryParams); } /** @@ -867,7 +845,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public String getRemovalStatus() { log(" getRemovalStatus()"); - return c.getStringValue("/storage_service/removal_status"); + return client.getStringValue("/storage_service/removal_status"); } /** @@ -876,7 +854,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public void forceRemoveCompletion() { log(" forceRemoveCompletion()"); - c.post("/storage_service/force_remove_completion"); 
+ client.post("/storage_service/force_remove_completion"); } /** @@ -899,19 +877,18 @@ public class StorageService extends NotificationBroadcasterSupport * @see ch.qos.logback.classic.Level#toLevel(String) */ @Override - public void setLoggingLevel(String classQualifier, String level) - throws Exception { + public void setLoggingLevel(String classQualifier, String level) throws Exception { log(" setLoggingLevel(String classQualifier, String level) throws Exception"); MultivaluedMap queryParams = new MultivaluedHashMap(); APIClient.set_query_param(queryParams, "level", level); - c.post("/system/logger/" + classQualifier, queryParams); + client.post("/system/logger/" + classQualifier, queryParams); } /** get the runtime logging levels */ @Override public Map getLoggingLevels() { log(" getLoggingLevels()"); - return c.getMapStrValue("/storage_service/logging_level"); + return client.getMapStrValue("/storage_service/logging_level"); } /** @@ -921,14 +898,14 @@ public class StorageService extends NotificationBroadcasterSupport @Override public String getOperationMode() { log(" getOperationMode()"); - return c.getStringValue("/storage_service/operation_mode"); + return client.getStringValue("/storage_service/operation_mode"); } /** Returns whether the storage service is starting or not */ @Override public boolean isStarting() { log(" isStarting()"); - return c.getBooleanValue("/storage_service/is_starting"); + return client.getBooleanValue("/storage_service/is_starting"); } /** get the progress of a drain operation */ @@ -938,7 +915,7 @@ public class StorageService extends NotificationBroadcasterSupport // FIXME // This is a workaround so the nodetool would work // it should be revert when the drain progress will be implemented - //return c.getStringValue("/storage_service/drain"); + // return c.getStringValue("/storage_service/drain"); return String.format("Drained %s/%s ColumnFamilies", 0, 0); } @@ -947,10 +924,9 @@ public class StorageService extends 
NotificationBroadcasterSupport * commitlog. */ @Override - public void drain() - throws IOException, InterruptedException, ExecutionException { + public void drain() throws IOException, InterruptedException, ExecutionException { log(" drain() throws IOException, InterruptedException, ExecutionException"); - c.post("/storage_service/drain"); + client.post("/storage_service/drain"); } /** @@ -966,12 +942,11 @@ public class StorageService extends NotificationBroadcasterSupport * The column family to delete data from. */ @Override - public void truncate(String keyspace, String columnFamily) - throws TimeoutException, IOException { + public void truncate(String keyspace, String columnFamily) throws TimeoutException, IOException { log(" truncate(String keyspace, String columnFamily)throws TimeoutException, IOException"); MultivaluedMap queryParams = new MultivaluedHashMap(); APIClient.set_query_param(queryParams, "cf", columnFamily); - c.post("/storage_service/truncate/" + keyspace, queryParams); + client.post("/storage_service/truncate/" + keyspace, queryParams); } /** @@ -981,7 +956,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public Map getOwnership() { log(" getOwnership()"); - return c.getMapInetAddressFloatValue("/storage_service/ownership/"); + return client.getMapInetAddressFloatValue("/storage_service/ownership/"); } /** @@ -992,26 +967,26 @@ public class StorageService extends NotificationBroadcasterSupport * else a empty Map is returned. 
*/ @Override - public Map effectiveOwnership(String keyspace) - throws IllegalStateException { + public Map effectiveOwnership(String keyspace) throws IllegalStateException { log(" effectiveOwnership(String keyspace) throws IllegalStateException"); try { - return c.getMapInetAddressFloatValue("/storage_service/ownership/" + keyspace); + return client.getMapInetAddressFloatValue("/storage_service/ownership/" + keyspace); } catch (Exception e) { - throw new IllegalStateException("Non-system keyspaces don't have the same replication settings, effective ownership information is meaningless"); + throw new IllegalStateException( + "Non-system keyspaces don't have the same replication settings, effective ownership information is meaningless"); } } @Override public List getKeyspaces() { log(" getKeyspaces()"); - return c.getListStrValue("/storage_service/keyspaces"); + return client.getListStrValue("/storage_service/keyspaces"); } public Map> getColumnFamilyPerKeyspace() { Map> res = new HashMap>(); - JsonArray mbeans = c.getJsonArray("/column_family/"); + JsonArray mbeans = client.getJsonArray("/column_family/"); for (int i = 0; i < mbeans.size(); i++) { JsonObject mbean = mbeans.getJsonObject(i); @@ -1030,7 +1005,7 @@ public class StorageService extends NotificationBroadcasterSupport log(" getNonSystemKeyspaces()"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("type", "user"); - return c.getListStrValue("/storage_service/keyspaces", queryParams); + return client.getListStrValue("/storage_service/keyspaces", queryParams); } /** @@ -1050,114 +1025,109 @@ public class StorageService extends NotificationBroadcasterSupport * double, (default 0.0) */ @Override - public void updateSnitch(String epSnitchClassName, Boolean dynamic, - Integer dynamicUpdateInterval, Integer dynamicResetInterval, - Double dynamicBadnessThreshold) throws ClassNotFoundException { + public void updateSnitch(String epSnitchClassName, Boolean dynamic, Integer 
dynamicUpdateInterval, + Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException { log(" updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException"); MultivaluedMap queryParams = new MultivaluedHashMap(); APIClient.set_bool_query_param(queryParams, "dynamic", dynamic); - APIClient.set_query_param(queryParams, "epSnitchClassName", - epSnitchClassName); + APIClient.set_query_param(queryParams, "epSnitchClassName", epSnitchClassName); if (dynamicUpdateInterval != null) { - queryParams.add("dynamic_update_interval", - dynamicUpdateInterval.toString()); + queryParams.add("dynamic_update_interval", dynamicUpdateInterval.toString()); } if (dynamicResetInterval != null) { - queryParams.add("dynamic_reset_interval", - dynamicResetInterval.toString()); + queryParams.add("dynamic_reset_interval", dynamicResetInterval.toString()); } if (dynamicBadnessThreshold != null) { - queryParams.add("dynamic_badness_threshold", - dynamicBadnessThreshold.toString()); + queryParams.add("dynamic_badness_threshold", dynamicBadnessThreshold.toString()); } - c.post("/storage_service/update_snitch", queryParams); + client.post("/storage_service/update_snitch", queryParams); } // allows a user to forcibly 'kill' a sick node @Override public void stopGossiping() { log(" stopGossiping()"); - c.delete("/storage_service/gossiping"); + client.delete("/storage_service/gossiping"); } // allows a user to recover a forcibly 'killed' node @Override public void startGossiping() { log(" startGossiping()"); - c.post("/storage_service/gossiping"); + client.post("/storage_service/gossiping"); } // allows a user to see whether gossip is running or not @Override public boolean isGossipRunning() { log(" isGossipRunning()"); - return c.getBooleanValue("/storage_service/gossiping"); + return client.getBooleanValue("/storage_service/gossiping"); } // allows a 
user to forcibly completely stop cassandra @Override public void stopDaemon() { log(" stopDaemon()"); - c.post("/storage_service/stop_daemon"); + client.post("/storage_service/stop_daemon"); } // to determine if gossip is disabled @Override public boolean isInitialized() { log(" isInitialized()"); - return c.getBooleanValue("/storage_service/is_initialized"); + return client.getBooleanValue("/storage_service/is_initialized"); } // allows a user to disable thrift @Override public void stopRPCServer() { log(" stopRPCServer()"); - c.delete("/storage_service/rpc_server"); + client.delete("/storage_service/rpc_server"); } // allows a user to reenable thrift @Override public void startRPCServer() { log(" startRPCServer()"); - c.post("/storage_service/rpc_server"); + client.post("/storage_service/rpc_server"); } // to determine if thrift is running @Override public boolean isRPCServerRunning() { log(" isRPCServerRunning()"); - return c.getBooleanValue("/storage_service/rpc_server"); + return client.getBooleanValue("/storage_service/rpc_server"); } @Override public void stopNativeTransport() { log(" stopNativeTransport()"); - c.delete("/storage_service/native_transport"); + client.delete("/storage_service/native_transport"); } @Override public void startNativeTransport() { log(" startNativeTransport()"); - c.post("/storage_service/native_transport"); + client.post("/storage_service/native_transport"); } @Override public boolean isNativeTransportRunning() { log(" isNativeTransportRunning()"); - return c.getBooleanValue("/storage_service/native_transport"); + return client.getBooleanValue("/storage_service/native_transport"); } // allows a node that have been started without joining the ring to join it @Override public void joinRing() throws IOException { log(" joinRing() throws IOException"); - c.post("/storage_service/join_ring"); + client.post("/storage_service/join_ring"); } @Override public boolean isJoined() { log(" isJoined()"); - return 
c.getBooleanValue("/storage_service/join_ring"); + return client.getBooleanValue("/storage_service/join_ring"); } @Override @@ -1165,18 +1135,18 @@ public class StorageService extends NotificationBroadcasterSupport log(" setStreamThroughputMbPerSec(int value)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("value", Integer.toString(value)); - c.post("/storage_service/stream_throughput", queryParams); + client.post("/storage_service/stream_throughput", queryParams); } @Override public int getStreamThroughputMbPerSec() { log(" getStreamThroughputMbPerSec()"); - return c.getIntValue("/storage_service/stream_throughput"); + return client.getIntValue("/storage_service/stream_throughput"); } public int getCompactionThroughputMbPerSec() { log(" getCompactionThroughputMbPerSec()"); - return c.getIntValue("/storage_service/compaction_throughput"); + return client.getIntValue("/storage_service/compaction_throughput"); } @Override @@ -1184,13 +1154,13 @@ public class StorageService extends NotificationBroadcasterSupport log(" setCompactionThroughputMbPerSec(int value)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("value", Integer.toString(value)); - c.post("/storage_service/compaction_throughput", queryParams); + client.post("/storage_service/compaction_throughput", queryParams); } @Override public boolean isIncrementalBackupsEnabled() { log(" isIncrementalBackupsEnabled()"); - return c.getBooleanValue("/storage_service/incremental_backups"); + return client.getBooleanValue("/storage_service/incremental_backups"); } @Override @@ -1198,7 +1168,7 @@ public class StorageService extends NotificationBroadcasterSupport log(" setIncrementalBackupsEnabled(boolean value)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("value", Boolean.toString(value)); - c.post("/storage_service/incremental_backups", queryParams); + client.post("/storage_service/incremental_backups", queryParams); } /** @@ -1217,9 +1187,9 
@@ public class StorageService extends NotificationBroadcasterSupport if (sourceDc != null) { MultivaluedMap queryParams = new MultivaluedHashMap(); APIClient.set_query_param(queryParams, "source_dc", sourceDc); - c.post("/storage_service/rebuild", queryParams); + client.post("/storage_service/rebuild", queryParams); } else { - c.post("/storage_service/rebuild"); + client.post("/storage_service/rebuild"); } } @@ -1227,7 +1197,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public void bulkLoad(String directory) { log(" bulkLoad(String directory)"); - c.post("/storage_service/bulk_load/" + directory); + client.post("/storage_service/bulk_load/" + directory); } /** @@ -1237,14 +1207,13 @@ public class StorageService extends NotificationBroadcasterSupport @Override public String bulkLoadAsync(String directory) { log(" bulkLoadAsync(String directory)"); - return c.getStringValue( - "/storage_service/bulk_load_async/" + directory); + return client.getStringValue("/storage_service/bulk_load_async/" + directory); } @Override public void rescheduleFailedDeletions() { log(" rescheduleFailedDeletions()"); - c.post("/storage_service/reschedule_failed_deletions"); + client.post("/storage_service/reschedule_failed_deletions"); } /** @@ -1260,7 +1229,7 @@ public class StorageService extends NotificationBroadcasterSupport log(" loadNewSSTables(String ksName, String cfName)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("cf", cfName); - c.post("/storage_service/sstables/" + ksName, queryParams); + client.post("/storage_service/sstables/" + ksName, queryParams); } /** @@ -1276,22 +1245,21 @@ public class StorageService extends NotificationBroadcasterSupport @Override public List sampleKeyRange() { log(" sampleKeyRange()"); - return c.getListStrValue("/storage_service/sample_key_range"); + return client.getListStrValue("/storage_service/sample_key_range"); } /** * rebuild the specified indexes */ @Override - public void 
rebuildSecondaryIndex(String ksName, String cfName, - String... idxNames) { + public void rebuildSecondaryIndex(String ksName, String cfName, String... idxNames) { log(" rebuildSecondaryIndex(String ksName, String cfName, String... idxNames)"); } @Override public void resetLocalSchema() throws IOException { log(" resetLocalSchema() throws IOException"); - c.post("/storage_service/relocal_schema"); + client.post("/storage_service/relocal_schema"); } /** @@ -1309,7 +1277,7 @@ public class StorageService extends NotificationBroadcasterSupport log(" setTraceProbability(double probability)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("probability", Double.toString(probability)); - c.post("/storage_service/trace_probability", queryParams); + client.post("/storage_service/trace_probability", queryParams); } /** @@ -1318,28 +1286,24 @@ public class StorageService extends NotificationBroadcasterSupport @Override public double getTraceProbability() { log(" getTraceProbability()"); - return c.getDoubleValue("/storage_service/trace_probability"); + return client.getDoubleValue("/storage_service/trace_probability"); } @Override - public void disableAutoCompaction(String ks, String... columnFamilies) - throws IOException { + public void disableAutoCompaction(String ks, String... columnFamilies) throws IOException { log("disableAutoCompaction(String ks, String... columnFamilies)"); MultivaluedMap queryParams = new MultivaluedHashMap(); - APIClient.set_query_param(queryParams, "cf", - APIClient.join(columnFamilies)); - c.delete("/storage_service/auto_compaction/", queryParams); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); + client.delete("/storage_service/auto_compaction/", queryParams); } @Override - public void enableAutoCompaction(String ks, String... columnFamilies) - throws IOException { + public void enableAutoCompaction(String ks, String... 
columnFamilies) throws IOException { log("enableAutoCompaction(String ks, String... columnFamilies)"); MultivaluedMap queryParams = new MultivaluedHashMap(); - APIClient.set_query_param(queryParams, "cf", - APIClient.join(columnFamilies)); + APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies)); try { - c.post("/storage_service/auto_compaction/", queryParams); + client.post("/storage_service/auto_compaction/", queryParams); } catch (RuntimeException e) { // FIXME should throw the right exception throw new IOException(e.getMessage()); @@ -1352,28 +1316,28 @@ public class StorageService extends NotificationBroadcasterSupport log(" deliverHints(String host) throws UnknownHostException"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("host", host); - c.post("/storage_service/deliver_hints", queryParams); + client.post("/storage_service/deliver_hints", queryParams); } /** Returns the name of the cluster */ @Override public String getClusterName() { log(" getClusterName()"); - return c.getStringValue("/storage_service/cluster_name"); + return client.getStringValue("/storage_service/cluster_name"); } /** Returns the cluster partitioner */ @Override public String getPartitionerName() { log(" getPartitionerName()"); - return c.getStringValue("/storage_service/partitioner_name"); + return client.getStringValue("/storage_service/partitioner_name"); } /** Returns the threshold for warning of queries with many tombstones */ @Override public int getTombstoneWarnThreshold() { log(" getTombstoneWarnThreshold()"); - return c.getIntValue("/storage_service/tombstone_warn_threshold"); + return client.getIntValue("/storage_service/tombstone_warn_threshold"); } /** Sets the threshold for warning queries with many tombstones */ @@ -1381,16 +1345,15 @@ public class StorageService extends NotificationBroadcasterSupport public void setTombstoneWarnThreshold(int tombstoneDebugThreshold) { log(" setTombstoneWarnThreshold(int 
tombstoneDebugThreshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); - queryParams.add("debug_threshold", - Integer.toString(tombstoneDebugThreshold)); - c.post("/storage_service/tombstone_warn_threshold", queryParams); + queryParams.add("debug_threshold", Integer.toString(tombstoneDebugThreshold)); + client.post("/storage_service/tombstone_warn_threshold", queryParams); } /** Returns the threshold for abandoning queries with many tombstones */ @Override public int getTombstoneFailureThreshold() { log(" getTombstoneFailureThreshold()"); - return c.getIntValue("/storage_service/tombstone_failure_threshold"); + return client.getIntValue("/storage_service/tombstone_failure_threshold"); } /** Sets the threshold for abandoning queries with many tombstones */ @@ -1398,16 +1361,15 @@ public class StorageService extends NotificationBroadcasterSupport public void setTombstoneFailureThreshold(int tombstoneDebugThreshold) { log(" setTombstoneFailureThreshold(int tombstoneDebugThreshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); - queryParams.add("debug_threshold", - Integer.toString(tombstoneDebugThreshold)); - c.post("/storage_service/tombstone_failure_threshold", queryParams); + queryParams.add("debug_threshold", Integer.toString(tombstoneDebugThreshold)); + client.post("/storage_service/tombstone_failure_threshold", queryParams); } /** Returns the threshold for rejecting queries due to a large batch size */ @Override public int getBatchSizeFailureThreshold() { log(" getBatchSizeFailureThreshold()"); - return c.getIntValue("/storage_service/batch_size_failure_threshold"); + return client.getIntValue("/storage_service/batch_size_failure_threshold"); } /** Sets the threshold for rejecting queries due to a large batch size */ @@ -1416,7 +1378,7 @@ public class StorageService extends NotificationBroadcasterSupport log(" setBatchSizeFailureThreshold(int batchSizeDebugThreshold)"); MultivaluedMap queryParams = new MultivaluedHashMap(); 
queryParams.add("threshold", Integer.toString(batchSizeDebugThreshold)); - c.post("/storage_service/batch_size_failure_threshold", queryParams); + client.post("/storage_service/batch_size_failure_threshold", queryParams); } /** @@ -1427,74 +1389,73 @@ public class StorageService extends NotificationBroadcasterSupport log(" setHintedHandoffThrottleInKB(int throttleInKB)"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("throttle", Integer.toString(throttleInKB)); - c.post("/storage_service/hinted_handoff", queryParams); + client.post("/storage_service/hinted_handoff", queryParams); } @Override - public void takeMultipleColumnFamilySnapshot(String tag, - String... columnFamilyList) throws IOException { + public void takeMultipleColumnFamilySnapshot(String tag, String... columnFamilyList) throws IOException { log(" takeMultipleColumnFamilySnapshot"); Map> keyspaceColumnfamily = new HashMap>(); Map> kss = getColumnFamilyPerKeyspace(); Map>> snapshots = getSnapshotKeyspaceColumnFamily(); - for (String columnFamily : columnFamilyList) - { + for (String columnFamily : columnFamilyList) { String splittedString[] = columnFamily.split("\\."); - if (splittedString.length == 2) - { + if (splittedString.length == 2) { String keyspaceName = splittedString[0]; String columnFamilyName = splittedString[1]; - if (keyspaceName == null) + if (keyspaceName == null) { throw new IOException("You must supply a keyspace name"); - if (columnFamilyName == null) + } + if (columnFamilyName == null) { throw new IOException("You must supply a column family name"); - if (tag == null || tag.equals("")) + } + if (tag == null || tag.equals("")) { throw new IOException("You must supply a snapshot name."); - if (!kss.containsKey(keyspaceName)) - { + } + if (!kss.containsKey(keyspaceName)) { throw new IOException("Keyspace " + keyspaceName + " does not exist"); } if (!kss.get(keyspaceName).contains(columnFamilyName)) { - throw new IllegalArgumentException(String.format("Unknown 
keyspace/cf pair (%s.%s)", keyspaceName, columnFamilyName)); + throw new IllegalArgumentException( + String.format("Unknown keyspace/cf pair (%s.%s)", keyspaceName, columnFamilyName)); } - // As there can be multiple column family from same keyspace check if snapshot exist for that specific + // As there can be multiple column family from same keyspace + // check if snapshot exist for that specific // columnfamily and not for whole keyspace - if (snapshots.containsKey(tag) && snapshots.get(tag).containsKey(keyspaceName) && snapshots.get(tag).get(keyspaceName).contains(columnFamilyName)) { + if (snapshots.containsKey(tag) && snapshots.get(tag).containsKey(keyspaceName) + && snapshots.get(tag).get(keyspaceName).contains(columnFamilyName)) { throw new IOException("Snapshot " + tag + " already exists."); } - if (!keyspaceColumnfamily.containsKey(keyspaceName)) - { + if (!keyspaceColumnfamily.containsKey(keyspaceName)) { keyspaceColumnfamily.put(keyspaceName, new ArrayList()); } - // Add Keyspace columnfamily to map in order to support atomicity for snapshot process. - // So no snapshot should happen if any one of the above conditions fail for any keyspace or columnfamily + // Add Keyspace columnfamily to map in order to support + // atomicity for snapshot process. + // So no snapshot should happen if any one of the above + // conditions fail for any keyspace or columnfamily keyspaceColumnfamily.get(keyspaceName).add(columnFamilyName); - } - else - { + } else { throw new IllegalArgumentException( "Cannot take a snapshot on secondary index or invalid column family name. 
You must supply a column family name in the form of keyspace.columnfamily"); } } - for (Entry> entry : keyspaceColumnfamily.entrySet()) - { - for (String columnFamily : entry.getValue()) + for (Entry> entry : keyspaceColumnfamily.entrySet()) { + for (String columnFamily : entry.getValue()) { takeColumnFamilySnapshot(entry.getKey(), columnFamily, tag); + } } } @Override - public int forceRepairAsync(String keyspace, int parallelismDegree, - Collection dataCenters, Collection hosts, - boolean primaryRange, boolean fullRepair, - String... columnFamilies) { + public int forceRepairAsync(String keyspace, int parallelismDegree, Collection dataCenters, + Collection hosts, boolean primaryRange, boolean fullRepair, String... columnFamilies) { log(" forceRepairAsync(keyspace, parallelismDegree, dataCenters, hosts, primaryRange, fullRepair, columnFamilies)"); Map options = new HashMap(); Joiner commas = Joiner.on(","); @@ -1506,7 +1467,7 @@ public class StorageService extends NotificationBroadcasterSupport options.put("hosts", commas.join(hosts)); } options.put("primaryRange", Boolean.toString(primaryRange)); - options.put("incremental", Boolean.toString(!fullRepair)); + options.put("incremental", Boolean.toString(!fullRepair)); if (columnFamilies != null && columnFamilies.length > 0) { options.put("columnFamilies", commas.join(columnFamilies)); } @@ -1514,10 +1475,8 @@ public class StorageService extends NotificationBroadcasterSupport } @Override - public int forceRepairRangeAsync(String beginToken, String endToken, - String keyspaceName, int parallelismDegree, - Collection dataCenters, Collection hosts, - boolean fullRepair, String... columnFamilies) { + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, int parallelismDegree, + Collection dataCenters, Collection hosts, boolean fullRepair, String... 
columnFamilies) { log(" forceRepairRangeAsync(beginToken, endToken, keyspaceName, parallelismDegree, dataCenters, hosts, fullRepair, columnFamilies)"); Map options = new HashMap(); Joiner commas = Joiner.on(","); @@ -1528,9 +1487,9 @@ public class StorageService extends NotificationBroadcasterSupport if (hosts != null) { options.put("hosts", commas.join(hosts)); } - options.put("incremental", Boolean.toString(!fullRepair)); + options.put("incremental", Boolean.toString(!fullRepair)); options.put("startToken", beginToken); - options.put("endToken", endToken); + options.put("endToken", endToken); return repairAsync(keyspaceName, options); } @@ -1539,14 +1498,14 @@ public class StorageService extends NotificationBroadcasterSupport return getHostIdMap(); } - @Override + @Override public Map getHostIdToEndpoint() { return getHostIdToAddressMap(); } @Override public void refreshSizeEstimates() throws ExecutionException { - // TODO Auto-generated method stub + // TODO Auto-generated method stub log(" refreshSizeEstimates"); } @@ -1560,14 +1519,14 @@ public class StorageService extends NotificationBroadcasterSupport @Override public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException { - // "jobs" not (yet) relevant for scylla. (though possibly useful...) + // "jobs" not (yet) relevant for scylla. (though possibly useful...) return forceKeyspaceCleanup(keyspaceName, tables); } @Override public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, int jobs, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException { - // "jobs" not (yet) relevant for scylla. (though possibly useful...) + // "jobs" not (yet) relevant for scylla. (though possibly useful...) 
return scrub(disableSnapshot, skipCorrupted, checkData, 0, keyspaceName, columnFamilies); } @@ -1582,7 +1541,7 @@ public class StorageService extends NotificationBroadcasterSupport @Override public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException { - // "jobs" not (yet) relevant for scylla. (though possibly useful...) + // "jobs" not (yet) relevant for scylla. (though possibly useful...) return upgradeSSTables(keyspaceName, excludeCurrentVersion, tableNames); } @@ -1591,12 +1550,12 @@ public class StorageService extends NotificationBroadcasterSupport log(" getNonLocalStrategyKeyspaces"); MultivaluedMap queryParams = new MultivaluedHashMap(); queryParams.add("type", "non_local_strategy"); - return c.getListStrValue("/storage_service/keyspaces", queryParams); + return client.getListStrValue("/storage_service/keyspaces", queryParams); } @Override public void setInterDCStreamThroughputMbPerSec(int value) { - // TODO Auto-generated method stub + // TODO Auto-generated method stub log(" setInterDCStreamThroughputMbPerSec"); } From 4ed049739a2778b76b420d240da890b56f5e1e34 Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Mon, 17 Oct 2016 11:34:21 +0000 Subject: [PATCH 26/32] Storage service: Fix 3.x style notifications (repair) --- .../cassandra/service/StorageService.java | 143 +++++++++++++++--- 1 file changed, 119 insertions(+), 24 deletions(-) diff --git a/src/main/java/org/apache/cassandra/service/StorageService.java b/src/main/java/org/apache/cassandra/service/StorageService.java index 7b7aa7e..4ea72ed 100644 --- a/src/main/java/org/apache/cassandra/service/StorageService.java +++ b/src/main/java/org/apache/cassandra/service/StorageService.java @@ -22,6 +22,8 @@ */ package org.apache.cassandra.service; +import static java.util.Arrays.asList; + import java.io.IOException; import java.net.InetAddress; import java.net.UnknownHostException; @@ -672,10 
+674,12 @@ public class StorageService extends MetricsMBean implements StorageServiceMBean, private String message; private MultivaluedMap queryParams = new MultivaluedHashMap(); private int cmd; + private final boolean legacy; - public CheckRepair(int id, String keyspace) { + public CheckRepair(int id, String keyspace, boolean legacy) { this.id = id; this.keyspace = keyspace; + this.legacy = legacy; APIClient.set_query_param(queryParams, "id", Integer.toString(id)); message = String.format("Repair session %d ", id); // The returned id is the command number @@ -687,11 +691,12 @@ public class StorageService extends MetricsMBean implements StorageServiceMBean, String status = client.getStringValue("/storage_service/repair_async/" + keyspace, queryParams); if (!status.equals("RUNNING")) { cancel(); - if (!status.equals("SUCCESSFUL")) { - sendNotification("repair", message + "failed", - new int[] { cmd, RepairStatus.SESSION_FAILED.ordinal() }); + if (status.equals("SUCCESSFUL")) { + sendMessage(cmd, RepairStatus.SESSION_SUCCESS, message, legacy); + } else { + sendMessage(cmd, RepairStatus.SESSION_FAILED, message + "failed", legacy); } - sendNotification("repair", message + "finished", new int[] { cmd, RepairStatus.FINISHED.ordinal() }); + sendMessage(cmd, RepairStatus.FINISHED, message + "finished", legacy); } } @@ -725,14 +730,57 @@ public class StorageService extends MetricsMBean implements StorageServiceMBean, * * @param repair */ - public int waitAndNotifyRepair(int cmd, String keyspace, String message) { + private int waitAndNotifyRepair(int cmd, String keyspace, String message, boolean legacy) { logger.finest(message); - sendNotification("repair", message, new int[] { cmd, RepairStatus.STARTED.ordinal() }); - TimerTask taskToExecute = new CheckRepair(cmd, keyspace); + + sendMessage(cmd, RepairStatus.STARTED, message, legacy); + + TimerTask taskToExecute = new CheckRepair(cmd, keyspace, legacy); timer.schedule(taskToExecute, 100, 1000); return cmd; } + // See 
org.apache.cassandra.utils.progress.ProgressEventType + private static enum ProgressEventType { + START, PROGRESS, ERROR, ABORT, SUCCESS, COMPLETE, NOTIFICATION + } + + private void sendMessage(int cmd, RepairStatus status, String message, boolean legacy) { + String tag = "repair:" + cmd; + + ProgressEventType type = ProgressEventType.ERROR; + int total = 100; + int count = 0; + switch (status) { + case STARTED: + type = ProgressEventType.START; + break; + case FINISHED: + type = ProgressEventType.COMPLETE; + count = 100; + break; + case SESSION_SUCCESS: + type = ProgressEventType.SUCCESS; + count = 100; + break; + default: + break; + } + + Notification jmxNotification = new Notification("progress", tag, notificationSerialNumber.incrementAndGet(), + message); + Map userData = new HashMap<>(); + userData.put("type", type.ordinal()); + userData.put("progressCount", count); + userData.put("total", total); + jmxNotification.setUserData(userData); + sendNotification(jmxNotification); + + if (legacy) { + sendNotification("repair", message, new int[] { cmd, status.ordinal() }); + } + } + /** * Invoke repair asynchronously. You can track repair progress by * subscribing JMX notification sent from this StorageServiceMBean. 
@@ -747,6 +795,24 @@ public class StorageService extends MetricsMBean implements StorageServiceMBean, */ @Override public int repairAsync(String keyspace, Map options) { + return repairAsync(keyspace, options, false); + } + + @SuppressWarnings("unused") + private static final String PARALLELISM_KEY = "parallelism"; + private static final String PRIMARY_RANGE_KEY = "primaryRange"; + @SuppressWarnings("unused") + private static final String INCREMENTAL_KEY = "incremental"; + @SuppressWarnings("unused") + private static final String JOB_THREADS_KEY = "jobThreads"; + private static final String RANGES_KEY = "ranges"; + private static final String COLUMNFAMILIES_KEY = "columnFamilies"; + private static final String DATACENTERS_KEY = "dataCenters"; + private static final String HOSTS_KEY = "hosts"; + @SuppressWarnings("unused") + private static final String TRACE_KEY = "trace"; + + private int repairAsync(String keyspace, Map options, boolean legacy) { log(" repairAsync(String keyspace, Map options)"); MultivaluedMap queryParams = new MultivaluedHashMap(); for (String op : options.keySet()) { @@ -754,38 +820,66 @@ public class StorageService extends MetricsMBean implements StorageServiceMBean, } int cmd = client.postInt("/storage_service/repair_async/" + keyspace, queryParams); - waitAndNotifyRepair(cmd, keyspace, getRepairMessage(cmd, keyspace, 1, RepairParallelism.SEQUENTIAL, true)); + waitAndNotifyRepair(cmd, keyspace, getRepairMessage(cmd, keyspace, 1, RepairParallelism.SEQUENTIAL, true), + legacy); return cmd; } + private static String commaSeparated(Collection c) { + String s = c.toString(); + return s.substring(1, s.length() - 1); + } + + private int repairRangeAsync(String beginToken, String endToken, String keyspaceName, Boolean isSequential, + Collection dataCenters, Collection hosts, Boolean primaryRange, Boolean repairedAt, + String... 
columnFamilies) { + log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection dataCenters, Collection hosts, boolean repairedAt, String... columnFamilies) throws IOException"); + + Map options = new HashMap(); + if (beginToken != null && endToken != null) { + options.put(RANGES_KEY, beginToken + ":" + endToken); + } + if (dataCenters != null) { + options.put(DATACENTERS_KEY, commaSeparated(dataCenters)); + } + if (hosts != null) { + options.put(HOSTS_KEY, commaSeparated(hosts)); + } + if (columnFamilies != null && columnFamilies.length != 0) { + options.put(COLUMNFAMILIES_KEY, commaSeparated(asList(columnFamilies))); + } + if (primaryRange != null) { + options.put(PRIMARY_RANGE_KEY, primaryRange.toString()); + } + + return repairAsync(keyspaceName, options, true); + } + @Override + @Deprecated public int forceRepairAsync(String keyspace, boolean isSequential, Collection dataCenters, Collection hosts, boolean primaryRange, boolean repairedAt, String... columnFamilies) - throws IOException { + throws IOException { log(" forceRepairAsync(String keyspace, boolean isSequential, Collection dataCenters, Collection hosts, boolean primaryRange, boolean repairedAt, String... columnFamilies) throws IOException"); - Map options = new HashMap(); - return repairAsync(keyspace, options); - } - - public int forceRepairAsync(String keyspace) { - Map options = new HashMap(); - return repairAsync(keyspace, options); + return repairRangeAsync(null, null, keyspace, isSequential, dataCenters, hosts, primaryRange, repairedAt, + columnFamilies); } @Override + @Deprecated public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, - Collection dataCenters, Collection hosts, boolean repairedAt, String... columnFamilies) - throws IOException { + Collection dataCenters, Collection hosts, boolean repairedAt, String... 
columnFamilies) { log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection dataCenters, Collection hosts, boolean repairedAt, String... columnFamilies) throws IOException"); - return client.getIntValue(""); + return repairRangeAsync(beginToken, endToken, keyspaceName, isSequential, dataCenters, hosts, null, repairedAt, + columnFamilies); } @Override - public int forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, + @Deprecated + public int forceRepairAsync(String keyspaceName, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, String... columnFamilies) { log(" forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, String... columnFamilies)"); - Map options = new HashMap(); - return repairAsync(keyspace, options); + return repairRangeAsync(null, null, keyspaceName, isSequential, null, null, primaryRange, null, columnFamilies); } @Override @@ -793,7 +887,8 @@ public class StorageService extends MetricsMBean implements StorageServiceMBean, public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, boolean isLocal, boolean repairedAt, String... columnFamilies) { log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, boolean isLocal, boolean repairedAt, String... 
columnFamilies)"); - return client.getIntValue(""); + return forceRepairRangeAsync(beginToken, endToken, keyspaceName, isSequential, null, null, repairedAt, + columnFamilies); } @Override From f4f3c44dc1ef42b5775c36305b9577a6d823bd46 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:07:25 +0200 Subject: [PATCH 27/32] Rework StreamManager --- .../cassandra/streaming/StreamManager.java | 63 ++++++++----------- 1 file changed, 27 insertions(+), 36 deletions(-) diff --git a/src/main/java/org/apache/cassandra/streaming/StreamManager.java b/src/main/java/org/apache/cassandra/streaming/StreamManager.java index 57c8285..7d45034 100644 --- a/src/main/java/org/apache/cassandra/streaming/StreamManager.java +++ b/src/main/java/org/apache/cassandra/streaming/StreamManager.java @@ -26,21 +26,23 @@ package org.apache.cassandra.streaming; import java.util.HashSet; import java.util.Set; +import java.util.logging.Logger; import javax.json.JsonArray; import javax.json.JsonObject; import javax.management.ListenerNotFoundException; import javax.management.MBeanNotificationInfo; +import javax.management.NotificationBroadcasterSupport; import javax.management.NotificationFilter; import javax.management.NotificationListener; import javax.management.openmbean.CompositeData; import org.apache.cassandra.streaming.management.StreamStateCompositeData; -import com.google.common.base.Function; import com.google.common.collect.Iterables; import com.google.common.collect.Sets; import com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.metrics.APIMBean; /** * StreamManager manages currently running {@link StreamResultFuture}s and @@ -49,62 +51,51 @@ import com.scylladb.jmx.api.APIClient; * All stream operation should be created through this class to track streaming * status and progress. 
*/ -public class StreamManager implements StreamManagerMBean { - public static final StreamManager instance = new StreamManager(); - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(StreamManager.class.getName()); - private APIClient c = new APIClient(); +public class StreamManager extends APIMBean implements StreamManagerMBean { + private static final Logger logger = Logger.getLogger(StreamManager.class.getName()); + + private final NotificationBroadcasterSupport notifier = new NotificationBroadcasterSupport(); + + public StreamManager(APIClient c) { + super(c); + } public Set getState() { - JsonArray arr = c.getJsonArray("/stream_manager/"); + JsonArray arr = client.getJsonArray("/stream_manager/"); Set res = new HashSet(); for (int i = 0; i < arr.size(); i++) { JsonObject obj = arr.getJsonObject(i); - res.add(new StreamState(obj.getString("plan_id"), obj.getString("description"), SessionInfo.fromJsonArr(obj.getJsonArray("sessions")))); + res.add(new StreamState(obj.getString("plan_id"), obj.getString("description"), + SessionInfo.fromJsonArr(obj.getJsonArray("sessions")))); } return res; } - public static StreamManager getInstance() { - return instance; - } + @Override public Set getCurrentStreams() { logger.finest("getCurrentStreams"); - return Sets.newHashSet(Iterables.transform(getState(), new Function() - { - public CompositeData apply(StreamState input) - { - return StreamStateCompositeData.toCompositeData(input); - } - })); + return Sets + .newHashSet(Iterables.transform(getState(), input -> StreamStateCompositeData.toCompositeData(input))); } @Override - public void removeNotificationListener(NotificationListener arg0, - NotificationFilter arg1, Object arg2) - throws ListenerNotFoundException { - // TODO Auto-generated method stub - + public void addNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback) { + notifier.addNotificationListener(listener, filter, handback); } 
@Override - public void addNotificationListener(NotificationListener arg0, - NotificationFilter arg1, Object arg2) - throws IllegalArgumentException { - // TODO Auto-generated method stub + public void removeNotificationListener(NotificationListener listener) throws ListenerNotFoundException { + notifier.removeNotificationListener(listener); + } + @Override + public void removeNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback) + throws ListenerNotFoundException { + notifier.removeNotificationListener(listener, filter, handback); } @Override public MBeanNotificationInfo[] getNotificationInfo() { - // TODO Auto-generated method stub - return null; - } - - @Override - public void removeNotificationListener(NotificationListener arg0) - throws ListenerNotFoundException { - // TODO Auto-generated method stub - + return notifier.getNotificationInfo(); } } From 1709ff2d0203b0bb13b5f63e00aabf4aa27954e7 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:10:19 +0200 Subject: [PATCH 28/32] API accessor * Make config an instance object * Add functional interfaces * http options * Remove dead code * Clean up/format --- .../java/com/scylladb/jmx/api/APIClient.java | 213 +++++++----------- .../java/com/scylladb/jmx/api/APIConfig.java | 46 ++-- .../java/com/scylladb/jmx/api/CacheEntry.java | 14 +- 3 files changed, 102 insertions(+), 171 deletions(-) diff --git a/src/main/java/com/scylladb/jmx/api/APIClient.java b/src/main/java/com/scylladb/jmx/api/APIClient.java index 6d7b49a..24cfcac 100644 --- a/src/main/java/com/scylladb/jmx/api/APIClient.java +++ b/src/main/java/com/scylladb/jmx/api/APIClient.java @@ -14,6 +14,8 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; +import java.util.function.BiFunction; +import java.util.logging.Logger; import javax.json.Json; import javax.json.JsonArray; @@ -35,13 +37,12 @@ import javax.ws.rs.core.Response; import 
org.glassfish.jersey.client.ClientConfig; -import com.scylladb.jmx.utils.EstimatedHistogram; import com.scylladb.jmx.utils.SnapshotDetailsTabularData; -import com.yammer.metrics.core.HistogramValues; public class APIClient { - Map cache = new HashMap(); - String getCacheKey(String key, MultivaluedMap param, long duration) { + private Map cache = new HashMap(); + + private String getCacheKey(String key, MultivaluedMap param, long duration) { if (duration <= 0) { return null; } @@ -56,43 +57,40 @@ public class APIClient { return key; } - String getStringFromCache(String key, long duration) { + private String getStringFromCache(String key, long duration) { if (key == null) { return null; } CacheEntry value = cache.get(key); - return (value!= null && value.valid(duration))? value.stringValue() : null; + return (value != null && value.valid(duration)) ? value.stringValue() : null; } - JsonObject getJsonObjectFromCache(String key, long duration) { + private JsonObject getJsonObjectFromCache(String key, long duration) { if (key == null) { return null; } CacheEntry value = cache.get(key); - return (value!= null && value.valid(duration))? value.jsonObject() : null; + return (value != null && value.valid(duration)) ? value.jsonObject() : null; } - EstimatedHistogram getEstimatedHistogramFromCache(String key, long duration) { - if (key == null) { - return null; - } - CacheEntry value = cache.get(key); - return (value!= null && value.valid(duration))? 
value.getEstimatedHistogram() : null; + private JsonReaderFactory factory = Json.createReaderFactory(null); + private static final Logger logger = Logger.getLogger(APIClient.class.getName()); + + private final APIConfig config; + + public APIClient(APIConfig config) { + this.config = config; } - JsonReaderFactory factory = Json.createReaderFactory(null); - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(APIClient.class.getName()); - - public static String getBaseUrl() { - return APIConfig.getBaseUrl(); + private String getBaseUrl() { + return config.getBaseUrl(); } public Invocation.Builder get(String path, MultivaluedMap queryParams) { - Client client = ClientBuilder.newClient( new ClientConfig()); + Client client = ClientBuilder.newClient(new ClientConfig()); WebTarget webTarget = client.target(getBaseUrl()).path(path); if (queryParams != null) { - for (Entry> qp : queryParams.entrySet()) { + for (Entry> qp : queryParams.entrySet()) { for (String e : qp.getValue()) { webTarget = webTarget.queryParam(qp.getKey(), e); } @@ -112,8 +110,9 @@ public class APIClient { public Response post(String path, MultivaluedMap queryParams, Object object, String type) { try { Response response = get(path, queryParams).post(Entity.entity(object, type)); - if (response.getStatus() != Response.Status.OK.getStatusCode() ) { - throw getException("Scylla API server HTTP POST to URL '" + path + "' failed", response.readEntity(String.class)); + if (response.getStatus() != Response.Status.OK.getStatusCode()) { + throw getException("Scylla API server HTTP POST to URL '" + path + "' failed", + response.readEntity(String.class)); } return response; } catch (ProcessingException e) { @@ -124,7 +123,7 @@ public class APIClient { public Response post(String path, MultivaluedMap queryParams, Object object) { return post(path, queryParams, object, MediaType.TEXT_PLAIN); } - + public void post(String path) { post(path, null); } @@ -159,8 +158,7 @@ public 
class APIClient { delete(path, null); } - public String getRawValue(String string, - MultivaluedMap queryParams, long duration) { + public String getRawValue(String string, MultivaluedMap queryParams, long duration) { try { if (string.equals("")) { return ""; @@ -176,7 +174,8 @@ public class APIClient { // TBD // We are currently not caching errors, // it should be reconsider. - throw getException("Scylla API server HTTP GET to URL '" + string + "' failed", response.readEntity(String.class)); + throw getException("Scylla API server HTTP GET to URL '" + string + "' failed", + response.readEntity(String.class)); } res = response.readEntity(String.class); if (duration > 0) { @@ -188,8 +187,7 @@ public class APIClient { } } - public String getRawValue(String string, - MultivaluedMap queryParams) { + public String getRawValue(String string, MultivaluedMap queryParams) { return getRawValue(string, queryParams, 0); } @@ -202,23 +200,19 @@ public class APIClient { } public String getStringValue(String string, MultivaluedMap queryParams) { - return getRawValue(string, - queryParams).replaceAll("^\"|\"$", ""); + return getRawValue(string, queryParams).replaceAll("^\"|\"$", ""); } public String getStringValue(String string, MultivaluedMap queryParams, long duration) { - return getRawValue(string, - queryParams, duration).replaceAll("^\"|\"$", ""); + return getRawValue(string, queryParams, duration).replaceAll("^\"|\"$", ""); } public String getStringValue(String string) { return getStringValue(string, null); } - public JsonReader getReader(String string, - MultivaluedMap queryParams) { - return factory.createReader(new StringReader(getRawValue(string, - queryParams))); + public JsonReader getReader(String string, MultivaluedMap queryParams) { + return factory.createReader(new StringReader(getRawValue(string, queryParams))); } public JsonReader getReader(String string) { @@ -230,8 +224,7 @@ public class APIClient { return val.toArray(new String[val.size()]); } - public int 
getIntValue(String string, - MultivaluedMap queryParams) { + public int getIntValue(String string, MultivaluedMap queryParams) { return Integer.parseInt(getRawValue(string, queryParams)); } @@ -239,6 +232,19 @@ public class APIClient { return getIntValue(string, null); } + public static BiFunction getReader(Class type) { + if (type == String.class) { + return (c, s) -> type.cast(c.getRawValue(s)); + } else if (type == Integer.class) { + return (c, s) -> type.cast(c.getIntValue(s)); + } else if (type == Double.class) { + return (c, s) -> type.cast(c.getDoubleValue(s)); + } else if (type == Long.class) { + return (c, s) -> type.cast(c.getLongValue(s)); + } + throw new IllegalArgumentException(type.getName()); + } + public boolean getBooleanValue(String string) { return Boolean.parseBoolean(getRawValue(string)); } @@ -247,8 +253,7 @@ public class APIClient { return Double.parseDouble(getRawValue(string)); } - public List getListStrValue(String string, - MultivaluedMap queryParams) { + public List getListStrValue(String string, MultivaluedMap queryParams) { JsonReader reader = getReader(string, queryParams); JsonArray arr = reader.readArray(); List res = new ArrayList(arr.size()); @@ -303,8 +308,7 @@ public class APIClient { return join(arr, ","); } - public static String mapToString(Map mp, String pairJoin, - String joiner) { + public static String mapToString(Map mp, String pairJoin, String joiner) { String res = ""; if (mp != null) { for (String name : mp.keySet()) { @@ -321,19 +325,15 @@ public class APIClient { return mapToString(mp, "=", ","); } - public static boolean set_query_param( - MultivaluedMap queryParams, String key, String value) { - if (queryParams != null && key != null && value != null - && !value.equals("")) { + public static boolean set_query_param(MultivaluedMap queryParams, String key, String value) { + if (queryParams != null && key != null && value != null && !value.equals("")) { queryParams.add(key, value); return true; } return false; } - 
public static boolean set_bool_query_param( - MultivaluedMap queryParams, String key, - boolean value) { + public static boolean set_bool_query_param(MultivaluedMap queryParams, String key, boolean value) { if (queryParams != null && key != null && value) { queryParams.add(key, "true"); return true; @@ -352,8 +352,7 @@ public class APIClient { for (int i = 0; i < arr.size(); i++) { JsonObject obj = arr.getJsonObject(i); if (obj.containsKey("key") && obj.containsKey("value")) { - map.put(obj.getString("key"), - listStrFromJArr(obj.getJsonArray("value"))); + map.put(obj.getString("key"), listStrFromJArr(obj.getJsonArray("value"))); } } reader.close(); @@ -375,8 +374,7 @@ public class APIClient { for (int i = 0; i < arr.size(); i++) { JsonObject obj = arr.getJsonObject(i); if (obj.containsKey("key") && obj.containsKey("value")) { - map.put(listStrFromJArr(obj.getJsonArray("key")), - listStrFromJArr(obj.getJsonArray("value"))); + map.put(listStrFromJArr(obj.getJsonArray("key")), listStrFromJArr(obj.getJsonArray("value"))); } } reader.close(); @@ -387,8 +385,7 @@ public class APIClient { return getMapListStrValue(string, null); } - public Set getSetStringValue(String string, - MultivaluedMap queryParams) { + public Set getSetStringValue(String string, MultivaluedMap queryParams) { JsonReader reader = getReader(string, queryParams); JsonArray arr = reader.readArray(); Set res = new HashSet(); @@ -403,8 +400,7 @@ public class APIClient { return getSetStringValue(string, null); } - public Map getMapStrValue(String string, - MultivaluedMap queryParams) { + public Map getMapStrValue(String string, MultivaluedMap queryParams) { if (string.equals("")) { return null; } @@ -425,8 +421,7 @@ public class APIClient { return getMapStrValue(string, null); } - public Map getReverseMapStrValue(String string, - MultivaluedMap queryParams) { + public Map getReverseMapStrValue(String string, MultivaluedMap queryParams) { if (string.equals("")) { return null; } @@ -443,12 +438,11 @@ public 
class APIClient { return map; } - public Map getReverseMapStrValue(String string) { + public Map getReverseMapStrValue(String string) { return getReverseMapStrValue(string, null); } - public List getListInetAddressValue(String string, - MultivaluedMap queryParams) { + public List getListInetAddressValue(String string, MultivaluedMap queryParams) { List vals = getListStrValue(string, queryParams); List res = new ArrayList(); for (String val : vals) { @@ -472,22 +466,20 @@ public class APIClient { } private TabularDataSupport getSnapshotData(String key, JsonArray arr) { - TabularDataSupport data = new TabularDataSupport( - SnapshotDetailsTabularData.TABULAR_TYPE); + TabularDataSupport data = new TabularDataSupport(SnapshotDetailsTabularData.TABULAR_TYPE); for (int i = 0; i < arr.size(); i++) { JsonObject obj = arr.getJsonObject(i); if (obj.containsKey("ks") && obj.containsKey("cf")) { - SnapshotDetailsTabularData.from(key, obj.getString("ks"), - obj.getString("cf"), obj.getInt("total"), + SnapshotDetailsTabularData.from(key, obj.getString("ks"), obj.getString("cf"), obj.getInt("total"), obj.getInt("live"), data); } } return data; } - public Map getMapStringSnapshotTabularDataValue( - String string, MultivaluedMap queryParams) { + public Map getMapStringSnapshotTabularDataValue(String string, + MultivaluedMap queryParams) { if (string.equals("")) { return null; } @@ -521,8 +513,7 @@ public class APIClient { for (int i = 0; i < arr.size(); i++) { try { obj = arr.getJsonObject(i); - res.put(InetAddress.getByName(obj.getString("key")), - Float.parseFloat(obj.getString("value"))); + res.put(InetAddress.getByName(obj.getString("key")), Float.parseFloat(obj.getString("value"))); } catch (UnknownHostException e) { logger.warning("Bad formatted address " + obj.getString("key")); } @@ -534,8 +525,7 @@ public class APIClient { return getMapInetAddressFloatValue(string, null); } - public Map getMapStringLongValue(String string, - MultivaluedMap queryParams) { + public Map 
getMapStringLongValue(String string, MultivaluedMap queryParams) { Map res = new HashMap(); JsonReader reader = getReader(string, queryParams); @@ -553,8 +543,7 @@ public class APIClient { return getMapStringLongValue(string, null); } - public long[] getLongArrValue(String string, - MultivaluedMap queryParams) { + public long[] getLongArrValue(String string, MultivaluedMap queryParams) { JsonReader reader = getReader(string, queryParams); JsonArray arr = reader.readArray(); long[] res = new long[arr.size()]; @@ -569,8 +558,7 @@ public class APIClient { return getLongArrValue(string, null); } - public Map getMapStringIntegerValue(String string, - MultivaluedMap queryParams) { + public Map getMapStringIntegerValue(String string, MultivaluedMap queryParams) { Map res = new HashMap(); JsonReader reader = getReader(string, queryParams); @@ -588,8 +576,7 @@ public class APIClient { return getMapStringIntegerValue(string, null); } - public int[] getIntArrValue(String string, - MultivaluedMap queryParams) { + public int[] getIntArrValue(String string, MultivaluedMap queryParams) { JsonReader reader = getReader(string, queryParams); JsonArray arr = reader.readArray(); int[] res = new int[arr.size()]; @@ -604,8 +591,7 @@ public class APIClient { return getIntArrValue(string, null); } - public Map getListMapStringLongValue(String string, - MultivaluedMap queryParams) { + public Map getListMapStringLongValue(String string, MultivaluedMap queryParams) { if (string.equals("")) { return null; } @@ -638,8 +624,7 @@ public class APIClient { return getListMapStringLongValue(string, null); } - public JsonArray getJsonArray(String string, - MultivaluedMap queryParams) { + public JsonArray getJsonArray(String string, MultivaluedMap queryParams) { if (string.equals("")) { return null; } @@ -653,8 +638,7 @@ public class APIClient { return getJsonArray(string, null); } - public List> getListMapStrValue(String string, - MultivaluedMap queryParams) { + public List> getListMapStrValue(String 
string, MultivaluedMap queryParams) { JsonArray arr = getJsonArray(string, queryParams); List> res = new ArrayList>(); for (int i = 0; i < arr.size(); i++) { @@ -672,8 +656,7 @@ public class APIClient { return null; } - public JsonObject getJsonObj(String string, - MultivaluedMap queryParams, long duration) { + public JsonObject getJsonObj(String string, MultivaluedMap queryParams, long duration) { if (string.equals("")) { return null; } @@ -690,61 +673,19 @@ public class APIClient { } return res; } - public JsonObject getJsonObj(String string, - MultivaluedMap queryParams) { + + public JsonObject getJsonObj(String string, MultivaluedMap queryParams) { return getJsonObj(string, queryParams, 0); } - public static HistogramValues json2histogram(JsonObject obj) { - HistogramValues res = new HistogramValues(); - res.count = obj.getJsonNumber("count").longValue(); - res.max = obj.getJsonNumber("max").longValue(); - res.min = obj.getJsonNumber("min").longValue(); - res.sum = obj.getJsonNumber("sum").longValue(); - res.variance = obj.getJsonNumber("variance").doubleValue(); - res.mean = obj.getJsonNumber("mean").doubleValue(); - JsonArray arr = obj.getJsonArray("sample"); - if (arr != null) { - res.sample = new long[arr.size()]; - for (int i = 0; i < arr.size(); i++) { - res.sample[i] = arr.getJsonNumber(i).longValue(); - } - } - return res; - } - - public HistogramValues getHistogramValue(String url, - MultivaluedMap queryParams) { - return json2histogram(getJsonObj(url, queryParams)); - } - - public HistogramValues getHistogramValue(String url) { - return getHistogramValue(url, null); - } - - public EstimatedHistogram getEstimatedHistogram(String string, - MultivaluedMap queryParams, long duration) { - String key = getCacheKey(string, queryParams, duration); - EstimatedHistogram res = getEstimatedHistogramFromCache(key, duration); - if (res != null) { - return res; - } - res = new EstimatedHistogram(getEstimatedHistogramAsLongArrValue(string, queryParams)); - if 
(duration > 0) { - cache.put(key, new CacheEntry(res)); - } - return res; - - } - public long[] getEstimatedHistogramAsLongArrValue(String string, - MultivaluedMap queryParams) { + public long[] getEstimatedHistogramAsLongArrValue(String string, MultivaluedMap queryParams) { JsonObject obj = getJsonObj(string, queryParams); JsonArray arr = obj.getJsonArray("buckets"); if (arr == null) { return new long[0]; } long res[] = new long[arr.size()]; - for (int i = 0; i< arr.size(); i++) { + for (int i = 0; i < arr.size(); i++) { res[i] = arr.getJsonNumber(i).longValue(); } return res; @@ -754,8 +695,7 @@ public class APIClient { return getEstimatedHistogramAsLongArrValue(string, null); } - public Map getMapStringDouble(String string, - MultivaluedMap queryParams) { + public Map getMapStringDouble(String string, MultivaluedMap queryParams) { if (string.equals("")) { return null; } @@ -783,6 +723,7 @@ public class APIClient { reader.close(); return map; } + public Map getMapStringDouble(String string) { return getMapStringDouble(string, null); } diff --git a/src/main/java/com/scylladb/jmx/api/APIConfig.java b/src/main/java/com/scylladb/jmx/api/APIConfig.java index 522a12d..cd66844 100644 --- a/src/main/java/com/scylladb/jmx/api/APIConfig.java +++ b/src/main/java/com/scylladb/jmx/api/APIConfig.java @@ -30,23 +30,22 @@ import org.yaml.snakeyaml.Yaml; */ public class APIConfig { - static String address = "localhost"; - static String port = "10000"; + private String address = "localhost"; + private String port = "10000"; - public static String getAddress() { + public String getAddress() { return address; } - public static String getPort() { + public String getPort() { return port; } - public static String getBaseUrl() { - return "http://" + address + ":" - + port; + public String getBaseUrl() { + return "http://" + address + ":" + port; } - public static void readFile(String name) { + private void readFile(String name) { System.out.println("Using config file: " + name); 
InputStream input; try { @@ -61,7 +60,7 @@ public class APIConfig { address = (String) map.get("api_address"); } if (map.containsKey("api_port")) { - port = (String) map.get("api_port").toString(); + port = map.get("api_port").toString(); } } catch (FileNotFoundException e) { System.err.println("fail reading from config file: " + name); @@ -74,7 +73,7 @@ public class APIConfig { return varTmpDir.exists(); } - public static boolean loadIfExists(String path, String name) { + private boolean loadIfExists(String path, String name) { if (path == null) { return false; } @@ -84,24 +83,21 @@ public class APIConfig { readFile(path + name); return true; } + /** - * setConfig load the JMX proxy configuration - * The configuration hierarchy is as follow: - * Command line argument takes precedence over everything - * Then configuration file in the command line (command line - * argument can replace specific values in it. - * Then SCYLLA_CONF/scylla.yaml - * Then SCYLLA_HOME/conf/scylla.yaml - * Then conf/scylla.yaml - * Then the default values - * With file configuration, to make it clearer what is been used, only - * one file will be chosen with the highest precedence + * setConfig load the JMX proxy configuration The configuration hierarchy is + * as follow: Command line argument takes precedence over everything Then + * configuration file in the command line (command line argument can replace + * specific values in it. 
Then SCYLLA_CONF/scylla.yaml Then + * SCYLLA_HOME/conf/scylla.yaml Then conf/scylla.yaml Then the default + * values With file configuration, to make it clearer what is been used, + * only one file will be chosen with the highest precedence */ - public static void setConfig() { - if (!System.getProperty("apiconfig","").equals("")) { + public APIConfig() { + if (!System.getProperty("apiconfig", "").equals("")) { readFile(System.getProperty("apiconfig")); - } else if (!loadIfExists(System.getenv("SCYLLA_CONF"), "/scylla.yaml") && - !loadIfExists(System.getenv("SCYLLA_HOME"), "/conf/scylla.yaml")) { + } else if (!loadIfExists(System.getenv("SCYLLA_CONF"), "/scylla.yaml") + && !loadIfExists(System.getenv("SCYLLA_HOME"), "/conf/scylla.yaml")) { loadIfExists("", "conf/scylla.yaml"); } diff --git a/src/main/java/com/scylladb/jmx/api/CacheEntry.java b/src/main/java/com/scylladb/jmx/api/CacheEntry.java index c71756f..2451180 100644 --- a/src/main/java/com/scylladb/jmx/api/CacheEntry.java +++ b/src/main/java/com/scylladb/jmx/api/CacheEntry.java @@ -23,13 +23,11 @@ package com.scylladb.jmx.api; import javax.json.JsonObject; -import com.scylladb.jmx.utils.EstimatedHistogram; +class CacheEntry { + private long time; + private Object value; -public class CacheEntry { - long time; - Object value; - - CacheEntry(Object res) { + public CacheEntry(Object res) { time = System.currentTimeMillis(); this.value = res; } @@ -42,10 +40,6 @@ public class CacheEntry { return (String) value; } - public EstimatedHistogram getEstimatedHistogram() { - return (EstimatedHistogram)value; - } - public JsonObject jsonObject() { return (JsonObject) value; } From 824638594ba5dcb18b53f2689888f600e237ab42 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:13:18 +0200 Subject: [PATCH 29/32] Clean up and simplify Main startup --- src/main/java/com/scylladb/jmx/main/Main.java | 124 +++++++++++++++--- .../jmx/utils/APIMBeanIntrospector.java | 103 --------------- 
.../jmx/utils/RMIServerSocketFactoryImpl.java | 121 ----------------- 3 files changed, 107 insertions(+), 241 deletions(-) delete mode 100644 src/main/java/com/scylladb/jmx/utils/APIMBeanIntrospector.java delete mode 100644 src/main/java/com/scylladb/jmx/utils/RMIServerSocketFactoryImpl.java diff --git a/src/main/java/com/scylladb/jmx/main/Main.java b/src/main/java/com/scylladb/jmx/main/Main.java index 414aeac..6a8dfd3 100644 --- a/src/main/java/com/scylladb/jmx/main/Main.java +++ b/src/main/java/com/scylladb/jmx/main/Main.java @@ -3,38 +3,128 @@ */ package com.scylladb.jmx.main; -import com.scylladb.jmx.api.APIConfig; -import com.scylladb.jmx.utils.RMIServerSocketFactoryImpl; +import static java.lang.management.ManagementFactory.getPlatformMBeanServer; +import static java.rmi.registry.LocateRegistry.createRegistry; +import static java.util.Arrays.asList; +import static javax.net.ServerSocketFactory.getDefault; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.rmi.server.RMIServerSocketFactory; +import java.util.HashMap; +import java.util.Map; + +import javax.management.MBeanServer; +import javax.management.remote.JMXConnectorServer; +import javax.management.remote.JMXServiceURL; +import javax.management.remote.rmi.RMIConnectorServer; import org.apache.cassandra.db.commitlog.CommitLog; import org.apache.cassandra.db.compaction.CompactionManager; -import org.apache.cassandra.gms.Gossiper; import org.apache.cassandra.gms.FailureDetector; +import org.apache.cassandra.gms.Gossiper; import org.apache.cassandra.locator.EndpointSnitchInfo; import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.service.CacheService; import org.apache.cassandra.service.GCInspector; import org.apache.cassandra.service.StorageProxy; import org.apache.cassandra.service.StorageService; +import org.apache.cassandra.streaming.StreamManager; + +import 
com.scylladb.jmx.api.APIClient; +import com.scylladb.jmx.api.APIConfig; +import com.scylladb.jmx.metrics.APIMBean; public class Main { + // todo: command line options. Make us an agent class (also) + private static final APIConfig config = new APIConfig(); + public static final APIClient client = new APIClient(config); + + private static JMXConnectorServer jmxServer = null; + + private static void setupJmx() { + System.setProperty("javax.management.builder.initial", "com.scylladb.jmx.utils.APIBuilder"); + String jmxPort = System.getProperty("com.sun.management.jmxremote.port"); + + if (jmxPort == null) { + System.out.println("JMX is not enabled to receive remote connections."); + + jmxPort = System.getProperty("cassandra.jmx.local.port", "7199"); + String address = System.getProperty("jmx.address", "localhost"); + if (address.equals("localhost")) { + System.setProperty("java.rmi.server.hostname", InetAddress.getLoopbackAddress().getHostAddress()); + } else { + try { + System.setProperty("java.rmi.server.hostname", InetAddress.getByName(address).getHostAddress()); + } catch (UnknownHostException e) { + // TODO Auto-generated catch block + e.printStackTrace(); + } + + } + try { + RMIServerSocketFactory serverFactory = pPort -> getDefault().createServerSocket(pPort, 0, + InetAddress.getLoopbackAddress()); + createRegistry(Integer.valueOf(jmxPort), null, serverFactory); + + StringBuffer url = new StringBuffer(); + url.append("service:jmx:"); + url.append("rmi://").append(address).append("/jndi/"); + url.append("rmi://").append(address).append(":").append(jmxPort).append("/jmxrmi"); + System.out.println(url); + Map env = new HashMap<>(); + env.put(RMIConnectorServer.RMI_SERVER_SOCKET_FACTORY_ATTRIBUTE, serverFactory); + + jmxServer = new RMIConnectorServer(new JMXServiceURL(url.toString()), env, getPlatformMBeanServer()); + + jmxServer.start(); + } catch (IOException e) { + System.out.println("Error starting local jmx server: " + e.toString()); + } + + } else { + 
System.out.println("JMX is enabled to receive remote connections on port: " + jmxPort); + } + + } public static void main(String[] args) throws Exception { - APIConfig.setConfig(); - System.out.println("Connecting to " + APIConfig.getBaseUrl()); + System.out.println("Connecting to " + config.getBaseUrl()); System.out.println("Starting the JMX server"); - RMIServerSocketFactoryImpl.maybeInitJmx(); - StorageService.getInstance(); - StorageProxy.getInstance(); - MessagingService.getInstance(); - CommitLog.getInstance(); - Gossiper.getInstance(); - EndpointSnitchInfo.getInstance(); - FailureDetector.getInstance(); - CacheService.getInstance(); - CompactionManager.getInstance(); - GCInspector.register(); - Thread.sleep(Long.MAX_VALUE); + + setupJmx(); + + try { + MBeanServer server = getPlatformMBeanServer(); + for (Class clazz : asList(StorageService.class, StorageProxy.class, + MessagingService.class, CommitLog.class, Gossiper.class, EndpointSnitchInfo.class, + FailureDetector.class, CacheService.class, CompactionManager.class, GCInspector.class, + StreamManager.class)) { + Constructor c = clazz.getDeclaredConstructor(APIClient.class); + APIMBean m = c.newInstance(client); + server.registerMBean(m, null); + } + + try { + // forces check for dynamically created mbeans + server.queryNames(null, null); + } catch (IllegalStateException e) { + // ignore this. Just means we started before scylla. + } + + for (;;) { + Thread.sleep(Long.MAX_VALUE); + } + } finally { + // make sure to kill the server otherwise we can hang. Not an issue + // when killed perhaps, but any exception above etc would leave a + // zombie. 
+ if (jmxServer != null) { + jmxServer.stop(); + } + } } } diff --git a/src/main/java/com/scylladb/jmx/utils/APIMBeanIntrospector.java b/src/main/java/com/scylladb/jmx/utils/APIMBeanIntrospector.java deleted file mode 100644 index 738b2c8..0000000 --- a/src/main/java/com/scylladb/jmx/utils/APIMBeanIntrospector.java +++ /dev/null @@ -1,103 +0,0 @@ -package com.scylladb.jmx.utils; -/** - * Copyright (C) The MX4J Contributors. - * All rights reserved. - * - * This software is distributed under the terms of the MX4J License version 1.0. - * See the terms of the MX4J License in the documentation provided with this software. - */ - -/** - * Modified by ScyllaDB - * Copyright 2016 ScyllaDB - */ -/* -* This file is part of Scylla. -* -* Scylla is free software: you can redistribute it and/or modify -* it under the terms of the GNU Affero General Public License as published by -* the Free Software Foundation, either version 3 of the License, or -* (at your option) any later version. -* -* Scylla is distributed in the hope that it will be useful, -* but WITHOUT ANY WARRANTY; without even the implied warranty of -* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -* GNU General Public License for more details. -* -* You should have received a copy of the GNU General Public License -* along with Scylla. If not, see . 
-*/ - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; - -import javax.management.MBeanInfo; - -import mx4j.server.MBeanIntrospector; -import mx4j.server.MBeanMetaData; - -public class APIMBeanIntrospector extends MBeanIntrospector { - private static final java.util.logging.Logger logger = java.util.logging.Logger - .getLogger(APIMBeanIntrospector.class.getName()); - - public boolean isMBeanCompliant(MBeanMetaData metadata) { - Class info = metadata.getMBeanInterface(); - if (info != null) { - String cn = info.getName(); - if (cn != null) { - if (cn.endsWith("MXBean")) { - return true; - } - } - } - return super.isMBeanCompliant(metadata); - } - - public void apiIntrospectStandardMBean(MBeanMetaData metadata) { - try { - Class[] cArg = new Class[1]; - cArg[0] = MBeanMetaData.class; - Method met = MBeanIntrospector.class - .getDeclaredMethod("introspectStandardMBean", cArg); - met.setAccessible(true); - met.invoke((MBeanIntrospector) this, metadata); - } catch (NoSuchMethodException | SecurityException - | IllegalAccessException | IllegalArgumentException - | InvocationTargetException e) { - logger.warning("Failed setting mbean info " + e.getMessage()); - } - } - - public void apiIntrospect(MBeanMetaData metadata) { - apiIntrospectStandardMBean(metadata); - Class[] cArg = new Class[1]; - cArg[0] = MBeanMetaData.class; - try { - Method met = MBeanIntrospector.class - .getDeclaredMethod("createStandardMBeanInfo", cArg); - met.setAccessible(true); - Object info = met.invoke((MBeanIntrospector) this, metadata); - metadata.setMBeanInfo((MBeanInfo) info); - } catch (IllegalAccessException | NoSuchMethodException - | SecurityException | IllegalArgumentException - | InvocationTargetException e) { - logger.warning("Failed setting mbean info" + e.getMessage()); - } - } - - public void introspect(MBeanMetaData metadata) { - Class mx_mbean = null; - for (Class it : metadata.getMBean().getClass().getInterfaces()) { - if 
(it.getName().endsWith("MXBean")) { - mx_mbean = it; - break; - } - } - if (mx_mbean != null) { - metadata.setMBeanInterface(mx_mbean); - apiIntrospect(metadata); - return; - } - super.introspect(metadata); - } -} diff --git a/src/main/java/com/scylladb/jmx/utils/RMIServerSocketFactoryImpl.java b/src/main/java/com/scylladb/jmx/utils/RMIServerSocketFactoryImpl.java deleted file mode 100644 index a234e56..0000000 --- a/src/main/java/com/scylladb/jmx/utils/RMIServerSocketFactoryImpl.java +++ /dev/null @@ -1,121 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* - * Copyright 2016 ScyllaDB - * - * Modified by ScyllaDB - */ - -package com.scylladb.jmx.utils; - -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.net.*; -import java.rmi.registry.LocateRegistry; -import java.rmi.server.RMIServerSocketFactory; -import java.util.HashMap; -import java.util.Map; - -import javax.management.remote.JMXConnectorServer; -import javax.management.remote.JMXServiceURL; -import javax.management.remote.rmi.RMIConnectorServer; -import javax.net.ServerSocketFactory; - -public class RMIServerSocketFactoryImpl implements RMIServerSocketFactory { - public static JMXConnectorServer jmxServer = null; - - public static void maybeInitJmx() { - System.setProperty("javax.management.builder.initial", "com.scylladb.jmx.utils.APIBuilder"); - System.setProperty("mx4j.strict.mbean.interface", "no"); - - String jmxPort = System - .getProperty("com.sun.management.jmxremote.port"); - - if (jmxPort == null) { - System.out.println( - "JMX is not enabled to receive remote connections."); - - jmxPort = System.getProperty("cassandra.jmx.local.port", "7199"); - String address = System.getProperty("jmx.address", "localhost"); - if (address.equals("localhost")) { - System.setProperty("java.rmi.server.hostname", - InetAddress.getLoopbackAddress().getHostAddress()); - } else { - try { - System.setProperty("java.rmi.server.hostname", - InetAddress.getByName(address).getHostAddress()); - } catch (UnknownHostException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - - } - try { - RMIServerSocketFactory serverFactory = new RMIServerSocketFactoryImpl(); - LocateRegistry.createRegistry(Integer.valueOf(jmxPort), null, - serverFactory); - - StringBuffer url = new StringBuffer(); - url.append("service:jmx:"); - url.append("rmi://").append(address).append("/jndi/"); - url.append("rmi://").append(address).append(":").append(jmxPort) - .append("/jmxrmi"); - System.out.println(url); - Map env = new HashMap(); - 
env.put(RMIConnectorServer.RMI_SERVER_SOCKET_FACTORY_ATTRIBUTE, - serverFactory); - - jmxServer = new RMIConnectorServer( - new JMXServiceURL(url.toString()), env, - ManagementFactory.getPlatformMBeanServer()); - - jmxServer.start(); - } catch (IOException e) { - System.out.println( - "Error starting local jmx server: " + e.toString()); - } - - } else { - System.out.println( - "JMX is enabled to receive remote connections on port: " - + jmxPort); - } - } - - public ServerSocket createServerSocket(final int pPort) throws IOException { - return ServerSocketFactory.getDefault().createServerSocket(pPort, 0, - InetAddress.getLoopbackAddress()); - } - - public boolean equals(Object obj) { - if (obj == null) { - return false; - } - if (obj == this) { - return true; - } - - return obj.getClass().equals(getClass()); - } - - public int hashCode() { - return RMIServerSocketFactoryImpl.class.hashCode(); - } - -} From 9c2d6cec5156fdbe9ff6f06f0afe11372ca48722 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:14:05 +0200 Subject: [PATCH 30/32] Remove yammer/codehale dependencies and augumentations --- pom.xml | 14 +- .../jmx/utils/EstimatedHistogram.java | 315 -------------- .../jmx/utils/RecentEstimatedHistogram.java | 65 --- .../com/yammer/metrics/core/APICounter.java | 29 -- .../com/yammer/metrics/core/APIHistogram.java | 215 ---------- .../com/yammer/metrics/core/APIMeter.java | 113 ------ .../metrics/core/APIMetricsRegistry.java | 384 ------------------ .../yammer/metrics/core/APISettableMeter.java | 49 --- .../com/yammer/metrics/core/APITimer.java | 134 ------ .../yammer/metrics/core/HistogramValues.java | 11 - 10 files changed, 2 insertions(+), 1327 deletions(-) delete mode 100644 src/main/java/com/scylladb/jmx/utils/EstimatedHistogram.java delete mode 100644 src/main/java/com/scylladb/jmx/utils/RecentEstimatedHistogram.java delete mode 100644 src/main/java/com/yammer/metrics/core/APICounter.java delete mode 100644 
src/main/java/com/yammer/metrics/core/APIHistogram.java delete mode 100644 src/main/java/com/yammer/metrics/core/APIMeter.java delete mode 100644 src/main/java/com/yammer/metrics/core/APIMetricsRegistry.java delete mode 100644 src/main/java/com/yammer/metrics/core/APISettableMeter.java delete mode 100644 src/main/java/com/yammer/metrics/core/APITimer.java delete mode 100644 src/main/java/com/yammer/metrics/core/HistogramValues.java diff --git a/pom.xml b/pom.xml index 4ecfc53..f621830 100644 --- a/pom.xml +++ b/pom.xml @@ -10,8 +10,8 @@ Scylla JMX - 1.7 - 1.7 + 1.8 + 1.8 @@ -71,21 +71,11 @@ guava 18.0 - - com.yammer.metrics - metrics-core - 2.2.0 - com.google.collections google-collections 1.0 - - mx4j - mx4j - 3.0.2 - diff --git a/src/main/java/com/scylladb/jmx/utils/EstimatedHistogram.java b/src/main/java/com/scylladb/jmx/utils/EstimatedHistogram.java deleted file mode 100644 index d2b5feb..0000000 --- a/src/main/java/com/scylladb/jmx/utils/EstimatedHistogram.java +++ /dev/null @@ -1,315 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -/* - * Copyright 2015 Cloudius Systems - * - * Modified by Cloudius Systems - */ - -package com.scylladb.jmx.utils; - -import java.util.Arrays; -import java.util.concurrent.atomic.AtomicLongArray; - -import com.google.common.base.Objects; - -import org.slf4j.Logger; - -public class EstimatedHistogram { - /** - * The series of values to which the counts in `buckets` correspond: 1, 2, - * 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc. Thus, a `buckets` of [0, 0, 1, - * 10] would mean we had seen one value of 3 and 10 values of 4. - * - * The series starts at 1 and grows by 1.2 each time (rounding and removing - * duplicates). It goes from 1 to around 36M by default (creating 90+1 - * buckets), which will give us timing resolution from microseconds to 36 - * seconds, with less precision as the numbers get larger. - * - * Each bucket represents values from (previous bucket offset, current - * offset]. - */ - private final long[] bucketOffsets; - - // buckets is one element longer than bucketOffsets -- the last element is - // values greater than the last offset - final AtomicLongArray buckets; - - public EstimatedHistogram() { - this(90); - } - - public EstimatedHistogram(int bucketCount) { - bucketOffsets = newOffsets(bucketCount); - buckets = new AtomicLongArray(bucketOffsets.length + 1); - } - - public EstimatedHistogram(long[] offsets, long[] bucketData) { - assert bucketData.length == offsets.length + 1; - bucketOffsets = offsets; - buckets = new AtomicLongArray(bucketData); - } - - - public EstimatedHistogram(long[] bucketData) { - bucketOffsets = newOffsets(bucketData.length - 1); - buckets = new AtomicLongArray(bucketData); - } - - private static long[] newOffsets(int size) { - if (size <= 0) { - return new long[0]; - } - long[] result = new long[size]; - long last = 1; - result[0] = last; - for (int i = 1; i < size; i++) { - long next = Math.round(last * 1.2); - if (next == last) - next++; - result[i] = next; - last = next; - } - - return result; - } - - 
/** - * @return the histogram values corresponding to each bucket index - */ - public long[] getBucketOffsets() { - return bucketOffsets; - } - - /** - * Increments the count of the bucket closest to n, rounding UP. - * - * @param n - */ - public void add(long n) { - int index = Arrays.binarySearch(bucketOffsets, n); - if (index < 0) { - // inexact match, take the first bucket higher than n - index = -index - 1; - } - // else exact match; we're good - buckets.incrementAndGet(index); - } - - /** - * @return the count in the given bucket - */ - long get(int bucket) { - return buckets.get(bucket); - } - - /** - * @param reset - * zero out buckets afterwards if true - * @return a long[] containing the current histogram buckets - */ - public long[] getBuckets(boolean reset) { - final int len = buckets.length(); - long[] rv = new long[len]; - - if (reset) - for (int i = 0; i < len; i++) - rv[i] = buckets.getAndSet(i, 0L); - else - for (int i = 0; i < len; i++) - rv[i] = buckets.get(i); - - return rv; - } - - /** - * @return the smallest value that could have been added to this histogram - */ - public long min() { - for (int i = 0; i < buckets.length(); i++) { - if (buckets.get(i) > 0) - return i == 0 ? 0 : 1 + bucketOffsets[i - 1]; - } - return 0; - } - - /** - * @return the largest value that could have been added to this histogram. - * If the histogram overflowed, returns Long.MAX_VALUE. 
- */ - public long max() { - int lastBucket = buckets.length() - 1; - if (buckets.get(lastBucket) > 0) - return Long.MAX_VALUE; - - for (int i = lastBucket - 1; i >= 0; i--) { - if (buckets.get(i) > 0) - return bucketOffsets[i]; - } - return 0; - } - - /** - * @param percentile - * @return estimated value at given percentile - */ - public long percentile(double percentile) { - assert percentile >= 0 && percentile <= 1.0; - int lastBucket = buckets.length() - 1; - if (buckets.get(lastBucket) > 0) - throw new IllegalStateException( - "Unable to compute when histogram overflowed"); - - long pcount = (long) Math.floor(count() * percentile); - if (pcount == 0) - return 0; - - long elements = 0; - for (int i = 0; i < lastBucket; i++) { - elements += buckets.get(i); - if (elements >= pcount) - return bucketOffsets[i]; - } - return 0; - } - - /** - * @return the mean histogram value (average of bucket offsets, weighted by - * count) - * @throws IllegalStateException - * if any values were greater than the largest bucket threshold - */ - public long mean() { - int lastBucket = buckets.length() - 1; - if (buckets.get(lastBucket) > 0) - throw new IllegalStateException( - "Unable to compute ceiling for max when histogram overflowed"); - - long elements = 0; - long sum = 0; - for (int i = 0; i < lastBucket; i++) { - long bCount = buckets.get(i); - elements += bCount; - sum += bCount * bucketOffsets[i]; - } - - return (long) Math.ceil((double) sum / elements); - } - - /** - * @return the total number of non-zero values - */ - public long count() { - long sum = 0L; - for (int i = 0; i < buckets.length(); i++) - sum += buckets.get(i); - return sum; - } - - /** - * @return true if this histogram has overflowed -- that is, a value larger - * than our largest bucket could bound was added - */ - public boolean isOverflowed() { - return buckets.get(buckets.length() - 1) > 0; - } - - /** - * log.debug() every record in the histogram - * - * @param log - */ - public void log(Logger log) 
{ - // only print overflow if there is any - int nameCount; - if (buckets.get(buckets.length() - 1) == 0) - nameCount = buckets.length() - 1; - else - nameCount = buckets.length(); - String[] names = new String[nameCount]; - - int maxNameLength = 0; - for (int i = 0; i < nameCount; i++) { - names[i] = nameOfRange(bucketOffsets, i); - maxNameLength = Math.max(maxNameLength, names[i].length()); - } - - // emit log records - String formatstr = "%" + maxNameLength + "s: %d"; - for (int i = 0; i < nameCount; i++) { - long count = buckets.get(i); - // sort-of-hack to not print empty ranges at the start that are only - // used to demarcate the - // first populated range. for code clarity we don't omit this record - // from the maxNameLength - // calculation, and accept the unnecessary whitespace prefixes that - // will occasionally occur - if (i == 0 && count == 0) - continue; - log.debug(String.format(formatstr, names[i], count)); - } - } - - private static String nameOfRange(long[] bucketOffsets, int index) { - StringBuilder sb = new StringBuilder(); - appendRange(sb, bucketOffsets, index); - return sb.toString(); - } - - private static void appendRange(StringBuilder sb, long[] bucketOffsets, - int index) { - sb.append("["); - if (index == 0) - if (bucketOffsets[0] > 0) - // by original definition, this histogram is for values greater - // than zero only; - // if values of 0 or less are required, an entry of lb-1 must be - // inserted at the start - sb.append("1"); - else - sb.append("-Inf"); - else - sb.append(bucketOffsets[index - 1] + 1); - sb.append(".."); - if (index == bucketOffsets.length) - sb.append("Inf"); - else - sb.append(bucketOffsets[index]); - sb.append("]"); - } - - @Override - public boolean equals(Object o) { - if (this == o) - return true; - - if (!(o instanceof EstimatedHistogram)) - return false; - - EstimatedHistogram that = (EstimatedHistogram) o; - return Arrays.equals(getBucketOffsets(), that.getBucketOffsets()) - && 
Arrays.equals(getBuckets(false), that.getBuckets(false)); - } - - @Override - public int hashCode() { - return Objects.hashCode(getBucketOffsets(), getBuckets(false)); - } -} diff --git a/src/main/java/com/scylladb/jmx/utils/RecentEstimatedHistogram.java b/src/main/java/com/scylladb/jmx/utils/RecentEstimatedHistogram.java deleted file mode 100644 index 9a823e8..0000000 --- a/src/main/java/com/scylladb/jmx/utils/RecentEstimatedHistogram.java +++ /dev/null @@ -1,65 +0,0 @@ -package com.scylladb.jmx.utils; -/* - * Copyright (C) 2015 ScyllaDB - */ - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -/** - * - * RecentEstimatedHistogram In the (deprecated) 'recent' functionality, each - * call to get the values cleans the value. - * - * The RecentEstimatedHistogram support recent call to EstimatedHistogram. - * It holds the latest total values and a call to getBuckets return the delta. 
- * - */ -public class RecentEstimatedHistogram extends EstimatedHistogram { - public RecentEstimatedHistogram() { - } - - public RecentEstimatedHistogram(int bucketCount) { - super(bucketCount); - } - - public RecentEstimatedHistogram(long[] offsets, long[] bucketData) { - super(offsets, bucketData); - } - - /** - * Set the current buckets to new value and return the delta from the last - * getBuckets call - * - * @param bucketData - * - new bucket value - * @return a long[] containing the current histogram difference buckets - */ - public long[] getBuckets(long[] bucketData) { - if (bucketData.length == 0) { - return new long[0]; - } - final int len = buckets.length(); - long[] rv = new long[len]; - - for (int i = 0; i < len; i++) { - rv[i] = bucketData[i]; - rv[i] -= buckets.getAndSet(i, bucketData[i]); - } - return rv; - } -} diff --git a/src/main/java/com/yammer/metrics/core/APICounter.java b/src/main/java/com/yammer/metrics/core/APICounter.java deleted file mode 100644 index 79eeca4..0000000 --- a/src/main/java/com/yammer/metrics/core/APICounter.java +++ /dev/null @@ -1,29 +0,0 @@ -package com.yammer.metrics.core; -/* - * Copyright 2015 Cloudius Systems - * - * Modified by Cloudius Systems - */ - -import com.scylladb.jmx.api.APIClient; -import com.yammer.metrics.core.Counter; - -public class APICounter extends Counter { - String url; - private APIClient c = new APIClient(); - - public APICounter(String _url) { - super(); - url = _url; - } - /** - * Returns the counter's current value. 
- * - * @return the counter's current value - */ - public long count() { - return c.getLongValue(url); - } - - -} diff --git a/src/main/java/com/yammer/metrics/core/APIHistogram.java b/src/main/java/com/yammer/metrics/core/APIHistogram.java deleted file mode 100644 index ca9e27d..0000000 --- a/src/main/java/com/yammer/metrics/core/APIHistogram.java +++ /dev/null @@ -1,215 +0,0 @@ -package com.yammer.metrics.core; - -/* - * Copyright 2015 Cloudius Systems - * - * Modified by Cloudius Systems - */ - -import java.lang.reflect.Field; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; - -import javax.json.JsonObject; - -import com.scylladb.jmx.api.APIClient; -import com.yammer.metrics.stats.Sample; -import com.yammer.metrics.stats.Snapshot; - -public class APIHistogram extends Histogram { - Field countField; - Field minField; - Field maxField; - Field sumField; - Field varianceField; - Field sampleField; - - long last_update = 0; - static final long UPDATE_INTERVAL = 50; - long updateInterval; - String url; - private APIClient c = new APIClient(); - - private void setFields() { - try { - minField = Histogram.class.getDeclaredField("min"); - minField.setAccessible(true); - maxField = Histogram.class.getDeclaredField("max"); - maxField.setAccessible(true); - sumField = Histogram.class.getDeclaredField("sum"); - sumField.setAccessible(true); - varianceField = Histogram.class.getDeclaredField("variance"); - varianceField.setAccessible(true); - sampleField = Histogram.class.getDeclaredField("sample"); - sampleField.setAccessible(true); - countField = Histogram.class.getDeclaredField("count"); - countField.setAccessible(true); - try { - getCount().set(0); - } catch (IllegalArgumentException | IllegalAccessException e) { - // There's no reason to get here - // and there's nothing we can do even if we would - } - } catch (NoSuchFieldException | SecurityException e) { - e.printStackTrace(); - } - } - - public AtomicLong getMin() 
throws IllegalArgumentException, - IllegalAccessException { - return (AtomicLong) minField.get(this); - } - - public AtomicLong getMax() throws IllegalArgumentException, - IllegalAccessException { - return (AtomicLong) maxField.get(this); - } - - public AtomicLong getSum() throws IllegalArgumentException, - IllegalAccessException { - return (AtomicLong) sumField.get(this); - } - - public AtomicLong getCount() throws IllegalArgumentException, - IllegalAccessException { - return (AtomicLong) countField.get(this); - } - - @SuppressWarnings("unchecked") - public AtomicReference getVariance() - throws IllegalArgumentException, IllegalAccessException { - return (AtomicReference) varianceField.get(this); - } - - public Sample getSample() throws IllegalArgumentException, - IllegalAccessException { - return (Sample) sampleField.get(this); - } - - public APIHistogram(String url, Sample sample) { - super(sample); - setFields(); - this.url = url; - } - - public APIHistogram(String url, SampleType type, long updateInterval) { - super(type); - setFields(); - this.url = url; - this.updateInterval = updateInterval; - } - - public APIHistogram(String url, SampleType type) { - this(url, type, UPDATE_INTERVAL); - } - - public void updateValue(HistogramValues vals) { - try { - if (vals.sample != null) { - for (long v : vals.sample) { - getSample().update(v); - } - } - getCount().set(vals.count); - getMax().set(vals.max); - getMin().set(vals.min); - getSum().set(vals.sum); - double[] newValue = new double[2]; - newValue[0] = vals.mean; - newValue[1] = vals.variance; - getVariance().getAndSet(newValue); - } catch (IllegalArgumentException | IllegalAccessException e) { - e.printStackTrace(); - } - } - - public void update() { - if (url == null) { - return; - } - long now = System.currentTimeMillis(); - if (now - last_update < UPDATE_INTERVAL) { - return; - } - last_update = now; - clear(); - JsonObject obj = c.getJsonObj(url, null); - if (obj.containsKey("hist")) { - 
updateValue(APIClient.json2histogram(obj.getJsonObject("hist"))); - } else { - updateValue(APIClient.json2histogram(obj)); - } - - } - - /** - * Returns the number of values recorded. - * - * @return the number of values recorded - */ - public long count() { - update(); - return super.count(); - } - - /* - * (non-Javadoc) - * - * @see com.yammer.metrics.core.Summarizable#max() - */ - @Override - public double max() { - update(); - return super.max(); - } - - /* - * (non-Javadoc) - * - * @see com.yammer.metrics.core.Summarizable#min() - */ - @Override - public double min() { - update(); - return super.min(); - } - - /* - * (non-Javadoc) - * - * @see com.yammer.metrics.core.Summarizable#mean() - */ - @Override - public double mean() { - update(); - return super.mean(); - } - - /* - * (non-Javadoc) - * - * @see com.yammer.metrics.core.Summarizable#stdDev() - */ - @Override - public double stdDev() { - update(); - return super.stdDev(); - } - - /* - * (non-Javadoc) - * - * @see com.yammer.metrics.core.Summarizable#sum() - */ - @Override - public double sum() { - update(); - return super.sum(); - } - - @Override - public Snapshot getSnapshot() { - update(); - return super.getSnapshot(); - } -} diff --git a/src/main/java/com/yammer/metrics/core/APIMeter.java b/src/main/java/com/yammer/metrics/core/APIMeter.java deleted file mode 100644 index a20e0e3..0000000 --- a/src/main/java/com/yammer/metrics/core/APIMeter.java +++ /dev/null @@ -1,113 +0,0 @@ -package com.yammer.metrics.core; -/* - * Copyright (C) 2015 ScyllaDB - */ - -import java.util.concurrent.ScheduledExecutorService; - -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -/* - * Modified by ScyllaDB - */ -import java.util.concurrent.TimeUnit; - -import javax.json.JsonArray; -import javax.json.JsonObject; - -import com.scylladb.jmx.api.APIClient; - -public class APIMeter extends Meter { - public final static long CACHE_DURATION = 1000; - - String url; - String eventType; - TimeUnit rateUnit; - APIClient c = new APIClient(); - long count; - double oneMinuteRate; - double fiveMinuteRate; - double fifteenMinuteRate; - double meanRate; - - public APIMeter(String url, ScheduledExecutorService tickThread, - String eventType, TimeUnit rateUnit) { - super(tickThread, eventType, rateUnit, Clock.defaultClock()); - super.stop(); - this.url = url; - this.eventType = eventType; - this.rateUnit = rateUnit; - } - - public void fromJson(JsonObject obj) { - JsonArray rates = obj.getJsonArray("rates"); - int i = 0; - oneMinuteRate = rates.getJsonNumber(i++).doubleValue(); - fiveMinuteRate = rates.getJsonNumber(i++).doubleValue(); - fifteenMinuteRate = rates.getJsonNumber(i++).doubleValue(); - meanRate = obj.getJsonNumber("mean_rate").doubleValue(); - count = obj.getJsonNumber("count").longValue(); - } - - public void update_fields() { - if (url != null) { - fromJson(c.getJsonObj(url, null, CACHE_DURATION)); - } - } - - @Override - public TimeUnit rateUnit() { - return rateUnit; - } - - @Override - public String eventType() { - return eventType; - } - - @Override - public long count() { - update_fields(); - return count; - } - - @Override - public double fifteenMinuteRate() { - update_fields(); - return fifteenMinuteRate; - } - - @Override - public double fiveMinuteRate() { - update_fields(); 
- return fiveMinuteRate; - } - - @Override - public double meanRate() { - update_fields(); - return meanRate; - } - - @Override - public double oneMinuteRate() { - update_fields(); - return oneMinuteRate; - } - -} diff --git a/src/main/java/com/yammer/metrics/core/APIMetricsRegistry.java b/src/main/java/com/yammer/metrics/core/APIMetricsRegistry.java deleted file mode 100644 index 3ef1b20..0000000 --- a/src/main/java/com/yammer/metrics/core/APIMetricsRegistry.java +++ /dev/null @@ -1,384 +0,0 @@ -package com.yammer.metrics.core; - -import java.lang.reflect.Field; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import com.yammer.metrics.core.APICounter; -import com.yammer.metrics.core.APIMeter; -import com.yammer.metrics.core.Clock; -import com.yammer.metrics.core.Counter; -import com.yammer.metrics.core.Meter; -import com.yammer.metrics.core.Metric; -import com.yammer.metrics.core.MetricName; -import com.yammer.metrics.core.MetricsRegistry; -import com.yammer.metrics.core.ThreadPools; -import com.yammer.metrics.core.Histogram.SampleType; - -/* - * Copyright 2015 Cloudius Systems - * - * Modified by Cloudius Systems - */ - -public class APIMetricsRegistry extends MetricsRegistry { - Field fieldMetrics; - Field fieldClock; - Field fieldThreadPool; - - public APIMetricsRegistry() { - try { - fieldMetrics = MetricsRegistry.class.getDeclaredField("metrics"); - fieldMetrics.setAccessible(true); - fieldClock = MetricsRegistry.class.getDeclaredField("clock"); - fieldClock.setAccessible(true); - fieldThreadPool = MetricsRegistry.class - .getDeclaredField("threadPools"); - fieldThreadPool.setAccessible(true); - } catch (NoSuchFieldException | SecurityException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - - public ThreadPools getThreadPools() { - try { - return (ThreadPools) fieldThreadPool.get(this); - } catch (IllegalArgumentException | 
IllegalAccessException e) { - e.printStackTrace(); - } - return null; - } - - public Clock getClock() { - try { - return (Clock) fieldClock.get(this); - } catch (IllegalArgumentException | IllegalAccessException e) { - e.printStackTrace(); - } - return null; - } - - @SuppressWarnings("unchecked") - public ConcurrentMap getMetrics() { - try { - return (ConcurrentMap) fieldMetrics.get(this); - } catch (IllegalArgumentException | IllegalAccessException e) { - e.printStackTrace(); - } - return null; - } - - /** - * Creates a new {@link Counter} and registers it under the given class and - * name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @return a new {@link Counter} - */ - public Counter newCounter(String url, Class klass, String name) { - return newCounter(url, klass, name, null); - } - - /** - * Creates a new {@link Counter} and registers it under the given class and - * name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @return a new {@link Counter} - */ - public Counter newCounter(String url, Class klass, String name, - String scope) { - return newCounter(url, createName(klass, name, scope)); - } - - /** - * Creates a new {@link Counter} and registers it under the given metric - * name. - * - * @param metricName - * the name of the metric - * @return a new {@link Counter} - */ - public Counter newCounter(String url, MetricName metricName) { - return getOrAdd(metricName, new APICounter(url)); - } - - /** - * Creates a new {@link Meter} and registers it under the given class and - * name. 
- * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param eventType - * the plural name of the type of events the meter is measuring - * (e.g., {@code "requests"}) - * @param unit - * the rate unit of the new meter - * @return a new {@link Meter} - */ - public APIMeter newMeter(String url, Class klass, String name, - String eventType, TimeUnit unit) { - return newMeter(url, klass, name, null, eventType, unit); - } - - /** - * Creates a new {@link Meter} and registers it under the given class, name, - * and scope. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @param eventType - * the plural name of the type of events the meter is measuring - * (e.g., {@code "requests"}) - * @param unit - * the rate unit of the new meter - * @return a new {@link Meter} - */ - public APIMeter newMeter(String url, Class klass, String name, - String scope, String eventType, TimeUnit unit) { - return newMeter(url, createName(klass, name, scope), eventType, unit); - } - - private ScheduledExecutorService newMeterTickThreadPool() { - return getThreadPools().newScheduledThreadPool(2, "meter-tick"); - } - - /** - * Creates a new {@link Meter} and registers it under the given metric name. 
- * - * @param metricName - * the name of the metric - * @param eventType - * the plural name of the type of events the meter is measuring - * (e.g., {@code "requests"}) - * @param unit - * the rate unit of the new meter - * @return a new {@link Meter} - */ - public APIMeter newMeter(String url, MetricName metricName, String eventType, - TimeUnit unit) { - final Metric existingMetric = getMetrics().get(metricName); - if (existingMetric != null) { - return (APIMeter) existingMetric; - } - return getOrAdd(metricName, new APIMeter(url, newMeterTickThreadPool(), - eventType, unit)); - } - - /** - * Creates a new {@link APISettableMeter} and registers it under the given metric name. - * - * @param metricName - * the name of the metric - * @param eventType - * the plural name of the type of events the meter is measuring - * (e.g., {@code "requests"}) - * @param unit - * the rate unit of the new meter - * @return a new {@link Meter} - */ - public Meter newSettableMeter(MetricName metricName, String eventType, - TimeUnit unit) { - final Metric existingMetric = getMetrics().get(metricName); - if (existingMetric != null) { - return (Meter) existingMetric; - } - return getOrAdd(metricName, new APISettableMeter(newMeterTickThreadPool(), - eventType, unit, getClock())); - } - - /** - * Creates a new {@link Histogram} and registers it under the given class - * and name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param biased - * whether or not the histogram should be biased - * @return a new {@link Histogram} - */ - public Histogram newHistogram(String url, Class klass, String name, - boolean biased) { - return newHistogram(url, klass, name, null, biased); - } - - /** - * Creates a new {@link Histogram} and registers it under the given class, - * name, and scope. 
- * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @param biased - * whether or not the histogram should be biased - * @return a new {@link Histogram} - */ - public Histogram newHistogram(String url, Class klass, String name, - String scope, boolean biased) { - return newHistogram(url, createName(klass, name, scope), biased); - } - - /** - * Creates a new non-biased {@link Histogram} and registers it under the - * given class and name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @return a new {@link Histogram} - */ - public Histogram newHistogram(String url, Class klass, String name) { - return newHistogram(url, klass, name, false); - } - - /** - * Creates a new non-biased {@link Histogram} and registers it under the - * given class, name, and scope. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @return a new {@link Histogram} - */ - public Histogram newHistogram(String url, Class klass, String name, - String scope) { - return newHistogram(url, klass, name, scope, false); - } - - /** - * Creates a new {@link Histogram} and registers it under the given metric - * name. - * - * @param metricName - * the name of the metric - * @param biased - * whether or not the histogram should be biased - * @return a new {@link Histogram} - */ - public Histogram newHistogram(String url, MetricName metricName, - boolean biased) { - return getOrAdd(metricName, new APIHistogram(url, - biased ? SampleType.BIASED : SampleType.UNIFORM)); - } - - /** - * Creates a new {@link Timer} and registers it under the given class and - * name, measuring elapsed time in milliseconds and invocations per second. 
- * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @return a new {@link Timer} - */ - public Timer newTimer(String url, Class klass, String name) { - return newTimer(url, klass, name, null, TimeUnit.MILLISECONDS, - TimeUnit.SECONDS); - } - - /** - * Creates a new {@link Timer} and registers it under the given class and - * name. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param durationUnit - * the duration scale unit of the new timer - * @param rateUnit - * the rate scale unit of the new timer - * @return a new {@link Timer} - */ - public Timer newTimer(String url, Class klass, String name, - TimeUnit durationUnit, TimeUnit rateUnit) { - return newTimer(url, klass, name, null, durationUnit, rateUnit); - } - - /** - * Creates a new {@link Timer} and registers it under the given class, name, - * and scope, measuring elapsed time in milliseconds and invocations per - * second. - * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @return a new {@link Timer} - */ - public Timer newTimer(String url, Class klass, String name, String scope) { - return newTimer(url, klass, name, scope, TimeUnit.MILLISECONDS, - TimeUnit.SECONDS); - } - - /** - * Creates a new {@link Timer} and registers it under the given class, name, - * and scope. 
- * - * @param klass - * the class which owns the metric - * @param name - * the name of the metric - * @param scope - * the scope of the metric - * @param durationUnit - * the duration scale unit of the new timer - * @param rateUnit - * the rate scale unit of the new timer - * @return a new {@link Timer} - */ - public Timer newTimer(String url, Class klass, String name, - String scope, TimeUnit durationUnit, TimeUnit rateUnit) { - return newTimer(url, createName(klass, name, scope), durationUnit, - rateUnit); - } - - /** - * Creates a new {@link Timer} and registers it under the given metric name. - * - * @param metricName - * the name of the metric - * @param durationUnit - * the duration scale unit of the new timer - * @param rateUnit - * the rate scale unit of the new timer - * @return a new {@link Timer} - */ - public Timer newTimer(String url, MetricName metricName, - TimeUnit durationUnit, TimeUnit rateUnit) { - final Metric existingMetric = getMetrics().get(metricName); - if (existingMetric != null) { - return (Timer) existingMetric; - } - return getOrAdd(metricName, new APITimer(url, newMeterTickThreadPool(), - durationUnit, rateUnit)); - } - -} diff --git a/src/main/java/com/yammer/metrics/core/APISettableMeter.java b/src/main/java/com/yammer/metrics/core/APISettableMeter.java deleted file mode 100644 index 51c92bb..0000000 --- a/src/main/java/com/yammer/metrics/core/APISettableMeter.java +++ /dev/null @@ -1,49 +0,0 @@ -package com.yammer.metrics.core; - -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -/* - * Copyright 2015 ScyllaDB - * - */ -/* - * This file is part of Scylla. - * - * Scylla is free software: you can redistribute it and/or modify - * it under the terms of the GNU Affero General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * Scylla is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Scylla. If not, see . - */ - -public class APISettableMeter extends Meter { - - public APISettableMeter(ScheduledExecutorService tickThread, - String eventType, TimeUnit rateUnit, Clock clock) { - super(tickThread, eventType, rateUnit, clock); - } - - // Meter doesn't have a set value method. - // to mimic it, we clear the old value and set it to a new one. - // This is safe because the only this method would be used - // to update the values - public long set(long new_value) { - long res = super.count(); - mark(-res); - mark(new_value); - return res; - } - - @Override - public void tick() { - super.tick(); - } -} diff --git a/src/main/java/com/yammer/metrics/core/APITimer.java b/src/main/java/com/yammer/metrics/core/APITimer.java deleted file mode 100644 index 9f29ca5..0000000 --- a/src/main/java/com/yammer/metrics/core/APITimer.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright 2015 Cloudius Systems - * - */ -package com.yammer.metrics.core; - -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - -import javax.json.JsonObject; - -import com.scylladb.jmx.api.APIClient; -import com.yammer.metrics.core.Histogram.SampleType; -import com.yammer.metrics.stats.Snapshot; - -/** - * A timer metric which aggregates timing durations and provides duration - * statistics, plus throughput statistics via {@link Meter}. 
- */ -public class APITimer extends Timer { - public final static long CACHE_DURATION = 1000; - - final TimeUnit durationUnit, rateUnit; - final APIMeter meter; - final APIHistogram histogram; - APIClient c = new APIClient(); - - private double convertFromNS(double ns) { - return ns / TimeUnit.NANOSECONDS.convert(1, durationUnit); - } - - String url; - - public APITimer(String url, ScheduledExecutorService tickThread, - TimeUnit durationUnit, TimeUnit rateUnit) { - super(tickThread, durationUnit, rateUnit); - super.stop(); - this.url = url; - this.durationUnit = durationUnit; - this.rateUnit = rateUnit; - meter = new APIMeter(null, tickThread, "calls", rateUnit); - histogram = new APIHistogram(null, SampleType.BIASED); - } - - public void fromJson(JsonObject obj) { - meter.fromJson(obj.getJsonObject("meter")); - histogram.updateValue(APIClient.json2histogram(obj.getJsonObject("hist"))); - } - - public void update_fields() { - if (url != null) { - fromJson(c.getJsonObj(url, null, CACHE_DURATION)); - } - } - - @Override - public double max() { - update_fields(); - return convertFromNS(histogram.max()); - } - - @Override - public double min() { - update_fields(); - return convertFromNS(histogram.min()); - } - - @Override - public double mean() { - update_fields(); - return convertFromNS(histogram.mean()); - } - - @Override - public double stdDev() { - update_fields(); - return convertFromNS(histogram.stdDev()); - } - - @Override - public double sum() { - update_fields(); - return convertFromNS(histogram.sum()); - } - - @Override - public Snapshot getSnapshot() { - update_fields(); - return histogram.getSnapshot(); - } - - @Override - public TimeUnit rateUnit() { - update_fields(); - return meter.rateUnit(); - } - - @Override - public String eventType() { - update_fields(); - return meter.eventType(); - } - - @Override - public long count() { - update_fields(); - return meter.count(); - } - - @Override - public double fifteenMinuteRate() { - update_fields(); - return 
meter.fifteenMinuteRate(); - } - - @Override - public double fiveMinuteRate() { - update_fields(); - return meter.fiveMinuteRate(); - } - - @Override - public double meanRate() { - update_fields(); - return meter.meanRate(); - } - - @Override - public double oneMinuteRate() { - update_fields(); - return meter.oneMinuteRate(); - } - -} diff --git a/src/main/java/com/yammer/metrics/core/HistogramValues.java b/src/main/java/com/yammer/metrics/core/HistogramValues.java deleted file mode 100644 index 6e6f490..0000000 --- a/src/main/java/com/yammer/metrics/core/HistogramValues.java +++ /dev/null @@ -1,11 +0,0 @@ -package com.yammer.metrics.core; - -public class HistogramValues { - public long count; - public long min; - public long max; - public long sum; - public double variance; - public double mean; - public long sample[]; -} From 434ce947b09a07dea0733858926a1fa22b761dc7 Mon Sep 17 00:00:00 2001 From: elcallio Date: Tue, 11 Oct 2016 14:17:06 +0200 Subject: [PATCH 31/32] Code formatting + source cleanup (eclipse) --- SCYLLA-VERSION-GEN | 0 scripts/git-archive-all | 0 .../com/scylladb/jmx/utils/FileUtils.java | 52 ++---- .../java/com/scylladb/jmx/utils/Pair.java | 25 +-- .../jmx/utils/SnapshotDetailsTabularData.java | 44 ++--- .../cassandra/db/ColumnFamilyStoreMBean.java | 67 ++++--- .../db/commitlog/CommitLogMBean.java | 12 +- .../CompactionHistoryTabularData.java | 30 ++- .../db/compaction/CompactionManagerMBean.java | 22 ++- .../cassandra/gms/ApplicationState.java | 33 +--- .../apache/cassandra/gms/EndpointState.java | 5 +- .../cassandra/gms/FailureDetectorMBean.java | 3 +- .../apache/cassandra/gms/GossiperMBean.java | 3 +- .../apache/cassandra/gms/HeartBeatState.java | 4 +- .../locator/EndpointSnitchInfoMBean.java | 18 +- .../cassandra/net/MessagingServiceMBean.java | 4 +- .../cassandra/service/CacheServiceMBean.java | 20 +- .../cassandra/service/GCInspectorMXBean.java | 6 +- .../cassandra/service/StorageProxyMBean.java | 3 +- .../service/StorageServiceMBean.java | 
173 ++++++++++-------- .../cassandra/streaming/ProgressInfo.java | 72 ++++---- .../cassandra/streaming/SessionInfo.java | 112 ++++-------- .../streaming/StreamManagerMBean.java | 1 + .../cassandra/streaming/StreamSession.java | 119 ++++++------ .../cassandra/streaming/StreamState.java | 19 +- .../cassandra/streaming/StreamSummary.java | 30 +-- .../management/ProgressInfoCompositeData.java | 85 +++------ .../management/SessionInfoCompositeData.java | 163 ++++++----------- .../management/StreamStateCompositeData.java | 101 ++++------ .../StreamSummaryCompositeData.java | 60 +++--- 30 files changed, 575 insertions(+), 711 deletions(-) mode change 100755 => 100644 SCYLLA-VERSION-GEN mode change 100755 => 100644 scripts/git-archive-all diff --git a/SCYLLA-VERSION-GEN b/SCYLLA-VERSION-GEN old mode 100755 new mode 100644 diff --git a/scripts/git-archive-all b/scripts/git-archive-all old mode 100755 new mode 100644 diff --git a/src/main/java/com/scylladb/jmx/utils/FileUtils.java b/src/main/java/com/scylladb/jmx/utils/FileUtils.java index 11ffba1..b71c055 100644 --- a/src/main/java/com/scylladb/jmx/utils/FileUtils.java +++ b/src/main/java/com/scylladb/jmx/utils/FileUtils.java @@ -24,69 +24,57 @@ package com.scylladb.jmx.utils; -import java.io.*; +import java.io.File; import java.text.DecimalFormat; -public class FileUtils -{ +public class FileUtils { private static final double KB = 1024d; - private static final double MB = 1024*1024d; - private static final double GB = 1024*1024*1024d; - private static final double TB = 1024*1024*1024*1024d; + private static final double MB = 1024 * 1024d; + private static final double GB = 1024 * 1024 * 1024d; + private static final double TB = 1024 * 1024 * 1024 * 1024d; private static final DecimalFormat df = new DecimalFormat("#.##"); - - public static String stringifyFileSize(double value) - { + public static String stringifyFileSize(double value) { double d; - if ( value >= TB ) - { + if (value >= TB) { d = value / TB; String val = 
df.format(d); return val + " TB"; - } - else if ( value >= GB ) - { + } else if (value >= GB) { d = value / GB; String val = df.format(d); return val + " GB"; - } - else if ( value >= MB ) - { + } else if (value >= MB) { d = value / MB; String val = df.format(d); return val + " MB"; - } - else if ( value >= KB ) - { + } else if (value >= KB) { d = value / KB; String val = df.format(d); return val + " KB"; - } - else - { + } else { String val = df.format(value); return val + " bytes"; } } - /** * Get the size of a directory in bytes - * @param directory The directory for which we need size. + * + * @param directory + * The directory for which we need size. * @return The size of the directory */ - public static long folderSize(File directory) - { + public static long folderSize(File directory) { long length = 0; - for (File file : directory.listFiles()) - { - if (file.isFile()) + for (File file : directory.listFiles()) { + if (file.isFile()) { length += file.length(); - else + } else { length += folderSize(file); + } } return length; } - } +} diff --git a/src/main/java/com/scylladb/jmx/utils/Pair.java b/src/main/java/com/scylladb/jmx/utils/Pair.java index c5aa795..9644cc9 100644 --- a/src/main/java/com/scylladb/jmx/utils/Pair.java +++ b/src/main/java/com/scylladb/jmx/utils/Pair.java @@ -26,43 +26,38 @@ package com.scylladb.jmx.utils; import com.google.common.base.Objects; -public class Pair -{ +public class Pair { public final T1 left; public final T2 right; - protected Pair(T1 left, T2 right) - { + protected Pair(T1 left, T2 right) { this.left = left; this.right = right; } @Override - public final int hashCode() - { + public final int hashCode() { int hashCode = 31 + (left == null ? 0 : left.hashCode()); - return 31*hashCode + (right == null ? 0 : right.hashCode()); + return 31 * hashCode + (right == null ? 
0 : right.hashCode()); } @Override - public final boolean equals(Object o) - { - if(!(o instanceof Pair)) + public final boolean equals(Object o) { + if (!(o instanceof Pair)) { return false; + } @SuppressWarnings("rawtypes") - Pair that = (Pair)o; + Pair that = (Pair) o; // handles nulls properly return Objects.equal(left, that.left) && Objects.equal(right, that.right); } @Override - public String toString() - { + public String toString() { return "(" + left + "," + right + ")"; } - public static Pair create(X x, Y y) - { + public static Pair create(X x, Y y) { return new Pair(x, y); } } diff --git a/src/main/java/com/scylladb/jmx/utils/SnapshotDetailsTabularData.java b/src/main/java/com/scylladb/jmx/utils/SnapshotDetailsTabularData.java index 403e7c4..3624ee4 100644 --- a/src/main/java/com/scylladb/jmx/utils/SnapshotDetailsTabularData.java +++ b/src/main/java/com/scylladb/jmx/utils/SnapshotDetailsTabularData.java @@ -23,18 +23,24 @@ package com.scylladb.jmx.utils; import java.util.Map; -import javax.management.openmbean.*; + +import javax.management.openmbean.CompositeDataSupport; +import javax.management.openmbean.CompositeType; +import javax.management.openmbean.OpenDataException; +import javax.management.openmbean.OpenType; +import javax.management.openmbean.SimpleType; +import javax.management.openmbean.TabularDataSupport; +import javax.management.openmbean.TabularType; import com.google.common.base.Throwables; public class SnapshotDetailsTabularData { - private static final String[] ITEM_NAMES = new String[] { "Snapshot name", - "Keyspace name", "Column family name", "True size", "Size on disk" }; + private static final String[] ITEM_NAMES = new String[] { "Snapshot name", "Keyspace name", "Column family name", + "True size", "Size on disk" }; - private static final String[] ITEM_DESCS = new String[] { "snapshot_name", - "keyspace_name", "columnfamily_name", "TrueDiskSpaceUsed", - "TotalDiskSpaceUsed" }; + private static final String[] ITEM_DESCS = new 
String[] { "snapshot_name", "keyspace_name", "columnfamily_name", + "TrueDiskSpaceUsed", "TotalDiskSpaceUsed" }; private static final String TYPE_NAME = "SnapshotDetails"; @@ -48,28 +54,22 @@ public class SnapshotDetailsTabularData { static { try { - ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, - SimpleType.STRING, SimpleType.STRING, SimpleType.STRING }; + ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, + SimpleType.STRING }; - COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES, - ITEM_DESCS, ITEM_TYPES); + COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES, ITEM_DESCS, ITEM_TYPES); - TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE, - ITEM_NAMES); + TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE, ITEM_NAMES); } catch (OpenDataException e) { throw Throwables.propagate(e); } } - public static void from(final String snapshot, final String ks, - final String cf, - Map.Entry> snapshotDetail, - TabularDataSupport result) { + public static void from(final String snapshot, final String ks, final String cf, + Map.Entry> snapshotDetail, TabularDataSupport result) { try { - final String totalSize = FileUtils.stringifyFileSize(snapshotDetail - .getValue().left); - final String liveSize = FileUtils.stringifyFileSize(snapshotDetail - .getValue().right); + final String totalSize = FileUtils.stringifyFileSize(snapshotDetail.getValue().left); + final String liveSize = FileUtils.stringifyFileSize(snapshotDetail.getValue().right); result.put(new CompositeDataSupport(COMPOSITE_TYPE, ITEM_NAMES, new Object[] { snapshot, ks, cf, liveSize, totalSize })); } catch (OpenDataException e) { @@ -77,8 +77,8 @@ public class SnapshotDetailsTabularData { } } - public static void from(final String snapshot, final String ks, - final String cf, long total, long live, TabularDataSupport result) { + public static void from(final String snapshot, final 
String ks, final String cf, long total, long live, + TabularDataSupport result) { try { final String totalSize = FileUtils.stringifyFileSize(total); final String liveSize = FileUtils.stringifyFileSize(live); diff --git a/src/main/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java b/src/main/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java index a74316e..355b733 100644 --- a/src/main/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java +++ b/src/main/java/org/apache/cassandra/db/ColumnFamilyStoreMBean.java @@ -27,8 +27,7 @@ import javax.management.openmbean.OpenDataException; /** * The MBean interface for ColumnFamilyStore */ -public interface ColumnFamilyStoreMBean -{ +public interface ColumnFamilyStoreMBean { /** * @return the name of the column family */ @@ -40,7 +39,9 @@ public interface ColumnFamilyStoreMBean /** * force a major compaction of this column family * - * @param splitOutput true if the output of the major compaction should be split in several sstables + * @param splitOutput + * true if the output of the major compaction should be split in + * several sstables */ public void forceMajorCompaction(boolean splitOutput) throws ExecutionException, InterruptedException; @@ -60,7 +61,8 @@ public interface ColumnFamilyStoreMBean public int getMaximumCompactionThreshold(); /** - * Sets the maximum and maximum number of SSTables in queue before compaction kicks off + * Sets the maximum and maximum number of SSTables in queue before + * compaction kicks off */ public void setCompactionThresholds(int minThreshold, int maxThreshold); @@ -72,33 +74,42 @@ public interface ColumnFamilyStoreMBean /** * Sets the compaction parameters locally for this node * - * Note that this will be set until an ALTER with compaction = {..} is executed or the node is restarted + * Note that this will be set until an ALTER with compaction = {..} is + * executed or the node is restarted * - * @param options compaction options with the same syntax as when doing ALTER 
... WITH compaction = {..} + * @param options + * compaction options with the same syntax as when doing ALTER + * ... WITH compaction = {..} */ public void setCompactionParametersJson(String options); + public String getCompactionParametersJson(); /** * Sets the compaction parameters locally for this node * - * Note that this will be set until an ALTER with compaction = {..} is executed or the node is restarted + * Note that this will be set until an ALTER with compaction = {..} is + * executed or the node is restarted * - * @param options compaction options map + * @param options + * compaction options map */ public void setCompactionParameters(Map options); + public Map getCompactionParameters(); /** * Get the compression parameters */ - public Map getCompressionParameters(); + public Map getCompressionParameters(); /** * Set the compression parameters - * @param opts map of string names to values + * + * @param opts + * map of string names to values */ - public void setCompressionParameters(Map opts); + public void setCompressionParameters(Map opts); /** * Set new crc check chance @@ -109,66 +120,74 @@ public interface ColumnFamilyStoreMBean public long estimateKeys(); - /** * Returns a list of the names of the built column indexes for current store + * * @return list of the index names */ public List getBuiltIndexes(); /** * Returns a list of filenames that contain the given key on this node + * * @param key * @return list of filenames containing the key */ public List getSSTablesForKey(String key); /** - * Scan through Keyspace/ColumnFamily's data directory - * determine which SSTables should be loaded and load them + * Scan through Keyspace/ColumnFamily's data directory determine which + * SSTables should be loaded and load them */ public void loadNewSSTables(); /** - * @return the number of SSTables in L0. Always return 0 if Leveled compaction is not enabled. + * @return the number of SSTables in L0. Always return 0 if Leveled + * compaction is not enabled. 
*/ public int getUnleveledSSTables(); /** - * @return sstable count for each level. null unless leveled compaction is used. - * array index corresponds to level(int[0] is for level 0, ...). + * @return sstable count for each level. null unless leveled compaction is + * used. array index corresponds to level(int[0] is for level 0, + * ...). */ public int[] getSSTableCountPerLevel(); /** - * Get the ratio of droppable tombstones to real columns (and non-droppable tombstones) + * Get the ratio of droppable tombstones to real columns (and non-droppable + * tombstones) + * * @return ratio */ public double getDroppableTombstoneRatio(); /** - * @return the size of SSTables in "snapshots" subdirectory which aren't live anymore + * @return the size of SSTables in "snapshots" subdirectory which aren't + * live anymore */ public long trueSnapshotsSize(); /** - * begin sampling for a specific sampler with a given capacity. The cardinality may - * be larger than the capacity, but depending on the use case it may affect its accuracy + * begin sampling for a specific sampler with a given capacity. 
The + * cardinality may be larger than the capacity, but depending on the use + * case it may affect its accuracy */ public void beginLocalSampling(String sampler, int capacity); /** - * @return top count items for the sampler since beginLocalSampling was called + * @return top count items for the sampler since beginLocalSampling + * was called */ public CompositeData finishLocalSampling(String sampler, int count) throws OpenDataException; /* - Is Compaction space check enabled + * Is Compaction space check enabled */ public boolean isCompactionDiskSpaceCheckEnabled(); /* - Enable/Disable compaction space check + * Enable/Disable compaction space check */ public void compactionDiskSpaceCheck(boolean enable); } diff --git a/src/main/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java b/src/main/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java index e0cfd3c..77ad028 100644 --- a/src/main/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java +++ b/src/main/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java @@ -17,14 +17,13 @@ */ package org.apache.cassandra.db.commitlog; - import java.io.IOException; import java.util.List; import java.util.Map; public interface CommitLogMBean { /** - * Command to execute to archive a commitlog segment. Blank to disabled. + * Command to execute to archive a commitlog segment. Blank to disabled. */ public String getArchiveCommand(); @@ -66,12 +65,14 @@ public interface CommitLogMBean { public List getActiveSegmentNames(); /** - * @return Files which are pending for archival attempt. Does NOT include failed archive attempts. + * @return Files which are pending for archival attempt. Does NOT include + * failed archive attempts. */ public List getArchivingSegmentNames(); /** - * @return The size of the mutations in all active commit log segments (uncompressed). + * @return The size of the mutations in all active commit log segments + * (uncompressed). 
*/ public long getActiveContentSize(); @@ -81,7 +82,8 @@ public interface CommitLogMBean { public long getActiveOnDiskSize(); /** - * @return A map between active log segments and the compression ratio achieved for each. + * @return A map between active log segments and the compression ratio + * achieved for each. */ public Map getActiveSegmentCompressionRatios(); } diff --git a/src/main/java/org/apache/cassandra/db/compaction/CompactionHistoryTabularData.java b/src/main/java/org/apache/cassandra/db/compaction/CompactionHistoryTabularData.java index ab3fa2b..07a27ee 100644 --- a/src/main/java/org/apache/cassandra/db/compaction/CompactionHistoryTabularData.java +++ b/src/main/java/org/apache/cassandra/db/compaction/CompactionHistoryTabularData.java @@ -36,13 +36,11 @@ import javax.management.openmbean.TabularType; import com.google.common.base.Throwables; public class CompactionHistoryTabularData { - private static final String[] ITEM_NAMES = new String[] { "id", - "keyspace_name", "columnfamily_name", "compacted_at", "bytes_in", - "bytes_out", "rows_merged" }; + private static final String[] ITEM_NAMES = new String[] { "id", "keyspace_name", "columnfamily_name", + "compacted_at", "bytes_in", "bytes_out", "rows_merged" }; - private static final String[] ITEM_DESCS = new String[] { "time uuid", - "keyspace name", "column family name", "compaction finished at", - "total bytes in", "total bytes out", "total rows merged" }; + private static final String[] ITEM_DESCS = new String[] { "time uuid", "keyspace name", "column family name", + "compaction finished at", "total bytes in", "total bytes out", "total rows merged" }; private static final String TYPE_NAME = "CompactionHistory"; @@ -56,22 +54,18 @@ public class CompactionHistoryTabularData { static { try { - ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, - SimpleType.STRING, SimpleType.LONG, SimpleType.LONG, - SimpleType.LONG, SimpleType.STRING }; + ITEM_TYPES = new OpenType[] { SimpleType.STRING, 
SimpleType.STRING, SimpleType.STRING, SimpleType.LONG, + SimpleType.LONG, SimpleType.LONG, SimpleType.STRING }; - COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES, - ITEM_DESCS, ITEM_TYPES); + COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES, ITEM_DESCS, ITEM_TYPES); - TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE, - ITEM_NAMES); + TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE, ITEM_NAMES); } catch (OpenDataException e) { throw Throwables.propagate(e); } } - public static TabularData from(JsonArray resultSet) - throws OpenDataException { + public static TabularData from(JsonArray resultSet) throws OpenDataException { TabularDataSupport result = new TabularDataSupport(TABULAR_TYPE); for (int i = 0; i < resultSet.size(); i++) { JsonObject row = resultSet.getJsonObject(i); @@ -91,15 +85,13 @@ public class CompactionHistoryTabularData { if (m > 0) { sb.append(','); } - sb.append(entry.getString("key")).append(':') - .append(entry.getString("value")); + sb.append(entry.getString("key")).append(':').append(entry.getString("value")); } sb.append('}'); } result.put(new CompositeDataSupport(COMPOSITE_TYPE, ITEM_NAMES, - new Object[] { id, ksName, cfName, compactedAt, bytesIn, - bytesOut, sb.toString() })); + new Object[] { id, ksName, cfName, compactedAt, bytesIn, bytesOut, sb.toString() })); } return result; } diff --git a/src/main/java/org/apache/cassandra/db/compaction/CompactionManagerMBean.java b/src/main/java/org/apache/cassandra/db/compaction/CompactionManagerMBean.java index f101245..3e0d650 100644 --- a/src/main/java/org/apache/cassandra/db/compaction/CompactionManagerMBean.java +++ b/src/main/java/org/apache/cassandra/db/compaction/CompactionManagerMBean.java @@ -19,10 +19,10 @@ package org.apache.cassandra.db.compaction; import java.util.List; import java.util.Map; + import javax.management.openmbean.TabularData; -public interface CompactionManagerMBean -{ +public interface 
CompactionManagerMBean { /** List of running compaction objects. */ public List> getCompactions(); @@ -45,7 +45,7 @@ public interface CompactionManagerMBean /** * Stop all running compaction-like tasks having the provided {@code type}. - * + * * @param type * the type of compaction to stop. Can be one of: - COMPACTION - * VALIDATION - CLEANUP - SCRUB - INDEX_BUILD @@ -54,9 +54,11 @@ public interface CompactionManagerMBean /** * Stop an individual running compaction using the compactionId. - * @param compactionId Compaction ID of compaction to stop. Such IDs can be found in - * the transaction log files whose name starts with compaction_, - * located in the table transactions folder. + * + * @param compactionId + * Compaction ID of compaction to stop. Such IDs can be found in + * the transaction log files whose name starts with compaction_, + * located in the table transactions folder. */ public void stopCompactionById(String compactionId); @@ -67,7 +69,7 @@ public interface CompactionManagerMBean /** * Allows user to resize maximum size of the compaction thread pool. - * + * * @param number * New maximum of compaction threads */ @@ -80,7 +82,7 @@ public interface CompactionManagerMBean /** * Allows user to resize maximum size of the compaction thread pool. - * + * * @param number * New maximum of compaction threads */ @@ -93,7 +95,7 @@ public interface CompactionManagerMBean /** * Allows user to resize maximum size of the compaction thread pool. - * + * * @param number * New maximum of compaction threads */ @@ -106,7 +108,7 @@ public interface CompactionManagerMBean /** * Allows user to resize maximum size of the validator thread pool. 
- * + * * @param number * New maximum of validator threads */ diff --git a/src/main/java/org/apache/cassandra/gms/ApplicationState.java b/src/main/java/org/apache/cassandra/gms/ApplicationState.java index 31958cf..cb9ecea 100644 --- a/src/main/java/org/apache/cassandra/gms/ApplicationState.java +++ b/src/main/java/org/apache/cassandra/gms/ApplicationState.java @@ -24,31 +24,12 @@ package org.apache.cassandra.gms; -public enum ApplicationState -{ - STATUS, - LOAD, - SCHEMA, - DC, - RACK, - RELEASE_VERSION, - REMOVAL_COORDINATOR, - INTERNAL_IP, - RPC_ADDRESS, - X_11_PADDING, // padding specifically for 1.1 - SEVERITY, - NET_VERSION, - HOST_ID, - TOKENS, +public enum ApplicationState { + STATUS, LOAD, SCHEMA, DC, RACK, RELEASE_VERSION, REMOVAL_COORDINATOR, INTERNAL_IP, RPC_ADDRESS, X_11_PADDING, // padding + // specifically + // for + // 1.1 + SEVERITY, NET_VERSION, HOST_ID, TOKENS, // pad to allow adding new states to existing cluster - X1, - X2, - X3, - X4, - X5, - X6, - X7, - X8, - X9, - X10, + X1, X2, X3, X4, X5, X6, X7, X8, X9, X10, } diff --git a/src/main/java/org/apache/cassandra/gms/EndpointState.java b/src/main/java/org/apache/cassandra/gms/EndpointState.java index c30eff2..d352910 100644 --- a/src/main/java/org/apache/cassandra/gms/EndpointState.java +++ b/src/main/java/org/apache/cassandra/gms/EndpointState.java @@ -42,6 +42,7 @@ public class EndpointState { ApplicationState[] applicationValues; private static final java.util.logging.Logger logger = java.util.logging.Logger .getLogger(EndpointState.class.getName()); + EndpointState(HeartBeatState initialHbState) { applicationValues = ApplicationState.values(); hbState = initialHbState; @@ -101,8 +102,8 @@ public class EndpointState { isAlive = alive; } + @Override public String toString() { - return "EndpointState: HeartBeatState = " + hbState + ", AppStateMap = " - + applicationState; + return "EndpointState: HeartBeatState = " + hbState + ", AppStateMap = " + applicationState; } } diff --git 
a/src/main/java/org/apache/cassandra/gms/FailureDetectorMBean.java b/src/main/java/org/apache/cassandra/gms/FailureDetectorMBean.java index 23fae3a..15ce293 100644 --- a/src/main/java/org/apache/cassandra/gms/FailureDetectorMBean.java +++ b/src/main/java/org/apache/cassandra/gms/FailureDetectorMBean.java @@ -23,8 +23,7 @@ import java.util.Map; import javax.management.openmbean.OpenDataException; import javax.management.openmbean.TabularData; -public interface FailureDetectorMBean -{ +public interface FailureDetectorMBean { public void dumpInterArrivalTimes(); public void setPhiConvictThreshold(double phi); diff --git a/src/main/java/org/apache/cassandra/gms/GossiperMBean.java b/src/main/java/org/apache/cassandra/gms/GossiperMBean.java index c4b244c..9f8e567 100644 --- a/src/main/java/org/apache/cassandra/gms/GossiperMBean.java +++ b/src/main/java/org/apache/cassandra/gms/GossiperMBean.java @@ -19,8 +19,7 @@ package org.apache.cassandra.gms; import java.net.UnknownHostException; -public interface GossiperMBean -{ +public interface GossiperMBean { public long getEndpointDowntime(String address) throws UnknownHostException; public int getCurrentGenerationNumber(String address) throws UnknownHostException; diff --git a/src/main/java/org/apache/cassandra/gms/HeartBeatState.java b/src/main/java/org/apache/cassandra/gms/HeartBeatState.java index 0af0ef6..be9efbd 100644 --- a/src/main/java/org/apache/cassandra/gms/HeartBeatState.java +++ b/src/main/java/org/apache/cassandra/gms/HeartBeatState.java @@ -58,8 +58,8 @@ class HeartBeatState { version = Integer.MAX_VALUE; } + @Override public String toString() { - return String.format("HeartBeat: generation = %d, version = %d", - generation, version); + return String.format("HeartBeat: generation = %d, version = %d", generation, version); } } diff --git a/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfoMBean.java b/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfoMBean.java index 6de5022..2f5bc5f 100644 
--- a/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfoMBean.java +++ b/src/main/java/org/apache/cassandra/locator/EndpointSnitchInfoMBean.java @@ -22,34 +22,40 @@ import java.net.UnknownHostException; /** * MBean exposing standard Snitch info */ -public interface EndpointSnitchInfoMBean -{ +public interface EndpointSnitchInfoMBean { /** - * Provides the Rack name depending on the respective snitch used, given the host name/ip + * Provides the Rack name depending on the respective snitch used, given the + * host name/ip + * * @param host * @throws UnknownHostException */ public String getRack(String host) throws UnknownHostException; /** - * Provides the Datacenter name depending on the respective snitch used, given the hostname/ip + * Provides the Datacenter name depending on the respective snitch used, + * given the hostname/ip + * * @param host * @throws UnknownHostException */ public String getDatacenter(String host) throws UnknownHostException; /** - * Provides the Rack name depending on the respective snitch used for this node + * Provides the Rack name depending on the respective snitch used for this + * node */ public String getRack(); /** - * Provides the Datacenter name depending on the respective snitch used for this node + * Provides the Datacenter name depending on the respective snitch used for + * this node */ public String getDatacenter(); /** * Provides the snitch name of the cluster + * * @return Snitch name */ public String getSnitchName(); diff --git a/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java b/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java index 5a508e0..be98145 100644 --- a/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java +++ b/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java @@ -24,8 +24,6 @@ package org.apache.cassandra.net; - - import java.net.UnknownHostException; import java.util.Map; @@ -133,6 +131,6 @@ public interface MessagingServiceMBean { * 
Number of timeouts since last check per host. */ public Map getRecentTimeoutsPerHost(); - + public int getVersion(String address) throws UnknownHostException; } diff --git a/src/main/java/org/apache/cassandra/service/CacheServiceMBean.java b/src/main/java/org/apache/cassandra/service/CacheServiceMBean.java index a28d2d1..bcb0cfb 100644 --- a/src/main/java/org/apache/cassandra/service/CacheServiceMBean.java +++ b/src/main/java/org/apache/cassandra/service/CacheServiceMBean.java @@ -22,30 +22,33 @@ * Modified by Cloudius Systems */ - - package org.apache.cassandra.service; import java.util.concurrent.ExecutionException; -public interface CacheServiceMBean -{ +public interface CacheServiceMBean { public int getRowCacheSavePeriodInSeconds(); + public void setRowCacheSavePeriodInSeconds(int rcspis); public int getKeyCacheSavePeriodInSeconds(); + public void setKeyCacheSavePeriodInSeconds(int kcspis); public int getCounterCacheSavePeriodInSeconds(); + public void setCounterCacheSavePeriodInSeconds(int ccspis); public int getRowCacheKeysToSave(); + public void setRowCacheKeysToSave(int rckts); public int getKeyCacheKeysToSave(); + public void setKeyCacheKeysToSave(int kckts); public int getCounterCacheKeysToSave(); + public void setCounterCacheKeysToSave(int cckts); /** @@ -69,8 +72,13 @@ public interface CacheServiceMBean /** * save row and key caches * - * @throws ExecutionException when attempting to retrieve the result of a task that aborted by throwing an exception - * @throws InterruptedException when a thread is waiting, sleeping, or otherwise occupied, and the thread is interrupted, either before or during the activity. + * @throws ExecutionException + * when attempting to retrieve the result of a task that aborted + * by throwing an exception + * @throws InterruptedException + * when a thread is waiting, sleeping, or otherwise occupied, + * and the thread is interrupted, either before or during the + * activity. 
*/ public void saveCaches() throws ExecutionException, InterruptedException; } diff --git a/src/main/java/org/apache/cassandra/service/GCInspectorMXBean.java b/src/main/java/org/apache/cassandra/service/GCInspectorMXBean.java index c26a67c..fae5724 100644 --- a/src/main/java/org/apache/cassandra/service/GCInspectorMXBean.java +++ b/src/main/java/org/apache/cassandra/service/GCInspectorMXBean.java @@ -18,8 +18,8 @@ */ package org.apache.cassandra.service; -public interface GCInspectorMXBean -{ - // returns { interval (ms), max(gc real time (ms)), sum(gc real time (ms)), sum((gc real time (ms))^2), sum(gc bytes), count(gc) } +public interface GCInspectorMXBean { + // returns { interval (ms), max(gc real time (ms)), sum(gc real time (ms)), + // sum((gc real time (ms))^2), sum(gc bytes), count(gc) } public double[] getAndResetStats(); } diff --git a/src/main/java/org/apache/cassandra/service/StorageProxyMBean.java b/src/main/java/org/apache/cassandra/service/StorageProxyMBean.java index fd086fa..404821c 100644 --- a/src/main/java/org/apache/cassandra/service/StorageProxyMBean.java +++ b/src/main/java/org/apache/cassandra/service/StorageProxyMBean.java @@ -83,8 +83,7 @@ public interface StorageProxyMBean { public void setTruncateRpcTimeout(Long timeoutInMillis); - public void setNativeTransportMaxConcurrentConnections( - Long nativeTransportMaxConcurrentConnections); + public void setNativeTransportMaxConcurrentConnections(Long nativeTransportMaxConcurrentConnections); public Long getNativeTransportMaxConcurrentConnections(); diff --git a/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java b/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java index 4234a88..f89ee91 100644 --- a/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java +++ b/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java @@ -132,8 +132,7 @@ public interface StorageServiceMBean extends NotificationEmitter { * * @return mapping of ranges to end 
points */ - public Map, List> getRangeToEndpointMap( - String keyspace); + public Map, List> getRangeToEndpointMap(String keyspace); /** * Retrieve a map of range to rpc addresses that describe the ring topology @@ -141,8 +140,7 @@ public interface StorageServiceMBean extends NotificationEmitter { * * @return mapping of ranges to rpc addresses */ - public Map, List> getRangeToRpcaddressMap( - String keyspace); + public Map, List> getRangeToRpcaddressMap(String keyspace); /** * The same as {@code describeRing(String)} but converts TokenRange to the @@ -164,8 +162,7 @@ public interface StorageServiceMBean extends NotificationEmitter { * the keyspace to get the pending range map for. * @return a map of pending ranges to endpoints */ - public Map, List> getPendingRangeToEndpointMap( - String keyspace); + public Map, List> getPendingRangeToEndpointMap(String keyspace); /** * Retrieve a map of tokens to endpoints, including the bootstrapping ones. @@ -211,11 +208,9 @@ public interface StorageServiceMBean extends NotificationEmitter { * - key for which we need to find the endpoint return value - * the endpoint responsible for this key */ - public List getNaturalEndpoints(String keyspaceName, String cf, - String key); + public List getNaturalEndpoints(String keyspaceName, String cf, String key); - public List getNaturalEndpoints(String keyspaceName, - ByteBuffer key); + public List getNaturalEndpoints(String keyspaceName, ByteBuffer key); /** * Takes the snapshot for the given keyspaces. A snapshot name must be @@ -226,8 +221,7 @@ public interface StorageServiceMBean extends NotificationEmitter { * @param keyspaceNames * the name of the keyspaces to snapshot; empty means "all." */ - public void takeSnapshot(String tag, String... keyspaceNames) - throws IOException; + public void takeSnapshot(String tag, String... keyspaceNames) throws IOException; /** * Takes the snapshot of a specific column family. A snapshot name must be specified. 
@@ -251,8 +245,7 @@ public interface StorageServiceMBean extends NotificationEmitter { * @param tag * the tag given to the snapshot; may not be null or empty */ - public void takeColumnFamilySnapshot(String keyspaceName, - String columnFamilyName, String tag) throws IOException; + public void takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException; /** * Takes the snapshot of a multiple column family from different keyspaces. @@ -264,15 +257,13 @@ public interface StorageServiceMBean extends NotificationEmitter { * list of columnfamily from different keyspace in the form of * ks1.cf1 ks2.cf2 */ - public void takeMultipleColumnFamilySnapshot(String tag, - String... columnFamilyList) throws IOException; + public void takeMultipleColumnFamilySnapshot(String tag, String... columnFamilyList) throws IOException; /** * Remove the snapshot with the given name from the given keyspaces. If no * tag is specified we will remove all snapshots. */ - public void clearSnapshot(String tag, String... keyspaceNames) - throws IOException; + public void clearSnapshot(String tag, String... keyspaceNames) throws IOException; /** * Get the details of all the snapshot @@ -289,21 +280,26 @@ public interface StorageServiceMBean extends NotificationEmitter { public long trueSnapshotsSize(); /** - * Forces refresh of values stored in system.size_estimates of all column families. + * Forces refresh of values stored in system.size_estimates of all column + * families. */ public void refreshSizeEstimates() throws ExecutionException; /** * Forces major compaction of a single keyspace */ - public void forceKeyspaceCompaction(boolean splitOutput, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException; + public void forceKeyspaceCompaction(boolean splitOutput, String keyspaceName, String... 
tableNames) + throws IOException, ExecutionException, InterruptedException; /** * Trigger a cleanup of keys on a single keyspace */ @Deprecated - public int forceKeyspaceCleanup(String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException; - public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables) throws IOException, ExecutionException, InterruptedException; + public int forceKeyspaceCleanup(String keyspaceName, String... tables) + throws IOException, ExecutionException, InterruptedException; + + public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables) + throws IOException, ExecutionException, InterruptedException; /** * Scrub (deserialize + reserialize at the latest version, skipping bad rows @@ -313,26 +309,36 @@ public interface StorageServiceMBean extends NotificationEmitter { * Scrubbed CFs will be snapshotted first, if disableSnapshot is false */ @Deprecated - public int scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException; + public int scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... tableNames) + throws IOException, ExecutionException, InterruptedException; + @Deprecated - public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException; - public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, int jobs, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException; + public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, String keyspaceName, + String... 
tableNames) throws IOException, ExecutionException, InterruptedException; + + public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, int jobs, String keyspaceName, + String... columnFamilies) throws IOException, ExecutionException, InterruptedException; /** - * Verify (checksums of) the given keyspace. - * If tableNames array is empty, all CFs are verified. + * Verify (checksums of) the given keyspace. If tableNames array is empty, + * all CFs are verified. * - * The entire sstable will be read to ensure each cell validates if extendedVerify is true + * The entire sstable will be read to ensure each cell validates if + * extendedVerify is true */ - public int verify(boolean extendedVerify, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException; + public int verify(boolean extendedVerify, String keyspaceName, String... tableNames) + throws IOException, ExecutionException, InterruptedException; /** * Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip * bad rows and do not snapshot sstables first. */ @Deprecated - public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... tableNames) throws IOException, ExecutionException, InterruptedException; - public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames) throws IOException, ExecutionException, InterruptedException; + public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... tableNames) + throws IOException, ExecutionException, InterruptedException; + + public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... 
tableNames) + throws IOException, ExecutionException, InterruptedException; /** * Flush all memtables for the given column families, or all columnfamilies @@ -342,71 +348,86 @@ public interface StorageServiceMBean extends NotificationEmitter { * @param columnFamilies * @throws IOException */ - public void forceKeyspaceFlush(String keyspaceName, - String... columnFamilies) throws IOException, ExecutionException, - InterruptedException; + public void forceKeyspaceFlush(String keyspaceName, String... columnFamilies) + throws IOException, ExecutionException, InterruptedException; /** - * Invoke repair asynchronously. - * You can track repair progress by subscribing JMX notification sent from this StorageServiceMBean. - * Notification format is: - * type: "repair" - * userObject: int array of length 2, [0]=command number, [1]=ordinal of ActiveRepairService.Status + * Invoke repair asynchronously. You can track repair progress by + * subscribing JMX notification sent from this StorageServiceMBean. + * Notification format is: type: "repair" userObject: int array of length 2, + * [0]=command number, [1]=ordinal of ActiveRepairService.Status * - * @param keyspace Keyspace name to repair. Should not be null. - * @param options repair option. + * @param keyspace + * Keyspace name to repair. Should not be null. + * @param options + * repair option. * @return Repair command number, or 0 if nothing to repair */ public int repairAsync(String keyspace, Map options); /** - * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. + * @deprecated use {@link #repairAsync(String keyspace, Map options)} + * instead. */ @Deprecated - public int forceRepairAsync(String keyspace, boolean isSequential, Collection dataCenters, Collection hosts, boolean primaryRange, boolean fullRepair, String... 
tableNames) throws IOException; + public int forceRepairAsync(String keyspace, boolean isSequential, Collection dataCenters, + Collection hosts, boolean primaryRange, boolean fullRepair, String... tableNames) + throws IOException; /** - * Invoke repair asynchronously. - * You can track repair progress by subscribing JMX notification sent from this StorageServiceMBean. - * Notification format is: - * type: "repair" - * userObject: int array of length 2, [0]=command number, [1]=ordinal of ActiveRepairService.Status + * Invoke repair asynchronously. You can track repair progress by + * subscribing JMX notification sent from this StorageServiceMBean. + * Notification format is: type: "repair" userObject: int array of length 2, + * [0]=command number, [1]=ordinal of ActiveRepairService.Status * - * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. + * @deprecated use {@link #repairAsync(String keyspace, Map options)} + * instead. * - * @param parallelismDegree 0: sequential, 1: parallel, 2: DC parallel + * @param parallelismDegree + * 0: sequential, 1: parallel, 2: DC parallel * @return Repair command number, or 0 if nothing to repair */ @Deprecated - public int forceRepairAsync(String keyspace, int parallelismDegree, Collection dataCenters, Collection hosts, boolean primaryRange, boolean fullRepair, String... tableNames); + public int forceRepairAsync(String keyspace, int parallelismDegree, Collection dataCenters, + Collection hosts, boolean primaryRange, boolean fullRepair, String... tableNames); /** - * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. + * @deprecated use {@link #repairAsync(String keyspace, Map options)} + * instead. */ @Deprecated - public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection dataCenters, Collection hosts, boolean fullRepair, String... 
tableNames) throws IOException; + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, + Collection dataCenters, Collection hosts, boolean fullRepair, String... tableNames) + throws IOException; /** * Same as forceRepairAsync, but handles a specified range * - * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. + * @deprecated use {@link #repairAsync(String keyspace, Map options)} + * instead. * - * @param parallelismDegree 0: sequential, 1: parallel, 2: DC parallel + * @param parallelismDegree + * 0: sequential, 1: parallel, 2: DC parallel */ @Deprecated - public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, int parallelismDegree, Collection dataCenters, Collection hosts, boolean fullRepair, String... tableNames); + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, int parallelismDegree, + Collection dataCenters, Collection hosts, boolean fullRepair, String... tableNames); /** - * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. + * @deprecated use {@link #repairAsync(String keyspace, Map options)} + * instead. */ @Deprecated - public int forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, String... tableNames); + public int forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, + boolean fullRepair, String... tableNames); /** - * @deprecated use {@link #repairAsync(String keyspace, Map options)} instead. + * @deprecated use {@link #repairAsync(String keyspace, Map options)} + * instead. */ @Deprecated - public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, boolean isLocal, boolean fullRepair, String... 
tableNames); + public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, + boolean isLocal, boolean fullRepair, String... tableNames); public void forceTerminateAllRepairSessions(); @@ -457,8 +478,7 @@ public interface StorageServiceMBean extends NotificationEmitter { * * @see ch.qos.logback.classic.Level#toLevel(String) */ - public void setLoggingLevel(String classQualifier, String level) - throws Exception; + public void setLoggingLevel(String classQualifier, String level) throws Exception; /** get the runtime logging levels */ public Map getLoggingLevels(); @@ -479,8 +499,7 @@ public interface StorageServiceMBean extends NotificationEmitter { * makes node unavailable for writes, flushes memtables and replays * commitlog. */ - public void drain() - throws IOException, InterruptedException, ExecutionException; + public void drain() throws IOException, InterruptedException, ExecutionException; /** * Truncates (deletes) the given columnFamily from the provided keyspace. @@ -494,8 +513,7 @@ public interface StorageServiceMBean extends NotificationEmitter { * @param columnFamily * The column family to delete data from. */ - public void truncate(String keyspace, String columnFamily) - throws TimeoutException, IOException; + public void truncate(String keyspace, String columnFamily) throws TimeoutException, IOException; /** * given a list of tokens (representing the nodes in the cluster), returns a @@ -510,8 +528,7 @@ public interface StorageServiceMBean extends NotificationEmitter { * the same replication strategies and if yes then we will use the first * else a empty Map is returned. 
*/ - public Map effectiveOwnership(String keyspace) - throws IllegalStateException; + public Map effectiveOwnership(String keyspace) throws IllegalStateException; public List getKeyspaces(); @@ -535,9 +552,8 @@ public interface StorageServiceMBean extends NotificationEmitter { * @param dynamicBadnessThreshold * double, (default 0.0) */ - public void updateSnitch(String epSnitchClassName, Boolean dynamic, - Integer dynamicUpdateInterval, Integer dynamicResetInterval, - Double dynamicBadnessThreshold) throws ClassNotFoundException; + public void updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, + Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException; // allows a user to forcibly 'kill' a sick node public void stopGossiping(); @@ -579,6 +595,7 @@ public interface StorageServiceMBean extends NotificationEmitter { public int getStreamThroughputMbPerSec(); public void setInterDCStreamThroughputMbPerSec(int value); + public int getInterDCStreamThroughputMbPerSec(); public void setCompactionThroughputMbPerSec(int value); @@ -635,8 +652,7 @@ public interface StorageServiceMBean extends NotificationEmitter { /** * rebuild the specified indexes */ - public void rebuildSecondaryIndex(String ksName, String cfName, - String... idxNames); + public void rebuildSecondaryIndex(String ksName, String cfName, String... idxNames); public void resetLocalSchema() throws IOException; @@ -657,11 +673,9 @@ public interface StorageServiceMBean extends NotificationEmitter { */ public double getTraceProbability(); - void disableAutoCompaction(String ks, String... columnFamilies) - throws IOException; + void disableAutoCompaction(String ks, String... columnFamilies) throws IOException; - void enableAutoCompaction(String ks, String... columnFamilies) - throws IOException; + void enableAutoCompaction(String ks, String... 
columnFamilies) throws IOException; public void deliverHints(String host) throws UnknownHostException; @@ -685,10 +699,13 @@ public interface StorageServiceMBean extends NotificationEmitter { /** Returns the threshold for rejecting queries due to a large batch size */ public int getBatchSizeFailureThreshold(); + /** Sets the threshold for rejecting queries due to a large batch size */ public void setBatchSizeFailureThreshold(int batchSizeDebugThreshold); - /** Sets the hinted handoff throttle in kb per second, per delivery thread. */ + /** + * Sets the hinted handoff throttle in kb per second, per delivery thread. + */ public void setHintedHandoffThrottleInKB(int throttleInKB); /** diff --git a/src/main/java/org/apache/cassandra/streaming/ProgressInfo.java b/src/main/java/org/apache/cassandra/streaming/ProgressInfo.java index 8b6f99d..cdb52ea 100644 --- a/src/main/java/org/apache/cassandra/streaming/ProgressInfo.java +++ b/src/main/java/org/apache/cassandra/streaming/ProgressInfo.java @@ -29,6 +29,7 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.util.HashMap; import java.util.Map; + import javax.json.JsonArray; import javax.json.JsonObject; @@ -37,25 +38,21 @@ import com.google.common.base.Objects; /** * ProgressInfo contains file transfer progress. */ -public class ProgressInfo implements Serializable -{ +@SuppressWarnings("serial") +public class ProgressInfo implements Serializable { /** * Direction of the stream. */ - public static enum Direction - { - OUT(0), - IN(1); + public static enum Direction { + OUT(0), IN(1); public final byte code; - private Direction(int code) - { + private Direction(int code) { this.code = (byte) code; } - public static Direction fromByte(byte direction) - { + public static Direction fromByte(byte direction) { return direction == 0 ? 
OUT : IN; } } @@ -67,8 +64,8 @@ public class ProgressInfo implements Serializable public final long currentBytes; public final long totalBytes; - public ProgressInfo(InetAddress peer, int sessionIndex, String fileName, Direction direction, long currentBytes, long totalBytes) - { + public ProgressInfo(InetAddress peer, int sessionIndex, String fileName, Direction direction, long currentBytes, + long totalBytes) { assert totalBytes > 0; this.peer = peer; @@ -81,12 +78,9 @@ public class ProgressInfo implements Serializable static public ProgressInfo fromJsonObject(JsonObject obj) { try { - return new ProgressInfo(InetAddress.getByName(obj.getString("peer")), - obj.getInt("session_index"), - obj.getString("file_name"), - Direction.valueOf(obj.getString("direction")), - obj.getJsonNumber("current_bytes").longValue(), - obj.getJsonNumber("total_bytes").longValue()); + return new ProgressInfo(InetAddress.getByName(obj.getString("peer")), obj.getInt("session_index"), + obj.getString("file_name"), Direction.valueOf(obj.getString("direction")), + obj.getJsonNumber("current_bytes").longValue(), obj.getJsonNumber("total_bytes").longValue()); } catch (UnknownHostException e) { // Not suppose to get here } @@ -104,45 +98,55 @@ public class ProgressInfo implements Serializable } return res; } + /** * @return true if file transfer is completed */ - public boolean isCompleted() - { + public boolean isCompleted() { return currentBytes >= totalBytes; } /** - * ProgressInfo is considered to be equal only when all attributes except currentBytes are equal. + * ProgressInfo is considered to be equal only when all attributes except + * currentBytes are equal. 
*/ @Override - public boolean equals(Object o) - { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } ProgressInfo that = (ProgressInfo) o; - if (totalBytes != that.totalBytes) return false; - if (direction != that.direction) return false; - if (!fileName.equals(that.fileName)) return false; - if (sessionIndex != that.sessionIndex) return false; + if (totalBytes != that.totalBytes) { + return false; + } + if (direction != that.direction) { + return false; + } + if (!fileName.equals(that.fileName)) { + return false; + } + if (sessionIndex != that.sessionIndex) { + return false; + } return peer.equals(that.peer); } @Override - public int hashCode() - { + public int hashCode() { return Objects.hashCode(peer, sessionIndex, fileName, direction, totalBytes); } @Override - public String toString() - { + public String toString() { StringBuilder sb = new StringBuilder(fileName); sb.append(" ").append(currentBytes); sb.append("/").append(totalBytes).append(" bytes"); - sb.append("(").append(currentBytes*100/totalBytes).append("%) "); + sb.append("(").append(currentBytes * 100 / totalBytes).append("%) "); sb.append(direction == Direction.OUT ? 
"sent to " : "received from "); sb.append("idx:").append(sessionIndex); sb.append(peer); diff --git a/src/main/java/org/apache/cassandra/streaming/SessionInfo.java b/src/main/java/org/apache/cassandra/streaming/SessionInfo.java index 6d44484..9222b3d 100644 --- a/src/main/java/org/apache/cassandra/streaming/SessionInfo.java +++ b/src/main/java/org/apache/cassandra/streaming/SessionInfo.java @@ -28,30 +28,26 @@ import java.io.Serializable; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Collection; -import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; import javax.json.JsonArray; import javax.json.JsonObject; -import com.google.common.base.Predicate; -import com.google.common.collect.ImmutableSet; import com.google.common.collect.Iterables; /** * Stream session info. */ -public final class SessionInfo implements Serializable -{ +@SuppressWarnings("serial") +public final class SessionInfo implements Serializable { public final InetAddress peer; public final int sessionIndex; public final InetAddress connecting; /** Immutable collection of receiving summaries */ public final Collection receivingSummaries; - /** Immutable collection of sending summaries*/ + /** Immutable collection of sending summaries */ public final Collection sendingSummaries; /** Current session state */ public final StreamSession.State state; @@ -67,15 +63,10 @@ public final class SessionInfo implements Serializable return null; } - - public SessionInfo(InetAddress peer, - int sessionIndex, - InetAddress connecting, - Collection receivingSummaries, - Collection sendingSummaries, - StreamSession.State state, - Map receivingFiles, - Map sendingFiles) { + public SessionInfo(InetAddress peer, int sessionIndex, InetAddress connecting, + Collection receivingSummaries, Collection sendingSummaries, + StreamSession.State state, Map receivingFiles, + Map sendingFiles) { this.peer = 
peer; this.sessionIndex = sessionIndex; this.connecting = connecting; @@ -86,24 +77,19 @@ public final class SessionInfo implements Serializable this.state = state; } - public SessionInfo(String peer, - int sessionIndex, - String connecting, - Collection receivingSummaries, - Collection sendingSummaries, - String state, - Map receivingFiles, + public SessionInfo(String peer, int sessionIndex, String connecting, Collection receivingSummaries, + Collection sendingSummaries, String state, Map receivingFiles, Map sendingFiles) { this(address(peer), sessionIndex, address(connecting), receivingSummaries, sendingSummaries, StreamSession.State.valueOf(state), receivingFiles, sendingFiles); } + ProgressInfo in; + public static SessionInfo fromJsonObject(JsonObject obj) { - return new SessionInfo(obj.getString("peer"), obj.getInt("session_index"), - obj.getString("connecting"), + return new SessionInfo(obj.getString("peer"), obj.getInt("session_index"), obj.getString("connecting"), StreamSummary.fromJsonArr(obj.getJsonArray("receiving_summaries")), - StreamSummary.fromJsonArr(obj.getJsonArray("sending_summaries")), - obj.getString("state"), + StreamSummary.fromJsonArr(obj.getJsonArray("sending_summaries")), obj.getString("state"), ProgressInfo.fromJArrray(obj.getJsonArray("receiving_files")), ProgressInfo.fromJArrray(obj.getJsonArray("sending_files"))); } @@ -118,135 +104,117 @@ public final class SessionInfo implements Serializable return res; } - public boolean isFailed() - { + public boolean isFailed() { return state == StreamSession.State.FAILED; } /** * Update progress of receiving/sending file. * - * @param newProgress new progress info + * @param newProgress + * new progress info */ - public void updateProgress(ProgressInfo newProgress) - { + public void updateProgress(ProgressInfo newProgress) { assert peer.equals(newProgress.peer); - Map currentFiles = newProgress.direction == ProgressInfo.Direction.IN - ? 
receivingFiles : sendingFiles; + Map currentFiles = newProgress.direction == ProgressInfo.Direction.IN ? receivingFiles + : sendingFiles; currentFiles.put(newProgress.fileName, newProgress); } - public Collection getReceivingFiles() - { + public Collection getReceivingFiles() { return receivingFiles.values(); } - public Collection getSendingFiles() - { + public Collection getSendingFiles() { return sendingFiles.values(); } /** * @return total number of files already received. */ - public long getTotalFilesReceived() - { + public long getTotalFilesReceived() { return getTotalFilesCompleted(receivingFiles.values()); } /** * @return total number of files already sent. */ - public long getTotalFilesSent() - { + public long getTotalFilesSent() { return getTotalFilesCompleted(sendingFiles.values()); } /** * @return total size(in bytes) already received. */ - public long getTotalSizeReceived() - { + public long getTotalSizeReceived() { return getTotalSizeInProgress(receivingFiles.values()); } /** * @return total size(in bytes) already sent. 
*/ - public long getTotalSizeSent() - { + public long getTotalSizeSent() { return getTotalSizeInProgress(sendingFiles.values()); } /** * @return total number of files to receive in the session */ - public long getTotalFilesToReceive() - { + public long getTotalFilesToReceive() { return getTotalFiles(receivingSummaries); } /** * @return total number of files to send in the session */ - public long getTotalFilesToSend() - { + public long getTotalFilesToSend() { return getTotalFiles(sendingSummaries); } /** * @return total size(in bytes) to receive in the session */ - public long getTotalSizeToReceive() - { + public long getTotalSizeToReceive() { return getTotalSizes(receivingSummaries); } /** * @return total size(in bytes) to send in the session */ - public long getTotalSizeToSend() - { + public long getTotalSizeToSend() { return getTotalSizes(sendingSummaries); } - private long getTotalSizeInProgress(Collection files) - { + private long getTotalSizeInProgress(Collection files) { long total = 0; - for (ProgressInfo file : files) + for (ProgressInfo file : files) { total += file.currentBytes; + } return total; } - private long getTotalFiles(Collection summaries) - { + private long getTotalFiles(Collection summaries) { long total = 0; - for (StreamSummary summary : summaries) + for (StreamSummary summary : summaries) { total += summary.files; + } return total; } - private long getTotalSizes(Collection summaries) - { + private long getTotalSizes(Collection summaries) { if (summaries == null) { return 0; } long total = 0; - for (StreamSummary summary : summaries) + for (StreamSummary summary : summaries) { total += summary.totalSize; + } return total; } - private long getTotalFilesCompleted(Collection files) - { - Iterable completed = Iterables.filter(files, new Predicate() - { - public boolean apply(ProgressInfo input) - { - return input.isCompleted(); - } - }); + private long getTotalFilesCompleted(Collection files) { + Iterable completed = Iterables.filter(files, 
input -> input.isCompleted()); return Iterables.size(completed); } } diff --git a/src/main/java/org/apache/cassandra/streaming/StreamManagerMBean.java b/src/main/java/org/apache/cassandra/streaming/StreamManagerMBean.java index 28b25db..17965d9 100644 --- a/src/main/java/org/apache/cassandra/streaming/StreamManagerMBean.java +++ b/src/main/java/org/apache/cassandra/streaming/StreamManagerMBean.java @@ -25,6 +25,7 @@ package org.apache.cassandra.streaming; import java.util.Set; + import javax.management.NotificationEmitter; import javax.management.openmbean.CompositeData; diff --git a/src/main/java/org/apache/cassandra/streaming/StreamSession.java b/src/main/java/org/apache/cassandra/streaming/StreamSession.java index 7646dc6..90d2b2a 100644 --- a/src/main/java/org/apache/cassandra/streaming/StreamSession.java +++ b/src/main/java/org/apache/cassandra/streaming/StreamSession.java @@ -25,81 +25,80 @@ package org.apache.cassandra.streaming; /** - * Handles the streaming a one or more section of one of more sstables to and from a specific - * remote node. + * Handles the streaming a one or more section of one of more sstables to and + * from a specific remote node. * - * Both this node and the remote one will create a similar symmetrical StreamSession. A streaming - * session has the following life-cycle: + * Both this node and the remote one will create a similar symmetrical + * StreamSession. A streaming session has the following life-cycle: * * 1. Connections Initialization * - * (a) A node (the initiator in the following) create a new StreamSession, initialize it (init()) - * and then start it (start()). Start will create a {@link ConnectionHandler} that will create - * two connections to the remote node (the follower in the following) with whom to stream and send - * a StreamInit message. The first connection will be the incoming connection for the - * initiator, and the second connection will be the outgoing. 
- * (b) Upon reception of that StreamInit message, the follower creates its own StreamSession, - * initialize it if it still does not exist, and attach connecting socket to its ConnectionHandler - * according to StreamInit message's isForOutgoing flag. - * (d) When the both incoming and outgoing connections are established, StreamSession calls - * StreamSession#onInitializationComplete method to start the streaming prepare phase - * (StreamResultFuture.startStreaming()). + * (a) A node (the initiator in the following) create a new StreamSession, + * initialize it (init()) and then start it (start()). Start will create a + * {@link ConnectionHandler} that will create two connections to the remote node + * (the follower in the following) with whom to stream and send a StreamInit + * message. The first connection will be the incoming connection for the + * initiator, and the second connection will be the outgoing. (b) Upon reception + * of that StreamInit message, the follower creates its own StreamSession, + * initialize it if it still does not exist, and attach connecting socket to its + * ConnectionHandler according to StreamInit message's isForOutgoing flag. (d) + * When the both incoming and outgoing connections are established, + * StreamSession calls StreamSession#onInitializationComplete method to start + * the streaming prepare phase (StreamResultFuture.startStreaming()). * * 2. Streaming preparation phase * - * (a) This phase is started when the initiator onInitializationComplete() method is called. This method sends a - * PrepareMessage that includes what files/sections this node will stream to the follower - * (stored in a StreamTransferTask, each column family has it's own transfer task) and what - * the follower needs to stream back (StreamReceiveTask, same as above). If the initiator has - * nothing to receive from the follower, it goes directly to its Streaming phase. Otherwise, - * it waits for the follower PrepareMessage. 
- * (b) Upon reception of the PrepareMessage, the follower records which files/sections it will receive - * and send back its own PrepareMessage with a summary of the files/sections that will be sent to - * the initiator (prepare()). After having sent that message, the follower goes to its Streamning - * phase. - * (c) When the initiator receives the follower PrepareMessage, it records which files/sections it will - * receive and then goes to his own Streaming phase. + * (a) This phase is started when the initiator onInitializationComplete() + * method is called. This method sends a PrepareMessage that includes what + * files/sections this node will stream to the follower (stored in a + * StreamTransferTask, each column family has it's own transfer task) and what + * the follower needs to stream back (StreamReceiveTask, same as above). If the + * initiator has nothing to receive from the follower, it goes directly to its + * Streaming phase. Otherwise, it waits for the follower PrepareMessage. (b) + * Upon reception of the PrepareMessage, the follower records which + * files/sections it will receive and send back its own PrepareMessage with a + * summary of the files/sections that will be sent to the initiator (prepare()). + * After having sent that message, the follower goes to its Streamning phase. + * (c) When the initiator receives the follower PrepareMessage, it records which + * files/sections it will receive and then goes to his own Streaming phase. * * 3. Streaming phase * - * (a) The streaming phase is started by each node (the sender in the follower, but note that each side - * of the StreamSession may be sender for some of the files) involved by calling startStreamingFiles(). - * This will sequentially send a FileMessage for each file of each SteamTransferTask. Each FileMessage - * consists of a FileMessageHeader that indicates which file is coming and then start streaming the - * content for that file (StreamWriter in FileMessage.serialize()). 
When a file is fully sent, the - * fileSent() method is called for that file. If all the files for a StreamTransferTask are sent - * (StreamTransferTask.complete()), the task is marked complete (taskCompleted()). - * (b) On the receiving side, a SSTable will be written for the incoming file (StreamReader in - * FileMessage.deserialize()) and once the FileMessage is fully received, the file will be marked as - * complete (received()). When all files for the StreamReceiveTask have been received, the sstables - * are added to the CFS (and 2ndary index are built, StreamReceiveTask.complete()) and the task - * is marked complete (taskCompleted()) - * (b) If during the streaming of a particular file an I/O error occurs on the receiving end of a stream - * (FileMessage.deserialize), the node will retry the file (up to DatabaseDescriptor.getMaxStreamingRetries()) - * by sending a RetryMessage to the sender. On receiving a RetryMessage, the sender simply issue a new - * FileMessage for that file. - * (c) When all transfer and receive tasks for a session are complete, the move to the Completion phase - * (maybeCompleted()). + * (a) The streaming phase is started by each node (the sender in the follower, + * but note that each side of the StreamSession may be sender for some of the + * files) involved by calling startStreamingFiles(). This will sequentially send + * a FileMessage for each file of each SteamTransferTask. Each FileMessage + * consists of a FileMessageHeader that indicates which file is coming and then + * start streaming the content for that file (StreamWriter in + * FileMessage.serialize()). When a file is fully sent, the fileSent() method is + * called for that file. If all the files for a StreamTransferTask are sent + * (StreamTransferTask.complete()), the task is marked complete + * (taskCompleted()). 
(b) On the receiving side, a SSTable will be written for + * the incoming file (StreamReader in FileMessage.deserialize()) and once the + * FileMessage is fully received, the file will be marked as complete + * (received()). When all files for the StreamReceiveTask have been received, + * the sstables are added to the CFS (and 2ndary index are built, + * StreamReceiveTask.complete()) and the task is marked complete + * (taskCompleted()) (b) If during the streaming of a particular file an I/O + * error occurs on the receiving end of a stream (FileMessage.deserialize), the + * node will retry the file (up to DatabaseDescriptor.getMaxStreamingRetries()) + * by sending a RetryMessage to the sender. On receiving a RetryMessage, the + * sender simply issue a new FileMessage for that file. (c) When all transfer + * and receive tasks for a session are complete, the move to the Completion + * phase (maybeCompleted()). * * 4. Completion phase * - * (a) When a node has finished all transfer and receive task, it enter the completion phase (maybeCompleted()). - * If it had already received a CompleteMessage from the other side (it is in the WAIT_COMPLETE state), that - * session is done is is closed (closeSession()). Otherwise, the node switch to the WAIT_COMPLETE state and - * send a CompleteMessage to the other side. + * (a) When a node has finished all transfer and receive task, it enter the + * completion phase (maybeCompleted()). If it had already received a + * CompleteMessage from the other side (it is in the WAIT_COMPLETE state), that + * session is done is is closed (closeSession()). Otherwise, the node switch to + * the WAIT_COMPLETE state and send a CompleteMessage to the other side. 
*/ -public class StreamSession -{ +public class StreamSession { - public static enum State - { - INITIALIZED, - PREPARING, - STREAMING, - WAIT_COMPLETE, - COMPLETE, - FAILED, + public static enum State { + INITIALIZED, PREPARING, STREAMING, WAIT_COMPLETE, COMPLETE, FAILED, } - } diff --git a/src/main/java/org/apache/cassandra/streaming/StreamState.java b/src/main/java/org/apache/cassandra/streaming/StreamState.java index fbaa3fb..aa09143 100644 --- a/src/main/java/org/apache/cassandra/streaming/StreamState.java +++ b/src/main/java/org/apache/cassandra/streaming/StreamState.java @@ -28,14 +28,12 @@ import java.io.Serializable; import java.util.Set; import java.util.UUID; -import com.google.common.base.Predicate; import com.google.common.collect.Iterables; /** * Current snapshot of streaming progress. */ -public class StreamState implements Serializable -{ +public class StreamState implements Serializable { /** * */ @@ -49,19 +47,12 @@ public class StreamState implements Serializable this.description = description; this.sessions = sessions; } - public StreamState(String planId, String description, Set sessions) - { + + public StreamState(String planId, String description, Set sessions) { this(UUID.fromString(planId), description, sessions); } - public boolean hasFailedSession() - { - return Iterables.any(sessions, new Predicate() - { - public boolean apply(SessionInfo session) - { - return session.isFailed(); - } - }); + public boolean hasFailedSession() { + return Iterables.any(sessions, session -> session.isFailed()); } } diff --git a/src/main/java/org/apache/cassandra/streaming/StreamSummary.java b/src/main/java/org/apache/cassandra/streaming/StreamSummary.java index dbda53e..2e90810 100644 --- a/src/main/java/org/apache/cassandra/streaming/StreamSummary.java +++ b/src/main/java/org/apache/cassandra/streaming/StreamSummary.java @@ -36,18 +36,17 @@ import com.google.common.base.Objects; /** * Summary of streaming. 
*/ -public class StreamSummary -{ +public class StreamSummary { public final UUID cfId; /** - * Number of files to transfer. Can be 0 if nothing to transfer for some streaming request. + * Number of files to transfer. Can be 0 if nothing to transfer for some + * streaming request. */ public final int files; public final long totalSize; - public StreamSummary(UUID cfId, int files, long totalSize) - { + public StreamSummary(UUID cfId, int files, long totalSize) { this.cfId = cfId; this.files = files; this.totalSize = totalSize; @@ -58,7 +57,8 @@ public class StreamSummary } public static StreamSummary fromJsonObject(JsonObject obj) { - return new StreamSummary(obj.getString("cf_id"), obj.getInt("files"), obj.getJsonNumber("total_size").longValue()); + return new StreamSummary(obj.getString("cf_id"), obj.getInt("files"), + obj.getJsonNumber("total_size").longValue()); } public static Collection fromJsonArr(JsonArray arr) { @@ -71,24 +71,26 @@ public class StreamSummary } return res; } + @Override - public boolean equals(Object o) - { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } StreamSummary summary = (StreamSummary) o; return files == summary.files && totalSize == summary.totalSize && cfId.equals(summary.cfId); } @Override - public int hashCode() - { + public int hashCode() { return Objects.hashCode(cfId, files, totalSize); } @Override - public String toString() - { + public String toString() { final StringBuilder sb = new StringBuilder("StreamSummary{"); sb.append("path=").append(cfId); sb.append(", files=").append(files); diff --git a/src/main/java/org/apache/cassandra/streaming/management/ProgressInfoCompositeData.java b/src/main/java/org/apache/cassandra/streaming/management/ProgressInfoCompositeData.java index b722c13..1642e1c 100644 --- 
a/src/main/java/org/apache/cassandra/streaming/management/ProgressInfoCompositeData.java +++ b/src/main/java/org/apache/cassandra/streaming/management/ProgressInfoCompositeData.java @@ -29,54 +29,38 @@ import java.net.UnknownHostException; import java.util.HashMap; import java.util.Map; import java.util.UUID; -import javax.management.openmbean.*; -import com.google.common.base.Throwables; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.CompositeDataSupport; +import javax.management.openmbean.CompositeType; +import javax.management.openmbean.OpenDataException; +import javax.management.openmbean.OpenType; +import javax.management.openmbean.SimpleType; import org.apache.cassandra.streaming.ProgressInfo; -public class ProgressInfoCompositeData -{ - private static final String[] ITEM_NAMES = new String[]{"planId", - "peer", - "sessionIndex", - "fileName", - "direction", - "currentBytes", - "totalBytes"}; - private static final String[] ITEM_DESCS = new String[]{"String representation of Plan ID", - "Session peer", - "Index of session", - "Name of the file", - "Direction('IN' or 'OUT')", - "Current bytes transferred", - "Total bytes to transfer"}; - private static final OpenType[] ITEM_TYPES = new OpenType[]{SimpleType.STRING, - SimpleType.STRING, - SimpleType.INTEGER, - SimpleType.STRING, - SimpleType.STRING, - SimpleType.LONG, - SimpleType.LONG}; +import com.google.common.base.Throwables; + +public class ProgressInfoCompositeData { + private static final String[] ITEM_NAMES = new String[] { "planId", "peer", "sessionIndex", "fileName", "direction", + "currentBytes", "totalBytes" }; + private static final String[] ITEM_DESCS = new String[] { "String representation of Plan ID", "Session peer", + "Index of session", "Name of the file", "Direction('IN' or 'OUT')", "Current bytes transferred", + "Total bytes to transfer" }; + private static final OpenType[] ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, + 
SimpleType.INTEGER, SimpleType.STRING, SimpleType.STRING, SimpleType.LONG, SimpleType.LONG }; public static final CompositeType COMPOSITE_TYPE; - static { - try - { - COMPOSITE_TYPE = new CompositeType(ProgressInfo.class.getName(), - "ProgressInfo", - ITEM_NAMES, - ITEM_DESCS, - ITEM_TYPES); - } - catch (OpenDataException e) - { + static { + try { + COMPOSITE_TYPE = new CompositeType(ProgressInfo.class.getName(), "ProgressInfo", ITEM_NAMES, ITEM_DESCS, + ITEM_TYPES); + } catch (OpenDataException e) { throw Throwables.propagate(e); } } - public static CompositeData toCompositeData(UUID planId, ProgressInfo progressInfo) - { + public static CompositeData toCompositeData(UUID planId, ProgressInfo progressInfo) { Map valueMap = new HashMap<>(); valueMap.put(ITEM_NAMES[0], planId.toString()); valueMap.put(ITEM_NAMES[1], progressInfo.peer.getHostAddress()); @@ -85,30 +69,19 @@ public class ProgressInfoCompositeData valueMap.put(ITEM_NAMES[4], progressInfo.direction.name()); valueMap.put(ITEM_NAMES[5], progressInfo.currentBytes); valueMap.put(ITEM_NAMES[6], progressInfo.totalBytes); - try - { + try { return new CompositeDataSupport(COMPOSITE_TYPE, valueMap); - } - catch (OpenDataException e) - { + } catch (OpenDataException e) { throw Throwables.propagate(e); } } - public static ProgressInfo fromCompositeData(CompositeData cd) - { + public static ProgressInfo fromCompositeData(CompositeData cd) { Object[] values = cd.getAll(ITEM_NAMES); - try - { - return new ProgressInfo(InetAddress.getByName((String) values[1]), - (int) values[2], - (String) values[3], - ProgressInfo.Direction.valueOf((String)values[4]), - (long) values[5], - (long) values[6]); - } - catch (UnknownHostException e) - { + try { + return new ProgressInfo(InetAddress.getByName((String) values[1]), (int) values[2], (String) values[3], + ProgressInfo.Direction.valueOf((String) values[4]), (long) values[5], (long) values[6]); + } catch (UnknownHostException e) { throw Throwables.propagate(e); } } diff --git 
a/src/main/java/org/apache/cassandra/streaming/management/SessionInfoCompositeData.java b/src/main/java/org/apache/cassandra/streaming/management/SessionInfoCompositeData.java index ef5d955..bcda9c0 100644 --- a/src/main/java/org/apache/cassandra/streaming/management/SessionInfoCompositeData.java +++ b/src/main/java/org/apache/cassandra/streaming/management/SessionInfoCompositeData.java @@ -26,8 +26,24 @@ package org.apache.cassandra.streaming.management; import java.net.InetAddress; import java.net.UnknownHostException; -import java.util.*; -import javax.management.openmbean.*; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + +import javax.management.openmbean.ArrayType; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.CompositeDataSupport; +import javax.management.openmbean.CompositeType; +import javax.management.openmbean.OpenDataException; +import javax.management.openmbean.OpenType; +import javax.management.openmbean.SimpleType; + +import org.apache.cassandra.streaming.ProgressInfo; +import org.apache.cassandra.streaming.SessionInfo; +import org.apache.cassandra.streaming.StreamSession; +import org.apache.cassandra.streaming.StreamSummary; import com.google.common.base.Function; import com.google.common.base.Throwables; @@ -35,149 +51,86 @@ import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.apache.cassandra.streaming.ProgressInfo; -import org.apache.cassandra.streaming.SessionInfo; -import org.apache.cassandra.streaming.StreamSession; -import org.apache.cassandra.streaming.StreamSummary; -import java.util.HashMap; - -public class SessionInfoCompositeData -{ - private static final String[] ITEM_NAMES = new String[]{"planId", - "peer", - "connecting", - "receivingSummaries", - "sendingSummaries", - "state", - "receivingFiles", - "sendingFiles", - "sessionIndex"}; 
- private static final String[] ITEM_DESCS = new String[]{"Plan ID", - "Session peer", - "Connecting address", - "Summaries of receiving data", - "Summaries of sending data", - "Current session state", - "Receiving files", - "Sending files", - "Session index"}; +public class SessionInfoCompositeData { + private static final String[] ITEM_NAMES = new String[] { "planId", "peer", "connecting", "receivingSummaries", + "sendingSummaries", "state", "receivingFiles", "sendingFiles", "sessionIndex" }; + private static final String[] ITEM_DESCS = new String[] { "Plan ID", "Session peer", "Connecting address", + "Summaries of receiving data", "Summaries of sending data", "Current session state", "Receiving files", + "Sending files", "Session index" }; private static final OpenType[] ITEM_TYPES; public static final CompositeType COMPOSITE_TYPE; - static { - try - { - ITEM_TYPES = new OpenType[]{SimpleType.STRING, - SimpleType.STRING, - SimpleType.STRING, - ArrayType.getArrayType(StreamSummaryCompositeData.COMPOSITE_TYPE), - ArrayType.getArrayType(StreamSummaryCompositeData.COMPOSITE_TYPE), - SimpleType.STRING, - ArrayType.getArrayType(ProgressInfoCompositeData.COMPOSITE_TYPE), - ArrayType.getArrayType(ProgressInfoCompositeData.COMPOSITE_TYPE), - SimpleType.INTEGER}; - COMPOSITE_TYPE = new CompositeType(SessionInfo.class.getName(), - "SessionInfo", - ITEM_NAMES, - ITEM_DESCS, - ITEM_TYPES); - } - catch (OpenDataException e) - { + static { + try { + ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, + ArrayType.getArrayType(StreamSummaryCompositeData.COMPOSITE_TYPE), + ArrayType.getArrayType(StreamSummaryCompositeData.COMPOSITE_TYPE), SimpleType.STRING, + ArrayType.getArrayType(ProgressInfoCompositeData.COMPOSITE_TYPE), + ArrayType.getArrayType(ProgressInfoCompositeData.COMPOSITE_TYPE), SimpleType.INTEGER }; + COMPOSITE_TYPE = new CompositeType(SessionInfo.class.getName(), "SessionInfo", ITEM_NAMES, ITEM_DESCS, + ITEM_TYPES); + } catch 
(OpenDataException e) { throw Throwables.propagate(e); } } - public static CompositeData toCompositeData(final UUID planId, SessionInfo sessionInfo) - { + public static CompositeData toCompositeData(final UUID planId, SessionInfo sessionInfo) { Map valueMap = new HashMap<>(); valueMap.put(ITEM_NAMES[0], planId.toString()); valueMap.put(ITEM_NAMES[1], sessionInfo.peer.getHostAddress()); valueMap.put(ITEM_NAMES[2], sessionInfo.connecting.getHostAddress()); - Function fromStreamSummary = new Function() - { - public CompositeData apply(StreamSummary input) - { - return StreamSummaryCompositeData.toCompositeData(input); - } - }; + Function fromStreamSummary = input -> StreamSummaryCompositeData + .toCompositeData(input); valueMap.put(ITEM_NAMES[3], toArrayOfCompositeData(sessionInfo.receivingSummaries, fromStreamSummary)); valueMap.put(ITEM_NAMES[4], toArrayOfCompositeData(sessionInfo.sendingSummaries, fromStreamSummary)); valueMap.put(ITEM_NAMES[5], sessionInfo.state.name()); - Function fromProgressInfo = new Function() - { - public CompositeData apply(ProgressInfo input) - { - return ProgressInfoCompositeData.toCompositeData(planId, input); - } - }; + Function fromProgressInfo = input -> ProgressInfoCompositeData + .toCompositeData(planId, input); valueMap.put(ITEM_NAMES[6], toArrayOfCompositeData(sessionInfo.getReceivingFiles(), fromProgressInfo)); valueMap.put(ITEM_NAMES[7], toArrayOfCompositeData(sessionInfo.getSendingFiles(), fromProgressInfo)); valueMap.put(ITEM_NAMES[8], sessionInfo.sessionIndex); - try - { + try { return new CompositeDataSupport(COMPOSITE_TYPE, valueMap); - } - catch (OpenDataException e) - { + } catch (OpenDataException e) { throw Throwables.propagate(e); } } - public static SessionInfo fromCompositeData(CompositeData cd) - { + public static SessionInfo fromCompositeData(CompositeData cd) { assert cd.getCompositeType().equals(COMPOSITE_TYPE); Object[] values = cd.getAll(ITEM_NAMES); InetAddress peer, connecting; - try - { + try { peer = 
InetAddress.getByName((String) values[1]); connecting = InetAddress.getByName((String) values[2]); - } - catch (UnknownHostException e) - { + } catch (UnknownHostException e) { throw Throwables.propagate(e); } - Function toStreamSummary = new Function() - { - public StreamSummary apply(CompositeData input) - { - return StreamSummaryCompositeData.fromCompositeData(input); - } - }; - SessionInfo info = new SessionInfo(peer, - (int)values[8], - connecting, - fromArrayOfCompositeData((CompositeData[]) values[3], toStreamSummary), - fromArrayOfCompositeData((CompositeData[]) values[4], toStreamSummary), - StreamSession.State.valueOf((String) values[5]), - new HashMap(), new HashMap()); - Function toProgressInfo = new Function() - { - public ProgressInfo apply(CompositeData input) - { - return ProgressInfoCompositeData.fromCompositeData(input); - } - }; - for (ProgressInfo progress : fromArrayOfCompositeData((CompositeData[]) values[6], toProgressInfo)) - { + Function toStreamSummary = input -> StreamSummaryCompositeData + .fromCompositeData(input); + SessionInfo info = new SessionInfo(peer, (int) values[8], connecting, + fromArrayOfCompositeData((CompositeData[]) values[3], toStreamSummary), + fromArrayOfCompositeData((CompositeData[]) values[4], toStreamSummary), + StreamSession.State.valueOf((String) values[5]), new HashMap(), + new HashMap()); + Function toProgressInfo = input -> ProgressInfoCompositeData + .fromCompositeData(input); + for (ProgressInfo progress : fromArrayOfCompositeData((CompositeData[]) values[6], toProgressInfo)) { info.updateProgress(progress); } - for (ProgressInfo progress : fromArrayOfCompositeData((CompositeData[]) values[7], toProgressInfo)) - { + for (ProgressInfo progress : fromArrayOfCompositeData((CompositeData[]) values[7], toProgressInfo)) { info.updateProgress(progress); } return info; } - private static Collection fromArrayOfCompositeData(CompositeData[] cds, Function func) - { + private static Collection 
fromArrayOfCompositeData(CompositeData[] cds, Function func) { return Lists.newArrayList(Iterables.transform(Arrays.asList(cds), func)); } - private static CompositeData[] toArrayOfCompositeData(Collection toConvert, Function func) - { + private static CompositeData[] toArrayOfCompositeData(Collection toConvert, + Function func) { if (toConvert == null) { toConvert = Sets.newHashSet(); } diff --git a/src/main/java/org/apache/cassandra/streaming/management/StreamStateCompositeData.java b/src/main/java/org/apache/cassandra/streaming/management/StreamStateCompositeData.java index 3f57608..daa1f57 100644 --- a/src/main/java/org/apache/cassandra/streaming/management/StreamStateCompositeData.java +++ b/src/main/java/org/apache/cassandra/streaming/management/StreamStateCompositeData.java @@ -24,79 +24,68 @@ package org.apache.cassandra.streaming.management; -import java.util.*; -import javax.management.openmbean.*; +import java.util.Arrays; +import java.util.HashMap; +import java.util.Map; +import java.util.Set; +import java.util.UUID; + +import javax.management.openmbean.ArrayType; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.CompositeDataSupport; +import javax.management.openmbean.CompositeType; +import javax.management.openmbean.OpenDataException; +import javax.management.openmbean.OpenType; +import javax.management.openmbean.SimpleType; + +import org.apache.cassandra.streaming.SessionInfo; +import org.apache.cassandra.streaming.StreamState; -import com.google.common.base.Function; import com.google.common.base.Throwables; import com.google.common.collect.Iterables; import com.google.common.collect.Lists; import com.google.common.collect.Sets; -import org.apache.cassandra.streaming.SessionInfo; -import org.apache.cassandra.streaming.StreamState; - /** */ -public class StreamStateCompositeData -{ - private static final String[] ITEM_NAMES = new String[]{"planId", "description", "sessions", - "currentRxBytes", "totalRxBytes", 
"rxPercentage", - "currentTxBytes", "totalTxBytes", "txPercentage"}; - private static final String[] ITEM_DESCS = new String[]{"Plan ID of this stream", - "Stream plan description", - "Active stream sessions", - "Number of bytes received across all streams", - "Total bytes available to receive across all streams", - "Percentage received across all streams", - "Number of bytes sent across all streams", - "Total bytes available to send across all streams", - "Percentage sent across all streams"}; +public class StreamStateCompositeData { + private static final String[] ITEM_NAMES = new String[] { "planId", "description", "sessions", "currentRxBytes", + "totalRxBytes", "rxPercentage", "currentTxBytes", "totalTxBytes", "txPercentage" }; + private static final String[] ITEM_DESCS = new String[] { "Plan ID of this stream", "Stream plan description", + "Active stream sessions", "Number of bytes received across all streams", + "Total bytes available to receive across all streams", "Percentage received across all streams", + "Number of bytes sent across all streams", "Total bytes available to send across all streams", + "Percentage sent across all streams" }; private static final OpenType[] ITEM_TYPES; public static final CompositeType COMPOSITE_TYPE; - static { - try - { - ITEM_TYPES = new OpenType[]{SimpleType.STRING, - SimpleType.STRING, - ArrayType.getArrayType(SessionInfoCompositeData.COMPOSITE_TYPE), - SimpleType.LONG, SimpleType.LONG, SimpleType.DOUBLE, - SimpleType.LONG, SimpleType.LONG, SimpleType.DOUBLE}; - COMPOSITE_TYPE = new CompositeType(StreamState.class.getName(), - "StreamState", - ITEM_NAMES, - ITEM_DESCS, - ITEM_TYPES); - } - catch (OpenDataException e) - { + static { + try { + ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, + ArrayType.getArrayType(SessionInfoCompositeData.COMPOSITE_TYPE), SimpleType.LONG, SimpleType.LONG, + SimpleType.DOUBLE, SimpleType.LONG, SimpleType.LONG, SimpleType.DOUBLE }; + COMPOSITE_TYPE = new 
CompositeType(StreamState.class.getName(), "StreamState", ITEM_NAMES, ITEM_DESCS, + ITEM_TYPES); + } catch (OpenDataException e) { throw Throwables.propagate(e); } } - public static CompositeData toCompositeData(final StreamState streamState) - { + public static CompositeData toCompositeData(final StreamState streamState) { Map valueMap = new HashMap<>(); valueMap.put(ITEM_NAMES[0], streamState.planId.toString()); valueMap.put(ITEM_NAMES[1], streamState.description); CompositeData[] sessions = new CompositeData[streamState.sessions.size()]; - Lists.newArrayList(Iterables.transform(streamState.sessions, new Function() - { - public CompositeData apply(SessionInfo input) - { - return SessionInfoCompositeData.toCompositeData(streamState.planId, input); - } - })).toArray(sessions); + Lists.newArrayList(Iterables.transform(streamState.sessions, + input -> SessionInfoCompositeData.toCompositeData(streamState.planId, input))).toArray(sessions); valueMap.put(ITEM_NAMES[2], sessions); long currentRxBytes = 0; long totalRxBytes = 0; long currentTxBytes = 0; long totalTxBytes = 0; - for (SessionInfo sessInfo : streamState.sessions) - { + for (SessionInfo sessInfo : streamState.sessions) { currentRxBytes += sessInfo.getTotalSizeReceived(); totalRxBytes += sessInfo.getTotalSizeToReceive(); currentTxBytes += sessInfo.getTotalSizeSent(); @@ -112,30 +101,20 @@ public class StreamStateCompositeData valueMap.put(ITEM_NAMES[7], totalTxBytes); valueMap.put(ITEM_NAMES[8], txPercentage); - try - { + try { return new CompositeDataSupport(COMPOSITE_TYPE, valueMap); - } - catch (OpenDataException e) - { + } catch (OpenDataException e) { throw Throwables.propagate(e); } } - public static StreamState fromCompositeData(CompositeData cd) - { + public static StreamState fromCompositeData(CompositeData cd) { assert cd.getCompositeType().equals(COMPOSITE_TYPE); Object[] values = cd.getAll(ITEM_NAMES); UUID planId = UUID.fromString((String) values[0]); String description = (String) values[1]; Set 
sessions = Sets.newHashSet(Iterables.transform(Arrays.asList((CompositeData[]) values[2]), - new Function() - { - public SessionInfo apply(CompositeData input) - { - return SessionInfoCompositeData.fromCompositeData(input); - } - })); + input -> SessionInfoCompositeData.fromCompositeData(input))); return new StreamState(planId, description, sessions); } } diff --git a/src/main/java/org/apache/cassandra/streaming/management/StreamSummaryCompositeData.java b/src/main/java/org/apache/cassandra/streaming/management/StreamSummaryCompositeData.java index d649aff..93f39c0 100644 --- a/src/main/java/org/apache/cassandra/streaming/management/StreamSummaryCompositeData.java +++ b/src/main/java/org/apache/cassandra/streaming/management/StreamSummaryCompositeData.java @@ -27,63 +27,51 @@ package org.apache.cassandra.streaming.management; import java.util.HashMap; import java.util.Map; import java.util.UUID; -import javax.management.openmbean.*; -import com.google.common.base.Throwables; +import javax.management.openmbean.CompositeData; +import javax.management.openmbean.CompositeDataSupport; +import javax.management.openmbean.CompositeType; +import javax.management.openmbean.OpenDataException; +import javax.management.openmbean.OpenType; +import javax.management.openmbean.SimpleType; import org.apache.cassandra.streaming.StreamSummary; +import com.google.common.base.Throwables; + /** */ -public class StreamSummaryCompositeData -{ - private static final String[] ITEM_NAMES = new String[]{"cfId", - "files", - "totalSize"}; - private static final String[] ITEM_DESCS = new String[]{"ColumnFamilu ID", - "Number of files", - "Total bytes of the files"}; - private static final OpenType[] ITEM_TYPES = new OpenType[]{SimpleType.STRING, - SimpleType.INTEGER, - SimpleType.LONG}; +public class StreamSummaryCompositeData { + private static final String[] ITEM_NAMES = new String[] { "cfId", "files", "totalSize" }; + private static final String[] ITEM_DESCS = new String[] { "ColumnFamilu 
ID", "Number of files", + "Total bytes of the files" }; + private static final OpenType[] ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.INTEGER, + SimpleType.LONG }; public static final CompositeType COMPOSITE_TYPE; - static { - try - { - COMPOSITE_TYPE = new CompositeType(StreamSummary.class.getName(), - "StreamSummary", - ITEM_NAMES, - ITEM_DESCS, - ITEM_TYPES); - } - catch (OpenDataException e) - { + static { + try { + COMPOSITE_TYPE = new CompositeType(StreamSummary.class.getName(), "StreamSummary", ITEM_NAMES, ITEM_DESCS, + ITEM_TYPES); + } catch (OpenDataException e) { throw Throwables.propagate(e); } } - public static CompositeData toCompositeData(StreamSummary streamSummary) - { + public static CompositeData toCompositeData(StreamSummary streamSummary) { Map valueMap = new HashMap<>(); valueMap.put(ITEM_NAMES[0], streamSummary.cfId.toString()); valueMap.put(ITEM_NAMES[1], streamSummary.files); valueMap.put(ITEM_NAMES[2], streamSummary.totalSize); - try - { + try { return new CompositeDataSupport(COMPOSITE_TYPE, valueMap); - } - catch (OpenDataException e) - { + } catch (OpenDataException e) { throw Throwables.propagate(e); } } - public static StreamSummary fromCompositeData(CompositeData cd) - { + public static StreamSummary fromCompositeData(CompositeData cd) { Object[] values = cd.getAll(ITEM_NAMES); - return new StreamSummary(UUID.fromString((String) values[0]), - (int) values[1], - (long) values[2]); + return new StreamSummary(UUID.fromString((String) values[0]), (int) values[1], (long) values[2]); } } From ae6a0008075e4349fca36322eb39417748ee6c7d Mon Sep 17 00:00:00 2001 From: Calle Wilund Date: Tue, 1 Nov 2016 09:44:17 +0000 Subject: [PATCH 32/32] ColumnFamilyStore: Remove compaction parameter API usage Do manual mangling of in/out data in JMX instead. Saves on controversy over more or less pointless API additions. 
--- .../cassandra/db/ColumnFamilyStore.java | 37 ++++++++++--------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java index 7b5f2df..ea62b14 100644 --- a/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ b/src/main/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -25,24 +25,23 @@ package org.apache.cassandra.db; import static java.lang.String.valueOf; import static java.util.Arrays.asList; +import static java.util.stream.Collectors.toMap; import static javax.json.Json.createObjectBuilder; -import static javax.json.Json.createReader; -import static javax.ws.rs.core.MediaType.APPLICATION_JSON; import java.io.StringReader; -import java.util.HashMap; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Map.Entry; import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.logging.Logger; +import javax.json.Json; import javax.json.JsonArray; import javax.json.JsonObject; import javax.json.JsonObjectBuilder; -import javax.json.JsonValue; +import javax.json.JsonReader; import javax.management.MBeanServer; import javax.management.MalformedObjectNameException; import javax.management.ObjectName; @@ -369,33 +368,37 @@ public class ColumnFamilyStore extends MetricsMBean implements ColumnFamilyStore @Override public void setCompactionParametersJson(String options) { log(" setCompactionParametersJson"); - client.post("column_family/compaction_parameters/" + getCFName(), null, options, APPLICATION_JSON); + JsonReader reader = Json.createReaderFactory(null).createReader(new StringReader(options)); + setCompactionParameters( + reader.readObject().entrySet().stream().collect(toMap(Map.Entry::getKey, e -> e.toString()))); } @Override public String getCompactionParametersJson() { log(" getCompactionParametersJson"); - return 
client.getStringValue("column_family/compaction_parameters/" + getCFName()); + JsonObjectBuilder b = createObjectBuilder(); + getCompactionParameters().forEach(b::add); + return b.build().toString(); } @Override public void setCompactionParameters(Map options) { - JsonObjectBuilder b = createObjectBuilder(); for (Map.Entry e : options.entrySet()) { - b.add(e.getKey(), e.getValue()); + // See below + if ("class".equals(e.getKey())) { + setCompactionStrategyClass(e.getValue()); + } else { + throw new IllegalArgumentException(e.getKey()); + } } - setCompactionParametersJson(b.build().toString()); } @Override public Map getCompactionParameters() { - String s = getCompactionParametersJson(); - JsonObject o = createReader(new StringReader(s)).readObject(); - HashMap res = new HashMap<>(); - for (Entry e : o.entrySet()) { - res.put(e.getKey(), e.getValue().toString()); - } - return res; + // We only currently support class. Here could have been a call that can + // be expanded only on the server side, but that raises controversy. + // Lets add some technical debt instead. + return Collections.singletonMap("class", getCompactionStrategyClass()); } @Override