scylla-jmx: Update JMX interfaces to origin 3.11

Almost 100% null implementations, which is ok for most purposes
currently used by scylla. Some of these new calls (like dropped
mutations etc) should perhaps however be implemented.

Tested with the nodetool dtests, so coverage is only sparse.

Needed when/if scylla-tools-java is upgraded to origin 3.11,
otherwise nodetool breaks.

Message-Id: <20180730113741.14952-1-calle@scylladb.com>
This commit is contained in:
Calle Wilund 2018-07-30 11:37:41 +00:00 committed by Pekka Enberg
parent b4d983b45a
commit 9c3ac3e547
9 changed files with 295 additions and 5 deletions

View File

@ -17,6 +17,7 @@
*/
package org.apache.cassandra.db;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutionException;
@ -45,6 +46,11 @@ public interface ColumnFamilyStoreMBean {
*/
public void forceMajorCompaction(boolean splitOutput) throws ExecutionException, InterruptedException;
// NOT even default-throw implementing
// forceCompactionForTokenRange
// as this is clearly a misplaced method that should not be in the mbean interface
// (uses internal cassandra types)
/**
* Gets the minimum number of sstables in queue before compaction kicks off
*/
@ -154,6 +160,14 @@ public interface ColumnFamilyStoreMBean {
*/
public int[] getSSTableCountPerLevel();
/**
 * @return sstable fanout size for level compaction strategy.
 */
default public int getLevelFanoutSize() {
    // TODO: wire this up to the server's real compaction configuration.
    // Ten is the conventional default fanout for leveled compaction.
    final int defaultFanout = 10;
    return defaultFanout;
}
/**
* Get the ratio of droppable tombstones to real columns (and non-droppable
* tombstones)

View File

@ -43,6 +43,18 @@ public interface CompactionManagerMBean {
*/
public void forceUserDefinedCompaction(String dataFiles);
/**
 * Triggers the cleanup of user-specified sstables.
 * You can specify files from various keyspaces and columnfamilies;
 * if you do so, cleanup is performed on each file individually.
 *
 * @param dataFiles a comma-separated list of sstable files to clean up.
 *                  Each entry must contain the keyspace and columnfamily
 *                  name in its path (for 2.1+) or in the file name itself.
 */
// Stub: user-defined cleanup is not implemented by scylla-jmx.
default public void forceUserDefinedCleanup(String dataFiles) {
throw new UnsupportedOperationException();
}
/**
* Stop all running compaction-like tasks having the provided {@code type}.
*

View File

@ -131,7 +131,15 @@ public class MetricsRegistry {
}
/**
 * Creates a counter MBean backed by the given REST URL.
 *
 * @param url API endpoint to read the count from; when {@code null},
 *            a constant-zero dummy counter is returned instead (used
 *            for metrics scylla does not implement yet).
 */
public MetricMBean counter(final String url) {
    if (url != null) {
        return new JmxCounter(url);
    }
    // No backing endpoint: report zero rather than querying a null URL.
    return new JmxCounter(url) {
        @Override
        public long getCount() {
            return 0;
        }
    };
}
private abstract class IntermediatelyUpdated {

View File

@ -155,6 +155,24 @@ public class TableMetrics implements Metrics {
aliasFactory.createMetricName(alias));
}
/**
 * Returns a fetch function that always yields the "zero" value of the
 * requested type, for metrics that have no real backing API call.
 *
 * @throws IllegalArgumentException for unsupported types
 */
private static <T> BiFunction<APIClient, String, T> getDummy(Class<T> type) {
    final Object zero;
    if (type == String.class) {
        zero = "";
    } else if (type == Integer.class) {
        zero = 0;
    } else if (type == Double.class) {
        zero = 0.0;
    } else if (type == Long.class) {
        zero = 0L;
    } else {
        throw new IllegalArgumentException(type.getName());
    }
    return (client, path) -> type.cast(zero);
}
/**
 * Registers a per-table gauge that always reads the type's zero value
 * (via {@link #getDummy}); used for metrics not yet implemented by scylla.
 */
public <T> void createDummyTableGauge(Class<T> c, String name) throws MalformedObjectNameException {
register(() -> gauge(newGauge(getDummy(c), null)), factory.createMetricName(name),
aliasFactory.createMetricName(name));
}
public <L, G> void createTableGauge(Class<L> c1, Class<G> c2, String name, String alias, String uri)
throws MalformedObjectNameException {
if (cfName != null) {
@ -173,6 +191,11 @@ public class TableMetrics implements Metrics {
aliasFactory.createMetricName(alias));
}
/**
 * Registers a per-table counter with no backing API URL (always zero);
 * used for metrics not yet implemented by scylla.
 */
public void createDummyTableCounter(String name) throws MalformedObjectNameException {
register(() -> counter(null), factory.createMetricName(name),
aliasFactory.createMetricName(name));
}
public void createTableHistogram(String name, String uri, boolean considerZeros)
throws MalformedObjectNameException {
createTableHistogram(name, name, uri, considerZeros);
@ -205,6 +228,9 @@ public class TableMetrics implements Metrics {
for (LatencyMetrics l : latencyMetrics) {
l.register(registry);
}
// TODO: implement
registry.createDummyTableCounter("DroppedMutations");
}
private static void registerCommon(Registry registry) throws MalformedObjectNameException {
@ -264,8 +290,12 @@ public class TableMetrics implements Metrics {
registry.createTableCounter("RowCacheHitOutOfRange", "row_cache_hit_out_of_range");
registry.createTableCounter("RowCacheHit", "row_cache_hit");
registry.createTableCounter("RowCacheMiss", "row_cache_miss");
// TODO: implement
registry.createDummyTableGauge(Double.class, "PercentRepaired");
}
@SuppressWarnings("serial")
static class TableMetricObjectName extends javax.management.ObjectName {
private static final String FAKE_NAME = "a:a=a";

View File

@ -122,6 +122,20 @@ public interface MessagingServiceMBean {
*/
public Map<String, Long> getTimeoutsPerHost();
/**
 * Back-pressure rate limiting per host.
 * NOTE(review): not supported by this implementation — always throws.
 */
default public Map<String, Double> getBackPressurePerHost() {
throw new UnsupportedOperationException();
}
/**
 * Enable/Disable back-pressure.
 * NOTE(review): not supported by this implementation — always throws.
 */
default public void setBackPressureEnabled(boolean enabled) {
throw new UnsupportedOperationException();
}
/**
* Number of timeouts since last check.
*/

View File

@ -25,14 +25,19 @@ package org.apache.cassandra.service;
import static java.util.Collections.emptySet;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Logger;
import javax.json.JsonArray;
import javax.json.JsonObject;
import javax.management.ObjectName;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;
import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.metrics.CASClientRequestMetrics;
import org.apache.cassandra.metrics.ClientRequestMetrics;
@ -286,4 +291,11 @@ public class StorageProxy extends MetricsMBean implements StorageProxyMBean {
log(" getHintedHandoffDisabledDCs()");
return emptySet();
}
@Override
public int getNumberOfTables() {
    // Log the invocation, matching the convention of the other MBean
    // operations in this class (e.g. getHintedHandoffDisabledDCs()).
    log(" getNumberOfTables()");
    // TODO: could be like 1000% more efficient — this fetches the full
    // column-family listing just to count its entries.
    JsonArray mbeans = client.getJsonArray("/column_family/");
    return mbeans.size();
}
}

View File

@ -95,6 +95,15 @@ public interface StorageProxyMBean {
public long getReadRepairRepairedBackground();
// Outbound-TCP backlog expiration interval (origin 3.11 knob).
// NOTE(review): not supported here — both accessors always throw.
default public int getOtcBacklogExpirationInterval() {
throw new UnsupportedOperationException();
}
default void setOtcBacklogExpirationInterval(int intervalInMillis) {
throw new UnsupportedOperationException();
}
/** Returns each live node's schema version */
public Map<String, List<String>> getSchemaVersions();
public int getNumberOfTables();
}

View File

@ -30,10 +30,8 @@ import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
@ -60,6 +58,8 @@ import javax.ws.rs.core.MultivaluedMap;
import org.apache.cassandra.metrics.StorageMetrics;
import org.apache.cassandra.repair.RepairParallelism;
import org.glassfish.jersey.client.ClientConfig;
import org.glassfish.jersey.client.ClientProperties;
import com.google.common.base.Joiner;
import com.scylladb.jmx.api.APIClient;
@ -486,10 +486,22 @@ public class StorageService extends MetricsMBean implements StorageServiceMBean,
*/
@Override
public void takeSnapshot(String tag, String... keyspaceNames) throws IOException {
// Delegate to the options-aware variant with no options.
takeSnapshot(tag, null, keyspaceNames);
}
/**
 * Takes a snapshot of the given keyspaces (or of a single table when the
 * sole entry is of the form "keyspace.table").
 */
@Override
public void takeSnapshot(String tag, Map<String, String> options, String... keyspaceNames) throws IOException {
    log(" takeSnapshot(String tag, Map<String, String> options, String... keyspaceNames) throws IOException");
    MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
    APIClient.set_query_param(queryParams, "tag", tag);
    // A single "ks.cf" entity selects one table: split it into the
    // keyspace name plus an explicit "cf" query parameter.
    if (keyspaceNames.length == 1 && keyspaceNames[0].indexOf('.') != -1) {
        String[] parts = keyspaceNames[0].split("\\.");
        keyspaceNames = new String[] { parts[0] };
        APIClient.set_query_param(queryParams, "cf", parts[1]);
    }
    APIClient.set_query_param(queryParams, "kn", APIClient.join(keyspaceNames));
    // TODO: origin has one recognized option: skip flush. We don't.
    client.post("/storage_service/snapshots", queryParams);
}
@ -591,6 +603,12 @@ public class StorageService extends MetricsMBean implements StorageServiceMBean,
client.post("/storage_service/keyspace_compaction/" + keyspaceName, queryParams);
}
// NOTE(review): the start/end tokens (and the token range generally) are
// currently ignored — the whole keyspace is compacted instead.
@Override
public void forceKeyspaceCompactionForTokenRange(String keyspaceName, String startToken, String endToken, String... tableNames) throws IOException, ExecutionException, InterruptedException {
// TODO: actually handle token ranges.
forceKeyspaceCompaction(keyspaceName, tableNames);
}
/**
* Trigger a cleanup of keys on a single keyspace
*/
@ -1276,7 +1294,29 @@ public class StorageService extends MetricsMBean implements StorageServiceMBean,
*/
@Override
public void rebuild(String sourceDc) {
    // Delegate to the extended variant, which performs its own logging
    // (logging here as well would record the call twice).
    rebuild(sourceDc, null, null, null);
}
/**
* Same as {@link #rebuild(String)}, but only for specified keyspace and ranges.
*
* @param sourceDc Name of DC from which to select sources for streaming or null to pick any node
* @param keyspace Name of the keyspace which to rebuild or null to rebuild all keyspaces.
* @param tokens Range of tokens to rebuild or null to rebuild all token ranges. In the format of:
* "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]"
*/
@Override
public void rebuild(String sourceDc, String keyspace, String tokens, String specificSources) {
log(" rebuild(String sourceDc, String keyspace, String tokens, String specificSources)");
if (keyspace != null) {
throw new UnsupportedOperationException("Rebuild: 'keyspace' not yet supported");
}
if (tokens != null) {
throw new UnsupportedOperationException("Rebuild: 'token range' not yet supported");
}
if (specificSources != null) {
throw new UnsupportedOperationException("Rebuild: 'specific sources' not yet supported");
}
if (sourceDc != null) {
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
APIClient.set_query_param(queryParams, "source_dc", sourceDc);

View File

@ -247,6 +247,14 @@ public interface StorageServiceMBean extends NotificationEmitter {
*/
public void takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException;
/**
 * @deprecated use {@link #takeSnapshot(String tag, Map options, String... entities)} instead.
 */
// Retained so that older clients calling the pre-3.11 name keep working.
@Deprecated
default public void takeMultipleTableSnapshot(String tag, String... tableList) throws IOException {
takeMultipleColumnFamilySnapshot(tag, tableList);
}
/**
* Takes the snapshot of a multiple column family from different keyspaces.
* A snapshot name must be specified.
@ -259,6 +267,18 @@ public interface StorageServiceMBean extends NotificationEmitter {
*/
public void takeMultipleColumnFamilySnapshot(String tag, String... columnFamilyList) throws IOException;
/**
* Takes the snapshot of a multiple column family from different keyspaces. A snapshot name must be specified.
*
* @param tag
* the tag given to the snapshot; may not be null or empty
* @param options
* Map of options (skipFlush is the only supported option for now)
* @param entities
* list of keyspaces / tables in the form of empty | ks1 ks2 ... | ks1.cf1,ks2.cf2,...
*/
public void takeSnapshot(String tag, Map<String, String> options, String... entities) throws IOException;
/**
* Remove the snapshot with the given name from the given keyspaces. If no
* tag is specified we will remove all snapshots.
@ -297,6 +317,20 @@ public interface StorageServiceMBean extends NotificationEmitter {
public void forceKeyspaceCompaction(String keyspaceName, String... tableNames)
throws IOException, ExecutionException, InterruptedException;
// Deprecated legacy signature; forwards to the jobs-aware variant with 0 jobs.
@Deprecated
default public int relocateSSTables(String keyspace, String ... cfnames) throws IOException, ExecutionException, InterruptedException {
return relocateSSTables(0, keyspace, cfnames);
}
// NOTE(review): unconditionally unsupported — see comment below.
default public int relocateSSTables(int jobs, String keyspace, String ... cfnames) throws IOException, ExecutionException, InterruptedException {
// Node tool op disabled in scylla
throw new UnsupportedOperationException("relocateSSTables");
}
/**
* Forces major compaction of specified token range in a single keyspace
*/
public void forceKeyspaceCompactionForTokenRange(String keyspaceName, String startToken, String endToken, String... tableNames) throws IOException, ExecutionException, InterruptedException;
/**
* Trigger a cleanup of keys on a single keyspace
*/
@ -346,6 +380,15 @@ public interface StorageServiceMBean extends NotificationEmitter {
public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames)
throws IOException, ExecutionException, InterruptedException;
/**
 * Rewrites all sstables from the given tables to remove deleted data.
 * The tombstone option defines the granularity of the procedure: ROW removes deleted partitions and rows, CELL also removes overwritten or deleted cells.
 * NOTE(review): unconditionally unsupported in this implementation.
 */
default public int garbageCollect(String tombstoneOption, int jobs, String keyspaceName, String... tableNames) throws IOException, ExecutionException, InterruptedException {
// Node tool op disabled in scylla
throw new UnsupportedOperationException("garbageCollect");
}
/**
* Flush all memtables for the given column families, or all columnfamilies
* for the given keyspace if none are explicitly listed.
@ -563,6 +606,21 @@ public interface StorageServiceMBean extends NotificationEmitter {
public void updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval,
Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException;
/*
 * Update dynamic_snitch_update_interval_in_ms.
 * NOTE(review): not supported here — always throws.
 */
default public void setDynamicUpdateInterval(int dynamicUpdateInterval) {
// afaict not used by nodetool.
throw new UnsupportedOperationException("setDynamicUpdateInterval");
}
/*
 * Get dynamic_snitch_update_interval_in_ms.
 * NOTE(review): not supported here — always throws.
 */
default public int getDynamicUpdateInterval() {
throw new UnsupportedOperationException("getDynamicUpdateInterval");
}
// allows a user to forcibly 'kill' a sick node
public void stopGossiping();
@ -598,6 +656,78 @@ public interface StorageServiceMBean extends NotificationEmitter {
public boolean isJoined();
// NOTE(review): none of the operations below are implemented by scylla-jmx;
// they exist only so that a 3.11 nodetool can bind to this MBean. Every
// one of them throws UnsupportedOperationException when invoked.
// Drain state queries.
default public boolean isDrained() {
throw new UnsupportedOperationException();
}
default public boolean isDraining() {
throw new UnsupportedOperationException();
}
// Generic RPC timeout accessors.
default public void setRpcTimeout(long value) {
throw new UnsupportedOperationException();
}
default public long getRpcTimeout() {
throw new UnsupportedOperationException();
}
// Read RPC timeout accessors.
default public void setReadRpcTimeout(long value) {
throw new UnsupportedOperationException();
}
default public long getReadRpcTimeout() {
throw new UnsupportedOperationException();
}
// Range-scan RPC timeout accessors.
default public void setRangeRpcTimeout(long value) {
throw new UnsupportedOperationException();
}
default public long getRangeRpcTimeout() {
throw new UnsupportedOperationException();
}
// Write RPC timeout accessors.
default public void setWriteRpcTimeout(long value) {
throw new UnsupportedOperationException();
}
default public long getWriteRpcTimeout() {
throw new UnsupportedOperationException();
}
// Counter-write RPC timeout accessors.
default public void setCounterWriteRpcTimeout(long value) {
throw new UnsupportedOperationException();
}
default public long getCounterWriteRpcTimeout() {
throw new UnsupportedOperationException();
}
// CAS contention timeout accessors.
default public void setCasContentionTimeout(long value) {
throw new UnsupportedOperationException();
}
default public long getCasContentionTimeout() {
throw new UnsupportedOperationException();
}
// Truncate RPC timeout accessors.
default public void setTruncateRpcTimeout(long value) {
throw new UnsupportedOperationException();
}
default public long getTruncateRpcTimeout() {
throw new UnsupportedOperationException();
}
// Streaming socket timeout accessors.
default public void setStreamingSocketTimeout(int value) {
throw new UnsupportedOperationException();
}
default public int getStreamingSocketTimeout() {
throw new UnsupportedOperationException();
}
public void setStreamThroughputMbPerSec(int value);
public int getStreamThroughputMbPerSec();
@ -609,6 +739,13 @@ public interface StorageServiceMBean extends NotificationEmitter {
public int getCompactionThroughputMbPerSec();
public void setCompactionThroughputMbPerSec(int value);
// Concurrent-compactor count accessors.
// NOTE(review): not supported by this implementation — both always throw.
default public int getConcurrentCompactors() {
throw new UnsupportedOperationException();
}
default public void setConcurrentCompactors(int value) {
throw new UnsupportedOperationException();
}
public boolean isIncrementalBackupsEnabled();
public void setIncrementalBackupsEnabled(boolean value);
@ -625,6 +762,16 @@ public interface StorageServiceMBean extends NotificationEmitter {
*/
public void rebuild(String sourceDc);
/**
* Same as {@link #rebuild(String)}, but only for specified keyspace and ranges.
*
* @param sourceDc Name of DC from which to select sources for streaming or null to pick any node
* @param keyspace Name of the keyspace which to rebuild or null to rebuild all keyspaces.
* @param tokens Range of tokens to rebuild or null to rebuild all token ranges. In the format of:
* "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]"
*/
public void rebuild(String sourceDc, String keyspace, String tokens, String specificSources);
/** Starts a bulk load and blocks until it completes. */
public void bulkLoad(String directory);
@ -665,6 +812,10 @@ public interface StorageServiceMBean extends NotificationEmitter {
public void resetLocalSchema() throws IOException;
// Reload the local node's schema from disk (origin 3.11 operation).
// NOTE(review): not supported by this implementation — always throws.
default public void reloadLocalSchema() {
throw new UnsupportedOperationException();
}
/**
* Enables/Disables tracing for the whole system. Only thrift requests can
* start tracing currently.