diff --git a/README.md b/README.md
new file mode 100644
index 0000000..8a8eabb
--- /dev/null
+++ b/README.md
@@ -0,0 +1,22 @@
+# Urchin JMX Interface
+This is the JMX interface for urchin.
+## Compile
+To compile, run:
+```
+mvn install
+```
+
+## Run
+Maven copies the relevant jars to the local target directory and sets
+the classpath accordingly.
+```
+java -jar target/urchin-mbean-1.0.jar
+```
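+
+Once it is running, the registered MBeans (for example
+org.apache.cassandra.db:type=StorageService and
+org.apache.cassandra.net:type=MessagingService) can be inspected with any
+JMX client, such as jconsole attached to the running process.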
+
+## Setting IP and Port
+By default, the JMX interface connects to a node on localhost, on
+port 10000.
+
+The JMX API uses system properties to set the IP address and port.
+To change the IP address, use the apiaddress property (e.g. -Dapiaddress=1.1.1.1).
+To change the port, use the apiport property (e.g. -Dapiport=10001).
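+
+For example, to connect to a node at 192.168.1.100 (an address used here just
+for illustration) on port 10001:
+```
+java -Dapiaddress=192.168.1.100 -Dapiport=10001 -jar target/urchin-mbean-1.0.jar
+```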
diff --git a/pom.xml b/pom.xml
new file mode 100644
index 0000000..8ec48f7
--- /dev/null
+++ b/pom.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>com.cloudius.urchin</groupId>
+    <artifactId>urchin-mbean</artifactId>
+    <version>1.0</version>
+    <packaging>jar</packaging>
+
+    <name>Urchin MBean</name>
+
+    <properties>
+        <maven.compiler.source>1.8</maven.compiler.source>
+        <maven.compiler.target>1.8</maven.compiler.target>
+    </properties>
+
+    <dependencies>
+        <dependency>
+            <groupId>com.sun.jersey</groupId>
+            <artifactId>jersey-client</artifactId>
+            <version>1.19</version>
+        </dependency>
+        <dependency>
+            <groupId>junit</groupId>
+            <artifactId>junit</artifactId>
+            <version>4.8.2</version>
+            <scope>test</scope>
+        </dependency>
+        <dependency>
+            <groupId>org.glassfish</groupId>
+            <artifactId>javax.json</artifactId>
+            <version>1.0.4</version>
+        </dependency>
+    </dependencies>
+
+    <build>
+        <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-dependency-plugin</artifactId>
+                <executions>
+                    <execution>
+                        <id>copy-dependencies</id>
+                        <phase>prepare-package</phase>
+                        <goals>
+                            <goal>copy-dependencies</goal>
+                        </goals>
+                        <configuration>
+                            <outputDirectory>${project.build.directory}/lib</outputDirectory>
+                            <overWriteReleases>false</overWriteReleases>
+                            <overWriteSnapshots>false</overWriteSnapshots>
+                            <overWriteIfNewer>true</overWriteIfNewer>
+                        </configuration>
+                    </execution>
+                </executions>
+            </plugin>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-jar-plugin</artifactId>
+                <configuration>
+                    <archive>
+                        <manifest>
+                            <addClasspath>true</addClasspath>
+                            <classpathPrefix>${project.build.directory}/lib/</classpathPrefix>
+                            <mainClass>com.cloudius.main.Main</mainClass>
+                        </manifest>
+                    </archive>
+                </configuration>
+            </plugin>
+        </plugins>
+    </build>
+</project>
diff --git a/src/main/java/com/cloudius/api/APIClient.java b/src/main/java/com/cloudius/api/APIClient.java
new file mode 100644
index 0000000..952e4d7
--- /dev/null
+++ b/src/main/java/com/cloudius/api/APIClient.java
@@ -0,0 +1,164 @@
+/*
+ * Copyright 2015 Cloudius Systems
+ */
+package com.cloudius.api;
+
+import java.io.StringReader;
+import java.net.InetAddress;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import javax.json.Json;
+import javax.json.JsonArray;
+import javax.json.JsonObject;
+import javax.json.JsonReader;
+import javax.json.JsonReaderFactory;
+import javax.json.JsonString;
+import javax.management.openmbean.TabularData;
+import javax.ws.rs.core.UriBuilder;
+
+import com.sun.jersey.api.client.Client;
+import com.sun.jersey.api.client.WebResource;
+import com.sun.jersey.api.client.WebResource.Builder;
+import com.sun.jersey.api.client.config.ClientConfig;
+import com.sun.jersey.api.client.config.DefaultClientConfig;
+
+import javax.ws.rs.core.MediaType;
+
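+/**
+ * Helper around the Jersey client that issues GET requests to the urchin REST
+ * API and converts the JSON responses into the Java types used by the MBean
+ * implementations.
+ */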
+public class APIClient {
+ JsonReaderFactory factory = Json.createReaderFactory(null);
+
+ public static String getBaseUrl() {
+ return "http://" + System.getProperty("apiaddress", "localhost") + ":"
+ + System.getProperty("apiport", "10000");
+ }
+
+ public Builder get(String path) {
+ ClientConfig config = new DefaultClientConfig();
+ Client client = Client.create(config);
+ WebResource service = client.resource(UriBuilder.fromUri(getBaseUrl())
+ .build());
+ return service.path(path).accept(MediaType.APPLICATION_JSON);
+ }
+
+ public String getStringValue(String string) {
+ if (!string.equals("")) {
+ return get(string).get(String.class);
+ }
+ return "";
+ }
+
+ public JsonReader getReader(String string) {
+ return factory.createReader(new StringReader(getStringValue(string)));
+ }
+
+ public String[] getStringArrValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ public int getIntValue(String string) {
+ return Integer.parseInt(getStringValue(string));
+ }
+
+ public boolean getBooleanValue(String string) {
+ return Boolean.parseBoolean(getStringValue(string));
+ }
+
+ public double getDoubleValue(String string) {
+ return Double.parseDouble(getStringValue(string));
+ }
+
+ public List<String> getListStrValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ public Map<List<String>, List<String>> getMapListStrValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ public Map<String, String> getMapStrValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ public List<InetAddress> getListInetAddressValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ public Map<String, TabularData> getMapStringTabularDataValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ public long getLongValue(String string) {
+ return Long.parseLong(getStringValue(string));
+ }
+
+ public Map<InetAddress, Float> getMapInetAddressFloatValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ public Map<String, Long> getMapStringLongValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ public long[] getLongArrValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ public Map<String, Integer> getMapStringIntegerValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+ public int[] getIntArrValue(String string) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
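+ /**
+ * Reads a JSON array of objects, each holding a string field (used as the
+ * key) and a numeric field (used as the value), and flattens it into a
+ * single map. Entries with an empty key or a non-positive value are
+ * skipped.
+ */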
+ public Map<String, Long> getListMapStringLongValue(String string) {
+ if (string.equals("")) {
+ return null;
+ }
+ // Builder builder =
+
+ String vals = get(string).get(String.class);
+ System.out.println(vals);
+ JsonReader reader = getReader(string);
+ JsonArray arr = reader.readArray();
+ System.out.println(arr.size());
+ Map<String, Long> map = new HashMap<String, Long>();
+ for (int i = 0; i < arr.size(); i++) {
+ JsonObject obj = arr.getJsonObject(i);
+ Iterator<String> it = obj.keySet().iterator();
+ String key = "";
+ long val = -1;
+ while (it.hasNext()) {
+ String k = (String) it.next();
+ System.out.println(k);
+ if (obj.get(k) instanceof JsonString) {
+ key = obj.getString(k);
+ } else {
+ val = obj.getInt(k);
+ }
+ }
+ if (val > 0 && !key.equals("")) {
+ map.put(key, val);
+ }
+
+ }
+ reader.close();
+
+ // .get(String.class);
+
+ return map;
+ }
+}
diff --git a/src/main/java/com/cloudius/main/Main.java b/src/main/java/com/cloudius/main/Main.java
new file mode 100644
index 0000000..1b4994d
--- /dev/null
+++ b/src/main/java/com/cloudius/main/Main.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright 2015 Cloudius Systems
+ */
+package com.cloudius.main;
+
+import com.cloudius.api.APIClient;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.service.StorageService;
+
+public class Main {
+
+ public static void main(String[] args) throws Exception {
+ System.out.println("Connecting to " + APIClient.getBaseUrl());
+ System.out.println("Starting the JMX server");
+ StorageService.getInstance();
+ MessagingService.getInstance();
+ Thread.sleep(Long.MAX_VALUE);
+ }
+
+}
diff --git a/src/main/java/org/apache/cassandra/net/MessagingService.java b/src/main/java/org/apache/cassandra/net/MessagingService.java
new file mode 100644
index 0000000..e758487
--- /dev/null
+++ b/src/main/java/org/apache/cassandra/net/MessagingService.java
@@ -0,0 +1,161 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Copyright 2015 Cloudius Systems
+ *
+ * Modified by Cloudius Systems
+ */
+package org.apache.cassandra.net;
+
+import java.lang.management.ManagementFactory;
+import java.net.*;
+import java.util.*;
+
+import javax.management.MBeanServer;
+import javax.management.ObjectName;
+
+import com.cloudius.api.APIClient;
+
+public final class MessagingService implements MessagingServiceMBean {
+ public static final String MBEAN_NAME = "org.apache.cassandra.net:type=MessagingService";
+ private static final java.util.logging.Logger logger = java.util.logging.Logger
+ .getLogger(MessagingService.class.getName());
+
+ private APIClient c = new APIClient();
+
+ private final ObjectName jmxObjectName;
+
+ public void log(String str) {
+ System.out.println(str);
+ logger.info(str);
+ }
+
+ public MessagingService() {
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ try {
+ jmxObjectName = new ObjectName(MBEAN_NAME);
+ mbs.registerMBean(this, jmxObjectName);
+ // mbs.registerMBean(StreamManager.instance, new ObjectName(
+ // StreamManager.OBJECT_NAME));
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ }
+
+ static MessagingService instance = new MessagingService();
+
+ public static MessagingService getInstance() {
+ return instance;
+ }
+
+ /**
+ * Pending tasks for Command(Mutations, Read etc) TCP Connections
+ */
+ public Map<String, Integer> getCommandPendingTasks() {
+ log(" getCommandPendingTasks()");
+ return c.getMapStringIntegerValue("/messaging_service/messages/pending");
+ }
+
+ /**
+ * Completed tasks for Command(Mutations, Read etc) TCP Connections
+ */
+ public Map<String, Long> getCommandCompletedTasks() {
+ System.out.println("getCommandCompletedTasks!");
+ Map<String, Long> res = c
+ .getListMapStringLongValue("/messaging_service/messages/sent");
+ return res;
+ }
+
+ /**
+ * Dropped tasks for Command(Mutations, Read etc) TCP Connections
+ */
+ public Map<String, Long> getCommandDroppedTasks() {
+ log(" getCommandDroppedTasks()");
+ return c.getMapStringLongValue("");
+ }
+
+ /**
+ * Pending tasks for Response(GOSSIP & RESPONSE) TCP Connections
+ */
+ public Map<String, Integer> getResponsePendingTasks() {
+ log(" getResponsePendingTasks()");
+ return c.getMapStringIntegerValue("");
+ }
+
+ /**
+ * Completed tasks for Response(GOSSIP & RESPONSE) TCP Connections
+ */
+ public Map<String, Long> getResponseCompletedTasks() {
+ log(" getResponseCompletedTasks()");
+ return c.getMapStringLongValue("");
+ }
+
+ /**
+ * dropped message counts for server lifetime
+ */
+ public Map<String, Integer> getDroppedMessages() {
+ log(" getDroppedMessages()");
+ return c.getMapStringIntegerValue("");
+ }
+
+ /**
+ * dropped message counts since last called
+ */
+ public Map<String, Integer> getRecentlyDroppedMessages() {
+ log(" getRecentlyDroppedMessages()");
+ return c.getMapStringIntegerValue("");
+ }
+
+ /**
+ * Total number of timeouts happened on this node
+ */
+ public long getTotalTimeouts() {
+ log(" getTotalTimeouts()");
+ return c.getLongValue("");
+ }
+
+ /**
+ * Number of timeouts per host
+ */
+ public Map<String, Long> getTimeoutsPerHost() {
+ log(" getTimeoutsPerHost()");
+ return c.getMapStringLongValue("");
+ }
+
+ /**
+ * Number of timeouts since last check.
+ */
+ public long getRecentTotalTimouts() {
+ log(" getRecentTotalTimouts()");
+ return c.getLongValue("");
+ }
+
+ /**
+ * Number of timeouts since last check per host.
+ */
+ public Map<String, Long> getRecentTimeoutsPerHost() {
+ log(" getRecentTimeoutsPerHost()");
+ return c.getMapStringLongValue("");
+ }
+
+ public int getVersion(String address) throws UnknownHostException {
+ log(" getVersion(String address) throws UnknownHostException");
+ return c.getIntValue("");
+ }
+
+}
diff --git a/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java b/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java
new file mode 100644
index 0000000..3fbd5c1
--- /dev/null
+++ b/src/main/java/org/apache/cassandra/net/MessagingServiceMBean.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.net;
+
+import java.net.UnknownHostException;
+import java.util.Map;
+
+/**
+ * MBean exposing MessagingService metrics. - OutboundConnectionPools -
+ * Command/Response - Pending/Completed Tasks
+ */
+public interface MessagingServiceMBean {
+ /**
+ * Pending tasks for Command(Mutations, Read etc) TCP Connections
+ */
+ public Map<String, Integer> getCommandPendingTasks();
+
+ /**
+ * Completed tasks for Command(Mutations, Read etc) TCP Connections
+ */
+ public Map<String, Long> getCommandCompletedTasks();
+
+ /**
+ * Dropped tasks for Command(Mutations, Read etc) TCP Connections
+ */
+ public Map<String, Long> getCommandDroppedTasks();
+
+ /**
+ * Pending tasks for Response(GOSSIP & RESPONSE) TCP Connections
+ */
+ public Map<String, Integer> getResponsePendingTasks();
+
+ /**
+ * Completed tasks for Response(GOSSIP & RESPONSE) TCP Connections
+ */
+ public Map<String, Long> getResponseCompletedTasks();
+
+ /**
+ * dropped message counts for server lifetime
+ */
+ public Map<String, Integer> getDroppedMessages();
+
+ /**
+ * dropped message counts since last called
+ */
+ public Map<String, Integer> getRecentlyDroppedMessages();
+
+ /**
+ * Total number of timeouts happened on this node
+ */
+ public long getTotalTimeouts();
+
+ /**
+ * Number of timeouts per host
+ */
+ public Map<String, Long> getTimeoutsPerHost();
+
+ /**
+ * Number of timeouts since last check.
+ */
+ public long getRecentTotalTimouts();
+
+ /**
+ * Number of timeouts since last check per host.
+ */
+ public Map<String, Long> getRecentTimeoutsPerHost();
+
+ public int getVersion(String address) throws UnknownHostException;
+}
diff --git a/src/main/java/org/apache/cassandra/repair/RepairParallelism.java b/src/main/java/org/apache/cassandra/repair/RepairParallelism.java
new file mode 100644
index 0000000..4b7f6ef
--- /dev/null
+++ b/src/main/java/org/apache/cassandra/repair/RepairParallelism.java
@@ -0,0 +1,27 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Copyright 2015 Cloudius Systems
+ *
+ * Modified by Cloudius Systems
+ */
+package org.apache.cassandra.repair;
+
+public class RepairParallelism {
+
+}
diff --git a/src/main/java/org/apache/cassandra/service/StorageService.java b/src/main/java/org/apache/cassandra/service/StorageService.java
new file mode 100644
index 0000000..1aa97da
--- /dev/null
+++ b/src/main/java/org/apache/cassandra/service/StorageService.java
@@ -0,0 +1,949 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Copyright 2015 Cloudius Systems
+ *
+ * Modified by Cloudius Systems
+ */
+package org.apache.cassandra.service;
+
+import java.io.*;
+import java.lang.management.ManagementFactory;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.util.*;
+import java.util.concurrent.*;
+
+import javax.management.*;
+import javax.management.openmbean.TabularData;
+
+import org.apache.cassandra.repair.RepairParallelism;
+
+import com.cloudius.api.APIClient;
+
+/**
+ * This abstraction contains the token/identifier of this node on the identifier
+ * space. This token gets gossiped around. This class will also maintain
+ * histograms of the load information of other nodes in the cluster.
+ */
+public class StorageService extends NotificationBroadcasterSupport implements
+ StorageServiceMBean {
+ private static final java.util.logging.Logger logger = java.util.logging.Logger
+ .getLogger(StorageService.class.getName());
+
+ private APIClient c = new APIClient();
+
+ public static final StorageService instance = new StorageService();
+
+ public static StorageService getInstance() {
+ return instance;
+ }
+
+ private final ObjectName jmxObjectName;
+
+ public StorageService() {
+ MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
+ try {
+ jmxObjectName = new ObjectName(
+ "org.apache.cassandra.db:type=StorageService");
+ mbs.registerMBean(this, jmxObjectName);
+ // mbs.registerMBean(StreamManager.instance, new ObjectName(
+ // StreamManager.OBJECT_NAME));
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+
+ }
+
+ public void log(String str) {
+ System.out.println(str);
+ logger.info(str);
+ }
+
+ /**
+ * Retrieve the list of live nodes in the cluster, where "liveness" is
+ * determined by the failure detector of the node being queried.
+ *
+ * @return set of IP addresses, as Strings
+ */
+ public List getLiveNodes() {
+ log(" getLiveNodes()");
+ return c.getListStrValue("");
+ }
+
+ /**
+ * Retrieve the list of unreachable nodes in the cluster, as determined by
+ * this node's failure detector.
+ *
+ * @return set of IP addresses, as Strings
+ */
+ public List getUnreachableNodes() {
+ log(" getUnreachableNodes()");
+ return c.getListStrValue("");
+ }
+
+ /**
+ * Retrieve the list of nodes currently bootstrapping into the ring.
+ *
+ * @return set of IP addresses, as Strings
+ */
+ public List getJoiningNodes() {
+ log(" getJoiningNodes()");
+ return c.getListStrValue("");
+ }
+
+ /**
+ * Retrieve the list of nodes currently leaving the ring.
+ *
+ * @return set of IP addresses, as Strings
+ */
+ public List getLeavingNodes() {
+ log(" getLeavingNodes()");
+ return c.getListStrValue("");
+ }
+
+ /**
+ * Retrieve the list of nodes currently moving in the ring.
+ *
+ * @return set of IP addresses, as Strings
+ */
+ public List getMovingNodes() {
+ log(" getMovingNodes()");
+ return c.getListStrValue("");
+ }
+
+ /**
+ * Fetch string representations of the tokens for this node.
+ *
+ * @return a collection of tokens formatted as strings
+ */
+ public List getTokens() {
+ log(" getTokens()");
+ return c.getListStrValue("");
+ }
+
+ /**
+ * Fetch string representations of the tokens for a specified node.
+ *
+ * @param endpoint
+ * string representation of a node
+ * @return a collection of tokens formatted as strings
+ */
+ public List getTokens(String endpoint) throws UnknownHostException {
+ log(" getTokens(String endpoint) throws UnknownHostException");
+ return c.getListStrValue("");
+ }
+
+ /**
+ * Fetch a string representation of the Cassandra version.
+ *
+ * @return A string representation of the Cassandra version.
+ */
+ public String getReleaseVersion() {
+ log(" getReleaseVersion()");
+ return c.getStringValue("");
+ }
+
+ /**
+ * Fetch a string representation of the current Schema version.
+ *
+ * @return A string representation of the Schema version.
+ */
+ public String getSchemaVersion() {
+ log(" getSchemaVersion()");
+ return c.getStringValue("");
+ }
+
+ /**
+ * Get the list of all data file locations from conf
+ *
+ * @return String array of all locations
+ */
+ public String[] getAllDataFileLocations() {
+ log(" getAllDataFileLocations()");
+ return c.getStringArrValue("");
+ }
+
+ /**
+ * Get location of the commit log
+ *
+ * @return a string path
+ */
+ public String getCommitLogLocation() {
+ log(" getCommitLogLocation()");
+ return c.getStringValue("");
+ }
+
+ /**
+ * Get location of the saved caches dir
+ *
+ * @return a string path
+ */
+ public String getSavedCachesLocation() {
+ log(" getSavedCachesLocation()");
+ return c.getStringValue("");
+ }
+
+ /**
+ * Retrieve a map of range to end points that describe the ring topology of
+ * a Cassandra cluster.
+ *
+ * @return mapping of ranges to end points
+ */
+ public Map<List<String>, List<String>> getRangeToEndpointMap(String keyspace) {
+ log(" getRangeToEndpointMap(String keyspace)");
+ return c.getMapListStrValue("");
+ }
+
+ /**
+ * Retrieve a map of range to rpc addresses that describe the ring topology
+ * of a Cassandra cluster.
+ *
+ * @return mapping of ranges to rpc addresses
+ */
+ public Map<List<String>, List<String>> getRangeToRpcaddressMap(
+ String keyspace) {
+ log(" getRangeToRpcaddressMap(String keyspace)");
+ return c.getMapListStrValue("");
+ }
+
+ /**
+ * The same as {@code describeRing(String)} but converts TokenRange to the
+ * String for JMX compatibility
+ *
+ * @param keyspace
+ * The keyspace to fetch information about
+ *
+ * @return a List of TokenRange(s) converted to String for the given
+ * keyspace
+ */
+ public List describeRingJMX(String keyspace) throws IOException {
+ log(" describeRingJMX(String keyspace) throws IOException");
+ return c.getListStrValue("");
+ }
+
+ /**
+ * Retrieve a map of pending ranges to endpoints that describe the ring
+ * topology
+ *
+ * @param keyspace
+ * the keyspace to get the pending range map for.
+ * @return a map of pending ranges to endpoints
+ */
+ public Map<List<String>, List<String>> getPendingRangeToEndpointMap(
+ String keyspace) {
+ log(" getPendingRangeToEndpointMap(String keyspace)");
+ return c.getMapListStrValue("");
+ }
+
+ /**
+ * Retrieve a map of tokens to endpoints, including the bootstrapping ones.
+ *
+ * @return a map of tokens to endpoints in ascending order
+ */
+ public Map getTokenToEndpointMap() {
+ log(" getTokenToEndpointMap()");
+ return c.getMapStrValue("");
+ }
+
+ /** Retrieve this hosts unique ID */
+ public String getLocalHostId() {
+ log(" getLocalHostId()");
+ return c.getStringValue("");
+ }
+
+ /** Retrieve the mapping of endpoint to host ID */
+ public Map getHostIdMap() {
+ log(" getHostIdMap()");
+ return c.getMapStrValue("");
+ }
+
+ /**
+ * Numeric load value.
+ *
+ * @see org.apache.cassandra.metrics.StorageMetrics#load
+ */
+ @Deprecated
+ public double getLoad() {
+ log(" getLoad()");
+ return c.getDoubleValue("");
+ }
+
+ /** Human-readable load value */
+ public String getLoadString() {
+ log(" getLoadString()");
+ return c.getStringValue("");
+ }
+
+ /** Human-readable load value. Keys are IP addresses. */
+ public Map getLoadMap() {
+ log(" getLoadMap()");
+ return c.getMapStrValue("");
+ }
+
+ /**
+ * Return the generation value for this node.
+ *
+ * @return generation number
+ */
+ public int getCurrentGenerationNumber() {
+ log(" getCurrentGenerationNumber()");
+ return c.getIntValue("");
+ }
+
+ /**
+ * This method returns the N endpoints that are responsible for storing the
+ * specified key i.e for replication.
+ *
+ * @param keyspaceName
+ * keyspace name
+ * @param cf
+ * Column family name
+ * @param key
+ * - key for which we need to find the endpoint return value -
+ * the endpoint responsible for this key
+ */
+ public List getNaturalEndpoints(String keyspaceName,
+ String cf, String key) {
+ log(" getNaturalEndpoints(String keyspaceName, String cf, String key)");
+ return c.getListInetAddressValue("");
+ }
+
+ public List getNaturalEndpoints(String keyspaceName,
+ ByteBuffer key) {
+ log(" getNaturalEndpoints(String keyspaceName, ByteBuffer key)");
+ return c.getListInetAddressValue("");
+ }
+
+ /**
+ * Takes the snapshot for the given keyspaces. A snapshot name must be
+ * specified.
+ *
+ * @param tag
+ * the tag given to the snapshot; may not be null or empty
+ * @param keyspaceNames
+ * the name of the keyspaces to snapshot; empty means "all."
+ */
+ public void takeSnapshot(String tag, String... keyspaceNames)
+ throws IOException {
+ log(" takeSnapshot(String tag, String... keyspaceNames) throws IOException");
+ }
+
+ /**
+ * Takes the snapshot of a specific column family. A snapshot name must be
+ * specified.
+ *
+ * @param keyspaceName
+ * the keyspace which holds the specified column family
+ * @param columnFamilyName
+ * the column family to snapshot
+ * @param tag
+ * the tag given to the snapshot; may not be null or empty
+ */
+ public void takeColumnFamilySnapshot(String keyspaceName,
+ String columnFamilyName, String tag) throws IOException {
+ log(" takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException");
+ }
+
+ /**
+ * Remove the snapshot with the given name from the given keyspaces. If no
+ * tag is specified we will remove all snapshots.
+ */
+ public void clearSnapshot(String tag, String... keyspaceNames)
+ throws IOException {
+ log(" clearSnapshot(String tag, String... keyspaceNames) throws IOException");
+ }
+
+ /**
+ * Get the details of all the snapshots
+ *
+ * @return A map of snapshotName to all its details in Tabular form.
+ */
+ public Map getSnapshotDetails() {
+ log(" getSnapshotDetails()");
+ return c.getMapStringTabularDataValue("");
+ }
+
+ /**
+ * Get the true size taken by all snapshots across all keyspaces.
+ *
+ * @return True size taken by all the snapshots.
+ */
+ public long trueSnapshotsSize() {
+ log(" trueSnapshotsSize()");
+ return c.getLongValue("");
+ }
+
+ /**
+ * Forces major compaction of a single keyspace
+ */
+ public void forceKeyspaceCompaction(String keyspaceName,
+ String... columnFamilies) throws IOException, ExecutionException,
+ InterruptedException {
+ log(" forceKeyspaceCompaction(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
+ }
+
+ /**
+ * Trigger a cleanup of keys on a single keyspace
+ */
+ public int forceKeyspaceCleanup(String keyspaceName,
+ String... columnFamilies) throws IOException, ExecutionException,
+ InterruptedException {
+ log(" forceKeyspaceCleanup(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
+ return c.getIntValue("");
+ }
+
+ /**
+ * Scrub (deserialize + reserialize at the latest version, skipping bad rows
+ * if any) the given keyspace. If columnFamilies array is empty, all CFs are
+ * scrubbed.
+ *
+ * Scrubbed CFs will be snapshotted first, if disableSnapshot is false
+ */
+ public int scrub(boolean disableSnapshot, boolean skipCorrupted,
+ String keyspaceName, String... columnFamilies) throws IOException,
+ ExecutionException, InterruptedException {
+ log(" scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
+ return c.getIntValue("");
+ }
+
+ /**
+ * Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip
+ * bad rows and does not snapshot sstables first.
+ */
+ public int upgradeSSTables(String keyspaceName,
+ boolean excludeCurrentVersion, String... columnFamilies)
+ throws IOException, ExecutionException, InterruptedException {
+ log(" upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
+ return c.getIntValue("");
+ }
+
+ /**
+ * Flush all memtables for the given column families, or all columnfamilies
+ * for the given keyspace if none are explicitly listed.
+ *
+ * @param keyspaceName
+ * @param columnFamilies
+ * @throws IOException
+ */
+ public void forceKeyspaceFlush(String keyspaceName,
+ String... columnFamilies) throws IOException, ExecutionException,
+ InterruptedException {
+ log(" forceKeyspaceFlush(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
+ }
+
+ /**
+ * Invoke repair asynchronously. You can track repair progress by
+ * subscribing JMX notification sent from this StorageServiceMBean.
+ * Notification format is: type: "repair" userObject: int array of length 2,
+ * [0]=command number, [1]=ordinal of AntiEntropyService.Status
+ *
+ * @param keyspace
+ * Keyspace name to repair. Should not be null.
+ * @param options
+ * repair option.
+ * @return Repair command number, or 0 if nothing to repair
+ */
+ public int repairAsync(String keyspace, Map options) {
+ log(" repairAsync(String keyspace, Map options)");
+ return c.getIntValue("");
+ }
+
+ @Deprecated
+ public int forceRepairAsync(String keyspace, boolean isSequential,
+ Collection dataCenters, Collection hosts,
+ boolean primaryRange, boolean repairedAt, String... columnFamilies)
+ throws IOException {
+ log(" forceRepairAsync(String keyspace, boolean isSequential, Collection dataCenters, Collection hosts, boolean primaryRange, boolean repairedAt, String... columnFamilies) throws IOException");
+ return c.getIntValue("");
+ }
+
+ @Deprecated
+ public int forceRepairAsync(String keyspace,
+ RepairParallelism parallelismDegree,
+ Collection dataCenters, Collection hosts,
+ boolean primaryRange, boolean fullRepair, String... columnFamilies) {
+ log(" forceRepairAsync(String keyspace, RepairParallelism parallelismDegree, Collection dataCenters, Collection hosts, boolean primaryRange, boolean fullRepair, String... columnFamilies)");
+ return c.getIntValue("");
+ }
+
+ @Deprecated
+ public int forceRepairRangeAsync(String beginToken, String endToken,
+ String keyspaceName, boolean isSequential,
+ Collection dataCenters, Collection hosts,
+ boolean repairedAt, String... columnFamilies) throws IOException {
+ log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection dataCenters, Collection hosts, boolean repairedAt, String... columnFamilies) throws IOException");
+ return c.getIntValue("");
+ }
+
+ @Deprecated
+ public int forceRepairRangeAsync(String beginToken, String endToken,
+ String keyspaceName, RepairParallelism parallelismDegree,
+ Collection dataCenters, Collection hosts,
+ boolean fullRepair, String... columnFamilies) {
+ log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, RepairParallelism parallelismDegree, Collection dataCenters, Collection hosts, boolean fullRepair, String... columnFamilies)");
+ return c.getIntValue("");
+ }
+
+ @Deprecated
+ public int forceRepairAsync(String keyspace, boolean isSequential,
+ boolean isLocal, boolean primaryRange, boolean fullRepair,
+ String... columnFamilies) {
+ log(" forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, String... columnFamilies)");
+ return c.getIntValue("");
+ }
+
+ @Deprecated
+ public int forceRepairRangeAsync(String beginToken, String endToken,
+ String keyspaceName, boolean isSequential, boolean isLocal,
+ boolean repairedAt, String... columnFamilies) {
+ log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, boolean isLocal, boolean repairedAt, String... columnFamilies)");
+ return c.getIntValue("");
+ }
+
+ public void forceTerminateAllRepairSessions() {
+ log(" forceTerminateAllRepairSessions()");
+ }
+
+ /**
+ * transfer this node's data to other machines and remove it from service.
+ */
+ public void decommission() throws InterruptedException {
+ log(" decommission() throws InterruptedException");
+ }
+
+ /**
+ * @param newToken
+ * token to move this node to. This node will unload its data
+ * onto its neighbors, and bootstrap to the new token.
+ */
+ public void move(String newToken) throws IOException {
+ log(" move(String newToken) throws IOException");
+ }
+
+ /**
+ * removeToken removes token (and all data associated with the endpoint that had
+ * it) from the ring
+ */
+ public void removeNode(String token) {
+ log(" removeNode(String token)");
+ }
+
+ /**
+ * Get the status of a token removal.
+ */
+ public String getRemovalStatus() {
+ log(" getRemovalStatus()");
+ return c.getStringValue("");
+ }
+
+ /**
+ * Force a remove operation to finish.
+ */
+ public void forceRemoveCompletion() {
+ log(" forceRemoveCompletion()");
+ }
+
+ /**
+ * set the logging level at runtime
+ *
+ * If both classQualifer and level are empty/null, it will reload the
+ * configuration to reset.
+ * If classQualifer is not empty but level is empty/null, it will set the
+ * level to null for the defined classQualifer
+ * If level cannot be parsed, then the level will be defaulted to DEBUG
+ *
+ * The logback configuration should have < jmxConfigurator /> set
+ *
+ * @param classQualifier
+ * The logger's classQualifer
+ * @param level
+ * The log level
+ * @throws Exception
+ *
+ * @see ch.qos.logback.classic.Level#toLevel(String)
+ */
+ public void setLoggingLevel(String classQualifier, String level)
+ throws Exception {
+ log(" setLoggingLevel(String classQualifier, String level) throws Exception");
+ }
+
+ /** get the runtime logging levels */
+ public Map getLoggingLevels() {
+ log(" getLoggingLevels()");
+ return c.getMapStrValue("");
+ }
+
+ /**
+ * get the operational mode (leaving, joining, normal, decommissioned,
+ * client)
+ **/
+ public String getOperationMode() {
+ log(" getOperationMode()");
+ return c.getStringValue("");
+ }
+
+ /** Returns whether the storage service is starting or not */
+ public boolean isStarting() {
+ log(" isStarting()");
+ return c.getBooleanValue("");
+ }
+
+ /** get the progress of a drain operation */
+ public String getDrainProgress() {
+ log(" getDrainProgress()");
+ return c.getStringValue("");
+ }
+
+ /**
+ * makes node unavailable for writes, flushes memtables and replays
+ * commitlog.
+ */
+ public void drain() throws IOException, InterruptedException,
+ ExecutionException {
+ log(" drain() throws IOException, InterruptedException, ExecutionException");
+ }
+
+ /**
+ * Truncates (deletes) the given columnFamily from the provided keyspace.
+ * Calling truncate results in actual deletion of all data in the cluster
+ * under the given columnFamily and it will fail unless all hosts are up.
+ * All data in the given column family will be deleted, but its definition
+ * will not be affected.
+ *
+ * @param keyspace
+ * The keyspace to delete from
+ * @param columnFamily
+ * The column family to delete data from.
+ */
+ public void truncate(String keyspace, String columnFamily)
+ throws TimeoutException, IOException {
+ log(" truncate(String keyspace, String columnFamily)throws TimeoutException, IOException");
+ }
+
+ /**
+ * given a list of tokens (representing the nodes in the cluster), returns a
+ * mapping from "token -> %age of cluster owned by that token"
+ */
+ public Map getOwnership() {
+ log(" getOwnership()");
+ return c.getMapInetAddressFloatValue("");
+ }
+
+ /**
+ * Effective ownership is % of the data each node owns given the keyspace we
+ * calculate the percentage using replication factor. If Keyspace == null,
+ * this method will try to verify if all the keyspaces in the cluster have
+ * the same replication strategies; if so, the first one is used, otherwise
+ * an empty Map is returned.
+ */
+ public Map effectiveOwnership(String keyspace)
+ throws IllegalStateException {
+ log(" effectiveOwnership(String keyspace) throws IllegalStateException");
+ return c.getMapInetAddressFloatValue("");
+ }
+
+ public List getKeyspaces() {
+ log(" getKeyspaces()");
+ return c.getListStrValue("");
+ }
+
+ public List getNonSystemKeyspaces() {
+ log(" getNonSystemKeyspaces()");
+ return c.getListStrValue("");
+ }
+
+ /**
+ * Change endpointsnitch class and dynamic-ness (and dynamic attributes) at
+ * runtime
+ *
+ * @param epSnitchClassName
+ * the canonical path name for a class implementing
+ * IEndpointSnitch
+ * @param dynamic
+ * boolean that decides whether dynamicsnitch is used or not
+ * @param dynamicUpdateInterval
+ * integer, in ms (default 100)
+ * @param dynamicResetInterval
+ * integer, in ms (default 600,000)
+ * @param dynamicBadnessThreshold
+ * double, (default 0.0)
+ */
+ public void updateSnitch(String epSnitchClassName, Boolean dynamic,
+ Integer dynamicUpdateInterval, Integer dynamicResetInterval,
+ Double dynamicBadnessThreshold) throws ClassNotFoundException {
+ log(" updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException");
+ }
+
+ // allows a user to forcibly 'kill' a sick node
+ public void stopGossiping() {
+ log(" stopGossiping()");
+ }
+
+ // allows a user to recover a forcibly 'killed' node
+ public void startGossiping() {
+ log(" startGossiping()");
+ }
+
+ // allows a user to see whether gossip is running or not
+ public boolean isGossipRunning() {
+ log(" isGossipRunning()");
+ return c.getBooleanValue("");
+ }
+
+ // allows a user to forcibly completely stop cassandra
+ public void stopDaemon() {
+ log(" stopDaemon()");
+ }
+
+ // to determine if gossip is disabled
+ public boolean isInitialized() {
+ log(" isInitialized()");
+ return c.getBooleanValue("");
+ }
+
+ // allows a user to disable thrift
+ public void stopRPCServer() {
+ log(" stopRPCServer()");
+ }
+
+ // allows a user to reenable thrift
+ public void startRPCServer() {
+ log(" startRPCServer()");
+ }
+
+ // to determine if thrift is running
+ public boolean isRPCServerRunning() {
+ log(" isRPCServerRunning()");
+ return c.getBooleanValue("");
+ }
+
+ public void stopNativeTransport() {
+ log(" stopNativeTransport()");
+ }
+
+ public void startNativeTransport() {
+ log(" startNativeTransport()");
+ }
+
+ public boolean isNativeTransportRunning() {
+ log(" isNativeTransportRunning()");
+ return c.getBooleanValue("");
+ }
+
+ // allows a node that has been started without joining the ring to join it
+ public void joinRing() throws IOException {
+ log(" joinRing() throws IOException");
+ }
+
+ public boolean isJoined() {
+ log(" isJoined()");
+ return c.getBooleanValue("");
+ }
+
+ @Deprecated
+ public int getExceptionCount() {
+ log(" getExceptionCount()");
+ return c.getIntValue("");
+ }
+
+ public void setStreamThroughputMbPerSec(int value) {
+ log(" setStreamThroughputMbPerSec(int value)");
+ }
+
+ public int getStreamThroughputMbPerSec() {
+ log(" getStreamThroughputMbPerSec()");
+ return c.getIntValue("");
+ }
+
+ public int getCompactionThroughputMbPerSec() {
+ log(" getCompactionThroughputMbPerSec()");
+ return c.getIntValue("");
+ }
+
+ public void setCompactionThroughputMbPerSec(int value) {
+ log(" setCompactionThroughputMbPerSec(int value)");
+ }
+
+ public boolean isIncrementalBackupsEnabled() {
+ log(" isIncrementalBackupsEnabled()");
+ return c.getBooleanValue("");
+ }
+
+ public void setIncrementalBackupsEnabled(boolean value) {
+ log(" setIncrementalBackupsEnabled(boolean value)");
+ }
+
+ /**
+ * Initiate a process of streaming data for which we are responsible from
+ * other nodes. It is similar to bootstrap except meant to be used on a node
+ * which is already in the cluster (typically containing no data) as an
+ * alternative to running repair.
+ *
+ * @param sourceDc
+ * Name of DC from which to select sources for streaming or null
+ * to pick any node
+ */
+ public void rebuild(String sourceDc) {
+ log(" rebuild(String sourceDc)");
+ }
+
+ /** Starts a bulk load and blocks until it completes. */
+ public void bulkLoad(String directory) {
+ log(" bulkLoad(String directory)");
+ }
+
+ /**
+ * Starts a bulk load asynchronously and returns the String representation
+ * of the planID for the new streaming session.
+ */
+ public String bulkLoadAsync(String directory) {
+ log(" bulkLoadAsync(String directory)");
+ return c.getStringValue("");
+ }
+
+ public void rescheduleFailedDeletions() {
+ log(" rescheduleFailedDeletions()");
+ }
+
+ /**
+ * Load new SSTables to the given keyspace/columnFamily
+ *
+ * @param ksName
+ * The parent keyspace name
+ * @param cfName
+ * The ColumnFamily name where SSTables belong
+ */
+ public void loadNewSSTables(String ksName, String cfName) {
+ log(" loadNewSSTables(String ksName, String cfName)");
+ }
+
+ /**
+ * Return a List of Tokens representing a sample of keys across all
+ * ColumnFamilyStores.
+ *
+ * Note: this should be left as an operation, not an attribute (methods
+ * starting with "get") to avoid sending potentially multiple MB of data
+ * when accessing this mbean by default. See CASSANDRA-4452.
+ *
+ * @return set of Tokens as Strings
+ */
+ public List sampleKeyRange() {
+ log(" sampleKeyRange()");
+ return c.getListStrValue("");
+ }
+
+ /**
+ * rebuild the specified indexes
+ */
+ public void rebuildSecondaryIndex(String ksName, String cfName,
+ String... idxNames) {
+ log(" rebuildSecondaryIndex(String ksName, String cfName, String... idxNames)");
+ }
+
+ public void resetLocalSchema() throws IOException {
+ log(" resetLocalSchema() throws IOException");
+ }
+
+ /**
+ * Enables/Disables tracing for the whole system. Only thrift requests can
+ * start tracing currently.
+ *
+ * @param probability
+ * ]0,1[ will enable tracing on a partial number of requests with
+ * the provided probability. 0 will disable tracing and 1 will
+ * enable tracing for all requests (which might severely cripple
+ * the system)
+ */
+ public void setTraceProbability(double probability) {
+ log(" setTraceProbability(double probability)");
+ }
+
+ /**
+ * Returns the configured tracing probability.
+ */
+ public double getTraceProbability() {
+ log(" getTraceProbability()");
+ return c.getDoubleValue("");
+ }
+
+ public void disableAutoCompaction(String ks, String... columnFamilies)
+ throws IOException {
+
+ }
+
+ public void enableAutoCompaction(String ks, String... columnFamilies)
+ throws IOException {
+
+ }
+
+ public void deliverHints(String host) throws UnknownHostException {
+ log(" deliverHints(String host) throws UnknownHostException");
+ }
+
+ /** Returns the name of the cluster */
+ public String getClusterName() {
+ log(" getClusterName()");
+ return c.getStringValue("");
+ }
+
+ /** Returns the cluster partitioner */
+ public String getPartitionerName() {
+ log(" getPartitionerName()");
+ return c.getStringValue("");
+ }
+
+ /** Returns the threshold for warning of queries with many tombstones */
+ public int getTombstoneWarnThreshold() {
+ log(" getTombstoneWarnThreshold()");
+ return c.getIntValue("");
+ }
+
+ /** Sets the threshold for warning queries with many tombstones */
+ public void setTombstoneWarnThreshold(int tombstoneDebugThreshold) {
+ log(" setTombstoneWarnThreshold(int tombstoneDebugThreshold)");
+ }
+
+ /** Returns the threshold for abandoning queries with many tombstones */
+ public int getTombstoneFailureThreshold() {
+ log(" getTombstoneFailureThreshold()");
+ return c.getIntValue("");
+ }
+
+ /** Sets the threshold for abandoning queries with many tombstones */
+ public void setTombstoneFailureThreshold(int tombstoneDebugThreshold) {
+ log(" setTombstoneFailureThreshold(int tombstoneDebugThreshold)");
+ }
+
+ /** Returns the threshold for rejecting queries due to a large batch size */
+ public int getBatchSizeFailureThreshold() {
+ log(" getBatchSizeFailureThreshold()");
+ return c.getIntValue("");
+ }
+
+ /** Sets the threshold for rejecting queries due to a large batch size */
+ public void setBatchSizeFailureThreshold(int batchSizeDebugThreshold) {
+ log(" setBatchSizeFailureThreshold(int batchSizeDebugThreshold)");
+ }
+
+ /** Sets the hinted handoff throttle in kb per second, per delivery thread. */
+ public void setHintedHandoffThrottleInKB(int throttleInKB) {
+ log(" setHintedHandoffThrottleInKB(int throttleInKB)");
+ }
+}
diff --git a/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java b/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java
new file mode 100644
index 0000000..07bdc0e
--- /dev/null
+++ b/src/main/java/org/apache/cassandra/service/StorageServiceMBean.java
@@ -0,0 +1,639 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.service;
+
+import org.apache.cassandra.repair.RepairParallelism;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+
+import javax.management.NotificationEmitter;
+import javax.management.openmbean.TabularData;
+
+public interface StorageServiceMBean extends NotificationEmitter {
+ /**
+ * Retrieve the list of live nodes in the cluster, where "liveness" is
+ * determined by the failure detector of the node being queried.
+ *
+ * @return set of IP addresses, as Strings
+ */
+ public List getLiveNodes();
+
+ /**
+ * Retrieve the list of unreachable nodes in the cluster, as determined by
+ * this node's failure detector.
+ *
+ * @return set of IP addresses, as Strings
+ */
+ public List getUnreachableNodes();
+
+ /**
+ * Retrieve the list of nodes currently bootstrapping into the ring.
+ *
+ * @return set of IP addresses, as Strings
+ */
+ public List getJoiningNodes();
+
+ /**
+ * Retrieve the list of nodes currently leaving the ring.
+ *
+ * @return set of IP addresses, as Strings
+ */
+ public List getLeavingNodes();
+
+ /**
+ * Retrieve the list of nodes currently moving in the ring.
+ *
+ * @return set of IP addresses, as Strings
+ */
+ public List getMovingNodes();
+
+ /**
+ * Fetch string representations of the tokens for this node.
+ *
+ * @return a collection of tokens formatted as strings
+ */
+ public List getTokens();
+
+ /**
+ * Fetch string representations of the tokens for a specified node.
+ *
+ * @param endpoint
+ * string representation of a node
+ * @return a collection of tokens formatted as strings
+ */
+ public List getTokens(String endpoint) throws UnknownHostException;
+
+ /**
+ * Fetch a string representation of the Cassandra version.
+ *
+ * @return A string representation of the Cassandra version.
+ */
+ public String getReleaseVersion();
+
+ /**
+ * Fetch a string representation of the current Schema version.
+ *
+ * @return A string representation of the Schema version.
+ */
+ public String getSchemaVersion();
+
+ /**
+ * Get the list of all data file locations from conf
+ *
+ * @return String array of all locations
+ */
+ public String[] getAllDataFileLocations();
+
+ /**
+ * Get location of the commit log
+ *
+ * @return a string path
+ */
+ public String getCommitLogLocation();
+
+ /**
+ * Get location of the saved caches dir
+ *
+ * @return a string path
+ */
+ public String getSavedCachesLocation();
+
+ /**
+ * Retrieve a map of range to end points that describe the ring topology of
+ * a Cassandra cluster.
+ *
+ * @return mapping of ranges to end points
+ */
+ public Map<List<String>, List<String>> getRangeToEndpointMap(String keyspace);
+
+ /**
+ * Retrieve a map of range to rpc addresses that describe the ring topology
+ * of a Cassandra cluster.
+ *
+ * @return mapping of ranges to rpc addresses
+ */
+ public Map<List<String>, List<String>> getRangeToRpcaddressMap(
+ String keyspace);
+
+ /**
+ * The same as {@code describeRing(String)} but converts TokenRange to the
+ * String for JMX compatibility
+ *
+ * @param keyspace
+ * The keyspace to fetch information about
+ *
+ * @return a List of TokenRange(s) converted to String for the given
+ * keyspace
+ */
+ public List describeRingJMX(String keyspace) throws IOException;
+
+ /**
+ * Retrieve a map of pending ranges to endpoints that describe the ring
+ * topology
+ *
+ * @param keyspace
+ * the keyspace to get the pending range map for.
+ * @return a map of pending ranges to endpoints
+ */
+ public Map<List<String>, List<String>> getPendingRangeToEndpointMap(
+ String keyspace);
+
+ /**
+ * Retrieve a map of tokens to endpoints, including the bootstrapping ones.
+ *
+ * @return a map of tokens to endpoints in ascending order
+ */
+ public Map getTokenToEndpointMap();
+
+ /** Retrieve this hosts unique ID */
+ public String getLocalHostId();
+
+ /** Retrieve the mapping of endpoint to host ID */
+ public Map getHostIdMap();
+
+ /**
+ * Numeric load value.
+ *
+ * @see org.apache.cassandra.metrics.StorageMetrics#load
+ */
+ @Deprecated
+ public double getLoad();
+
+ /** Human-readable load value */
+ public String getLoadString();
+
+ /** Human-readable load value. Keys are IP addresses. */
+ public Map getLoadMap();
+
+ /**
+ * Return the generation value for this node.
+ *
+ * @return generation number
+ */
+ public int getCurrentGenerationNumber();
+
+ /**
+ * This method returns the N endpoints that are responsible for storing the
+ * specified key i.e for replication.
+ *
+ * @param keyspaceName
+ * keyspace name
+ * @param cf
+ * Column family name
+ * @param key
+ * - key for which we need to find the endpoint return value -
+ * the endpoint responsible for this key
+ */
+ public List getNaturalEndpoints(String keyspaceName,
+ String cf, String key);
+
+ public List getNaturalEndpoints(String keyspaceName,
+ ByteBuffer key);
+
+ /**
+ * Takes the snapshot for the given keyspaces. A snapshot name must be
+ * specified.
+ *
+ * @param tag
+ * the tag given to the snapshot; may not be null or empty
+ * @param keyspaceNames
+ * the name of the keyspaces to snapshot; empty means "all."
+ */
+ public void takeSnapshot(String tag, String... keyspaceNames)
+ throws IOException;
+
+ /**
+ * Takes the snapshot of a specific column family. A snapshot name must be
+ * specified.
+ *
+ * @param keyspaceName
+ * the keyspace which holds the specified column family
+ * @param columnFamilyName
+ * the column family to snapshot
+ * @param tag
+ * the tag given to the snapshot; may not be null or empty
+ */
+ public void takeColumnFamilySnapshot(String keyspaceName,
+ String columnFamilyName, String tag) throws IOException;
+
+ /**
+ * Remove the snapshot with the given name from the given keyspaces. If no
+ * tag is specified we will remove all snapshots.
+ */
+ public void clearSnapshot(String tag, String... keyspaceNames)
+ throws IOException;
+
+ /**
+ * Get the details of all the snapshots
+ *
+ * @return A map of snapshotName to all its details in Tabular form.
+ */
+ public Map getSnapshotDetails();
+
+ /**
+ * Get the true size taken by all snapshots across all keyspaces.
+ *
+ * @return True size taken by all the snapshots.
+ */
+ public long trueSnapshotsSize();
+
+ /**
+ * Forces major compaction of a single keyspace
+ */
+ public void forceKeyspaceCompaction(String keyspaceName,
+ String... columnFamilies) throws IOException, ExecutionException,
+ InterruptedException;
+
+ /**
+ * Trigger a cleanup of keys on a single keyspace
+ */
+ public int forceKeyspaceCleanup(String keyspaceName,
+ String... columnFamilies) throws IOException, ExecutionException,
+ InterruptedException;
+
+ /**
+ * Scrub (deserialize + reserialize at the latest version, skipping bad rows
+ * if any) the given keyspace. If columnFamilies array is empty, all CFs are
+ * scrubbed.
+ *
+ * Scrubbed CFs will be snapshotted first, if disableSnapshot is false
+ */
+ public int scrub(boolean disableSnapshot, boolean skipCorrupted,
+ String keyspaceName, String... columnFamilies) throws IOException,
+ ExecutionException, InterruptedException;
+
+ /**
+ * Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip
+ * bad rows and does not snapshot sstables first.
+ */
+ public int upgradeSSTables(String keyspaceName,
+ boolean excludeCurrentVersion, String... columnFamilies)
+ throws IOException, ExecutionException, InterruptedException;
+
+ /**
+ * Flush all memtables for the given column families, or all columnfamilies
+ * for the given keyspace if none are explicitly listed.
+ *
+ * @param keyspaceName
+ * @param columnFamilies
+ * @throws IOException
+ */
+ public void forceKeyspaceFlush(String keyspaceName,
+ String... columnFamilies) throws IOException, ExecutionException,
+ InterruptedException;
+
+ /**
+ * Invoke repair asynchronously. You can track repair progress by
+ * subscribing JMX notification sent from this StorageServiceMBean.
+ * Notification format is: type: "repair" userObject: int array of length 2,
+ * [0]=command number, [1]=ordinal of AntiEntropyService.Status
+ *
+ * @param keyspace
+ * Keyspace name to repair. Should not be null.
+ * @param options
+ * repair option.
+ * @return Repair command number, or 0 if nothing to repair
+ */
+ public int repairAsync(String keyspace, Map options);
+
+ @Deprecated
+ public int forceRepairAsync(String keyspace, boolean isSequential,
+ Collection dataCenters, Collection hosts,
+ boolean primaryRange, boolean repairedAt, String... columnFamilies)
+ throws IOException;
+
+ @Deprecated
+ public int forceRepairAsync(String keyspace,
+ RepairParallelism parallelismDegree,
+ Collection dataCenters, Collection hosts,
+ boolean primaryRange, boolean fullRepair, String... columnFamilies);
+
+ @Deprecated
+ public int forceRepairRangeAsync(String beginToken, String endToken,
+ String keyspaceName, boolean isSequential,
+ Collection dataCenters, Collection hosts,
+ boolean repairedAt, String... columnFamilies) throws IOException;
+
+ @Deprecated
+ public int forceRepairRangeAsync(String beginToken, String endToken,
+ String keyspaceName, RepairParallelism parallelismDegree,
+ Collection dataCenters, Collection hosts,
+ boolean fullRepair, String... columnFamilies);
+
+ @Deprecated
+ public int forceRepairAsync(String keyspace, boolean isSequential,
+ boolean isLocal, boolean primaryRange, boolean fullRepair,
+ String... columnFamilies);
+
+ @Deprecated
+ public int forceRepairRangeAsync(String beginToken, String endToken,
+ String keyspaceName, boolean isSequential, boolean isLocal,
+ boolean repairedAt, String... columnFamilies);
+
+ public void forceTerminateAllRepairSessions();
+
+ /**
+ * transfer this node's data to other machines and remove it from service.
+ */
+ public void decommission() throws InterruptedException;
+
+ /**
+ * @param newToken
+ * token to move this node to. This node will unload its data
+ * onto its neighbors, and bootstrap to the new token.
+ */
+ public void move(String newToken) throws IOException;
+
+ /**
+ * removeToken removes token (and all data associated with the endpoint that had
+ * it) from the ring
+ */
+ public void removeNode(String token);
+
+ /**
+ * Get the status of a token removal.
+ */
+ public String getRemovalStatus();
+
+ /**
+ * Force a remove operation to finish.
+ */
+ public void forceRemoveCompletion();
+
+ /**
+ * set the logging level at runtime
+ *
+ * If both classQualifer and level are empty/null, it will reload the
+ * configuration to reset.
+ * If classQualifer is not empty but level is empty/null, it will set the
+ * level to null for the defined classQualifer
+ * If level cannot be parsed, then the level will be defaulted to DEBUG
+ *
+ * The logback configuration should have < jmxConfigurator /> set
+ *
+ * @param classQualifier
+ * The logger's classQualifer
+ * @param level
+ * The log level
+ * @throws Exception
+ *
+ * @see ch.qos.logback.classic.Level#toLevel(String)
+ */
+ public void setLoggingLevel(String classQualifier, String level)
+ throws Exception;
+
+ /** get the runtime logging levels */
+ public Map getLoggingLevels();
+
+ /**
+ * get the operational mode (leaving, joining, normal, decommissioned,
+ * client)
+ **/
+ public String getOperationMode();
+
+ /** Returns whether the storage service is starting or not */
+ public boolean isStarting();
+
+ /** get the progress of a drain operation */
+ public String getDrainProgress();
+
+ /**
+ * makes node unavailable for writes, flushes memtables and replays
+ * commitlog.
+ */
+ public void drain() throws IOException, InterruptedException,
+ ExecutionException;
+
+ /**
+ * Truncates (deletes) the given columnFamily from the provided keyspace.
+ * Calling truncate results in actual deletion of all data in the cluster
+ * under the given columnFamily and it will fail unless all hosts are up.
+ * All data in the given column family will be deleted, but its definition
+ * will not be affected.
+ *
+ * @param keyspace
+ * The keyspace to delete from
+ * @param columnFamily
+ * The column family to delete data from.
+ */
+ public void truncate(String keyspace, String columnFamily)
+ throws TimeoutException, IOException;
+
+ /**
+ * given a list of tokens (representing the nodes in the cluster), returns a
+ * mapping from "token -> %age of cluster owned by that token"
+ */
+ public Map getOwnership();
+
+ /**
+ * Effective ownership is % of the data each node owns given the keyspace we
+ * calculate the percentage using replication factor. If Keyspace == null,
+ * this method will try to verify if all the keyspaces in the cluster have
+ * the same replication strategies; if so, the first one is used, otherwise
+ * an empty Map is returned.
+ */
+ public Map effectiveOwnership(String keyspace)
+ throws IllegalStateException;
+
+ public List getKeyspaces();
+
+ public List getNonSystemKeyspaces();
+
+ /**
+ * Change endpointsnitch class and dynamic-ness (and dynamic attributes) at
+ * runtime
+ *
+ * @param epSnitchClassName
+ * the canonical path name for a class implementing
+ * IEndpointSnitch
+ * @param dynamic
+ * boolean that decides whether dynamicsnitch is used or not
+ * @param dynamicUpdateInterval
+ * integer, in ms (default 100)
+ * @param dynamicResetInterval
+ * integer, in ms (default 600,000)
+ * @param dynamicBadnessThreshold
+ * double, (default 0.0)
+ */
+ public void updateSnitch(String epSnitchClassName, Boolean dynamic,
+ Integer dynamicUpdateInterval, Integer dynamicResetInterval,
+ Double dynamicBadnessThreshold) throws ClassNotFoundException;
+
+ // allows a user to forcibly 'kill' a sick node
+ public void stopGossiping();
+
+ // allows a user to recover a forcibly 'killed' node
+ public void startGossiping();
+
+ // allows a user to see whether gossip is running or not
+ public boolean isGossipRunning();
+
+ // allows a user to forcibly completely stop cassandra
+ public void stopDaemon();
+
+ // to determine if gossip is disabled
+ public boolean isInitialized();
+
+ // allows a user to disable thrift
+ public void stopRPCServer();
+
+ // allows a user to reenable thrift
+ public void startRPCServer();
+
+ // to determine if thrift is running
+ public boolean isRPCServerRunning();
+
+ public void stopNativeTransport();
+
+ public void startNativeTransport();
+
+ public boolean isNativeTransportRunning();
+
+ // allows a node that has been started without joining the ring to join it
+ public void joinRing() throws IOException;
+
+ public boolean isJoined();
+
+ @Deprecated
+ public int getExceptionCount();
+
+ public void setStreamThroughputMbPerSec(int value);
+
+ public int getStreamThroughputMbPerSec();
+
+ public int getCompactionThroughputMbPerSec();
+
+ public void setCompactionThroughputMbPerSec(int value);
+
+ public boolean isIncrementalBackupsEnabled();
+
+ public void setIncrementalBackupsEnabled(boolean value);
+
+ /**
+ * Initiate a process of streaming data for which we are responsible from
+ * other nodes. It is similar to bootstrap except meant to be used on a node
+ * which is already in the cluster (typically containing no data) as an
+ * alternative to running repair.
+ *
+ * @param sourceDc
+ * Name of DC from which to select sources for streaming or null
+ * to pick any node
+ */
+ public void rebuild(String sourceDc);
+
+ /** Starts a bulk load and blocks until it completes. */
+ public void bulkLoad(String directory);
+
+ /**
+ * Starts a bulk load asynchronously and returns the String representation
+ * of the planID for the new streaming session.
+ */
+ public String bulkLoadAsync(String directory);
+
+ public void rescheduleFailedDeletions();
+
+ /**
+ * Load new SSTables to the given keyspace/columnFamily
+ *
+ * @param ksName
+ * The parent keyspace name
+ * @param cfName
+ * The ColumnFamily name where SSTables belong
+ */
+ public void loadNewSSTables(String ksName, String cfName);
+
+ /**
+ * Return a List of Tokens representing a sample of keys across all
+ * ColumnFamilyStores.
+ *
+ * Note: this should be left as an operation, not an attribute (methods
+ * starting with "get") to avoid sending potentially multiple MB of data
+ * when accessing this mbean by default. See CASSANDRA-4452.
+ *
+ * @return set of Tokens as Strings
+ */
+ public List sampleKeyRange();
+
+ /**
+ * rebuild the specified indexes
+ */
+ public void rebuildSecondaryIndex(String ksName, String cfName,
+ String... idxNames);
+
+ public void resetLocalSchema() throws IOException;
+
+ /**
+ * Enables/Disables tracing for the whole system. Only thrift requests can
+ * start tracing currently.
+ *
+ * @param probability
+ * ]0,1[ will enable tracing on a partial number of requests with
+ * the provided probability. 0 will disable tracing and 1 will
+ * enable tracing for all requests (which might severely cripple
+ * the system)
+ */
+ public void setTraceProbability(double probability);
+
+ /**
+ * Returns the configured tracing probability.
+ */
+ public double getTraceProbability();
+
+ void disableAutoCompaction(String ks, String... columnFamilies)
+ throws IOException;
+
+ void enableAutoCompaction(String ks, String... columnFamilies)
+ throws IOException;
+
+ public void deliverHints(String host) throws UnknownHostException;
+
+ /** Returns the name of the cluster */
+ public String getClusterName();
+
+ /** Returns the cluster partitioner */
+ public String getPartitionerName();
+
+ /** Returns the threshold for warning of queries with many tombstones */
+ public int getTombstoneWarnThreshold();
+
+ /** Sets the threshold for warning queries with many tombstones */
+ public void setTombstoneWarnThreshold(int tombstoneDebugThreshold);
+
+ /** Returns the threshold for abandoning queries with many tombstones */
+ public int getTombstoneFailureThreshold();
+
+ /** Sets the threshold for abandoning queries with many tombstones */
+ public void setTombstoneFailureThreshold(int tombstoneDebugThreshold);
+
+ /** Returns the threshold for rejecting queries due to a large batch size */
+ public int getBatchSizeFailureThreshold();
+
+ /** Sets the threshold for rejecting queries due to a large batch size */
+ public void setBatchSizeFailureThreshold(int batchSizeDebugThreshold);
+
+ /** Sets the hinted handoff throttle in kb per second, per delivery thread. */
+ public void setHintedHandoffThrottleInKB(int throttleInKB);
+}