/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */

package org.apache.cassandra.service;

import java.io.*;
import java.lang.management.ManagementFactory;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;

import javax.json.Json;
import javax.json.JsonArray;
import javax.json.JsonObject;
import javax.json.JsonWriter;
import javax.management.*;
import javax.management.openmbean.TabularData;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;

import org.apache.cassandra.metrics.StorageMetrics;
import org.apache.cassandra.repair.RepairParallelism;
import org.apache.cassandra.streaming.StreamManager;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.api.APIConfig;
import com.scylladb.jmx.utils.FileUtils;

import com.google.common.base.Joiner;

/**
 * This abstraction contains the token/identifier of this node on the identifier
 * space. This token gets gossiped around. This class will also maintain
 * histograms of the load information of other nodes in the cluster.
 */
public class StorageService extends NotificationBroadcasterSupport
        implements StorageServiceMBean {
    private static final java.util.logging.Logger logger = java.util.logging.Logger
            .getLogger(StorageService.class.getName());

    private APIClient c = new APIClient();
    private static Timer timer = new Timer("Storage Service Repair");
    private StorageMetrics metrics = new StorageMetrics();

    public static final StorageService instance = new StorageService();

    public static StorageService getInstance() {
        return instance;
    }

    public static enum RepairStatus
    {
        STARTED, SESSION_SUCCESS, SESSION_FAILED, FINISHED
    }

    /* JMX notification serial number counter */
    private final AtomicLong notificationSerialNumber = new AtomicLong();

    private final ObjectName jmxObjectName;

    public StorageService() {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        try {
            jmxObjectName = new ObjectName(
                    "org.apache.cassandra.db:type=StorageService");
            mbs.registerMBean(this, jmxObjectName);
            mbs.registerMBean(StreamManager.getInstance(), new ObjectName(StreamManager.OBJECT_NAME));
        } catch (Exception e) {
            throw new RuntimeException(e);
        }

    }
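
    /*
     * Illustrative only: a minimal sketch of how an external JMX client could
     * reach the MBean registered by the constructor above. The service URL,
     * host and port (7199) are assumptions, not values defined in this file.
     *
     *   JMXServiceURL url = new JMXServiceURL(
     *           "service:jmx:rmi:///jndi/rmi://127.0.0.1:7199/jmxrmi"); // assumed address
     *   JMXConnector connector = JMXConnectorFactory.connect(url);
     *   MBeanServerConnection conn = connector.getMBeanServerConnection();
     *   StorageServiceMBean proxy = JMX.newMBeanProxy(conn,
     *           new ObjectName("org.apache.cassandra.db:type=StorageService"),
     *           StorageServiceMBean.class);
     *   System.out.println(proxy.getLiveNodes()); // forwarded to the Scylla REST API
     *   connector.close();
     */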

    public void log(String str) {
        logger.finest(str);
    }

    /**
     * Retrieve the list of live nodes in the cluster, where "liveness" is
     * determined by the failure detector of the node being queried.
     *
     * @return set of IP addresses, as Strings
     */
    public List<String> getLiveNodes() {
        log(" getLiveNodes()");
        return c.getListStrValue("/gossiper/endpoint/live");
    }

    /**
     * Retrieve the list of unreachable nodes in the cluster, as determined by
     * this node's failure detector.
     *
     * @return set of IP addresses, as Strings
     */
    public List<String> getUnreachableNodes() {
        log(" getUnreachableNodes()");
        return c.getListStrValue("/gossiper/endpoint/down");
    }

    /**
     * Retrieve the list of nodes currently bootstrapping into the ring.
     *
     * @return set of IP addresses, as Strings
     */
    public List<String> getJoiningNodes() {
        log(" getJoiningNodes()");
        return c.getListStrValue("/storage_service/nodes/joining");
    }

    /**
     * Retrieve the list of nodes currently leaving the ring.
     *
     * @return set of IP addresses, as Strings
     */
    public List<String> getLeavingNodes() {
        log(" getLeavingNodes()");
        return c.getListStrValue("/storage_service/nodes/leaving");
    }

    /**
     * Retrieve the list of nodes currently moving in the ring.
     *
     * @return set of IP addresses, as Strings
     */
    public List<String> getMovingNodes() {
        log(" getMovingNodes()");
        return c.getListStrValue("/storage_service/nodes/moving");
    }

    /**
     * Fetch string representations of the tokens for this node.
     *
     * @return a collection of tokens formatted as strings
     */
    public List<String> getTokens() {
        log(" getTokens()");
        try {
            return getTokens(getLocalBroadCastingAddress());
        } catch (UnknownHostException e) {
            // We should never reach here,
            // but it makes the compiler happy
            return null;
        }
    }

    /**
     * Fetch string representations of the tokens for a specified node.
     *
     * @param endpoint
     *            string representation of a node
     * @return a collection of tokens formatted as strings
     */
    public List<String> getTokens(String endpoint) throws UnknownHostException {
        log(" getTokens(String endpoint) throws UnknownHostException");
        return c.getListStrValue("/storage_service/tokens/" + endpoint);
    }

    /**
     * Fetch a string representation of the Cassandra version.
     *
     * @return A string representation of the Cassandra version.
     */
    public String getReleaseVersion() {
        log(" getReleaseVersion()");
        return c.getStringValue("/storage_service/release_version");
    }

    /**
     * Fetch a string representation of the current Schema version.
     *
     * @return A string representation of the Schema version.
     */
    public String getSchemaVersion() {
        log(" getSchemaVersion()");
        return c.getStringValue("/storage_service/schema_version");
    }

    /**
     * Get the list of all data file locations from conf
     *
     * @return String array of all locations
     */
    public String[] getAllDataFileLocations() {
        log(" getAllDataFileLocations()");
        return c.getStringArrValue("/storage_service/data_file/locations");
    }

    /**
     * Get location of the commit log
     *
     * @return a string path
     */
    public String getCommitLogLocation() {
        log(" getCommitLogLocation()");
        return c.getStringValue("/storage_service/commitlog");
    }

    /**
     * Get location of the saved caches dir
     *
     * @return a string path
     */
    public String getSavedCachesLocation() {
        log(" getSavedCachesLocation()");
        return c.getStringValue("/storage_service/saved_caches/location");
    }

    /**
     * Retrieve a map of range to end points that describe the ring topology of
     * a Cassandra cluster.
     *
     * @return mapping of ranges to end points
     */
    public Map<List<String>, List<String>> getRangeToEndpointMap(
            String keyspace) {
        log(" getRangeToEndpointMap(String keyspace)");
        return c.getMapListStrValue("/storage_service/range/" + keyspace);
    }

    /**
     * Retrieve a map of range to rpc addresses that describe the ring topology
     * of a Cassandra cluster.
     *
     * @return mapping of ranges to rpc addresses
     */
    public Map<List<String>, List<String>> getRangeToRpcaddressMap(
            String keyspace) {
        log(" getRangeToRpcaddressMap(String keyspace)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("rpc", "true");
        return c.getMapListStrValue("/storage_service/range/" + keyspace,
                queryParams);
    }

    /**
     * The same as {@code describeRing(String)} but converts TokenRange to the
     * String for JMX compatibility
     *
     * @param keyspace
     *            The keyspace to fetch information about
     *
     * @return a List of TokenRange(s) converted to String for the given
     *         keyspace
     */
    public List<String> describeRingJMX(String keyspace) throws IOException {
        log(" describeRingJMX(String keyspace) throws IOException");
        JsonArray arr = c.getJsonArray("/storage_service/describe_ring/" + keyspace);
        List<String> res = new ArrayList<String>();

        for (int i = 0; i < arr.size(); i++) {
            JsonObject obj = arr.getJsonObject(i);
            StringBuilder sb = new StringBuilder();
            sb.append("TokenRange(");
            sb.append("start_token:");
            sb.append(obj.getString("start_token"));
            sb.append(", end_token:");
            sb.append(obj.getString("end_token"));
            sb.append(", endpoints:[");
            JsonArray endpoints = obj.getJsonArray("endpoints");
            for (int j = 0; j < endpoints.size(); j++) {
                if (j > 0) {
                    sb.append(", ");
                }
                sb.append(endpoints.getString(j));
            }
            sb.append("], rpc_endpoints:[");
            JsonArray rpc_endpoints = obj.getJsonArray("rpc_endpoints");
            for (int j = 0; j < rpc_endpoints.size(); j++) {
                if (j > 0) {
                    sb.append(", ");
                }
                sb.append(rpc_endpoints.getString(j));
            }

            sb.append("], endpoint_details:[");
            JsonArray endpoint_details = obj.getJsonArray("endpoint_details");
            for (int j = 0; j < endpoint_details.size(); j++) {
                JsonObject detail = endpoint_details.getJsonObject(j);
                if (j > 0) {
                    sb.append(", ");
                }
                sb.append("EndpointDetails(");
                sb.append("host:");
                sb.append(detail.getString("host"));
                sb.append(", datacenter:");
                sb.append(detail.getString("datacenter"));
                sb.append(", rack:");
                sb.append(detail.getString("rack"));
                sb.append(')');
            }
            sb.append("])");
            res.add(sb.toString());
        }
        return res;
    }

    /**
     * Retrieve a map of pending ranges to endpoints that describe the ring
     * topology
     *
     * @param keyspace
     *            the keyspace to get the pending range map for.
     * @return a map of pending ranges to endpoints
     */
    public Map<List<String>, List<String>> getPendingRangeToEndpointMap(
            String keyspace) {
        log(" getPendingRangeToEndpointMap(String keyspace)");
        return c.getMapListStrValue(
                "/storage_service/pending_range/" + keyspace);
    }

    /**
     * Retrieve a map of tokens to endpoints, including the bootstrapping ones.
     *
     * @return a map of tokens to endpoints in ascending order
     */
    public Map<String, String> getTokenToEndpointMap() {
        log(" getTokenToEndpointMap()");
        Map<String, String> mapInetAddress = c.getMapStrValue("/storage_service/tokens_endpoint");
        // in order to preserve tokens in ascending order, we use LinkedHashMap here
        Map<String, String> mapString = new LinkedHashMap<>(mapInetAddress.size());
        List<String> tokens = new ArrayList<>(mapInetAddress.keySet());
        Collections.sort(tokens);
        for (String token : tokens)
        {
            mapString.put(token, mapInetAddress.get(token));
        }
        return mapString;
    }

    /** Retrieve this host's unique ID */
    public String getLocalHostId() {
        log(" getLocalHostId()");
        return c.getStringValue("/storage_service/hostid/local");
    }

    public String getLocalBroadCastingAddress() {
        // FIXME:
        // There is no straight API to get the broadcasting
        // address, instead of trying to figure it out from the configuration
        // we will use the getHostIdToAddressMap with the hostid
        return getHostIdToAddressMap().get(getLocalHostId());
    }

    /** Retrieve the mapping of endpoint to host ID */
    public Map<String, String> getHostIdMap() {
        log(" getHostIdMap()");
        return c.getMapStrValue("/storage_service/host_id");
    }

    /** Retrieve the mapping of host ID to endpoint */
    public Map<String, String> getHostIdToAddressMap() {
        log(" getHostIdToAddressMap()");
        return c.getReverseMapStrValue("/storage_service/host_id");
    }

    /**
     * Numeric load value.
     *
     * @see org.apache.cassandra.metrics.StorageMetrics#load
     */
    @Deprecated
    public double getLoad() {
        log(" getLoad()");
        return c.getDoubleValue("/storage_service/load");
    }

    /** Human-readable load value */
    public String getLoadString() {
        log(" getLoadString()");
        return FileUtils.stringifyFileSize(getLoad());
    }

    /** Human-readable load value. Keys are IP addresses. */
    public Map<String, String> getLoadMap() {
        log(" getLoadMap()");
        Map<String, Double> load = getLoadMapAsDouble();
        Map<String, String> map = new HashMap<>();
        for (Map.Entry<String, Double> entry : load.entrySet())
        {
            map.put(entry.getKey(), FileUtils.stringifyFileSize(entry.getValue()));
        }
        return map;
    }

    public Map<String, Double> getLoadMapAsDouble() {
        log(" getLoadMapAsDouble()");
        return c.getMapStringDouble("/storage_service/load_map");
    }

    /**
     * Return the generation value for this node.
     *
     * @return generation number
     */
    public int getCurrentGenerationNumber() {
        log(" getCurrentGenerationNumber()");
        return c.getIntValue("/storage_service/generation_number");
    }

    /**
     * This method returns the N endpoints that are responsible for storing the
     * specified key, i.e. for replication.
     *
     * @param keyspaceName
     *            keyspace name
     * @param cf
     *            Column family name
     * @param key
     *            key for which we need to find the endpoints; the return value
     *            is the list of endpoints responsible for this key
     */
    public List<InetAddress> getNaturalEndpoints(String keyspaceName, String cf,
            String key) {
        log(" getNaturalEndpoints(String keyspaceName, String cf, String key)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("cf", cf);
        queryParams.add("key", key);
        return c.getListInetAddressValue(
                "/storage_service/natural_endpoints/" + keyspaceName,
                queryParams);
    }

    public List<InetAddress> getNaturalEndpoints(String keyspaceName,
            ByteBuffer key) {
        log(" getNaturalEndpoints(String keyspaceName, ByteBuffer key)");
        return c.getListInetAddressValue("");
    }

    /**
     * Takes the snapshot for the given keyspaces. A snapshot name must be
     * specified.
     *
     * @param tag
     *            the tag given to the snapshot; may not be null or empty
     * @param keyspaceNames
     *            the name of the keyspaces to snapshot; empty means "all."
     */
    public void takeSnapshot(String tag, String... keyspaceNames)
            throws IOException {
        log(" takeSnapshot(String tag, String... keyspaceNames) throws IOException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "tag", tag);
        APIClient.set_query_param(queryParams, "kn",
                APIClient.join(keyspaceNames));
        c.post("/storage_service/snapshots", queryParams);
    }
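
    /*
     * Illustrative only: how a JMX client could drive the snapshot operations
     * in this class through the MBean proxy sketched after the constructor.
     * The tag and keyspace names below are placeholders.
     *
     *   proxy.takeSnapshot("pre-upgrade", "ks1", "ks2"); // POST /storage_service/snapshots
     *   Map<String, TabularData> details = proxy.getSnapshotDetails();
     *   proxy.clearSnapshot("pre-upgrade", "ks1", "ks2"); // DELETE /storage_service/snapshots
     */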

    /**
     * Takes the snapshot of a specific column family. A snapshot name must be
     * specified.
     *
     * @param keyspaceName
     *            the keyspace which holds the specified column family
     * @param columnFamilyName
     *            the column family to snapshot
     * @param tag
     *            the tag given to the snapshot; may not be null or empty
     */
    public void takeColumnFamilySnapshot(String keyspaceName,
            String columnFamilyName, String tag) throws IOException {
        log(" takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        if (keyspaceName == null)
            throw new IOException("You must supply a keyspace name");
        if (columnFamilyName == null)
            throw new IOException("You must supply a table name");
        if (tag == null || tag.equals(""))
            throw new IOException("You must supply a snapshot name.");
        queryParams.add("tag", tag);
        queryParams.add("kn", keyspaceName);
        queryParams.add("cf", columnFamilyName);
        c.post("/storage_service/snapshots", queryParams);
    }

    /**
     * Remove the snapshot with the given name from the given keyspaces. If no
     * tag is specified we will remove all snapshots.
     */
    public void clearSnapshot(String tag, String... keyspaceNames)
            throws IOException {
        log(" clearSnapshot(String tag, String... keyspaceNames) throws IOException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "tag", tag);
        APIClient.set_query_param(queryParams, "kn",
                APIClient.join(keyspaceNames));
        c.delete("/storage_service/snapshots", queryParams);
    }

    /**
     * Get the details of all the snapshots
     *
     * @return A map of snapshotName to all its details in Tabular form.
     */
    public Map<String, TabularData> getSnapshotDetails() {
        log(" getSnapshotDetails()");
        return c.getMapStringSnapshotTabularDataValue(
                "/storage_service/snapshots", null);
    }

    /**
     * Get the true size taken by all snapshots across all keyspaces.
     *
     * @return True size taken by all the snapshots.
     */
    public long trueSnapshotsSize() {
        log(" trueSnapshotsSize()");
        return c.getLongValue("/storage_service/snapshots/size/true");
    }

    /**
     * Forces major compaction of a single keyspace
     */
    public void forceKeyspaceCompaction(String keyspaceName,
            String... columnFamilies) throws IOException, ExecutionException,
            InterruptedException {
        log(" forceKeyspaceCompaction(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "cf",
                APIClient.join(columnFamilies));
        c.post("/storage_service/keyspace_compaction/" + keyspaceName,
                queryParams);
    }

    /**
     * Trigger a cleanup of keys on a single keyspace
     */
    public int forceKeyspaceCleanup(String keyspaceName,
            String... columnFamilies) throws IOException, ExecutionException,
            InterruptedException {
        log(" forceKeyspaceCleanup(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "cf",
                APIClient.join(columnFamilies));
        return c.postInt("/storage_service/keyspace_cleanup/" + keyspaceName,
                queryParams);
    }

    /**
     * Scrub (deserialize + reserialize at the latest version, skipping bad rows
     * if any) the given keyspace. If columnFamilies array is empty, all CFs are
     * scrubbed.
     *
     * Scrubbed CFs will be snapshotted first, if disableSnapshot is false
     */
    public int scrub(boolean disableSnapshot, boolean skipCorrupted,
            String keyspaceName, String... columnFamilies) throws IOException,
            ExecutionException, InterruptedException {
        log(" scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
        return scrub(disableSnapshot, skipCorrupted, true, keyspaceName, columnFamilies);
    }

    @Override
    public int scrub(boolean disableSnapshot, boolean skipCorrupted,
            boolean checkData, String keyspaceName, String... columnFamilies)
            throws IOException, ExecutionException,
            InterruptedException {
        log(" scrub(boolean disableSnapshot, boolean skipCorrupted, bool checkData, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_bool_query_param(queryParams, "disable_snapshot",
                disableSnapshot);
        APIClient.set_bool_query_param(queryParams, "skip_corrupted",
                skipCorrupted);
        APIClient.set_query_param(queryParams, "cf",
                APIClient.join(columnFamilies));
        return c.getIntValue("/storage_service/keyspace_scrub/" + keyspaceName);
    }

    /**
     * Rewrite all sstables to the latest version. Unlike scrub, it does not
     * skip bad rows and does not snapshot sstables first.
     */
    public int upgradeSSTables(String keyspaceName,
            boolean excludeCurrentVersion, String... columnFamilies)
            throws IOException, ExecutionException,
            InterruptedException {
        log(" upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_bool_query_param(queryParams, "exclude_current_version",
                excludeCurrentVersion);
        APIClient.set_query_param(queryParams, "cf",
                APIClient.join(columnFamilies));
        return c.getIntValue(
                "/storage_service/keyspace_upgrade_sstables/" + keyspaceName);
    }

    /**
     * Flush all memtables for the given column families, or all columnfamilies
     * for the given keyspace if none are explicitly listed.
     *
     * @param keyspaceName
     * @param columnFamilies
     * @throws IOException
     */
    public void forceKeyspaceFlush(String keyspaceName,
            String... columnFamilies) throws IOException, ExecutionException,
            InterruptedException {
        log(" forceKeyspaceFlush(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "cf",
                APIClient.join(columnFamilies));
        c.post("/storage_service/keyspace_flush/" + keyspaceName, queryParams);
    }

    class CheckRepair extends TimerTask {
        private APIClient c = new APIClient();
        int id;
        String keyspace;
        String message;
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        int cmd;

        public CheckRepair(int id, String keyspace) {
            this.id = id;
            this.keyspace = keyspace;
            APIClient.set_query_param(queryParams, "id", Integer.toString(id));
            message = String.format("Repair session %d ", id);
            // The returned id is the command number
            this.cmd = id;
        }

        @Override
        public void run() {
            String status = c.getStringValue("/storage_service/repair_async/" + keyspace, queryParams);
            if (!status.equals("RUNNING")) {
                cancel();
                if (!status.equals("SUCCESSFUL")) {
                    sendNotification("repair", message + "failed", new int[]{cmd, RepairStatus.SESSION_FAILED.ordinal()});
                }
                sendNotification("repair", message + "finished", new int[]{cmd, RepairStatus.FINISHED.ordinal()});
            }
        }

    }

    /**
     * Sends JMX notification to subscribers.
     *
     * @param type Message type
     * @param message Message itself
     * @param userObject Arbitrary object to attach to notification
     */
    public void sendNotification(String type, String message, Object userObject)
    {
        Notification jmxNotification = new Notification(type, jmxObjectName, notificationSerialNumber.incrementAndGet(), message);
        jmxNotification.setUserData(userObject);
        sendNotification(jmxNotification);
    }

    public String getRepairMessage(final int cmd,
            final String keyspace,
            final int ranges_size,
            final RepairParallelism parallelismDegree,
            final boolean fullRepair) {
        return String.format("Starting repair command #%d, repairing %d ranges for keyspace %s (parallelism=%s, full=%b)",
                cmd, ranges_size, keyspace, parallelismDegree, fullRepair);
    }

    /**
     * Schedules a background check of the given repair command and notifies
     * JMX subscribers when it starts and when it finishes.
     *
     * @param cmd      the repair command number
     * @param keyspace the keyspace being repaired
     * @param message  the message sent with the start notification
     */
    public int waitAndNotifyRepair(int cmd, String keyspace, String message) {
        logger.finest(message);
        sendNotification("repair", message, new int[]{cmd, RepairStatus.STARTED.ordinal()});
        TimerTask taskToExecute = new CheckRepair(cmd, keyspace);
        timer.schedule(taskToExecute, 100, 1000);
        return cmd;
    }

    /**
     * Invoke repair asynchronously. You can track repair progress by
     * subscribing to the JMX notifications sent from this StorageServiceMBean.
     * Notification format is: type: "repair" userObject: int array of length 2,
     * [0]=command number, [1]=ordinal of AntiEntropyService.Status
     *
     * @param keyspace
     *            Keyspace name to repair. Should not be null.
     * @param options
     *            repair options.
     * @return Repair command number, or 0 if nothing to repair
     */
    public int repairAsync(String keyspace, Map<String, String> options) {
        log(" repairAsync(String keyspace, Map<String, String> options)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        for (String op : options.keySet()) {
            APIClient.set_query_param(queryParams, op, options.get(op));
        }

        int cmd = c.postInt("/storage_service/repair_async/" + keyspace, queryParams);
        waitAndNotifyRepair(cmd, keyspace, getRepairMessage(cmd, keyspace, 1, RepairParallelism.SEQUENTIAL, true));
        return cmd;
    }
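
    /*
     * Illustrative only: a sketch of how a client could follow the "repair"
     * notifications sent by repairAsync() above. The user data is an int array
     * of {command number, RepairStatus ordinal}, as produced by CheckRepair and
     * waitAndNotifyRepair. The connection (conn) and object name (name) are
     * assumed to have been set up as in the sketch after the constructor.
     *
     *   NotificationListener listener = (notification, handback) -> {
     *       if ("repair".equals(notification.getType())) {
     *           int[] data = (int[]) notification.getUserData();
     *           System.out.println("repair #" + data[0] + " -> "
     *                   + RepairStatus.values()[data[1]]);
     *       }
     *   };
     *   conn.addNotificationListener(name, listener, null, null);
     */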

    @Override
    public int forceRepairAsync(String keyspace, boolean isSequential,
            Collection<String> dataCenters, Collection<String> hosts,
            boolean primaryRange, boolean repairedAt, String... columnFamilies)
            throws IOException {
        log(" forceRepairAsync(String keyspace, boolean isSequential, Collection<String> dataCenters, Collection<String> hosts, boolean primaryRange, boolean repairedAt, String... columnFamilies) throws IOException");
        Map<String, String> options = new HashMap<String, String>();
        return repairAsync(keyspace, options);
    }

    public int forceRepairAsync(String keyspace) {
        Map<String, String> options = new HashMap<String, String>();
        return repairAsync(keyspace, options);
    }

    public int forceRepairRangeAsync(String beginToken, String endToken,
            String keyspaceName, boolean isSequential,
            Collection<String> dataCenters, Collection<String> hosts,
            boolean repairedAt, String... columnFamilies) throws IOException {
        log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection<String> dataCenters, Collection<String> hosts, boolean repairedAt, String... columnFamilies) throws IOException");
        return c.getIntValue("");
    }

    @Deprecated
    public int forceRepairRangeAsync(String beginToken, String endToken,
            String keyspaceName, RepairParallelism parallelismDegree,
            Collection<String> dataCenters, Collection<String> hosts,
            boolean fullRepair, String... columnFamilies) {
        log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, RepairParallelism parallelismDegree, Collection<String> dataCenters, Collection<String> hosts, boolean fullRepair, String... columnFamilies)");
        return c.getIntValue("");
    }

    @Override
    public int forceRepairAsync(String keyspace, boolean isSequential,
            boolean isLocal, boolean primaryRange, boolean fullRepair,
            String... columnFamilies) {
        log(" forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, String... columnFamilies)");
        Map<String, String> options = new HashMap<String, String>();
        return repairAsync(keyspace, options);
    }

    @Deprecated
    public int forceRepairRangeAsync(String beginToken, String endToken,
            String keyspaceName, boolean isSequential, boolean isLocal,
            boolean repairedAt, String... columnFamilies) {
        log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, boolean isLocal, boolean repairedAt, String... columnFamilies)");
        return c.getIntValue("");
    }

    public void forceTerminateAllRepairSessions() {
        log(" forceTerminateAllRepairSessions()");
        c.post("/storage_service/force_terminate");
    }

    /**
     * transfer this node's data to other machines and remove it from service.
     */
    public void decommission() throws InterruptedException {
        log(" decommission() throws InterruptedException");
        c.post("/storage_service/decommission");
    }

    /**
     * @param newToken
     *            token to move this node to. This node will unload its data
     *            onto its neighbors, and bootstrap to the new token.
     */
    public void move(String newToken) throws IOException {
        log(" move(String newToken) throws IOException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "new_token", newToken);
        c.post("/storage_service/move", queryParams);
    }

    /**
     * removeToken removes token (and all data associated with the endpoint
     * that had it) from the ring
     *
     * @param hostIdString the host id to remove
     */
    public void removeNode(String hostIdString) {
        log(" removeNode(String token)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "host_id", hostIdString);
        c.post("/storage_service/remove_node", queryParams);
    }

    /**
     * Get the status of a token removal.
     */
    public String getRemovalStatus() {
        log(" getRemovalStatus()");
        return c.getStringValue("/storage_service/removal_status");
    }

    /**
     * Force a remove operation to finish.
     */
    public void forceRemoveCompletion() {
        log(" forceRemoveCompletion()");
        c.post("/storage_service/force_remove_completion");
    }

    /**
     * set the logging level at runtime<br>
     * <br>
     * If both classQualifier and level are empty/null, it will reload the
     * configuration to reset.<br>
     * If classQualifier is not empty but level is empty/null, it will set the
     * level to null for the defined classQualifier<br>
     * If level cannot be parsed, then the level will be defaulted to DEBUG<br>
     * <br>
     * The logback configuration should have < jmxConfigurator /> set
     *
     * @param classQualifier
     *            The logger's classQualifier
     * @param level
     *            The log level
     * @throws Exception
     *
     * @see ch.qos.logback.classic.Level#toLevel(String)
     */
    public void setLoggingLevel(String classQualifier, String level)
            throws Exception {
        log(" setLoggingLevel(String classQualifier, String level) throws Exception");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "level", level);
        c.post("/system/logger/" + classQualifier, queryParams);
    }

    /** get the runtime logging levels */
    public Map<String, String> getLoggingLevels() {
        log(" getLoggingLevels()");
        return c.getMapStrValue("/storage_service/logging_level");
    }

    /**
     * get the operational mode (leaving, joining, normal, decommissioned,
     * client)
     **/
    public String getOperationMode() {
        log(" getOperationMode()");
        return c.getStringValue("/storage_service/operation_mode");
    }

    /** Returns whether the storage service is starting or not */
    public boolean isStarting() {
        log(" isStarting()");
        return c.getBooleanValue("/storage_service/is_starting");
    }

    /** get the progress of a drain operation */
    public String getDrainProgress() {
        log(" getDrainProgress()");
        // FIXME
        // This is a workaround so that nodetool works;
        // it should be reverted when drain progress is implemented
        // return c.getStringValue("/storage_service/drain");
        return String.format("Drained %s/%s ColumnFamilies", 0, 0);
    }

    /**
     * makes node unavailable for writes, flushes memtables and replays
     * commitlog.
     */
    public void drain()
            throws IOException, InterruptedException, ExecutionException {
        log(" drain() throws IOException, InterruptedException, ExecutionException");
        c.post("/storage_service/drain");
    }

    /**
     * Truncates (deletes) the given columnFamily from the provided keyspace.
     * Calling truncate results in actual deletion of all data in the cluster
     * under the given columnFamily and it will fail unless all hosts are up.
     * All data in the given column family will be deleted, but its definition
     * will not be affected.
     *
     * @param keyspace
     *            The keyspace to delete from
     * @param columnFamily
     *            The column family to delete data from.
     */
    public void truncate(String keyspace, String columnFamily)
            throws TimeoutException, IOException {
        log(" truncate(String keyspace, String columnFamily)throws TimeoutException, IOException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "cf", columnFamily);
        c.post("/storage_service/truncate/" + keyspace, queryParams);
    }

    /**
     * given a list of tokens (representing the nodes in the cluster), returns a
     * mapping from "token -> %age of cluster owned by that token"
     */
    public Map<InetAddress, Float> getOwnership() {
        log(" getOwnership()");
        return c.getMapInetAddressFloatValue("/storage_service/ownership/");
    }

    /**
     * Effective ownership is the percentage of the data each node owns, given
     * the keyspace; the percentage is calculated using the replication factor.
     * If keyspace is null, this method will try to verify that all keyspaces
     * in the cluster use the same replication strategy; if so the first one is
     * used, otherwise an empty Map is returned.
     */
    public Map<InetAddress, Float> effectiveOwnership(String keyspace)
            throws IllegalStateException {
        log(" effectiveOwnership(String keyspace) throws IllegalStateException");
        try {
            return c.getMapInetAddressFloatValue("/storage_service/ownership/" + keyspace);
        } catch (Exception e) {
            throw new IllegalStateException("Non-system keyspaces don't have the same replication settings, effective ownership information is meaningless");
        }
    }

    public List<String> getKeyspaces() {
        log(" getKeyspaces()");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("non_system", "true");
        return c.getListStrValue("/storage_service/keyspaces", queryParams);
    }

    public List<String> getNonSystemKeyspaces() {
        log(" getNonSystemKeyspaces()");
        return c.getListStrValue("/storage_service/keyspaces");
    }

    /**
     * Change endpointsnitch class and dynamic-ness (and dynamic attributes) at
     * runtime
     *
     * @param epSnitchClassName
     *            the canonical path name for a class implementing
     *            IEndpointSnitch
     * @param dynamic
     *            boolean that decides whether dynamicsnitch is used or not
     * @param dynamicUpdateInterval
     *            integer, in ms (default 100)
     * @param dynamicResetInterval
     *            integer, in ms (default 600,000)
     * @param dynamicBadnessThreshold
     *            double, (default 0.0)
     */
    public void updateSnitch(String epSnitchClassName, Boolean dynamic,
            Integer dynamicUpdateInterval, Integer dynamicResetInterval,
            Double dynamicBadnessThreshold) throws ClassNotFoundException {
        log(" updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_bool_query_param(queryParams, "dynamic", dynamic);
        APIClient.set_query_param(queryParams, "epSnitchClassName",
                epSnitchClassName);
        if (dynamicUpdateInterval != null) {
            queryParams.add("dynamic_update_interval",
                    dynamicUpdateInterval.toString());
        }
        if (dynamicResetInterval != null) {
            queryParams.add("dynamic_reset_interval",
                    dynamicResetInterval.toString());
        }
        if (dynamicBadnessThreshold != null) {
            queryParams.add("dynamic_badness_threshold",
                    dynamicBadnessThreshold.toString());
        }
        c.post("/storage_service/update_snitch", queryParams);
    }

    // allows a user to forcibly 'kill' a sick node
    public void stopGossiping() {
        log(" stopGossiping()");
        c.delete("/storage_service/gossiping");
    }

    // allows a user to recover a forcibly 'killed' node
    public void startGossiping() {
        log(" startGossiping()");
        c.post("/storage_service/gossiping");
    }

    // allows a user to see whether gossip is running or not
    public boolean isGossipRunning() {
        log(" isGossipRunning()");
        return c.getBooleanValue("/storage_service/gossiping");
    }

    // allows a user to forcibly completely stop cassandra
    public void stopDaemon() {
        log(" stopDaemon()");
        c.post("/storage_service/stop_daemon");
    }

    // to determine if gossip is disabled
    public boolean isInitialized() {
        log(" isInitialized()");
        return c.getBooleanValue("/storage_service/is_initialized");
    }

    // allows a user to disable thrift
    public void stopRPCServer() {
        log(" stopRPCServer()");
        c.delete("/storage_service/rpc_server");
    }

    // allows a user to reenable thrift
    public void startRPCServer() {
        log(" startRPCServer()");
        c.post("/storage_service/rpc_server");
    }

    // to determine if thrift is running
    public boolean isRPCServerRunning() {
        log(" isRPCServerRunning()");
        return c.getBooleanValue("/storage_service/rpc_server");
    }

    public void stopNativeTransport() {
        log(" stopNativeTransport()");
        c.delete("/storage_service/native_transport");
    }

    public void startNativeTransport() {
        log(" startNativeTransport()");
        c.post("/storage_service/native_transport");
    }

    public boolean isNativeTransportRunning() {
        log(" isNativeTransportRunning()");
        return c.getBooleanValue("/storage_service/native_transport");
    }

    // allows a node that has been started without joining the ring to join it
    public void joinRing() throws IOException {
        log(" joinRing() throws IOException");
        c.post("/storage_service/join_ring");
    }

    public boolean isJoined() {
        log(" isJoined()");
        return c.getBooleanValue("/storage_service/join_ring");
    }

    @Deprecated
    public int getExceptionCount() {
        log(" getExceptionCount()");
        return c.getIntValue("");
    }

    public void setStreamThroughputMbPerSec(int value) {
        log(" setStreamThroughputMbPerSec(int value)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("value", Integer.toString(value));
        c.post("/storage_service/stream_throughput", queryParams);
    }

    public int getStreamThroughputMbPerSec() {
        log(" getStreamThroughputMbPerSec()");
        return c.getIntValue("/storage_service/stream_throughput");
    }

    public int getCompactionThroughputMbPerSec() {
        log(" getCompactionThroughputMbPerSec()");
        return c.getIntValue("/storage_service/compaction_throughput");
    }

    public void setCompactionThroughputMbPerSec(int value) {
        log(" setCompactionThroughputMbPerSec(int value)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("value", Integer.toString(value));
        c.post("/storage_service/compaction_throughput", queryParams);
    }

    public boolean isIncrementalBackupsEnabled() {
        log(" isIncrementalBackupsEnabled()");
        return c.getBooleanValue("/storage_service/incremental_backups");
    }

    public void setIncrementalBackupsEnabled(boolean value) {
        log(" setIncrementalBackupsEnabled(boolean value)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("value", Boolean.toString(value));
        c.post("/storage_service/incremental_backups", queryParams);
    }

    /**
     * Initiate a process of streaming data for which we are responsible from
     * other nodes. It is similar to bootstrap except meant to be used on a node
     * which is already in the cluster (typically containing no data) as an
     * alternative to running repair.
     *
     * @param sourceDc
     *            Name of DC from which to select sources for streaming or null
     *            to pick any node
     */
    public void rebuild(String sourceDc) {
        log(" rebuild(String sourceDc)");
        if (sourceDc != null) {
            MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
            APIClient.set_query_param(queryParams, "source_dc", sourceDc);
            c.post("/storage_service/rebuild", queryParams);
        } else {
            c.post("/storage_service/rebuild");
        }
    }

    /** Starts a bulk load and blocks until it completes. */
    public void bulkLoad(String directory) {
        log(" bulkLoad(String directory)");
        c.post("/storage_service/bulk_load/" + directory);
    }

    /**
     * Starts a bulk load asynchronously and returns the String representation
     * of the planID for the new streaming session.
     */
    public String bulkLoadAsync(String directory) {
        log(" bulkLoadAsync(String directory)");
        return c.getStringValue(
                "/storage_service/bulk_load_async/" + directory);
    }

    public void rescheduleFailedDeletions() {
        log(" rescheduleFailedDeletions()");
        c.post("/storage_service/reschedule_failed_deletions");
    }

    /**
     * Load new SSTables to the given keyspace/columnFamily
     *
     * @param ksName
     *            The parent keyspace name
     * @param cfName
     *            The ColumnFamily name where SSTables belong
     */
    public void loadNewSSTables(String ksName, String cfName) {
        log(" loadNewSSTables(String ksName, String cfName)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("cf", cfName);
        c.post("/storage_service/sstables/" + ksName, queryParams);
    }
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Return a List of Tokens representing a sample of keys across all
|
|
|
|
* ColumnFamilyStores.
|
|
|
|
*
|
|
|
|
* Note: this should be left as an operation, not an attribute (methods
|
|
|
|
* starting with "get") to avoid sending potentially multiple MB of data
|
|
|
|
* when accessing this mbean by default. See CASSANDRA-4452.
|
|
|
|
*
|
|
|
|
* @return set of Tokens as Strings
|
|
|
|
*/
|
|
|
|
public List<String> sampleKeyRange() {
|
|
|
|
log(" sampleKeyRange()");
|
2015-05-26 11:38:08 +02:00
|
|
|
return c.getListStrValue("/storage_service/sample_key_range");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* rebuild the specified indexes
|
|
|
|
*/
|
|
|
|
public void rebuildSecondaryIndex(String ksName, String cfName,
|
|
|
|
String... idxNames) {
|
|
|
|
log(" rebuildSecondaryIndex(String ksName, String cfName, String... idxNames)");
|
|
|
|
}
|
|
|
|
|
|
|
|
public void resetLocalSchema() throws IOException {
|
|
|
|
log(" resetLocalSchema() throws IOException");
|
2015-05-26 11:38:08 +02:00
|
|
|
c.post("/storage_service/relocal_schema");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
    /**
     * Enables/Disables tracing for the whole system. Only thrift requests can
     * start tracing currently.
     *
     * @param probability
     *            ]0,1[ will enable tracing on a partial number of requests
     *            with the provided probability. 0 will disable tracing and 1
     *            will enable tracing for all requests (which might severely
     *            cripple the system)
     */
    public void setTraceProbability(double probability) {
        log(" setTraceProbability(double probability)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("probability", Double.toString(probability));
        c.post("/storage_service/trace_probability", queryParams);
    }

    /**
     * Returns the configured tracing probability.
     */
    public double getTraceProbability() {
        log(" getTraceProbability()");
        return c.getDoubleValue("/storage_service/trace_probability");
    }

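    /**
     * Disables automatic compaction for the given keyspace and column
     * families by issuing a DELETE against /storage_service/auto_compaction/.
     */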
    public void disableAutoCompaction(String ks, String... columnFamilies)
            throws IOException {
        log("disableAutoCompaction(String ks, String... columnFamilies)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "cf",
                APIClient.join(columnFamilies));
        // The keyspace is part of the REST path; the column families go in
        // the "cf" query parameter.
        c.delete("/storage_service/auto_compaction/" + ks, queryParams);
    }

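    /**
     * Enables automatic compaction for the given keyspace and column
     * families by issuing a POST against /storage_service/auto_compaction/.
     */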
    public void enableAutoCompaction(String ks, String... columnFamilies)
            throws IOException {
        log("enableAutoCompaction(String ks, String... columnFamilies)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "cf",
                APIClient.join(columnFamilies));
        try {
            c.post("/storage_service/auto_compaction/" + ks, queryParams);
        } catch (RuntimeException e) {
            // FIXME should throw the right exception
            throw new IOException(e.getMessage(), e);
        }
    }

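    /**
     * Triggers hint delivery to the given endpoint by forwarding the request
     * to the REST API endpoint /storage_service/deliver_hints.
     */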
    public void deliverHints(String host) throws UnknownHostException {
        log(" deliverHints(String host) throws UnknownHostException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("host", host);
        c.post("/storage_service/deliver_hints", queryParams);
    }

    /** Returns the name of the cluster */
    public String getClusterName() {
        log(" getClusterName()");
        return c.getStringValue("/storage_service/cluster_name");
    }

    /** Returns the cluster partitioner */
    public String getPartitionerName() {
        log(" getPartitionerName()");
        return c.getStringValue("/storage_service/partitioner_name");
    }

    /** Returns the threshold for warning of queries with many tombstones */
    public int getTombstoneWarnThreshold() {
        log(" getTombstoneWarnThreshold()");
        return c.getIntValue("/storage_service/tombstone_warn_threshold");
    }

    /** Sets the threshold for warning on queries with many tombstones */
    public void setTombstoneWarnThreshold(int tombstoneDebugThreshold) {
        log(" setTombstoneWarnThreshold(int tombstoneDebugThreshold)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("debug_threshold",
                Integer.toString(tombstoneDebugThreshold));
        c.post("/storage_service/tombstone_warn_threshold", queryParams);
    }

    /** Returns the threshold for abandoning queries with many tombstones */
    public int getTombstoneFailureThreshold() {
        log(" getTombstoneFailureThreshold()");
        return c.getIntValue("/storage_service/tombstone_failure_threshold");
    }

    /** Sets the threshold for abandoning queries with many tombstones */
    public void setTombstoneFailureThreshold(int tombstoneDebugThreshold) {
        log(" setTombstoneFailureThreshold(int tombstoneDebugThreshold)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("debug_threshold",
                Integer.toString(tombstoneDebugThreshold));
        c.post("/storage_service/tombstone_failure_threshold", queryParams);
    }

    /** Returns the threshold for rejecting queries due to a large batch size */
    public int getBatchSizeFailureThreshold() {
        log(" getBatchSizeFailureThreshold()");
        return c.getIntValue("/storage_service/batch_size_failure_threshold");
    }

    /** Sets the threshold for rejecting queries due to a large batch size */
    public void setBatchSizeFailureThreshold(int batchSizeDebugThreshold) {
        log(" setBatchSizeFailureThreshold(int batchSizeDebugThreshold)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("threshold", Integer.toString(batchSizeDebugThreshold));
        c.post("/storage_service/batch_size_failure_threshold", queryParams);
    }

    /**
     * Sets the hinted handoff throttle in kb per second, per delivery thread.
     */
    public void setHintedHandoffThrottleInKB(int throttleInKB) {
        log(" setHintedHandoffThrottleInKB(int throttleInKB)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("throttle", Integer.toString(throttleInKB));
        c.post("/storage_service/hinted_handoff", queryParams);
    }

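    /**
     * Takes a snapshot of multiple column families from different keyspaces.
     * Not forwarded to the REST API in this version; the call is only logged.
     */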
    @Override
    public void takeMultipleColumnFamilySnapshot(String tag,
            String... columnFamilyList) throws IOException {
        // TODO Auto-generated method stub
        log(" takeMultipleColumnFamilySnapshot");
    }

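    /**
     * Starts an asynchronous repair of the given keyspace. The JMX arguments
     * are translated into the string options map expected by
     * {@link #repairAsync(String, Map)}; note that fullRepair is inverted
     * into the "incremental" option.
     */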
    @Override
    public int forceRepairAsync(String keyspace, int parallelismDegree,
            Collection<String> dataCenters, Collection<String> hosts,
            boolean primaryRange, boolean fullRepair,
            String... columnFamilies) {
        log(" forceRepairAsync(keyspace, parallelismDegree, dataCenters, hosts, primaryRange, fullRepair, columnFamilies)");
        Map<String, String> options = new HashMap<String, String>();
        Joiner commas = Joiner.on(",");
        options.put("parallelism", Integer.toString(parallelismDegree));
        if (dataCenters != null) {
            options.put("dataCenters", commas.join(dataCenters));
        }
        if (hosts != null) {
            options.put("hosts", commas.join(hosts));
        }
        options.put("primaryRange", Boolean.toString(primaryRange));
        options.put("incremental", Boolean.toString(!fullRepair));
        if (columnFamilies != null && columnFamilies.length > 0) {
            options.put("columnFamilies", commas.join(columnFamilies));
        }
        return repairAsync(keyspace, options);
    }

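    /**
     * Starts an asynchronous repair of the token range between beginToken and
     * endToken in the given keyspace. The range is passed to repairAsync via
     * the "startToken" and "endToken" options.
     */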
    @Override
    public int forceRepairRangeAsync(String beginToken, String endToken,
            String keyspaceName, int parallelismDegree,
            Collection<String> dataCenters, Collection<String> hosts,
            boolean fullRepair, String... columnFamilies) {
        log(" forceRepairRangeAsync(beginToken, endToken, keyspaceName, parallelismDegree, dataCenters, hosts, fullRepair, columnFamilies)");
        Map<String, String> options = new HashMap<String, String>();
        Joiner commas = Joiner.on(",");
        options.put("parallelism", Integer.toString(parallelismDegree));
        if (dataCenters != null) {
            options.put("dataCenters", commas.join(dataCenters));
        }
        if (hosts != null) {
            options.put("hosts", commas.join(hosts));
        }
        // Mirror forceRepairAsync: pass the requested column families, if any.
        if (columnFamilies != null && columnFamilies.length > 0) {
            options.put("columnFamilies", commas.join(columnFamilies));
        }
        options.put("incremental", Boolean.toString(!fullRepair));
        options.put("startToken", beginToken);
        options.put("endToken", endToken);
        return repairAsync(keyspaceName, options);
    }

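    /**
     * Returns the configured tracing probability; reads the same REST
     * endpoint as {@link #getTraceProbability()}.
     */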
    @Override
    public double getTracingProbability() {
        log(" getTracingProbability()");
        return c.getDoubleValue("/storage_service/trace_probability");
    }
}