2015-04-13 08:30:10 +02:00
|
|
|
/*
|
|
|
|
* Licensed to the Apache Software Foundation (ASF) under one
|
|
|
|
* or more contributor license agreements. See the NOTICE file
|
|
|
|
* distributed with this work for additional information
|
|
|
|
* regarding copyright ownership. The ASF licenses this file
|
|
|
|
* to you under the Apache License, Version 2.0 (the
|
|
|
|
* "License"); you may not use this file except in compliance
|
|
|
|
* with the License. You may obtain a copy of the License at
|
|
|
|
*
|
|
|
|
* http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
*
|
|
|
|
* Unless required by applicable law or agreed to in writing, software
|
|
|
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
* See the License for the specific language governing permissions and
|
|
|
|
* limitations under the License.
|
|
|
|
*/
|
|
|
|
/*
|
|
|
|
* Copyright 2015 Cloudius Systems
|
|
|
|
*
|
|
|
|
* Modified by Cloudius Systems
|
|
|
|
*/
|
|
|
|
package org.apache.cassandra.service;
|
|
|
|
|
2016-10-17 13:34:21 +02:00
|
|
|
import static java.util.Arrays.asList;
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
import java.io.IOException;
|
2015-04-13 08:30:10 +02:00
|
|
|
import java.net.InetAddress;
|
|
|
|
import java.net.UnknownHostException;
|
|
|
|
import java.nio.ByteBuffer;
|
2021-05-10 11:53:34 +02:00
|
|
|
import java.util.Arrays;
|
2016-08-17 10:43:28 +02:00
|
|
|
import java.util.ArrayList;
|
|
|
|
import java.util.Collection;
|
|
|
|
import java.util.HashMap;
|
|
|
|
import java.util.HashSet;
|
|
|
|
import java.util.List;
|
|
|
|
import java.util.Map;
|
2016-04-26 10:34:38 +02:00
|
|
|
import java.util.Map.Entry;
|
2016-08-17 10:43:28 +02:00
|
|
|
import java.util.Set;
|
|
|
|
import java.util.Timer;
|
|
|
|
import java.util.TimerTask;
|
|
|
|
import java.util.concurrent.ExecutionException;
|
|
|
|
import java.util.concurrent.TimeoutException;
|
2015-08-13 08:53:47 +02:00
|
|
|
import java.util.concurrent.atomic.AtomicLong;
|
2016-10-11 14:07:03 +02:00
|
|
|
import java.util.logging.Logger;
|
2019-07-24 16:33:13 +02:00
|
|
|
import java.util.stream.Collectors;
|
2015-04-13 08:30:10 +02:00
|
|
|
|
2015-11-08 11:10:57 +01:00
|
|
|
import javax.json.JsonArray;
|
|
|
|
import javax.json.JsonObject;
|
2016-10-11 14:07:03 +02:00
|
|
|
import javax.management.ListenerNotFoundException;
|
|
|
|
import javax.management.MBeanNotificationInfo;
|
2016-08-17 10:43:28 +02:00
|
|
|
import javax.management.Notification;
|
2016-10-11 14:07:03 +02:00
|
|
|
import javax.management.NotificationBroadcaster;
|
2016-08-17 10:43:28 +02:00
|
|
|
import javax.management.NotificationBroadcasterSupport;
|
2016-10-11 14:07:03 +02:00
|
|
|
import javax.management.NotificationFilter;
|
|
|
|
import javax.management.NotificationListener;
|
2019-07-24 16:33:13 +02:00
|
|
|
import javax.management.openmbean.CompositeData;
|
2021-02-15 17:26:42 +01:00
|
|
|
import javax.management.openmbean.CompositeDataSupport;
|
|
|
|
import javax.management.openmbean.CompositeType;
|
|
|
|
import javax.management.openmbean.OpenDataException;
|
|
|
|
import javax.management.openmbean.OpenType;
|
|
|
|
import javax.management.openmbean.SimpleType;
|
2015-04-13 08:30:10 +02:00
|
|
|
import javax.management.openmbean.TabularData;
|
2021-02-15 17:26:42 +01:00
|
|
|
import javax.management.openmbean.TabularDataSupport;
|
|
|
|
import javax.management.openmbean.TabularType;
|
2019-07-24 16:33:13 +02:00
|
|
|
import javax.ws.rs.core.GenericType;
|
2015-10-28 10:25:37 +01:00
|
|
|
import javax.ws.rs.core.MultivaluedHashMap;
|
2015-05-26 11:38:08 +02:00
|
|
|
import javax.ws.rs.core.MultivaluedMap;
|
2015-04-13 08:30:10 +02:00
|
|
|
|
2015-07-07 09:28:22 +02:00
|
|
|
import org.apache.cassandra.metrics.StorageMetrics;
|
2015-04-13 08:30:10 +02:00
|
|
|
import org.apache.cassandra.repair.RepairParallelism;
|
2015-11-03 10:08:28 +01:00
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
import com.google.common.base.Joiner;
|
2015-12-17 08:26:19 +01:00
|
|
|
import com.scylladb.jmx.api.APIClient;
|
2016-10-11 14:07:03 +02:00
|
|
|
import com.scylladb.jmx.metrics.MetricsMBean;
|
2015-12-17 08:26:19 +01:00
|
|
|
import com.scylladb.jmx.utils.FileUtils;
|
2021-02-15 17:26:42 +01:00
|
|
|
import com.google.common.base.Throwables;
|
2015-04-13 08:30:10 +02:00
|
|
|
|
|
|
|
/**
|
|
|
|
* This abstraction contains the token/identifier of this node on the identifier
|
|
|
|
* space. This token gets gossiped around. This class will also maintain
|
|
|
|
* histograms of the load information of other nodes in the cluster.
|
|
|
|
*/
|
2016-10-11 14:07:03 +02:00
|
|
|
public class StorageService extends MetricsMBean implements StorageServiceMBean, NotificationBroadcaster {
|
|
|
|
private static final Logger logger = Logger.getLogger(StorageService.class.getName());
|
|
|
|
private static final Timer timer = new Timer("Storage Service Repair", true);
|
2015-04-13 08:30:10 +02:00
|
|
|
|
2021-02-15 17:26:42 +01:00
|
|
|
private static final String[] COUNTER_NAMES = new String[]{"raw", "count", "error", "string"};
|
|
|
|
private static final String[] COUNTER_DESCS = new String[]
|
|
|
|
{ "partition key in raw hex bytes",
|
|
|
|
"value of this partition for given sampler",
|
|
|
|
"value is within the error bounds plus or minus of this",
|
|
|
|
"the partition key turned into a human readable format" };
|
|
|
|
private static final CompositeType COUNTER_COMPOSITE_TYPE;
|
|
|
|
private static final TabularType COUNTER_TYPE;
|
|
|
|
|
2021-05-10 11:53:34 +02:00
|
|
|
private static final String[] OPERATION_NAMES = new String[]{"read", "write"};
|
|
|
|
|
2021-02-15 17:26:42 +01:00
|
|
|
private static final String[] SAMPLER_NAMES = new String[]{"cardinality", "partitions"};
|
|
|
|
private static final String[] SAMPLER_DESCS = new String[]
|
|
|
|
{ "cardinality of partitions",
|
|
|
|
"list of counter results" };
|
|
|
|
|
|
|
|
private static final String SAMPLING_RESULTS_NAME = "SAMPLING_RESULTS";
|
|
|
|
private static final CompositeType SAMPLING_RESULT;
|
|
|
|
|
|
|
|
static
|
|
|
|
{
|
|
|
|
try
|
|
|
|
{
|
|
|
|
OpenType<?>[] counterTypes = new OpenType[] { SimpleType.STRING, SimpleType.LONG, SimpleType.LONG, SimpleType.STRING };
|
|
|
|
COUNTER_COMPOSITE_TYPE = new CompositeType(SAMPLING_RESULTS_NAME, SAMPLING_RESULTS_NAME, COUNTER_NAMES, COUNTER_DESCS, counterTypes);
|
|
|
|
COUNTER_TYPE = new TabularType(SAMPLING_RESULTS_NAME, SAMPLING_RESULTS_NAME, COUNTER_COMPOSITE_TYPE, COUNTER_NAMES);
|
|
|
|
|
|
|
|
OpenType<?>[] samplerTypes = new OpenType[] { SimpleType.LONG, COUNTER_TYPE };
|
|
|
|
SAMPLING_RESULT = new CompositeType(SAMPLING_RESULTS_NAME, SAMPLING_RESULTS_NAME, SAMPLER_NAMES, SAMPLER_DESCS, samplerTypes);
|
|
|
|
} catch (OpenDataException e)
|
|
|
|
{
|
|
|
|
throw Throwables.propagate(e);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
    // NotificationBroadcaster implementation: all listener bookkeeping and
    // notification dispatch is delegated to this standard JMX support object.
    private final NotificationBroadcasterSupport notificationBroadcasterSupport = new NotificationBroadcasterSupport();

    /** Registers a listener with the underlying broadcaster support. */
    @Override
    public void addNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback) {
        notificationBroadcasterSupport.addNotificationListener(listener, filter, handback);
    }

    /** Removes all registrations of the given listener. */
    @Override
    public void removeNotificationListener(NotificationListener listener) throws ListenerNotFoundException {
        notificationBroadcasterSupport.removeNotificationListener(listener);
    }

    /** Removes the exact (listener, filter, handback) registration. */
    @Override
    public void removeNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback)
            throws ListenerNotFoundException {
        notificationBroadcasterSupport.removeNotificationListener(listener, filter, handback);
    }

    /** Describes the notifications this MBean may emit (delegated). */
    @Override
    public MBeanNotificationInfo[] getNotificationInfo() {
        return notificationBroadcasterSupport.getNotificationInfo();
    }

    /** Broadcasts a notification to all currently registered listeners. */
    public void sendNotification(Notification notification) {
        notificationBroadcasterSupport.sendNotification(notification);
    }
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
public static enum RepairStatus {
|
2015-08-13 08:53:47 +02:00
|
|
|
STARTED, SESSION_SUCCESS, SESSION_FAILED, FINISHED
|
|
|
|
}
|
|
|
|
|
|
|
|
    /* JMX notification serial number counter */
    private final AtomicLong notificationSerialNumber = new AtomicLong();

    /**
     * Creates the StorageService MBean, registering it under the standard
     * Cassandra object name so existing JMX tooling (nodetool etc.) finds it.
     *
     * @param client the API client used to reach the local node's HTTP API
     */
    public StorageService(APIClient client) {
        super("org.apache.cassandra.db:type=StorageService", client, new StorageMetrics());
    }
|
|
|
|
|
|
|
|
    /**
     * Trace-level logging helper; emitted at FINEST so normal runs stay quiet.
     *
     * @param str message to log
     */
    public void log(String str) {
        logger.finest(str);
    }
|
|
|
|
|
|
|
|
    /**
     * Retrieve the list of live nodes in the cluster, where "liveness" is
     * determined by the failure detector of the node being queried.
     *
     * @return set of IP addresses, as Strings
     */
    @Override
    public List<String> getLiveNodes() {
        log(" getLiveNodes()");
        return client.getListStrValue("/gossiper/endpoint/live");
    }

    /**
     * Retrieve the list of unreachable nodes in the cluster, as determined by
     * this node's failure detector.
     *
     * @return set of IP addresses, as Strings
     */
    @Override
    public List<String> getUnreachableNodes() {
        log(" getUnreachableNodes()");
        return client.getListStrValue("/gossiper/endpoint/down");
    }

    /**
     * Retrieve the list of nodes currently bootstrapping into the ring.
     *
     * @return set of IP addresses, as Strings
     */
    @Override
    public List<String> getJoiningNodes() {
        log(" getJoiningNodes()");
        return client.getListStrValue("/storage_service/nodes/joining");
    }

    /**
     * Retrieve the list of nodes currently leaving the ring.
     *
     * @return set of IP addresses, as Strings
     */
    @Override
    public List<String> getLeavingNodes() {
        log(" getLeavingNodes()");
        return client.getListStrValue("/storage_service/nodes/leaving");
    }

    /**
     * Retrieve the list of nodes currently moving in the ring.
     *
     * @return set of IP addresses, as Strings
     */
    @Override
    public List<String> getMovingNodes() {
        log(" getMovingNodes()");
        return client.getListStrValue("/storage_service/nodes/moving");
    }
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Fetch string representations of the tokens for this node.
|
|
|
|
*
|
|
|
|
* @return a collection of tokens formatted as strings
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public List<String> getTokens() {
|
|
|
|
log(" getTokens()");
|
2016-03-19 17:37:14 +01:00
|
|
|
try {
|
|
|
|
return getTokens(getLocalBroadCastingAddress());
|
|
|
|
} catch (UnknownHostException e) {
|
|
|
|
// We should never reach here,
|
|
|
|
// but it makes the compiler happy
|
|
|
|
return null;
|
|
|
|
}
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
    /**
     * Fetch string representations of the tokens for a specified node.
     *
     * @param endpoint
     *            string representation of an node
     * @return a collection of tokens formatted as strings
     */
    @Override
    public List<String> getTokens(String endpoint) throws UnknownHostException {
        log(" getTokens(String endpoint) throws UnknownHostException");
        return client.getListStrValue("/storage_service/tokens/" + endpoint);
    }

    /**
     * Fetch a string representation of the Cassandra version.
     *
     * @return A string representation of the Cassandra version.
     */
    @Override
    public String getReleaseVersion() {
        log(" getReleaseVersion()");
        return client.getStringValue("/storage_service/release_version");
    }

    /**
     * Fetch a string representation of the current Schema version.
     *
     * @return A string representation of the Schema version.
     */
    @Override
    public String getSchemaVersion() {
        log(" getSchemaVersion()");
        return client.getStringValue("/storage_service/schema_version");
    }

    /**
     * Get the list of all data file locations from conf
     *
     * @return String array of all locations
     */
    @Override
    public String[] getAllDataFileLocations() {
        log(" getAllDataFileLocations()");
        return client.getStringArrValue("/storage_service/data_file/locations");
    }

    /**
     * Get location of the commit log
     *
     * @return a string path
     */
    @Override
    public String getCommitLogLocation() {
        log(" getCommitLogLocation()");
        return client.getStringValue("/storage_service/commitlog");
    }

    /**
     * Get location of the saved caches dir
     *
     * @return a string path
     */
    @Override
    public String getSavedCachesLocation() {
        log(" getSavedCachesLocation()");
        return client.getStringValue("/storage_service/saved_caches/location");
    }
|
|
|
|
|
|
|
|
    /**
     * Retrieve a map of range to end points that describe the ring topology of
     * a Cassandra cluster.
     *
     * @return mapping of ranges to end points
     */
    @Override
    public Map<List<String>, List<String>> getRangeToEndpointMap(String keyspace) {
        log(" getRangeToEndpointMap(String keyspace)");
        return client.getMapListStrValue("/storage_service/range_to_endpoint_map/" + keyspace);
    }

    /**
     * Retrieve a map of range to rpc addresses that describe the ring topology
     * of a Cassandra cluster.
     *
     * @return mapping of ranges to rpc addresses
     */
    @Override
    public Map<List<String>, List<String>> getRangeToRpcaddressMap(String keyspace) {
        log(" getRangeToRpcaddressMap(String keyspace)");
        // Same endpoint as the token-range query; rpc=true selects the
        // RPC (client-facing) addresses instead of the internal ones.
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("rpc", "true");
        return client.getMapListStrValue("/storage_service/range/" + keyspace, queryParams);
    }
|
|
|
|
|
|
|
|
/**
|
|
|
|
* The same as {@code describeRing(String)} but converts TokenRange to the
|
|
|
|
* String for JMX compatibility
|
|
|
|
*
|
|
|
|
* @param keyspace
|
|
|
|
* The keyspace to fetch information about
|
|
|
|
*
|
|
|
|
* @return a List of TokenRange(s) converted to String for the given
|
|
|
|
* keyspace
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public List<String> describeRingJMX(String keyspace) throws IOException {
|
|
|
|
log(" describeRingJMX(String keyspace) throws IOException");
|
2016-10-11 14:07:03 +02:00
|
|
|
JsonArray arr = client.getJsonArray("/storage_service/describe_ring/" + keyspace);
|
2015-11-08 11:10:57 +01:00
|
|
|
List<String> res = new ArrayList<String>();
|
|
|
|
|
|
|
|
for (int i = 0; i < arr.size(); i++) {
|
|
|
|
JsonObject obj = arr.getJsonObject(i);
|
StorageService: format the describering output
The describeRingJMX method, returns a formated output. The output should
be similiar to origin as oppose to the current implementation that
returns a json representation.
After the change an example of nodetool describering:
$ nodetool describering keyspace1
Schema Version:1074c31b-1f39-3df2-90ff-7f0b64bb3ea4
TokenRange:
TokenRange(start_token:7485973865401664349,
end_token:-338297331236877217, endpoints:[127.0.0.1],
rpc_endpoints:[127.0.0.1],
endpoint_details:[EndpointDetails(host:127.0.0.1,
datacenter:datacenter1, rack:rack1)])
TokenRange(start_token:-338297331236877217,
end_token:7485973865401664349, endpoints:[127.0.0.2],
rpc_endpoints:[127.0.0.2],
endpoint_details:[EndpointDetails(host:127.0.0.2,
datacenter:datacenter1, rack:rack1)])
On sycall-jmx:
Fixes #21
Signed-off-by: Amnon Heiman <amnon@scylladb.com>
2015-12-23 09:51:50 +01:00
|
|
|
StringBuilder sb = new StringBuilder();
|
|
|
|
sb.append("TokenRange(");
|
|
|
|
sb.append("start_token:");
|
|
|
|
sb.append(obj.getString("start_token"));
|
|
|
|
sb.append(", end_token:");
|
|
|
|
sb.append(obj.getString("end_token"));
|
|
|
|
sb.append(", endpoints:[");
|
|
|
|
JsonArray endpoints = obj.getJsonArray("endpoints");
|
|
|
|
for (int j = 0; j < endpoints.size(); j++) {
|
|
|
|
if (j > 0) {
|
|
|
|
sb.append(", ");
|
|
|
|
}
|
|
|
|
sb.append(endpoints.getString(j));
|
|
|
|
}
|
|
|
|
sb.append("], rpc_endpoints:[");
|
|
|
|
JsonArray rpc_endpoints = obj.getJsonArray("rpc_endpoints");
|
|
|
|
for (int j = 0; j < rpc_endpoints.size(); j++) {
|
|
|
|
if (j > 0) {
|
|
|
|
sb.append(", ");
|
|
|
|
}
|
|
|
|
sb.append(rpc_endpoints.getString(j));
|
|
|
|
}
|
|
|
|
|
|
|
|
sb.append("], endpoint_details:[");
|
|
|
|
JsonArray endpoint_details = obj.getJsonArray("endpoint_details");
|
|
|
|
for (int j = 0; j < endpoint_details.size(); j++) {
|
|
|
|
JsonObject detail = endpoint_details.getJsonObject(j);
|
|
|
|
if (j > 0) {
|
|
|
|
sb.append(", ");
|
|
|
|
}
|
|
|
|
sb.append("EndpointDetails(");
|
|
|
|
sb.append("host:");
|
|
|
|
sb.append(detail.getString("host"));
|
|
|
|
sb.append(", datacenter:");
|
|
|
|
sb.append(detail.getString("datacenter"));
|
|
|
|
sb.append(", rack:");
|
|
|
|
sb.append(detail.getString("rack"));
|
|
|
|
sb.append(')');
|
|
|
|
}
|
|
|
|
sb.append("])");
|
|
|
|
res.add(sb.toString());
|
2015-11-08 11:10:57 +01:00
|
|
|
}
|
|
|
|
return res;
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
    /**
     * Retrieve a map of pending ranges to endpoints that describe the ring
     * topology
     *
     * @param keyspace
     *            the keyspace to get the pending range map for.
     * @return a map of pending ranges to endpoints
     */
    @Override
    public Map<List<String>, List<String>> getPendingRangeToEndpointMap(String keyspace) {
        log(" getPendingRangeToEndpointMap(String keyspace)");
        return client.getMapListStrValue("/storage_service/pending_range/" + keyspace);
    }

    /**
     * Retrieve a map of tokens to endpoints, including the bootstrapping ones.
     *
     * @return a map of tokens to endpoints in ascending order
     */
    @Override
    public Map<String, String> getTokenToEndpointMap() {
        log(" getTokenToEndpointMap()");
        return client.getMapStrValue("/storage_service/tokens_endpoint");
    }

    /** Retrieve this hosts unique ID */
    @Override
    public String getLocalHostId() {
        log(" getLocalHostId()");
        return client.getStringValue("/storage_service/hostid/local");
    }
|
|
|
|
|
2016-03-19 17:37:14 +01:00
|
|
|
    /**
     * Returns the address this node broadcasts to the cluster.
     *
     * @return the broadcast address, looked up via the host-id mapping
     */
    public String getLocalBroadCastingAddress() {
        // FIXME:
        // There is no straight API to get the broadcasting
        // address, instead of trying to figure it out from the configuration
        // we will use the getHostIdToAddressMap with the hostid
        return getHostIdToAddressMap().get(getLocalHostId());
    }
|
2016-10-11 14:07:03 +02:00
|
|
|
|
2015-04-13 08:30:10 +02:00
|
|
|
    /** Retrieve the mapping of endpoint to host ID */
    @Override
    public Map<String, String> getHostIdMap() {
        log(" getHostIdMap()");
        return client.getMapStrValue("/storage_service/host_id");
    }

    /** Retrieve the reverse mapping: host ID to endpoint address. */
    public Map<String, String> getHostIdToAddressMap() {
        log(" getHostIdToAddressMap()");
        // Same API resource as getHostIdMap, with key/value swapped.
        return client.getReverseMapStrValue("/storage_service/host_id");
    }
|
|
|
|
|
2015-04-13 08:30:10 +02:00
|
|
|
    /**
     * Numeric load value.
     *
     * @see org.apache.cassandra.metrics.StorageMetrics#load
     */
    @Deprecated
    public double getLoad() {
        log(" getLoad()");
        return client.getDoubleValue("/storage_service/load");
    }

    /** Human-readable load value */
    @Override
    public String getLoadString() {
        log(" getLoadString()");
        // Formats the raw byte count (e.g. "1.21 GB").
        return FileUtils.stringifyFileSize(getLoad());
    }
|
|
|
|
|
|
|
|
/** Human-readable load value. Keys are IP addresses. */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public Map<String, String> getLoadMap() {
|
|
|
|
log(" getLoadMap()");
|
2015-12-29 16:43:48 +01:00
|
|
|
Map<String, Double> load = getLoadMapAsDouble();
|
2015-12-13 12:08:38 +01:00
|
|
|
Map<String, String> map = new HashMap<>();
|
2016-10-11 14:07:03 +02:00
|
|
|
for (Map.Entry<String, Double> entry : load.entrySet()) {
|
2015-12-29 16:43:48 +01:00
|
|
|
map.put(entry.getKey(), FileUtils.stringifyFileSize(entry.getValue()));
|
2015-12-13 12:08:38 +01:00
|
|
|
}
|
|
|
|
return map;
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2015-12-29 16:43:48 +01:00
|
|
|
public Map<String, Double> getLoadMapAsDouble() {
|
|
|
|
log(" getLoadMapAsDouble()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getMapStringDouble("/storage_service/load_map");
|
2015-12-29 16:43:48 +01:00
|
|
|
}
|
|
|
|
|
2015-04-13 08:30:10 +02:00
|
|
|
/**
|
|
|
|
* Return the generation value for this node.
|
|
|
|
*
|
|
|
|
* @return generation number
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public int getCurrentGenerationNumber() {
|
|
|
|
log(" getCurrentGenerationNumber()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getIntValue("/storage_service/generation_number");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
    /**
     * This method returns the N endpoints that are responsible for storing the
     * specified key i.e for replication.
     *
     * @param keyspaceName
     *            keyspace name
     * @param cf
     *            Column family name
     * @param key
     *            - key for which we need to find the endpoint return value -
     *            the endpoint responsible for this key
     */
    @Override
    public List<InetAddress> getNaturalEndpoints(String keyspaceName, String cf, String key) {
        log(" getNaturalEndpoints(String keyspaceName, String cf, String key)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("cf", cf);
        queryParams.add("key", key);
        return client.getListInetAddressValue("/storage_service/natural_endpoints/" + keyspaceName, queryParams);
    }
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public List<InetAddress> getNaturalEndpoints(String keyspaceName, ByteBuffer key) {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" getNaturalEndpoints(String keyspaceName, ByteBuffer key)");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getListInetAddressValue("");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2020-05-21 14:40:35 +02:00
|
|
|
@Override
|
|
|
|
public void checkAndRepairCdcStreams() throws IOException {
|
|
|
|
log(" checkAndRepairCdcStreams() throws IOException");
|
|
|
|
client.post("/storage_service/cdc_streams_check_and_repair");
|
|
|
|
}
|
|
|
|
|
2015-04-13 08:30:10 +02:00
|
|
|
    /**
     * Takes the snapshot for the given keyspaces. A snapshot name must be
     * specified.
     *
     * @param tag
     *            the tag given to the snapshot; may not be null or empty
     * @param keyspaceNames
     *            the name of the keyspaces to snapshot; empty means "all."
     */
    @Override
    public void takeSnapshot(String tag, String... keyspaceNames) throws IOException {
        // Delegates to the options-taking overload with no options.
        takeSnapshot(tag, null, keyspaceNames);
    }
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void takeSnapshot(String tag, Map<String, String> options, String... keyspaceNames) throws IOException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" takeSnapshot(String tag, String... keyspaceNames) throws IOException");
|
2019-07-24 16:33:13 +02:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
APIClient.set_query_param(queryParams, "tag", tag);
|
2019-07-24 16:33:13 +02:00
|
|
|
|
2018-07-30 13:37:41 +02:00
|
|
|
if (keyspaceNames.length == 1 && keyspaceNames[0].indexOf('.') != -1) {
|
|
|
|
String[] parts = keyspaceNames[0].split("\\.");
|
2019-07-24 16:33:13 +02:00
|
|
|
keyspaceNames = new String[] { parts[0] };
|
2018-07-30 13:37:41 +02:00
|
|
|
APIClient.set_query_param(queryParams, "cf", parts[1]);
|
|
|
|
}
|
2016-10-11 14:07:03 +02:00
|
|
|
APIClient.set_query_param(queryParams, "kn", APIClient.join(keyspaceNames));
|
2021-06-02 12:19:41 +02:00
|
|
|
if (options.containsKey("skipFlush")) {
|
|
|
|
APIClient.set_query_param(queryParams, "sf", options.get("skipFlush"));
|
|
|
|
}
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/snapshots", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Takes the snapshot of a specific column family. A snapshot name must be
|
|
|
|
* specified.
|
|
|
|
*
|
|
|
|
* @param keyspaceName
|
|
|
|
* the keyspace which holds the specified column family
|
|
|
|
* @param columnFamilyName
|
|
|
|
* the column family to snapshot
|
|
|
|
* @param tag
|
|
|
|
* the tag given to the snapshot; may not be null or empty
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public void takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2016-10-11 14:07:03 +02:00
|
|
|
if (keyspaceName == null) {
|
2015-05-26 11:38:08 +02:00
|
|
|
throw new IOException("You must supply a keyspace name");
|
2016-10-11 14:07:03 +02:00
|
|
|
}
|
|
|
|
if (columnFamilyName == null) {
|
2015-05-26 11:38:08 +02:00
|
|
|
throw new IOException("You must supply a table name");
|
2016-10-11 14:07:03 +02:00
|
|
|
}
|
|
|
|
if (tag == null || tag.equals("")) {
|
2015-05-26 11:38:08 +02:00
|
|
|
throw new IOException("You must supply a snapshot name.");
|
2016-10-11 14:07:03 +02:00
|
|
|
}
|
2015-05-26 11:38:08 +02:00
|
|
|
queryParams.add("tag", tag);
|
|
|
|
queryParams.add("kn", keyspaceName);
|
|
|
|
queryParams.add("cf", columnFamilyName);
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/snapshots", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
    /**
     * Remove the snapshot with the given name from the given keyspaces. If no
     * tag is specified we will remove all snapshots.
     */
    @Override
    public void clearSnapshot(String tag, String... keyspaceNames) throws IOException {
        log(" clearSnapshot(String tag, String... keyspaceNames) throws IOException");
        // set_query_param skips null/empty values, so omitting the tag or
        // keyspace list widens the delete to all snapshots/keyspaces.
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_query_param(queryParams, "tag", tag);
        APIClient.set_query_param(queryParams, "kn", APIClient.join(keyspaceNames));
        client.delete("/storage_service/snapshots", queryParams);
    }
|
|
|
|
|
|
|
|
    /**
     * Get the details of all the snapshot
     *
     * @return A map of snapshotName to all its details in Tabular form.
     */
    @Override
    public Map<String, TabularData> getSnapshotDetails() {
        log(" getSnapshotDetails()");
        return client.getMapStringSnapshotTabularDataValue("/storage_service/snapshots", null);
    }
|
|
|
|
|
2016-04-26 10:34:38 +02:00
|
|
|
public Map<String, Map<String, Set<String>>> getSnapshotKeyspaceColumnFamily() {
|
2016-10-11 14:07:03 +02:00
|
|
|
JsonArray arr = client.getJsonArray("/storage_service/snapshots");
|
2016-04-26 10:34:38 +02:00
|
|
|
Map<String, Map<String, Set<String>>> res = new HashMap<String, Map<String, Set<String>>>();
|
|
|
|
for (int i = 0; i < arr.size(); i++) {
|
|
|
|
JsonObject obj = arr.getJsonObject(i);
|
|
|
|
Map<String, Set<String>> kscf = new HashMap<String, Set<String>>();
|
|
|
|
JsonArray snapshots = obj.getJsonArray("value");
|
|
|
|
for (int j = 0; j < snapshots.size(); j++) {
|
|
|
|
JsonObject s = snapshots.getJsonObject(j);
|
|
|
|
String ks = s.getString("ks");
|
|
|
|
String cf = s.getString("cf");
|
|
|
|
if (!kscf.containsKey(ks)) {
|
|
|
|
kscf.put(ks, new HashSet<String>());
|
|
|
|
}
|
|
|
|
kscf.get(ks).add(cf);
|
|
|
|
}
|
|
|
|
res.put(obj.getString("key"), kscf);
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2015-04-13 08:30:10 +02:00
|
|
|
/**
|
|
|
|
* Get the true size taken by all snapshots across all keyspaces.
|
|
|
|
*
|
|
|
|
* @return True size taken by all the snapshots.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public long trueSnapshotsSize() {
|
|
|
|
log(" trueSnapshotsSize()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getLongValue("/storage_service/snapshots/size/true");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Forces major compaction of a single keyspace
|
|
|
|
*/
|
2017-04-06 11:21:23 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public void forceKeyspaceCompaction(String keyspaceName, String... columnFamilies)
|
|
|
|
throws IOException, ExecutionException, InterruptedException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" forceKeyspaceCompaction(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2016-10-11 14:07:03 +02:00
|
|
|
APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies));
|
|
|
|
client.post("/storage_service/keyspace_compaction/" + keyspaceName, queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2019-07-24 16:33:13 +02:00
|
|
|
@Override
|
|
|
|
public void forceKeyspaceCompactionForTokenRange(String keyspaceName, String startToken, String endToken,
|
|
|
|
String... tableNames) throws IOException, ExecutionException, InterruptedException {
|
2018-07-30 13:37:41 +02:00
|
|
|
// TODO: actually handle token ranges.
|
|
|
|
forceKeyspaceCompaction(keyspaceName, tableNames);
|
|
|
|
}
|
|
|
|
|
2015-04-13 08:30:10 +02:00
|
|
|
/**
|
|
|
|
* Trigger a cleanup of keys on a single keyspace
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public int forceKeyspaceCleanup(String keyspaceName, String... columnFamilies)
|
|
|
|
throws IOException, ExecutionException, InterruptedException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" forceKeyspaceCleanup(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2016-10-11 14:07:03 +02:00
|
|
|
APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies));
|
|
|
|
return client.postInt("/storage_service/keyspace_cleanup/" + keyspaceName, queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Scrub (deserialize + reserialize at the latest version, skipping bad rows
|
|
|
|
* if any) the given keyspace. If columnFamilies array is empty, all CFs are
|
|
|
|
* scrubbed.
|
|
|
|
*
|
|
|
|
* Scrubbed CFs will be snapshotted first, if disableSnapshot is false
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public int scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... columnFamilies)
|
|
|
|
throws IOException, ExecutionException, InterruptedException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
|
2016-03-15 10:55:36 +01:00
|
|
|
return scrub(disableSnapshot, skipCorrupted, true, keyspaceName, columnFamilies);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, String keyspaceName,
|
|
|
|
String... columnFamilies) throws IOException, ExecutionException, InterruptedException {
|
2016-03-15 10:55:36 +01:00
|
|
|
log(" scrub(boolean disableSnapshot, boolean skipCorrupted, bool checkData, String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2016-10-11 14:07:03 +02:00
|
|
|
APIClient.set_bool_query_param(queryParams, "disable_snapshot", disableSnapshot);
|
|
|
|
APIClient.set_bool_query_param(queryParams, "skip_corrupted", skipCorrupted);
|
|
|
|
APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies));
|
2020-04-01 09:13:16 +02:00
|
|
|
return client.getIntValue("/storage_service/keyspace_scrub/" + keyspaceName, queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip
|
|
|
|
* bad rows and do not snapshot sstables first.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... columnFamilies)
|
|
|
|
throws IOException, ExecutionException, InterruptedException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2016-10-11 14:07:03 +02:00
|
|
|
APIClient.set_bool_query_param(queryParams, "exclude_current_version", excludeCurrentVersion);
|
|
|
|
APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies));
|
2019-02-19 14:34:31 +01:00
|
|
|
return client.getIntValue("/storage_service/keyspace_upgrade_sstables/" + keyspaceName, queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Flush all memtables for the given column families, or all columnfamilies
|
|
|
|
* for the given keyspace if none are explicitly listed.
|
|
|
|
*
|
|
|
|
* @param keyspaceName
|
|
|
|
* @param columnFamilies
|
|
|
|
* @throws IOException
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public void forceKeyspaceFlush(String keyspaceName, String... columnFamilies)
|
|
|
|
throws IOException, ExecutionException, InterruptedException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" forceKeyspaceFlush(String keyspaceName, String... columnFamilies) throws IOException, ExecutionException, InterruptedException");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2016-10-11 14:07:03 +02:00
|
|
|
APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies));
|
|
|
|
client.post("/storage_service/keyspace_flush/" + keyspaceName, queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
private class CheckRepair extends TimerTask {
|
|
|
|
@SuppressWarnings("unused")
|
|
|
|
private int id;
|
|
|
|
private String keyspace;
|
|
|
|
private String message;
|
|
|
|
private MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
|
|
|
private int cmd;
|
2016-10-17 13:34:21 +02:00
|
|
|
private final boolean legacy;
|
2016-10-11 14:07:03 +02:00
|
|
|
|
2016-10-17 13:34:21 +02:00
|
|
|
public CheckRepair(int id, String keyspace, boolean legacy) {
|
2015-08-13 08:53:47 +02:00
|
|
|
this.id = id;
|
|
|
|
this.keyspace = keyspace;
|
2016-10-17 13:34:21 +02:00
|
|
|
this.legacy = legacy;
|
2015-08-13 08:53:47 +02:00
|
|
|
APIClient.set_query_param(queryParams, "id", Integer.toString(id));
|
|
|
|
message = String.format("Repair session %d ", id);
|
|
|
|
// The returned id is the command number
|
|
|
|
this.cmd = id;
|
|
|
|
}
|
2016-10-11 14:07:03 +02:00
|
|
|
|
2015-08-13 08:53:47 +02:00
|
|
|
@Override
|
|
|
|
public void run() {
|
2016-10-11 14:07:03 +02:00
|
|
|
String status = client.getStringValue("/storage_service/repair_async/" + keyspace, queryParams);
|
2015-08-13 08:53:47 +02:00
|
|
|
if (!status.equals("RUNNING")) {
|
|
|
|
cancel();
|
2016-10-17 13:34:21 +02:00
|
|
|
if (status.equals("SUCCESSFUL")) {
|
|
|
|
sendMessage(cmd, RepairStatus.SESSION_SUCCESS, message, legacy);
|
|
|
|
} else {
|
|
|
|
sendMessage(cmd, RepairStatus.SESSION_FAILED, message + "failed", legacy);
|
2015-08-13 08:53:47 +02:00
|
|
|
}
|
2016-10-17 13:34:21 +02:00
|
|
|
sendMessage(cmd, RepairStatus.FINISHED, message + "finished", legacy);
|
2015-08-13 08:53:47 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Sends JMX notification to subscribers.
|
|
|
|
*
|
2016-10-11 14:07:03 +02:00
|
|
|
* @param type
|
|
|
|
* Message type
|
|
|
|
* @param message
|
|
|
|
* Message itself
|
|
|
|
* @param userObject
|
|
|
|
* Arbitrary object to attach to notification
|
2015-08-13 08:53:47 +02:00
|
|
|
*/
|
2016-10-11 14:07:03 +02:00
|
|
|
public void sendNotification(String type, String message, Object userObject) {
|
|
|
|
Notification jmxNotification = new Notification(type, getBoundName(),
|
|
|
|
notificationSerialNumber.incrementAndGet(), message);
|
2015-08-13 08:53:47 +02:00
|
|
|
jmxNotification.setUserData(userObject);
|
|
|
|
sendNotification(jmxNotification);
|
|
|
|
}
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
public String getRepairMessage(final int cmd, final String keyspace, final int ranges_size,
|
|
|
|
final RepairParallelism parallelismDegree, final boolean fullRepair) {
|
|
|
|
return String.format(
|
|
|
|
"Starting repair command #%d, repairing %d ranges for keyspace %s (parallelism=%s, full=%b)", cmd,
|
|
|
|
ranges_size, keyspace, parallelismDegree, fullRepair);
|
2015-08-13 08:53:47 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
*
|
|
|
|
* @param repair
|
|
|
|
*/
|
2016-10-17 13:34:21 +02:00
|
|
|
private int waitAndNotifyRepair(int cmd, String keyspace, String message, boolean legacy) {
|
2015-12-30 07:47:32 +01:00
|
|
|
logger.finest(message);
|
2016-10-17 13:34:21 +02:00
|
|
|
|
|
|
|
sendMessage(cmd, RepairStatus.STARTED, message, legacy);
|
|
|
|
|
|
|
|
TimerTask taskToExecute = new CheckRepair(cmd, keyspace, legacy);
|
2015-08-13 08:53:47 +02:00
|
|
|
timer.schedule(taskToExecute, 100, 1000);
|
|
|
|
return cmd;
|
|
|
|
}
|
|
|
|
|
2016-10-17 13:34:21 +02:00
|
|
|
// See org.apache.cassandra.utils.progress.ProgressEventType
|
|
|
|
private static enum ProgressEventType {
|
|
|
|
START, PROGRESS, ERROR, ABORT, SUCCESS, COMPLETE, NOTIFICATION
|
|
|
|
}
|
|
|
|
|
|
|
|
private void sendMessage(int cmd, RepairStatus status, String message, boolean legacy) {
|
|
|
|
String tag = "repair:" + cmd;
|
|
|
|
|
|
|
|
ProgressEventType type = ProgressEventType.ERROR;
|
|
|
|
int total = 100;
|
|
|
|
int count = 0;
|
|
|
|
switch (status) {
|
|
|
|
case STARTED:
|
|
|
|
type = ProgressEventType.START;
|
|
|
|
break;
|
|
|
|
case FINISHED:
|
|
|
|
type = ProgressEventType.COMPLETE;
|
|
|
|
count = 100;
|
|
|
|
break;
|
|
|
|
case SESSION_SUCCESS:
|
|
|
|
type = ProgressEventType.SUCCESS;
|
|
|
|
count = 100;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
Notification jmxNotification = new Notification("progress", tag, notificationSerialNumber.incrementAndGet(),
|
|
|
|
message);
|
|
|
|
Map<String, Integer> userData = new HashMap<>();
|
|
|
|
userData.put("type", type.ordinal());
|
|
|
|
userData.put("progressCount", count);
|
|
|
|
userData.put("total", total);
|
|
|
|
jmxNotification.setUserData(userData);
|
|
|
|
sendNotification(jmxNotification);
|
|
|
|
|
|
|
|
if (legacy) {
|
|
|
|
sendNotification("repair", message, new int[] { cmd, status.ordinal() });
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-04-13 08:30:10 +02:00
|
|
|
/**
|
|
|
|
* Invoke repair asynchronously. You can track repair progress by
|
|
|
|
* subscribing JMX notification sent from this StorageServiceMBean.
|
|
|
|
* Notification format is: type: "repair" userObject: int array of length 2,
|
|
|
|
* [0]=command number, [1]=ordinal of AntiEntropyService.Status
|
|
|
|
*
|
|
|
|
* @param keyspace
|
|
|
|
* Keyspace name to repair. Should not be null.
|
|
|
|
* @param options
|
|
|
|
* repair option.
|
|
|
|
* @return Repair command number, or 0 if nothing to repair
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public int repairAsync(String keyspace, Map<String, String> options) {
|
2016-10-17 13:34:21 +02:00
|
|
|
return repairAsync(keyspace, options, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Keys accepted in the repairAsync options map. Names mirror upstream
// Cassandra's RepairOption keys; the @SuppressWarnings("unused") ones are
// kept for parity even though this implementation does not read them.
@SuppressWarnings("unused")
private static final String PARALLELISM_KEY = "parallelism";
private static final String PRIMARY_RANGE_KEY = "primaryRange";
@SuppressWarnings("unused")
private static final String INCREMENTAL_KEY = "incremental";
@SuppressWarnings("unused")
private static final String JOB_THREADS_KEY = "jobThreads";
private static final String RANGES_KEY = "ranges";
private static final String COLUMNFAMILIES_KEY = "columnFamilies";
private static final String DATACENTERS_KEY = "dataCenters";
private static final String HOSTS_KEY = "hosts";
@SuppressWarnings("unused")
private static final String TRACE_KEY = "trace";
|
|
|
|
|
|
|
|
private int repairAsync(String keyspace, Map<String, String> options, boolean legacy) {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" repairAsync(String keyspace, Map<String, String> options)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
for (String op : options.keySet()) {
|
2015-12-29 16:09:22 +01:00
|
|
|
APIClient.set_query_param(queryParams, op, options.get(op));
|
2015-05-26 11:38:08 +02:00
|
|
|
}
|
2015-12-29 16:09:22 +01:00
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
int cmd = client.postInt("/storage_service/repair_async/" + keyspace, queryParams);
|
2016-10-17 13:34:21 +02:00
|
|
|
waitAndNotifyRepair(cmd, keyspace, getRepairMessage(cmd, keyspace, 1, RepairParallelism.SEQUENTIAL, true),
|
|
|
|
legacy);
|
2015-08-13 08:53:47 +02:00
|
|
|
return cmd;
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-10-17 13:34:21 +02:00
|
|
|
private static String commaSeparated(Collection<?> c) {
|
|
|
|
String s = c.toString();
|
|
|
|
return s.substring(1, s.length() - 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
private int repairRangeAsync(String beginToken, String endToken, String keyspaceName, Boolean isSequential,
|
|
|
|
Collection<String> dataCenters, Collection<String> hosts, Boolean primaryRange, Boolean repairedAt,
|
|
|
|
String... columnFamilies) {
|
|
|
|
log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection<String> dataCenters, Collection<String> hosts, boolean repairedAt, String... columnFamilies) throws IOException");
|
|
|
|
|
|
|
|
Map<String, String> options = new HashMap<String, String>();
|
|
|
|
if (beginToken != null && endToken != null) {
|
|
|
|
options.put(RANGES_KEY, beginToken + ":" + endToken);
|
|
|
|
}
|
|
|
|
if (dataCenters != null) {
|
|
|
|
options.put(DATACENTERS_KEY, commaSeparated(dataCenters));
|
|
|
|
}
|
|
|
|
if (hosts != null) {
|
|
|
|
options.put(HOSTS_KEY, commaSeparated(hosts));
|
|
|
|
}
|
|
|
|
if (columnFamilies != null && columnFamilies.length != 0) {
|
|
|
|
options.put(COLUMNFAMILIES_KEY, commaSeparated(asList(columnFamilies)));
|
|
|
|
}
|
|
|
|
if (primaryRange != null) {
|
|
|
|
options.put(PRIMARY_RANGE_KEY, primaryRange.toString());
|
|
|
|
}
|
|
|
|
|
|
|
|
return repairAsync(keyspaceName, options, true);
|
|
|
|
}
|
|
|
|
|
2015-08-13 08:53:47 +02:00
|
|
|
@Override
|
2016-10-17 13:34:21 +02:00
|
|
|
@Deprecated
|
2016-10-11 14:07:03 +02:00
|
|
|
public int forceRepairAsync(String keyspace, boolean isSequential, Collection<String> dataCenters,
|
|
|
|
Collection<String> hosts, boolean primaryRange, boolean repairedAt, String... columnFamilies)
|
2019-07-24 16:33:13 +02:00
|
|
|
throws IOException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" forceRepairAsync(String keyspace, boolean isSequential, Collection<String> dataCenters, Collection<String> hosts, boolean primaryRange, boolean repairedAt, String... columnFamilies) throws IOException");
|
2016-10-17 13:34:21 +02:00
|
|
|
return repairRangeAsync(null, null, keyspace, isSequential, dataCenters, hosts, primaryRange, repairedAt,
|
|
|
|
columnFamilies);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-17 13:34:21 +02:00
|
|
|
@Deprecated
|
2016-10-11 14:07:03 +02:00
|
|
|
public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential,
|
2016-10-17 13:34:21 +02:00
|
|
|
Collection<String> dataCenters, Collection<String> hosts, boolean repairedAt, String... columnFamilies) {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, Collection<String> dataCenters, Collection<String> hosts, boolean repairedAt, String... columnFamilies) throws IOException");
|
2016-10-17 13:34:21 +02:00
|
|
|
return repairRangeAsync(beginToken, endToken, keyspaceName, isSequential, dataCenters, hosts, null, repairedAt,
|
|
|
|
columnFamilies);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2015-08-13 08:53:47 +02:00
|
|
|
@Override
|
2016-10-17 13:34:21 +02:00
|
|
|
@Deprecated
|
|
|
|
public int forceRepairAsync(String keyspaceName, boolean isSequential, boolean isLocal, boolean primaryRange,
|
2016-10-11 14:07:03 +02:00
|
|
|
boolean fullRepair, String... columnFamilies) {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange, boolean fullRepair, String... columnFamilies)");
|
2016-10-17 13:34:21 +02:00
|
|
|
return repairRangeAsync(null, null, keyspaceName, isSequential, null, null, primaryRange, null, columnFamilies);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
@Deprecated
|
2016-10-11 14:07:03 +02:00
|
|
|
public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential,
|
|
|
|
boolean isLocal, boolean repairedAt, String... columnFamilies) {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential, boolean isLocal, boolean repairedAt, String... columnFamilies)");
|
2016-10-17 13:34:21 +02:00
|
|
|
return forceRepairRangeAsync(beginToken, endToken, keyspaceName, isSequential, null, null, repairedAt,
|
|
|
|
columnFamilies);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void forceTerminateAllRepairSessions() {
|
|
|
|
log(" forceTerminateAllRepairSessions()");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/force_terminate");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* transfer this node's data to other machines and remove it from service.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void decommission() throws InterruptedException {
|
|
|
|
log(" decommission() throws InterruptedException");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/decommission");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* @param newToken
|
|
|
|
* token to move this node to. This node will unload its data
|
|
|
|
* onto its neighbors, and bootstrap to the new token.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void move(String newToken) throws IOException {
|
|
|
|
log(" move(String newToken) throws IOException");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
APIClient.set_query_param(queryParams, "new_token", newToken);
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/move", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* removeToken removes token (and all data associated with enpoint that had
|
|
|
|
* it) from the ring
|
2016-10-11 14:07:03 +02:00
|
|
|
*
|
|
|
|
* @param hostIdString
|
|
|
|
* the host id to remove
|
2015-04-13 08:30:10 +02:00
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-10-26 08:41:31 +01:00
|
|
|
public void removeNode(String hostIdString) {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" removeNode(String token)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-10-26 08:41:31 +01:00
|
|
|
APIClient.set_query_param(queryParams, "host_id", hostIdString);
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/remove_node", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Get the status of a token removal.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public String getRemovalStatus() {
|
|
|
|
log(" getRemovalStatus()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getStringValue("/storage_service/removal_status");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Force a remove operation to finish.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void forceRemoveCompletion() {
|
|
|
|
log(" forceRemoveCompletion()");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/force_remove_completion");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* set the logging level at runtime<br>
|
|
|
|
* <br>
|
|
|
|
* If both classQualifer and level are empty/null, it will reload the
|
|
|
|
* configuration to reset.<br>
|
|
|
|
* If classQualifer is not empty but level is empty/null, it will set the
|
|
|
|
* level to null for the defined classQualifer<br>
|
|
|
|
* If level cannot be parsed, then the level will be defaulted to DEBUG<br>
|
|
|
|
* <br>
|
|
|
|
* The logback configuration should have < jmxConfigurator /> set
|
|
|
|
*
|
|
|
|
* @param classQualifier
|
|
|
|
* The logger's classQualifer
|
|
|
|
* @param level
|
|
|
|
* The log level
|
|
|
|
* @throws Exception
|
|
|
|
*
|
|
|
|
* @see ch.qos.logback.classic.Level#toLevel(String)
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public void setLoggingLevel(String classQualifier, String level) throws Exception {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" setLoggingLevel(String classQualifier, String level) throws Exception");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
APIClient.set_query_param(queryParams, "level", level);
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/system/logger/" + classQualifier, queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** get the runtime logging levels */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public Map<String, String> getLoggingLevels() {
|
|
|
|
log(" getLoggingLevels()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getMapStrValue("/storage_service/logging_level");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* get the operational mode (leaving, joining, normal, decommissioned,
|
|
|
|
* client)
|
|
|
|
**/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public String getOperationMode() {
|
|
|
|
log(" getOperationMode()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getStringValue("/storage_service/operation_mode");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Returns whether the storage service is starting or not */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public boolean isStarting() {
|
|
|
|
log(" isStarting()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getBooleanValue("/storage_service/is_starting");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** get the progress of a drain operation */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public String getDrainProgress() {
|
|
|
|
log(" getDrainProgress()");
|
2015-11-10 16:03:50 +01:00
|
|
|
// FIXME
|
|
|
|
// This is a workaround so the nodetool would work
|
|
|
|
// it should be revert when the drain progress will be implemented
|
2016-10-11 14:07:03 +02:00
|
|
|
// return c.getStringValue("/storage_service/drain");
|
2015-11-10 16:03:50 +01:00
|
|
|
return String.format("Drained %s/%s ColumnFamilies", 0, 0);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* makes node unavailable for writes, flushes memtables and replays
|
|
|
|
* commitlog.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public void drain() throws IOException, InterruptedException, ExecutionException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" drain() throws IOException, InterruptedException, ExecutionException");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/drain");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Truncates (deletes) the given columnFamily from the provided keyspace.
|
|
|
|
* Calling truncate results in actual deletion of all data in the cluster
|
|
|
|
* under the given columnFamily and it will fail unless all hosts are up.
|
|
|
|
* All data in the given column family will be deleted, but its definition
|
|
|
|
* will not be affected.
|
|
|
|
*
|
|
|
|
* @param keyspace
|
|
|
|
* The keyspace to delete from
|
|
|
|
* @param columnFamily
|
|
|
|
* The column family to delete data from.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public void truncate(String keyspace, String columnFamily) throws TimeoutException, IOException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" truncate(String keyspace, String columnFamily)throws TimeoutException, IOException");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
APIClient.set_query_param(queryParams, "cf", columnFamily);
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/truncate/" + keyspace, queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* given a list of tokens (representing the nodes in the cluster), returns a
|
|
|
|
* mapping from "token -> %age of cluster owned by that token"
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public Map<InetAddress, Float> getOwnership() {
|
|
|
|
log(" getOwnership()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getMapInetAddressFloatValue("/storage_service/ownership/");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Effective ownership is % of the data each node owns given the keyspace we
|
|
|
|
* calculate the percentage using replication factor. If Keyspace == null,
|
|
|
|
* this method will try to verify if all the keyspaces in the cluster have
|
|
|
|
* the same replication strategies and if yes then we will use the first
|
|
|
|
* else a empty Map is returned.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public Map<InetAddress, Float> effectiveOwnership(String keyspace) throws IllegalStateException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" effectiveOwnership(String keyspace) throws IllegalStateException");
|
2015-08-24 09:35:05 +02:00
|
|
|
try {
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getMapInetAddressFloatValue("/storage_service/ownership/" + keyspace);
|
2015-08-24 09:35:05 +02:00
|
|
|
} catch (Exception e) {
|
2016-10-11 14:07:03 +02:00
|
|
|
throw new IllegalStateException(
|
|
|
|
"Non-system keyspaces don't have the same replication settings, effective ownership information is meaningless");
|
2015-08-24 09:35:05 +02:00
|
|
|
}
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public List<String> getKeyspaces() {
|
|
|
|
log(" getKeyspaces()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getListStrValue("/storage_service/keyspaces");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-04-26 10:34:38 +02:00
|
|
|
public Map<String, Set<String>> getColumnFamilyPerKeyspace() {
|
|
|
|
Map<String, Set<String>> res = new HashMap<String, Set<String>>();
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
JsonArray mbeans = client.getJsonArray("/column_family/");
|
2016-04-26 10:34:38 +02:00
|
|
|
|
|
|
|
for (int i = 0; i < mbeans.size(); i++) {
|
|
|
|
JsonObject mbean = mbeans.getJsonObject(i);
|
|
|
|
String ks = mbean.getString("ks");
|
|
|
|
String cf = mbean.getString("cf");
|
|
|
|
if (!res.containsKey(ks)) {
|
|
|
|
res.put(ks, new HashSet<String>());
|
|
|
|
}
|
|
|
|
res.get(ks).add(cf);
|
|
|
|
}
|
|
|
|
return res;
|
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public List<String> getNonSystemKeyspaces() {
|
|
|
|
log(" getNonSystemKeyspaces()");
|
2016-08-17 10:43:28 +02:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
|
|
|
queryParams.add("type", "user");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getListStrValue("/storage_service/keyspaces", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2018-03-27 02:23:42 +02:00
|
|
|
/** Returns per-host build status for the given materialized view. */
@Override
public Map<String, String> getViewBuildStatuses(String keyspace, String view) {
    log(" getViewBuildStatuses()");
    // NOTE(review): path has no leading '/' unlike every other endpoint in
    // this file — confirm the client normalizes relative paths.
    return client.getMapStrValue("storage_service/view_build_statuses/" + keyspace + "/" + view);
}
|
|
|
|
|
2015-04-13 08:30:10 +02:00
|
|
|
/**
|
|
|
|
* Change endpointsnitch class and dynamic-ness (and dynamic attributes) at
|
|
|
|
* runtime
|
|
|
|
*
|
|
|
|
* @param epSnitchClassName
|
|
|
|
* the canonical path name for a class implementing
|
|
|
|
* IEndpointSnitch
|
|
|
|
* @param dynamic
|
|
|
|
* boolean that decides whether dynamicsnitch is used or not
|
|
|
|
* @param dynamicUpdateInterval
|
|
|
|
* integer, in ms (default 100)
|
|
|
|
* @param dynamicResetInterval
|
|
|
|
* integer, in ms (default 600,000)
|
|
|
|
* @param dynamicBadnessThreshold
|
|
|
|
* double, (default 0.0)
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public void updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval,
|
|
|
|
Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException {
|
2015-04-13 08:30:10 +02:00
|
|
|
log(" updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval, Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
APIClient.set_bool_query_param(queryParams, "dynamic", dynamic);
|
2016-10-11 14:07:03 +02:00
|
|
|
APIClient.set_query_param(queryParams, "epSnitchClassName", epSnitchClassName);
|
2015-05-26 11:38:08 +02:00
|
|
|
if (dynamicUpdateInterval != null) {
|
2016-10-11 14:07:03 +02:00
|
|
|
queryParams.add("dynamic_update_interval", dynamicUpdateInterval.toString());
|
2015-05-26 11:38:08 +02:00
|
|
|
}
|
|
|
|
if (dynamicResetInterval != null) {
|
2016-10-11 14:07:03 +02:00
|
|
|
queryParams.add("dynamic_reset_interval", dynamicResetInterval.toString());
|
2015-05-26 11:38:08 +02:00
|
|
|
}
|
|
|
|
if (dynamicBadnessThreshold != null) {
|
2016-10-11 14:07:03 +02:00
|
|
|
queryParams.add("dynamic_badness_threshold", dynamicBadnessThreshold.toString());
|
2015-05-26 11:38:08 +02:00
|
|
|
}
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/update_snitch", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// allows a user to forcibly 'kill' a sick node
|
2016-08-17 10:43:28 +02:00
|
|
|
    /**
     * Stops gossip on this node by issuing a DELETE to the
     * {@code /storage_service/gossiping} REST endpoint.
     */
    @Override
    public void stopGossiping() {
        log(" stopGossiping()");
        client.delete("/storage_service/gossiping");
    }
|
|
|
|
|
|
|
|
// allows a user to recover a forcibly 'killed' node
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void startGossiping() {
|
|
|
|
log(" startGossiping()");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/gossiping");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// allows a user to see whether gossip is running or not
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public boolean isGossipRunning() {
|
|
|
|
log(" isGossipRunning()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getBooleanValue("/storage_service/gossiping");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// allows a user to forcibly completely stop cassandra
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void stopDaemon() {
|
|
|
|
log(" stopDaemon()");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/stop_daemon");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// to determine if gossip is disabled
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public boolean isInitialized() {
|
|
|
|
log(" isInitialized()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getBooleanValue("/storage_service/is_initialized");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// allows a user to disable thrift
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void stopRPCServer() {
|
|
|
|
log(" stopRPCServer()");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.delete("/storage_service/rpc_server");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// allows a user to reenable thrift
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void startRPCServer() {
|
|
|
|
log(" startRPCServer()");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/rpc_server");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// to determine if thrift is running
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public boolean isRPCServerRunning() {
|
|
|
|
log(" isRPCServerRunning()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getBooleanValue("/storage_service/rpc_server");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void stopNativeTransport() {
|
|
|
|
log(" stopNativeTransport()");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.delete("/storage_service/native_transport");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void startNativeTransport() {
|
|
|
|
log(" startNativeTransport()");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/native_transport");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public boolean isNativeTransportRunning() {
|
|
|
|
log(" isNativeTransportRunning()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getBooleanValue("/storage_service/native_transport");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// allows a node that have been started without joining the ring to join it
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void joinRing() throws IOException {
|
|
|
|
log(" joinRing() throws IOException");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/join_ring");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public boolean isJoined() {
|
|
|
|
log(" isJoined()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getBooleanValue("/storage_service/join_ring");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void setStreamThroughputMbPerSec(int value) {
|
|
|
|
log(" setStreamThroughputMbPerSec(int value)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
queryParams.add("value", Integer.toString(value));
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/stream_throughput", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public int getStreamThroughputMbPerSec() {
|
|
|
|
log(" getStreamThroughputMbPerSec()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getIntValue("/storage_service/stream_throughput");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
public int getCompactionThroughputMbPerSec() {
|
|
|
|
log(" getCompactionThroughputMbPerSec()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getIntValue("/storage_service/compaction_throughput");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void setCompactionThroughputMbPerSec(int value) {
|
|
|
|
log(" setCompactionThroughputMbPerSec(int value)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
queryParams.add("value", Integer.toString(value));
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/compaction_throughput", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public boolean isIncrementalBackupsEnabled() {
|
|
|
|
log(" isIncrementalBackupsEnabled()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getBooleanValue("/storage_service/incremental_backups");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void setIncrementalBackupsEnabled(boolean value) {
|
|
|
|
log(" setIncrementalBackupsEnabled(boolean value)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
queryParams.add("value", Boolean.toString(value));
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/incremental_backups", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Initiate a process of streaming data for which we are responsible from
|
|
|
|
* other nodes. It is similar to bootstrap except meant to be used on a node
|
|
|
|
* which is already in the cluster (typically containing no data) as an
|
|
|
|
* alternative to running repair.
|
|
|
|
*
|
|
|
|
* @param sourceDc
|
|
|
|
* Name of DC from which to select sources for streaming or null
|
|
|
|
* to pick any node
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void rebuild(String sourceDc) {
|
2018-07-30 13:37:41 +02:00
|
|
|
rebuild(sourceDc, null, null, null);
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
2019-07-24 16:33:13 +02:00
|
|
|
* Same as {@link #rebuild(String)}, but only for specified keyspace and
|
|
|
|
* ranges.
|
2018-07-30 13:37:41 +02:00
|
|
|
*
|
2019-07-24 16:33:13 +02:00
|
|
|
* @param sourceDc
|
|
|
|
* Name of DC from which to select sources for streaming or null
|
|
|
|
* to pick any node
|
|
|
|
* @param keyspace
|
|
|
|
* Name of the keyspace which to rebuild or null to rebuild all
|
|
|
|
* keyspaces.
|
|
|
|
* @param tokens
|
|
|
|
* Range of tokens to rebuild or null to rebuild all token
|
|
|
|
* ranges. In the format of:
|
|
|
|
* "(start_token_1,end_token_1],(start_token_2,end_token_2],...(start_token_n,end_token_n]"
|
2018-07-30 13:37:41 +02:00
|
|
|
*/
|
|
|
|
@Override
|
|
|
|
public void rebuild(String sourceDc, String keyspace, String tokens, String specificSources) {
|
|
|
|
log(" rebuild(String sourceDc, String keyspace, String tokens, String specificSources)");
|
|
|
|
if (keyspace != null) {
|
|
|
|
throw new UnsupportedOperationException("Rebuild: 'keyspace' not yet supported");
|
|
|
|
}
|
|
|
|
if (tokens != null) {
|
|
|
|
throw new UnsupportedOperationException("Rebuild: 'token range' not yet supported");
|
|
|
|
}
|
|
|
|
if (specificSources != null) {
|
|
|
|
throw new UnsupportedOperationException("Rebuild: 'specific sources' not yet supported");
|
|
|
|
}
|
2015-12-17 17:15:50 +01:00
|
|
|
if (sourceDc != null) {
|
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
|
|
|
APIClient.set_query_param(queryParams, "source_dc", sourceDc);
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/rebuild", queryParams);
|
2015-12-17 17:15:50 +01:00
|
|
|
} else {
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/rebuild");
|
2015-12-17 17:15:50 +01:00
|
|
|
}
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Starts a bulk load and blocks until it completes. */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void bulkLoad(String directory) {
|
|
|
|
log(" bulkLoad(String directory)");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/bulk_load/" + directory);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Starts a bulk load asynchronously and returns the String representation
|
|
|
|
* of the planID for the new streaming session.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public String bulkLoadAsync(String directory) {
|
|
|
|
log(" bulkLoadAsync(String directory)");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getStringValue("/storage_service/bulk_load_async/" + directory);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void rescheduleFailedDeletions() {
|
|
|
|
log(" rescheduleFailedDeletions()");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/reschedule_failed_deletions");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Load new SSTables to the given keyspace/columnFamily
|
|
|
|
*
|
|
|
|
* @param ksName
|
|
|
|
* The parent keyspace name
|
|
|
|
* @param cfName
|
|
|
|
* The ColumnFamily name where SSTables belong
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void loadNewSSTables(String ksName, String cfName) {
|
|
|
|
log(" loadNewSSTables(String ksName, String cfName)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
queryParams.add("cf", cfName);
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/sstables/" + ksName, queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Return a List of Tokens representing a sample of keys across all
|
|
|
|
* ColumnFamilyStores.
|
|
|
|
*
|
|
|
|
* Note: this should be left as an operation, not an attribute (methods
|
|
|
|
* starting with "get") to avoid sending potentially multiple MB of data
|
|
|
|
* when accessing this mbean by default. See CASSANDRA-4452.
|
|
|
|
*
|
|
|
|
* @return set of Tokens as Strings
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public List<String> sampleKeyRange() {
|
|
|
|
log(" sampleKeyRange()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getListStrValue("/storage_service/sample_key_range");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* rebuild the specified indexes
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
    @Override
    public void rebuildSecondaryIndex(String ksName, String cfName, String... idxNames) {
        log(" rebuildSecondaryIndex(String ksName, String cfName, String... idxNames)");
        // NOTE(review): intentionally(?) a no-op beyond logging — unlike the other
        // MBean operations here, no REST call is issued, so the requested index
        // rebuild never happens. Confirm whether this is still the intended stub.
    }
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void resetLocalSchema() throws IOException {
|
|
|
|
log(" resetLocalSchema() throws IOException");
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/relocal_schema");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Enables/Disables tracing for the whole system. Only thrift requests can
|
|
|
|
* start tracing currently.
|
|
|
|
*
|
|
|
|
* @param probability
|
|
|
|
* ]0,1[ will enable tracing on a partial number of requests with
|
|
|
|
* the provided probability. 0 will disable tracing and 1 will
|
|
|
|
 * enable tracing for all requests (which might severely cripple
|
|
|
|
* the system)
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void setTraceProbability(double probability) {
|
|
|
|
log(" setTraceProbability(double probability)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
queryParams.add("probability", Double.toString(probability));
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/trace_probability", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Returns the configured tracing probability.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public double getTraceProbability() {
|
|
|
|
log(" getTraceProbability()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getDoubleValue("/storage_service/trace_probability");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public void disableAutoCompaction(String ks, String... columnFamilies) throws IOException {
|
2015-05-26 11:38:08 +02:00
|
|
|
log("disableAutoCompaction(String ks, String... columnFamilies)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2016-10-11 14:07:03 +02:00
|
|
|
APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies));
|
2017-08-22 04:50:38 +02:00
|
|
|
client.delete("/storage_service/auto_compaction/" + ks, queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public void enableAutoCompaction(String ks, String... columnFamilies) throws IOException {
|
2015-05-26 11:38:08 +02:00
|
|
|
log("enableAutoCompaction(String ks, String... columnFamilies)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2016-10-11 14:07:03 +02:00
|
|
|
APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies));
|
2020-05-25 18:58:32 +02:00
|
|
|
client.post("/storage_service/auto_compaction/" + ks, queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void deliverHints(String host) throws UnknownHostException {
|
|
|
|
log(" deliverHints(String host) throws UnknownHostException");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
queryParams.add("host", host);
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/deliver_hints", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Returns the name of the cluster */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public String getClusterName() {
|
|
|
|
log(" getClusterName()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getStringValue("/storage_service/cluster_name");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Returns the cluster partitioner */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public String getPartitionerName() {
|
|
|
|
log(" getPartitionerName()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getStringValue("/storage_service/partitioner_name");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Returns the threshold for warning of queries with many tombstones */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public int getTombstoneWarnThreshold() {
|
|
|
|
log(" getTombstoneWarnThreshold()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getIntValue("/storage_service/tombstone_warn_threshold");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Sets the threshold for warning queries with many tombstones */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void setTombstoneWarnThreshold(int tombstoneDebugThreshold) {
|
|
|
|
log(" setTombstoneWarnThreshold(int tombstoneDebugThreshold)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2016-10-11 14:07:03 +02:00
|
|
|
queryParams.add("debug_threshold", Integer.toString(tombstoneDebugThreshold));
|
|
|
|
client.post("/storage_service/tombstone_warn_threshold", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Returns the threshold for abandoning queries with many tombstones */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public int getTombstoneFailureThreshold() {
|
|
|
|
log(" getTombstoneFailureThreshold()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getIntValue("/storage_service/tombstone_failure_threshold");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Sets the threshold for abandoning queries with many tombstones */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void setTombstoneFailureThreshold(int tombstoneDebugThreshold) {
|
|
|
|
log(" setTombstoneFailureThreshold(int tombstoneDebugThreshold)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2016-10-11 14:07:03 +02:00
|
|
|
queryParams.add("debug_threshold", Integer.toString(tombstoneDebugThreshold));
|
|
|
|
client.post("/storage_service/tombstone_failure_threshold", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Returns the threshold for rejecting queries due to a large batch size */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public int getBatchSizeFailureThreshold() {
|
|
|
|
log(" getBatchSizeFailureThreshold()");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getIntValue("/storage_service/batch_size_failure_threshold");
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
/** Sets the threshold for rejecting queries due to a large batch size */
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void setBatchSizeFailureThreshold(int batchSizeDebugThreshold) {
|
|
|
|
log(" setBatchSizeFailureThreshold(int batchSizeDebugThreshold)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
queryParams.add("threshold", Integer.toString(batchSizeDebugThreshold));
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/batch_size_failure_threshold", queryParams);
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
|
|
|
|
2015-07-30 10:37:03 +02:00
|
|
|
/**
|
|
|
|
* Sets the hinted handoff throttle in kb per second, per delivery thread.
|
|
|
|
*/
|
2016-08-17 10:43:28 +02:00
|
|
|
@Override
|
2015-04-13 08:30:10 +02:00
|
|
|
public void setHintedHandoffThrottleInKB(int throttleInKB) {
|
|
|
|
log(" setHintedHandoffThrottleInKB(int throttleInKB)");
|
2015-10-28 10:25:37 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
2015-05-26 11:38:08 +02:00
|
|
|
queryParams.add("throttle", Integer.toString(throttleInKB));
|
2016-10-11 14:07:03 +02:00
|
|
|
client.post("/storage_service/hinted_handoff", queryParams);
|
2015-05-26 11:38:08 +02:00
|
|
|
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|
2015-07-30 11:01:05 +02:00
|
|
|
|
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public void takeMultipleColumnFamilySnapshot(String tag, String... columnFamilyList) throws IOException {
|
2015-07-30 11:01:05 +02:00
|
|
|
log(" takeMultipleColumnFamilySnapshot");
|
2016-04-26 10:34:38 +02:00
|
|
|
Map<String, List<String>> keyspaceColumnfamily = new HashMap<String, List<String>>();
|
|
|
|
Map<String, Set<String>> kss = getColumnFamilyPerKeyspace();
|
|
|
|
Map<String, Map<String, Set<String>>> snapshots = getSnapshotKeyspaceColumnFamily();
|
2016-10-11 14:07:03 +02:00
|
|
|
for (String columnFamily : columnFamilyList) {
|
2016-04-26 10:34:38 +02:00
|
|
|
String splittedString[] = columnFamily.split("\\.");
|
2016-10-11 14:07:03 +02:00
|
|
|
if (splittedString.length == 2) {
|
2016-04-26 10:34:38 +02:00
|
|
|
String keyspaceName = splittedString[0];
|
|
|
|
String columnFamilyName = splittedString[1];
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
if (keyspaceName == null) {
|
2016-04-26 10:34:38 +02:00
|
|
|
throw new IOException("You must supply a keyspace name");
|
2016-10-11 14:07:03 +02:00
|
|
|
}
|
|
|
|
if (columnFamilyName == null) {
|
2016-04-26 10:34:38 +02:00
|
|
|
throw new IOException("You must supply a column family name");
|
2016-10-11 14:07:03 +02:00
|
|
|
}
|
|
|
|
if (tag == null || tag.equals("")) {
|
2016-04-26 10:34:38 +02:00
|
|
|
throw new IOException("You must supply a snapshot name.");
|
2016-10-11 14:07:03 +02:00
|
|
|
}
|
|
|
|
if (!kss.containsKey(keyspaceName)) {
|
2016-04-26 10:34:38 +02:00
|
|
|
throw new IOException("Keyspace " + keyspaceName + " does not exist");
|
|
|
|
}
|
|
|
|
if (!kss.get(keyspaceName).contains(columnFamilyName)) {
|
2016-10-11 14:07:03 +02:00
|
|
|
throw new IllegalArgumentException(
|
|
|
|
String.format("Unknown keyspace/cf pair (%s.%s)", keyspaceName, columnFamilyName));
|
2016-04-26 10:34:38 +02:00
|
|
|
}
|
2016-10-11 14:07:03 +02:00
|
|
|
// As there can be multiple column family from same keyspace
|
|
|
|
// check if snapshot exist for that specific
|
2016-04-26 10:34:38 +02:00
|
|
|
// columnfamily and not for whole keyspace
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
if (snapshots.containsKey(tag) && snapshots.get(tag).containsKey(keyspaceName)
|
|
|
|
&& snapshots.get(tag).get(keyspaceName).contains(columnFamilyName)) {
|
2016-04-26 10:34:38 +02:00
|
|
|
throw new IOException("Snapshot " + tag + " already exists.");
|
|
|
|
}
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
if (!keyspaceColumnfamily.containsKey(keyspaceName)) {
|
2016-04-26 10:34:38 +02:00
|
|
|
keyspaceColumnfamily.put(keyspaceName, new ArrayList<String>());
|
|
|
|
}
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
// Add Keyspace columnfamily to map in order to support
|
|
|
|
// atomicity for snapshot process.
|
|
|
|
// So no snapshot should happen if any one of the above
|
|
|
|
// conditions fail for any keyspace or columnfamily
|
2016-04-26 10:34:38 +02:00
|
|
|
keyspaceColumnfamily.get(keyspaceName).add(columnFamilyName);
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
} else {
|
2016-04-26 10:34:38 +02:00
|
|
|
throw new IllegalArgumentException(
|
|
|
|
"Cannot take a snapshot on secondary index or invalid column family name. You must supply a column family name in the form of keyspace.columnfamily");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
for (Entry<String, List<String>> entry : keyspaceColumnfamily.entrySet()) {
|
|
|
|
for (String columnFamily : entry.getValue()) {
|
2016-04-26 10:34:38 +02:00
|
|
|
takeColumnFamilySnapshot(entry.getKey(), columnFamily, tag);
|
2016-10-11 14:07:03 +02:00
|
|
|
}
|
2016-04-26 10:34:38 +02:00
|
|
|
}
|
2015-07-30 11:01:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public int forceRepairAsync(String keyspace, int parallelismDegree, Collection<String> dataCenters,
|
|
|
|
Collection<String> hosts, boolean primaryRange, boolean fullRepair, String... columnFamilies) {
|
2015-12-28 14:55:25 +01:00
|
|
|
log(" forceRepairAsync(keyspace, parallelismDegree, dataCenters, hosts, primaryRange, fullRepair, columnFamilies)");
|
2015-08-13 08:53:47 +02:00
|
|
|
Map<String, String> options = new HashMap<String, String>();
|
2015-12-28 14:55:25 +01:00
|
|
|
Joiner commas = Joiner.on(",");
|
|
|
|
options.put("parallelism", Integer.toString(parallelismDegree));
|
|
|
|
if (dataCenters != null) {
|
|
|
|
options.put("dataCenters", commas.join(dataCenters));
|
|
|
|
}
|
|
|
|
if (hosts != null) {
|
|
|
|
options.put("hosts", commas.join(hosts));
|
|
|
|
}
|
|
|
|
options.put("primaryRange", Boolean.toString(primaryRange));
|
2016-10-11 14:07:03 +02:00
|
|
|
options.put("incremental", Boolean.toString(!fullRepair));
|
2015-12-28 14:55:25 +01:00
|
|
|
if (columnFamilies != null && columnFamilies.length > 0) {
|
|
|
|
options.put("columnFamilies", commas.join(columnFamilies));
|
|
|
|
}
|
2015-08-13 08:53:47 +02:00
|
|
|
return repairAsync(keyspace, options);
|
2015-07-30 11:01:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2016-10-11 14:07:03 +02:00
|
|
|
public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, int parallelismDegree,
|
|
|
|
Collection<String> dataCenters, Collection<String> hosts, boolean fullRepair, String... columnFamilies) {
|
2015-12-28 14:55:25 +01:00
|
|
|
log(" forceRepairRangeAsync(beginToken, endToken, keyspaceName, parallelismDegree, dataCenters, hosts, fullRepair, columnFamilies)");
|
2016-02-18 16:10:38 +01:00
|
|
|
Map<String, String> options = new HashMap<String, String>();
|
|
|
|
Joiner commas = Joiner.on(",");
|
|
|
|
options.put("parallelism", Integer.toString(parallelismDegree));
|
|
|
|
if (dataCenters != null) {
|
|
|
|
options.put("dataCenters", commas.join(dataCenters));
|
|
|
|
}
|
|
|
|
if (hosts != null) {
|
|
|
|
options.put("hosts", commas.join(hosts));
|
|
|
|
}
|
2016-10-11 14:07:03 +02:00
|
|
|
options.put("incremental", Boolean.toString(!fullRepair));
|
2016-02-18 16:10:38 +01:00
|
|
|
options.put("startToken", beginToken);
|
2016-10-11 14:07:03 +02:00
|
|
|
options.put("endToken", endToken);
|
2016-02-18 16:10:38 +01:00
|
|
|
return repairAsync(keyspaceName, options);
|
2015-07-30 11:01:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
2016-08-17 10:43:28 +02:00
|
|
|
public Map<String, String> getEndpointToHostId() {
|
|
|
|
return getHostIdMap();
|
|
|
|
}
|
|
|
|
|
2016-10-11 14:07:03 +02:00
|
|
|
@Override
|
2016-08-17 10:43:28 +02:00
|
|
|
public Map<String, String> getHostIdToEndpoint() {
|
|
|
|
return getHostIdToAddressMap();
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void refreshSizeEstimates() throws ExecutionException {
|
2016-10-11 14:07:03 +02:00
|
|
|
// TODO Auto-generated method stub
|
2016-08-17 10:43:28 +02:00
|
|
|
log(" refreshSizeEstimates");
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void forceKeyspaceCompaction(boolean splitOutput, String keyspaceName, String... tableNames)
|
|
|
|
throws IOException, ExecutionException, InterruptedException {
|
|
|
|
// "splitOutput" afaik not relevant for scylla (yet?...)
|
|
|
|
forceKeyspaceCompaction(keyspaceName, tableNames);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables)
|
|
|
|
throws IOException, ExecutionException, InterruptedException {
|
2016-10-11 14:07:03 +02:00
|
|
|
// "jobs" not (yet) relevant for scylla. (though possibly useful...)
|
2016-08-17 10:43:28 +02:00
|
|
|
return forceKeyspaceCleanup(keyspaceName, tables);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, int jobs, String keyspaceName,
|
|
|
|
String... columnFamilies) throws IOException, ExecutionException, InterruptedException {
|
2016-10-11 14:07:03 +02:00
|
|
|
// "jobs" not (yet) relevant for scylla. (though possibly useful...)
|
2020-04-01 09:13:16 +02:00
|
|
|
return scrub(disableSnapshot, skipCorrupted, checkData, keyspaceName, columnFamilies);
|
2016-08-17 10:43:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public int verify(boolean extendedVerify, String keyspaceName, String... tableNames)
|
|
|
|
throws IOException, ExecutionException, InterruptedException {
|
2015-07-30 11:01:05 +02:00
|
|
|
// TODO Auto-generated method stub
|
2016-08-17 10:43:28 +02:00
|
|
|
log(" verify");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames)
|
|
|
|
throws IOException, ExecutionException, InterruptedException {
|
2016-10-11 14:07:03 +02:00
|
|
|
// "jobs" not (yet) relevant for scylla. (though possibly useful...)
|
2016-08-17 10:43:28 +02:00
|
|
|
return upgradeSSTables(keyspaceName, excludeCurrentVersion, tableNames);
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public List<String> getNonLocalStrategyKeyspaces() {
|
|
|
|
log(" getNonLocalStrategyKeyspaces");
|
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
|
|
|
queryParams.add("type", "non_local_strategy");
|
2016-10-11 14:07:03 +02:00
|
|
|
return client.getListStrValue("/storage_service/keyspaces", queryParams);
|
2016-08-17 10:43:28 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public void setInterDCStreamThroughputMbPerSec(int value) {
|
2016-10-11 14:07:03 +02:00
|
|
|
// TODO Auto-generated method stub
|
2016-08-17 10:43:28 +02:00
|
|
|
log(" setInterDCStreamThroughputMbPerSec");
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public int getInterDCStreamThroughputMbPerSec() {
|
|
|
|
// TODO Auto-generated method stub
|
|
|
|
log(" getInterDCStreamThroughputMbPerSec");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public boolean resumeBootstrap() {
|
|
|
|
log(" resumeBootstrap");
|
|
|
|
return false;
|
2015-07-30 11:01:05 +02:00
|
|
|
}
|
2019-07-24 16:33:13 +02:00
|
|
|
|
|
|
|
@Override
|
|
|
|
public List<CompositeData> getSSTableInfo(String keyspace, String table) {
|
|
|
|
if (keyspace == null && table != null) {
|
|
|
|
throw new IllegalArgumentException("Missing keyspace name");
|
|
|
|
}
|
|
|
|
MultivaluedMap<String, String> queryParams = null;
|
|
|
|
|
|
|
|
if (keyspace != null) {
|
|
|
|
queryParams = new MultivaluedHashMap<String, String>();
|
|
|
|
queryParams.add("keyspace", keyspace);
|
|
|
|
}
|
|
|
|
if (table != null) {
|
|
|
|
queryParams.add("cf", table);
|
|
|
|
}
|
|
|
|
|
|
|
|
return client.get("/storage_service/sstable_info", queryParams)
|
|
|
|
.get(new GenericType<List<PerTableSSTableInfo>>() {
|
|
|
|
}).stream().map((i) -> i.toCompositeData()).collect(Collectors.toList());
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public List<CompositeData> getSSTableInfo() {
|
|
|
|
return getSSTableInfo(null, null);
|
|
|
|
}
|
2020-04-01 09:13:16 +02:00
|
|
|
|
|
|
|
    /**
     * Scrubs the given tables of {@code keyspaceName} via the
     * {@code /storage_service/keyspace_scrub} REST endpoint.
     *
     * NOTE(review): {@code checkData}, {@code reinsertOverflowedTTL} and
     * {@code jobs} are accepted for MBean interface compatibility but are NOT
     * forwarded to the REST API — confirm whether scylla needs them.
     *
     * @return the status code returned by the scrub endpoint
     */
    @Override
    public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, boolean reinsertOverflowedTTL,
            int jobs, String keyspaceName, String... columnFamilies)
            throws IOException, ExecutionException, InterruptedException {
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        APIClient.set_bool_query_param(queryParams, "disable_snapshot", disableSnapshot);
        APIClient.set_bool_query_param(queryParams, "skip_corrupted", skipCorrupted);
        APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies));
        return client.getIntValue("/storage_service/keyspace_scrub/" + keyspaceName, queryParams);
    }
|
2021-02-15 17:26:42 +01:00
|
|
|
|
2021-08-15 18:24:51 +02:00
|
|
|
@Override
|
|
|
|
public int scrub(boolean disableSnapshot, String scrubMode, boolean checkData, boolean reinsertOverflowedTTL,
|
|
|
|
int jobs, String keyspaceName, String... columnFamilies)
|
|
|
|
throws IOException, ExecutionException, InterruptedException {
|
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
|
|
|
APIClient.set_bool_query_param(queryParams, "disable_snapshot", disableSnapshot);
|
|
|
|
if (scrubMode != "") {
|
|
|
|
APIClient.set_query_param(queryParams, "scrub_mode", scrubMode);
|
|
|
|
}
|
|
|
|
APIClient.set_query_param(queryParams, "cf", APIClient.join(columnFamilies));
|
|
|
|
return client.getIntValue("/storage_service/keyspace_scrub/" + keyspaceName, queryParams);
|
|
|
|
}
|
|
|
|
|
2021-01-31 17:12:29 +01:00
|
|
|
@Override
|
|
|
|
public long getUptime() {
|
|
|
|
log("getUptime()");
|
|
|
|
return client.getLongValue("/system/uptime_ms");
|
|
|
|
}
|
2021-02-15 17:26:42 +01:00
|
|
|
|
|
|
|
@Override
|
|
|
|
public CompositeData getToppartitions(String sampler, List<String> keyspaceFilters, List<String> tableFilters, int duration, int capacity, int count) throws OpenDataException {
|
2021-05-10 11:53:34 +02:00
|
|
|
return getToppartitions(Arrays.asList(sampler), keyspaceFilters, tableFilters, duration, capacity, count).get(sampler.toLowerCase());
|
|
|
|
}
|
|
|
|
|
|
|
|
@Override
|
|
|
|
public Map<String, CompositeData> getToppartitions(List<String> samplers, List<String> keyspaceFilters, List<String> tableFilters, int duration, int capacity, int count) throws OpenDataException {
|
2021-02-15 17:26:42 +01:00
|
|
|
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
|
|
|
|
APIClient.set_query_param(queryParams, "duration", Integer.toString(duration));
|
|
|
|
APIClient.set_query_param(queryParams, "capacity", Integer.toString(capacity));
|
|
|
|
APIClient.set_query_param(queryParams, "keyspace_filters", keyspaceFilters != null ? APIClient.join(keyspaceFilters.toArray(new String[0])) : null);
|
|
|
|
APIClient.set_query_param(queryParams, "table_filters", tableFilters != null ? APIClient.join(tableFilters.toArray(new String[0])) : null);
|
|
|
|
JsonObject result = client.getJsonObj("/storage_service/toppartitions", queryParams);
|
|
|
|
|
2021-05-10 11:53:34 +02:00
|
|
|
Map<String, CompositeData> resultsMap = new HashMap<>();
|
|
|
|
|
|
|
|
for (String operation : OPERATION_NAMES) {
|
|
|
|
JsonArray counters = result.getJsonArray(operation);
|
|
|
|
long cardinality = result.getJsonNumber(operation + "_cardinality").longValue();
|
|
|
|
long size = 0;
|
|
|
|
TabularDataSupport tabularResult = new TabularDataSupport(COUNTER_TYPE);
|
|
|
|
|
|
|
|
if (counters != null) {
|
|
|
|
size = (count > counters.size()) ? counters.size() : count;
|
|
|
|
for (int i = 0; i < size; i++) {
|
|
|
|
JsonObject counter = counters.getJsonObject(i);
|
|
|
|
tabularResult.put(new CompositeDataSupport(COUNTER_COMPOSITE_TYPE, COUNTER_NAMES,
|
|
|
|
new Object[] { counter.getString("partition"), // raw
|
|
|
|
counter.getJsonNumber("count").longValue(), // count
|
|
|
|
counter.getJsonNumber("error").longValue(), // error
|
|
|
|
counter.getString("partition") })); // string
|
|
|
|
}
|
2021-02-15 17:26:42 +01:00
|
|
|
}
|
2021-05-10 11:53:34 +02:00
|
|
|
|
|
|
|
resultsMap.put(operation + "s", new CompositeDataSupport(SAMPLING_RESULT, SAMPLER_NAMES, new Object[] { cardinality, tabularResult }));
|
2021-02-15 17:26:42 +01:00
|
|
|
}
|
|
|
|
|
2021-05-10 11:53:34 +02:00
|
|
|
return resultsMap;
|
2021-02-15 17:26:42 +01:00
|
|
|
}
|
2015-04-13 08:30:10 +02:00
|
|
|
}
|