Merge branch 'cassandra3' into next
commit 3838921ca3

pom.xml | 14
@@ -10,8 +10,8 @@
    <name>Scylla JMX</name>

    <properties>
        <maven.compiler.target>1.7</maven.compiler.target>
        <maven.compiler.source>1.7</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <maven.compiler.source>1.8</maven.compiler.source>
    </properties>

    <dependencies>
@@ -71,21 +71,11 @@
            <artifactId>guava</artifactId>
            <version>18.0</version>
        </dependency>
        <dependency>
            <groupId>com.yammer.metrics</groupId>
            <artifactId>metrics-core</artifactId>
            <version>2.2.0</version>
        </dependency>
        <dependency>
            <groupId>com.google.collections</groupId>
            <artifactId>google-collections</artifactId>
            <version>1.0</version>
        </dependency>
        <dependency>
            <groupId>mx4j</groupId>
            <artifactId>mx4j</artifactId>
            <version>3.0.2</version>
        </dependency>
    </dependencies>
    <build>
        <plugins>
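Note: the 1.7 to 1.8 compiler bump is what the rest of this commit relies on: the new APIClient.getReader factory and the RMI socket factory in Main are written as lambdas. A minimal sketch (not part of the commit) of syntax that now has to compile:

    import java.util.function.BiFunction;

    public class Java8Smoke {
        public static void main(String[] args) {
            // A lambda targeting a functional interface needs -source/-target 1.8.
            BiFunction<Integer, Integer, Integer> add = (a, b) -> a + b;
            System.out.println(add.apply(2, 3)); // prints 5
        }
    }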
scripts/git-archive-all | 0 (Executable file → Normal file)
@@ -14,6 +14,8 @@ import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.function.BiFunction;
import java.util.logging.Logger;

import javax.json.Json;
import javax.json.JsonArray;
@@ -35,13 +37,12 @@ import javax.ws.rs.core.Response;

import org.glassfish.jersey.client.ClientConfig;

import com.scylladb.jmx.utils.EstimatedHistogram;
import com.scylladb.jmx.utils.SnapshotDetailsTabularData;
import com.yammer.metrics.core.HistogramValues;

public class APIClient {
    Map<String, CacheEntry> cache = new HashMap<String, CacheEntry>();
    String getCacheKey(String key, MultivaluedMap<String, String> param, long duration) {
    private Map<String, CacheEntry> cache = new HashMap<String, CacheEntry>();

    private String getCacheKey(String key, MultivaluedMap<String, String> param, long duration) {
        if (duration <= 0) {
            return null;
        }
@@ -56,43 +57,40 @@ public class APIClient {
        return key;
    }

    String getStringFromCache(String key, long duration) {
    private String getStringFromCache(String key, long duration) {
        if (key == null) {
            return null;
        }
        CacheEntry value = cache.get(key);
        return (value!= null && value.valid(duration))? value.stringValue() : null;
        return (value != null && value.valid(duration)) ? value.stringValue() : null;
    }

    JsonObject getJsonObjectFromCache(String key, long duration) {
    private JsonObject getJsonObjectFromCache(String key, long duration) {
        if (key == null) {
            return null;
        }
        CacheEntry value = cache.get(key);
        return (value!= null && value.valid(duration))? value.jsonObject() : null;
        return (value != null && value.valid(duration)) ? value.jsonObject() : null;
    }

    EstimatedHistogram getEstimatedHistogramFromCache(String key, long duration) {
        if (key == null) {
            return null;
        }
        CacheEntry value = cache.get(key);
        return (value!= null && value.valid(duration))? value.getEstimatedHistogram() : null;
    private JsonReaderFactory factory = Json.createReaderFactory(null);
    private static final Logger logger = Logger.getLogger(APIClient.class.getName());

    private final APIConfig config;

    public APIClient(APIConfig config) {
        this.config = config;
    }

    JsonReaderFactory factory = Json.createReaderFactory(null);
    private static final java.util.logging.Logger logger = java.util.logging.Logger
            .getLogger(APIClient.class.getName());

    public static String getBaseUrl() {
        return APIConfig.getBaseUrl();
    private String getBaseUrl() {
        return config.getBaseUrl();
    }

    public Invocation.Builder get(String path, MultivaluedMap<String, String> queryParams) {
        Client client = ClientBuilder.newClient( new ClientConfig());
        Client client = ClientBuilder.newClient(new ClientConfig());
        WebTarget webTarget = client.target(getBaseUrl()).path(path);
        if (queryParams != null) {
            for (Entry<String, List<String>> qp : queryParams.entrySet()) {
            for (Entry<String, List<String>> qp : queryParams.entrySet()) {
                for (String e : qp.getValue()) {
                    webTarget = webTarget.queryParam(qp.getKey(), e);
                }
@@ -106,10 +104,15 @@ public class APIClient {
    }

    public Response post(String path, MultivaluedMap<String, String> queryParams) {
        return post(path, queryParams, null);
    }

    public Response post(String path, MultivaluedMap<String, String> queryParams, Object object, String type) {
        try {
            Response response = get(path, queryParams).post(Entity.entity(null, MediaType.TEXT_PLAIN));
            if (response.getStatus() != Response.Status.OK.getStatusCode() ) {
                throw getException("Scylla API server HTTP POST to URL '" + path + "' failed", response.readEntity(String.class));
            Response response = get(path, queryParams).post(Entity.entity(object, type));
            if (response.getStatus() != Response.Status.OK.getStatusCode()) {
                throw getException("Scylla API server HTTP POST to URL '" + path + "' failed",
                        response.readEntity(String.class));
            }
            return response;
        } catch (ProcessingException e) {
@@ -117,6 +120,10 @@ public class APIClient {
        }
    }

    public Response post(String path, MultivaluedMap<String, String> queryParams, Object object) {
        return post(path, queryParams, object, MediaType.TEXT_PLAIN);
    }

    public void post(String path) {
        post(path, null);
    }
@@ -151,8 +158,7 @@ public class APIClient {
        delete(path, null);
    }

    public String getRawValue(String string,
            MultivaluedMap<String, String> queryParams, long duration) {
    public String getRawValue(String string, MultivaluedMap<String, String> queryParams, long duration) {
        try {
            if (string.equals("")) {
                return "";
@@ -168,7 +174,8 @@ public class APIClient {
                // TBD
                // We are currently not caching errors,
                // it should be reconsider.
                throw getException("Scylla API server HTTP GET to URL '" + string + "' failed", response.readEntity(String.class));
                throw getException("Scylla API server HTTP GET to URL '" + string + "' failed",
                        response.readEntity(String.class));
            }
            res = response.readEntity(String.class);
            if (duration > 0) {
@@ -180,8 +187,7 @@ public class APIClient {
        }
    }

    public String getRawValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public String getRawValue(String string, MultivaluedMap<String, String> queryParams) {
        return getRawValue(string, queryParams, 0);
    }

@@ -194,23 +200,19 @@ public class APIClient {
    }

    public String getStringValue(String string, MultivaluedMap<String, String> queryParams) {
        return getRawValue(string,
                queryParams).replaceAll("^\"|\"$", "");
        return getRawValue(string, queryParams).replaceAll("^\"|\"$", "");
    }

    public String getStringValue(String string, MultivaluedMap<String, String> queryParams, long duration) {
        return getRawValue(string,
                queryParams, duration).replaceAll("^\"|\"$", "");
        return getRawValue(string, queryParams, duration).replaceAll("^\"|\"$", "");
    }

    public String getStringValue(String string) {
        return getStringValue(string, null);
    }

    public JsonReader getReader(String string,
            MultivaluedMap<String, String> queryParams) {
        return factory.createReader(new StringReader(getRawValue(string,
                queryParams)));
    public JsonReader getReader(String string, MultivaluedMap<String, String> queryParams) {
        return factory.createReader(new StringReader(getRawValue(string, queryParams)));
    }

    public JsonReader getReader(String string) {
@@ -222,8 +224,7 @@ public class APIClient {
        return val.toArray(new String[val.size()]);
    }

    public int getIntValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public int getIntValue(String string, MultivaluedMap<String, String> queryParams) {
        return Integer.parseInt(getRawValue(string, queryParams));
    }

@@ -231,6 +232,19 @@ public class APIClient {
        return getIntValue(string, null);
    }

    public static <T> BiFunction<APIClient, String, T> getReader(Class<T> type) {
        if (type == String.class) {
            return (c, s) -> type.cast(c.getRawValue(s));
        } else if (type == Integer.class) {
            return (c, s) -> type.cast(c.getIntValue(s));
        } else if (type == Double.class) {
            return (c, s) -> type.cast(c.getDoubleValue(s));
        } else if (type == Long.class) {
            return (c, s) -> type.cast(c.getLongValue(s));
        }
        throw new IllegalArgumentException(type.getName());
    }

    public boolean getBooleanValue(String string) {
        return Boolean.parseBoolean(getRawValue(string));
    }
@@ -239,8 +253,7 @@ public class APIClient {
        return Double.parseDouble(getRawValue(string));
    }

    public List<String> getListStrValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public List<String> getListStrValue(String string, MultivaluedMap<String, String> queryParams) {
        JsonReader reader = getReader(string, queryParams);
        JsonArray arr = reader.readArray();
        List<String> res = new ArrayList<String>(arr.size());
@@ -295,8 +308,7 @@ public class APIClient {
        return join(arr, ",");
    }

    public static String mapToString(Map<String, String> mp, String pairJoin,
            String joiner) {
    public static String mapToString(Map<String, String> mp, String pairJoin, String joiner) {
        String res = "";
        if (mp != null) {
            for (String name : mp.keySet()) {
@@ -313,19 +325,15 @@ public class APIClient {
        return mapToString(mp, "=", ",");
    }

    public static boolean set_query_param(
            MultivaluedMap<String, String> queryParams, String key, String value) {
        if (queryParams != null && key != null && value != null
                && !value.equals("")) {
    public static boolean set_query_param(MultivaluedMap<String, String> queryParams, String key, String value) {
        if (queryParams != null && key != null && value != null && !value.equals("")) {
            queryParams.add(key, value);
            return true;
        }
        return false;
    }

    public static boolean set_bool_query_param(
            MultivaluedMap<String, String> queryParams, String key,
            boolean value) {
    public static boolean set_bool_query_param(MultivaluedMap<String, String> queryParams, String key, boolean value) {
        if (queryParams != null && key != null && value) {
            queryParams.add(key, "true");
            return true;
@@ -344,8 +352,7 @@ public class APIClient {
        for (int i = 0; i < arr.size(); i++) {
            JsonObject obj = arr.getJsonObject(i);
            if (obj.containsKey("key") && obj.containsKey("value")) {
                map.put(obj.getString("key"),
                        listStrFromJArr(obj.getJsonArray("value")));
                map.put(obj.getString("key"), listStrFromJArr(obj.getJsonArray("value")));
            }
        }
        reader.close();
@@ -367,8 +374,7 @@ public class APIClient {
        for (int i = 0; i < arr.size(); i++) {
            JsonObject obj = arr.getJsonObject(i);
            if (obj.containsKey("key") && obj.containsKey("value")) {
                map.put(listStrFromJArr(obj.getJsonArray("key")),
                        listStrFromJArr(obj.getJsonArray("value")));
                map.put(listStrFromJArr(obj.getJsonArray("key")), listStrFromJArr(obj.getJsonArray("value")));
            }
        }
        reader.close();
@@ -379,8 +385,7 @@ public class APIClient {
        return getMapListStrValue(string, null);
    }

    public Set<String> getSetStringValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public Set<String> getSetStringValue(String string, MultivaluedMap<String, String> queryParams) {
        JsonReader reader = getReader(string, queryParams);
        JsonArray arr = reader.readArray();
        Set<String> res = new HashSet<String>();
@@ -395,8 +400,7 @@ public class APIClient {
        return getSetStringValue(string, null);
    }

    public Map<String, String> getMapStrValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public Map<String, String> getMapStrValue(String string, MultivaluedMap<String, String> queryParams) {
        if (string.equals("")) {
            return null;
        }
@@ -417,8 +421,7 @@ public class APIClient {
        return getMapStrValue(string, null);
    }

    public Map<String, String> getReverseMapStrValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public Map<String, String> getReverseMapStrValue(String string, MultivaluedMap<String, String> queryParams) {
        if (string.equals("")) {
            return null;
        }
@@ -435,12 +438,11 @@ public class APIClient {
        return map;
    }

    public Map<String, String> getReverseMapStrValue(String string) {
    public Map<String, String> getReverseMapStrValue(String string) {
        return getReverseMapStrValue(string, null);
    }

    public List<InetAddress> getListInetAddressValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public List<InetAddress> getListInetAddressValue(String string, MultivaluedMap<String, String> queryParams) {
        List<String> vals = getListStrValue(string, queryParams);
        List<InetAddress> res = new ArrayList<InetAddress>();
        for (String val : vals) {
@@ -464,22 +466,20 @@ public class APIClient {
    }

    private TabularDataSupport getSnapshotData(String key, JsonArray arr) {
        TabularDataSupport data = new TabularDataSupport(
                SnapshotDetailsTabularData.TABULAR_TYPE);
        TabularDataSupport data = new TabularDataSupport(SnapshotDetailsTabularData.TABULAR_TYPE);

        for (int i = 0; i < arr.size(); i++) {
            JsonObject obj = arr.getJsonObject(i);
            if (obj.containsKey("ks") && obj.containsKey("cf")) {
                SnapshotDetailsTabularData.from(key, obj.getString("ks"),
                        obj.getString("cf"), obj.getInt("total"),
                SnapshotDetailsTabularData.from(key, obj.getString("ks"), obj.getString("cf"), obj.getInt("total"),
                        obj.getInt("live"), data);
            }
        }
        return data;
    }

    public Map<String, TabularData> getMapStringSnapshotTabularDataValue(
            String string, MultivaluedMap<String, String> queryParams) {
    public Map<String, TabularData> getMapStringSnapshotTabularDataValue(String string,
            MultivaluedMap<String, String> queryParams) {
        if (string.equals("")) {
            return null;
        }
@@ -513,8 +513,7 @@ public class APIClient {
        for (int i = 0; i < arr.size(); i++) {
            try {
                obj = arr.getJsonObject(i);
                res.put(InetAddress.getByName(obj.getString("key")),
                        Float.parseFloat(obj.getString("value")));
                res.put(InetAddress.getByName(obj.getString("key")), Float.parseFloat(obj.getString("value")));
            } catch (UnknownHostException e) {
                logger.warning("Bad formatted address " + obj.getString("key"));
            }
@@ -526,8 +525,7 @@ public class APIClient {
        return getMapInetAddressFloatValue(string, null);
    }

    public Map<String, Long> getMapStringLongValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public Map<String, Long> getMapStringLongValue(String string, MultivaluedMap<String, String> queryParams) {
        Map<String, Long> res = new HashMap<String, Long>();

        JsonReader reader = getReader(string, queryParams);
@@ -545,8 +543,7 @@ public class APIClient {
        return getMapStringLongValue(string, null);
    }

    public long[] getLongArrValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public long[] getLongArrValue(String string, MultivaluedMap<String, String> queryParams) {
        JsonReader reader = getReader(string, queryParams);
        JsonArray arr = reader.readArray();
        long[] res = new long[arr.size()];
@@ -561,8 +558,7 @@ public class APIClient {
        return getLongArrValue(string, null);
    }

    public Map<String, Integer> getMapStringIntegerValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public Map<String, Integer> getMapStringIntegerValue(String string, MultivaluedMap<String, String> queryParams) {
        Map<String, Integer> res = new HashMap<String, Integer>();

        JsonReader reader = getReader(string, queryParams);
@@ -580,8 +576,7 @@ public class APIClient {
        return getMapStringIntegerValue(string, null);
    }

    public int[] getIntArrValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public int[] getIntArrValue(String string, MultivaluedMap<String, String> queryParams) {
        JsonReader reader = getReader(string, queryParams);
        JsonArray arr = reader.readArray();
        int[] res = new int[arr.size()];
@@ -596,8 +591,7 @@ public class APIClient {
        return getIntArrValue(string, null);
    }

    public Map<String, Long> getListMapStringLongValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public Map<String, Long> getListMapStringLongValue(String string, MultivaluedMap<String, String> queryParams) {
        if (string.equals("")) {
            return null;
        }
@@ -630,8 +624,7 @@ public class APIClient {
        return getListMapStringLongValue(string, null);
    }

    public JsonArray getJsonArray(String string,
            MultivaluedMap<String, String> queryParams) {
    public JsonArray getJsonArray(String string, MultivaluedMap<String, String> queryParams) {
        if (string.equals("")) {
            return null;
        }
@@ -645,8 +638,7 @@ public class APIClient {
        return getJsonArray(string, null);
    }

    public List<Map<String, String>> getListMapStrValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public List<Map<String, String>> getListMapStrValue(String string, MultivaluedMap<String, String> queryParams) {
        JsonArray arr = getJsonArray(string, queryParams);
        List<Map<String, String>> res = new ArrayList<Map<String, String>>();
        for (int i = 0; i < arr.size(); i++) {
@@ -664,8 +656,7 @@ public class APIClient {
        return null;
    }

    public JsonObject getJsonObj(String string,
            MultivaluedMap<String, String> queryParams, long duration) {
    public JsonObject getJsonObj(String string, MultivaluedMap<String, String> queryParams, long duration) {
        if (string.equals("")) {
            return null;
        }
@@ -682,61 +673,19 @@ public class APIClient {
        }
        return res;
    }
    public JsonObject getJsonObj(String string,
            MultivaluedMap<String, String> queryParams) {

    public JsonObject getJsonObj(String string, MultivaluedMap<String, String> queryParams) {
        return getJsonObj(string, queryParams, 0);
    }

    public static HistogramValues json2histogram(JsonObject obj) {
        HistogramValues res = new HistogramValues();
        res.count = obj.getJsonNumber("count").longValue();
        res.max = obj.getJsonNumber("max").longValue();
        res.min = obj.getJsonNumber("min").longValue();
        res.sum = obj.getJsonNumber("sum").longValue();
        res.variance = obj.getJsonNumber("variance").doubleValue();
        res.mean = obj.getJsonNumber("mean").doubleValue();
        JsonArray arr = obj.getJsonArray("sample");
        if (arr != null) {
            res.sample = new long[arr.size()];
            for (int i = 0; i < arr.size(); i++) {
                res.sample[i] = arr.getJsonNumber(i).longValue();
            }
        }
        return res;
    }

    public HistogramValues getHistogramValue(String url,
            MultivaluedMap<String, String> queryParams) {
        return json2histogram(getJsonObj(url, queryParams));
    }

    public HistogramValues getHistogramValue(String url) {
        return getHistogramValue(url, null);
    }

    public EstimatedHistogram getEstimatedHistogram(String string,
            MultivaluedMap<String, String> queryParams, long duration) {
        String key = getCacheKey(string, queryParams, duration);
        EstimatedHistogram res = getEstimatedHistogramFromCache(key, duration);
        if (res != null) {
            return res;
        }
        res = new EstimatedHistogram(getEstimatedHistogramAsLongArrValue(string, queryParams));
        if (duration > 0) {
            cache.put(key, new CacheEntry(res));
        }
        return res;

    }
    public long[] getEstimatedHistogramAsLongArrValue(String string,
            MultivaluedMap<String, String> queryParams) {
    public long[] getEstimatedHistogramAsLongArrValue(String string, MultivaluedMap<String, String> queryParams) {
        JsonObject obj = getJsonObj(string, queryParams);
        JsonArray arr = obj.getJsonArray("buckets");
        if (arr == null) {
            return new long[0];
        }
        long res[] = new long[arr.size()];
        for (int i = 0; i< arr.size(); i++) {
        for (int i = 0; i < arr.size(); i++) {
            res[i] = arr.getJsonNumber(i).longValue();
        }
        return res;
@@ -746,8 +695,7 @@ public class APIClient {
        return getEstimatedHistogramAsLongArrValue(string, null);
    }

    public Map<String, Double> getMapStringDouble(String string,
            MultivaluedMap<String, String> queryParams) {
    public Map<String, Double> getMapStringDouble(String string, MultivaluedMap<String, String> queryParams) {
        if (string.equals("")) {
            return null;
        }
@@ -775,6 +723,7 @@ public class APIClient {
        reader.close();
        return map;
    }

    public Map<String, Double> getMapStringDouble(String string) {
        return getMapStringDouble(string, null);
    }
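Note: the new static getReader(Class<T>) complements the instance getters with a typed accessor. A usage sketch, assuming an APIClient instance named client and an illustrative API path (neither is from this hunk):

    BiFunction<APIClient, String, Long> reader = APIClient.getReader(Long.class);
    Long value = reader.apply(client, "/illustrative/path"); // delegates to client.getLongValue(...)
    // Unsupported types fail fast:
    // APIClient.getReader(Float.class) throws IllegalArgumentException("java.lang.Float")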
@@ -30,23 +30,22 @@ import org.yaml.snakeyaml.Yaml;
 */

public class APIConfig {
    static String address = "localhost";
    static String port = "10000";
    private String address = "localhost";
    private String port = "10000";

    public static String getAddress() {
    public String getAddress() {
        return address;
    }

    public static String getPort() {
    public String getPort() {
        return port;
    }

    public static String getBaseUrl() {
        return "http://" + address + ":"
                + port;
    public String getBaseUrl() {
        return "http://" + address + ":" + port;
    }

    public static void readFile(String name) {
    private void readFile(String name) {
        System.out.println("Using config file: " + name);
        InputStream input;
        try {
@@ -61,7 +60,7 @@ public class APIConfig {
                address = (String) map.get("api_address");
            }
            if (map.containsKey("api_port")) {
                port = (String) map.get("api_port").toString();
                port = map.get("api_port").toString();
            }
        } catch (FileNotFoundException e) {
            System.err.println("fail reading from config file: " + name);
@@ -74,7 +73,7 @@ public class APIConfig {
        return varTmpDir.exists();
    }

    public static boolean loadIfExists(String path, String name) {
    private boolean loadIfExists(String path, String name) {
        if (path == null) {
            return false;
        }
@@ -84,24 +83,21 @@ public class APIConfig {
        readFile(path + name);
        return true;
    }

    /**
     * setConfig load the JMX proxy configuration
     * The configuration hierarchy is as follow:
     * Command line argument takes precedence over everything
     * Then configuration file in the command line (command line
     * argument can replace specific values in it.
     * Then SCYLLA_CONF/scylla.yaml
     * Then SCYLLA_HOME/conf/scylla.yaml
     * Then conf/scylla.yaml
     * Then the default values
     * With file configuration, to make it clearer what is been used, only
     * one file will be chosen with the highest precedence
     * setConfig load the JMX proxy configuration The configuration hierarchy is
     * as follow: Command line argument takes precedence over everything Then
     * configuration file in the command line (command line argument can replace
     * specific values in it. Then SCYLLA_CONF/scylla.yaml Then
     * SCYLLA_HOME/conf/scylla.yaml Then conf/scylla.yaml Then the default
     * values With file configuration, to make it clearer what is been used,
     * only one file will be chosen with the highest precedence
     */
    public static void setConfig() {
        if (!System.getProperty("apiconfig","").equals("")) {
    public APIConfig() {
        if (!System.getProperty("apiconfig", "").equals("")) {
            readFile(System.getProperty("apiconfig"));
        } else if (!loadIfExists(System.getenv("SCYLLA_CONF"), "/scylla.yaml") &&
                !loadIfExists(System.getenv("SCYLLA_HOME"), "/conf/scylla.yaml")) {
        } else if (!loadIfExists(System.getenv("SCYLLA_CONF"), "/scylla.yaml")
                && !loadIfExists(System.getenv("SCYLLA_HOME"), "/conf/scylla.yaml")) {
            loadIfExists("", "conf/scylla.yaml");
        }
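Note: configuration is now per instance; constructing an APIConfig runs the same lookup chain setConfig() used to perform. The wiring, as Main now does it (defaults are the ones above):

    APIConfig config = new APIConfig();   // -Dapiconfig, else SCYLLA_CONF/scylla.yaml,
                                          // SCYLLA_HOME/conf/scylla.yaml, conf/scylla.yaml
    APIClient client = new APIClient(config);
    System.out.println("Connecting to " + config.getBaseUrl()); // default: http://localhost:10000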
@@ -23,13 +23,11 @@ package com.scylladb.jmx.api;

import javax.json.JsonObject;

import com.scylladb.jmx.utils.EstimatedHistogram;
class CacheEntry {
    private long time;
    private Object value;

public class CacheEntry {
    long time;
    Object value;

    CacheEntry(Object res) {
    public CacheEntry(Object res) {
        time = System.currentTimeMillis();
        this.value = res;
    }
@@ -42,10 +40,6 @@ public class CacheEntry {
        return (String) value;
    }

    public EstimatedHistogram getEstimatedHistogram() {
        return (EstimatedHistogram)value;
    }

    public JsonObject jsonObject() {
        return (JsonObject) value;
    }
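Note: CacheEntry is the value type behind APIClient's response cache. The access pattern, condensed from the getRawValue/getEstimatedHistogram code above (a sketch of the flow inside APIClient, not a verbatim excerpt):

    String key = getCacheKey(path, queryParams, duration); // null when duration <= 0
    String res = getStringFromCache(key, duration);        // null when missing or expired
    if (res != null) {
        return res;
    }
    res = get(path, queryParams).get(String.class);        // stand-in for the real fetch and error handling
    if (duration > 0) {
        cache.put(key, new CacheEntry(res));               // stamped with System.currentTimeMillis()
    }
    return res;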
@@ -3,38 +3,128 @@
 */
package com.scylladb.jmx.main;

import com.scylladb.jmx.api.APIConfig;
import com.scylladb.jmx.utils.RMIServerSocketFactoryImpl;
import static java.lang.management.ManagementFactory.getPlatformMBeanServer;
import static java.rmi.registry.LocateRegistry.createRegistry;
import static java.util.Arrays.asList;
import static javax.net.ServerSocketFactory.getDefault;

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.rmi.server.RMIServerSocketFactory;
import java.util.HashMap;
import java.util.Map;

import javax.management.MBeanServer;
import javax.management.remote.JMXConnectorServer;
import javax.management.remote.JMXServiceURL;
import javax.management.remote.rmi.RMIConnectorServer;

import org.apache.cassandra.db.commitlog.CommitLog;
import org.apache.cassandra.db.compaction.CompactionManager;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.gms.FailureDetector;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.locator.EndpointSnitchInfo;
import org.apache.cassandra.net.MessagingService;
import org.apache.cassandra.service.CacheService;
import org.apache.cassandra.service.GCInspector;
import org.apache.cassandra.service.StorageProxy;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.streaming.StreamManager;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.api.APIConfig;
import com.scylladb.jmx.metrics.APIMBean;

public class Main {
    // todo: command line options. Make us an agent class (also)
    private static final APIConfig config = new APIConfig();
    public static final APIClient client = new APIClient(config);

    private static JMXConnectorServer jmxServer = null;

    private static void setupJmx() {
        System.setProperty("javax.management.builder.initial", "com.scylladb.jmx.utils.APIBuilder");
        String jmxPort = System.getProperty("com.sun.management.jmxremote.port");

        if (jmxPort == null) {
            System.out.println("JMX is not enabled to receive remote connections.");

            jmxPort = System.getProperty("cassandra.jmx.local.port", "7199");
            String address = System.getProperty("jmx.address", "localhost");
            if (address.equals("localhost")) {
                System.setProperty("java.rmi.server.hostname", InetAddress.getLoopbackAddress().getHostAddress());
            } else {
                try {
                    System.setProperty("java.rmi.server.hostname", InetAddress.getByName(address).getHostAddress());
                } catch (UnknownHostException e) {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }

            }
            try {
                RMIServerSocketFactory serverFactory = pPort -> getDefault().createServerSocket(pPort, 0,
                        InetAddress.getLoopbackAddress());
                createRegistry(Integer.valueOf(jmxPort), null, serverFactory);

                StringBuffer url = new StringBuffer();
                url.append("service:jmx:");
                url.append("rmi://").append(address).append("/jndi/");
                url.append("rmi://").append(address).append(":").append(jmxPort).append("/jmxrmi");
                System.out.println(url);
                Map<String, Object> env = new HashMap<>();
                env.put(RMIConnectorServer.RMI_SERVER_SOCKET_FACTORY_ATTRIBUTE, serverFactory);

                jmxServer = new RMIConnectorServer(new JMXServiceURL(url.toString()), env, getPlatformMBeanServer());

                jmxServer.start();
            } catch (IOException e) {
                System.out.println("Error starting local jmx server: " + e.toString());
            }

        } else {
            System.out.println("JMX is enabled to receive remote connections on port: " + jmxPort);
        }

    }

    public static void main(String[] args) throws Exception {
        APIConfig.setConfig();
        System.out.println("Connecting to " + APIConfig.getBaseUrl());
        System.out.println("Connecting to " + config.getBaseUrl());
        System.out.println("Starting the JMX server");
        RMIServerSocketFactoryImpl.maybeInitJmx();
        StorageService.getInstance();
        StorageProxy.getInstance();
        MessagingService.getInstance();
        CommitLog.getInstance();
        Gossiper.getInstance();
        EndpointSnitchInfo.getInstance();
        FailureDetector.getInstance();
        CacheService.getInstance();
        CompactionManager.getInstance();
        GCInspector.register();
        Thread.sleep(Long.MAX_VALUE);

        setupJmx();

        try {
            MBeanServer server = getPlatformMBeanServer();
            for (Class<? extends APIMBean> clazz : asList(StorageService.class, StorageProxy.class,
                    MessagingService.class, CommitLog.class, Gossiper.class, EndpointSnitchInfo.class,
                    FailureDetector.class, CacheService.class, CompactionManager.class, GCInspector.class,
                    StreamManager.class)) {
                Constructor<? extends APIMBean> c = clazz.getDeclaredConstructor(APIClient.class);
                APIMBean m = c.newInstance(client);
                server.registerMBean(m, null);
            }

            try {
                // forces check for dynamically created mbeans
                server.queryNames(null, null);
            } catch (IllegalStateException e) {
                // ignore this. Just means we started before scylla.
            }

            for (;;) {
                Thread.sleep(Long.MAX_VALUE);
            }
        } finally {
            // make sure to kill the server otherwise we can hang. Not an issue
            // when killed perhaps, but any exception above etc would leave a
            // zombie.
            if (jmxServer != null) {
                jmxServer.stop();
            }
        }
    }

}
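Note: with the defaults above, setupJmx() publishes the platform MBeanServer at service:jmx:rmi://localhost/jndi/rmi://localhost:7199/jmxrmi. A client-side sketch (assuming the default port and a running scylla-jmx) to verify the endpoint:

    import javax.management.MBeanServerConnection;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class JmxProbe {
        public static void main(String[] args) throws Exception {
            JMXServiceURL url = new JMXServiceURL(
                    "service:jmx:rmi://localhost/jndi/rmi://localhost:7199/jmxrmi");
            try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
                MBeanServerConnection conn = jmxc.getMBeanServerConnection();
                System.out.println("MBeans visible: " + conn.getMBeanCount());
            }
        }
    }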
src/main/java/com/scylladb/jmx/metrics/APIMBean.java | 189 (Normal file)
@@ -0,0 +1,189 @@
package com.scylladb.jmx.metrics;

import java.lang.reflect.Field;
import java.util.Set;
import java.util.function.Function;
import java.util.function.Predicate;

import javax.management.BadAttributeValueExpException;
import javax.management.BadBinaryOpValueExpException;
import javax.management.BadStringOperationException;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.InvalidApplicationException;
import javax.management.MBeanRegistration;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;
import javax.management.QueryExp;

import com.scylladb.jmx.api.APIClient;

/**
 * Base type for MBeans in scylla-jmx. Wraps auto naming and {@link APIClient}
 * holding.
 *
 * @author calle
 *
 */
public class APIMBean implements MBeanRegistration {
    protected final APIClient client;
    protected final String mbeanName;

    public APIMBean(APIClient client) {
        this(null, client);
    }

    public APIMBean(String mbeanName, APIClient client) {
        this.mbeanName = mbeanName;
        this.client = client;
    }

    /**
     * Helper method to add/remove dynamically created MBeans from a server
     * instance.
     *
     * @param server
     *            The {@link MBeanServer} to check
     * @param all
     *            All {@link ObjectName}s that should be bound
     * @param predicate
     *            {@link QueryExp} predicate to filter relevant object names.
     * @param generator
     *            {@link Function} to create a new MBean instance for a given
     *            {@link ObjectName}
     *
     * @return
     * @throws MalformedObjectNameException
     */
    public static boolean checkRegistration(MBeanServer server, Set<ObjectName> all,
            final Predicate<ObjectName> predicate, Function<ObjectName, Object> generator)
            throws MalformedObjectNameException {
        Set<ObjectName> registered = queryNames(server, predicate);
        for (ObjectName name : registered) {
            if (!all.contains(name)) {
                try {
                    server.unregisterMBean(name);
                } catch (MBeanRegistrationException | InstanceNotFoundException e) {
                }
            }
        }

        int added = 0;
        for (ObjectName name : all) {
            if (!registered.contains(name)) {
                try {
                    server.registerMBean(generator.apply(name), name);
                    added++;
                } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) {
                }
            }
        }
        return added > 0;
    }

    /**
     * Helper method to query {@link ObjectName}s from an {@link MBeanServer}
     * based on {@link Predicate}
     *
     * @param server
     * @param predicate
     * @return
     */
    public static Set<ObjectName> queryNames(MBeanServer server, final Predicate<ObjectName> predicate) {
        @SuppressWarnings("serial")
        Set<ObjectName> registered = server.queryNames(null, new QueryExp() {
            @Override
            public void setMBeanServer(MBeanServer s) {
            }

            @Override
            public boolean apply(ObjectName name) throws BadStringOperationException, BadBinaryOpValueExpException,
                    BadAttributeValueExpException, InvalidApplicationException {
                return predicate.test(name);
            }
        });
        return registered;
    }

    MBeanServer server;
    ObjectName name;

    protected final ObjectName getBoundName() {
        return name;
    }

    /**
     * Figure out an {@link ObjectName} for this object based on either
     * contructor parameter, static field, or just package/class name.
     *
     * @return
     * @throws MalformedObjectNameException
     */
    protected ObjectName generateName() throws MalformedObjectNameException {
        String mbeanName = this.mbeanName;
        if (mbeanName == null) {
            Field f;
            try {
                f = getClass().getDeclaredField("MBEAN_NAME");
                f.setAccessible(true);
                mbeanName = (String) f.get(null);
            } catch (Throwable t) {
            }
        }
        if (mbeanName == null) {
            for (Class<?> c : getClass().getInterfaces()) {
                Field f;
                try {
                    f = c.getDeclaredField("OBJECT_NAME");
                    f.setAccessible(true);
                    mbeanName = (String) f.get(null);
                    break;
                } catch (Throwable t) {
                }
            }
        }
        if (mbeanName == null) {
            String name = getClass().getName();
            int i = name.lastIndexOf('.');
            mbeanName = name.substring(0, i) + ":type=" + name.substring(i + 1);
        }
        return new ObjectName(mbeanName);
    }

    /**
     * Keeps track of bound server and optionally generates an
     * {@link ObjectName} for this instance.
     */
    @Override
    public ObjectName preRegister(MBeanServer server, ObjectName name) throws Exception {
        if (this.server != null) {
            throw new IllegalStateException("Can only exist in a single MBeanServer");
        }
        this.server = server;
        if (name == null) {
            name = generateName();
        }
        this.name = name;

        return name;
    }

    @Override
    public void postRegister(Boolean registrationDone) {
    }

    @Override
    public void preDeregister() throws Exception {
    }

    @Override
    public void postDeregister() {
        assert server != null;
        assert name != null;
        this.server = null;
        this.name = null;
    }
}
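Note: generateName() resolves an ObjectName in order from: the explicit constructor argument, a static MBEAN_NAME field on the class, an OBJECT_NAME field on an implemented interface, and finally package:type=ClassName. A hypothetical subclass (not in this commit) relying on the field convention:

    // Empty management interface; a standard MBean may expose no attributes.
    interface ExampleServiceMBean {
    }

    public class ExampleService extends APIMBean implements ExampleServiceMBean {
        // Picked up reflectively by generateName() when no explicit name is passed.
        @SuppressWarnings("unused")
        private static final String MBEAN_NAME = "com.example:type=ExampleService";

        public ExampleService(APIClient client) {
            super(client);
        }
    }

    // server.registerMBean(new ExampleService(client), null) then binds it
    // under com.example:type=ExampleService via preRegister().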
@@ -1,399 +0,0 @@
package com.scylladb.jmx.metrics;

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */

import java.util.concurrent.TimeUnit;

import com.yammer.metrics.core.APIMetricsRegistry;
import com.yammer.metrics.core.Counter;
import com.yammer.metrics.core.Gauge;
import com.yammer.metrics.core.Histogram;
import com.yammer.metrics.core.Meter;
import com.yammer.metrics.core.MetricName;
import com.yammer.metrics.core.Timer;
import com.yammer.metrics.reporting.JmxReporter;
import com.yammer.metrics.core.APIMeter;

public class APIMetrics {
    private static final APIMetricsRegistry DEFAULT_REGISTRY = new APIMetricsRegistry();
    private static final Thread SHUTDOWN_HOOK = new Thread() {
        public void run() {
            JmxReporter.shutdownDefault();
        }
    };

    static {
        JmxReporter.startDefault(DEFAULT_REGISTRY);
        Runtime.getRuntime().addShutdownHook(SHUTDOWN_HOOK);
    }

    private APIMetrics() { /* unused */
    }

    /**
     * Given a new {@link com.yammer.metrics.core.Gauge}, registers it under the
     * given class and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param metric
     *            the metric
     * @param <T>
     *            the type of the value returned by the metric
     * @return {@code metric}
     */
    public static <T> Gauge<T> newGauge(Class<?> klass, String name,
            Gauge<T> metric) {
        return DEFAULT_REGISTRY.newGauge(klass, name, metric);
    }

    /**
     * Given a new {@link com.yammer.metrics.core.Gauge}, registers it under the
     * given class and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @param metric
     *            the metric
     * @param <T>
     *            the type of the value returned by the metric
     * @return {@code metric}
     */
    public static <T> Gauge<T> newGauge(Class<?> klass, String name,
            String scope, Gauge<T> metric) {
        return DEFAULT_REGISTRY.newGauge(klass, name, scope, metric);
    }

    /**
     * Given a new {@link com.yammer.metrics.core.Gauge}, registers it under the
     * given metric name.
     *
     * @param metricName
     *            the name of the metric
     * @param metric
     *            the metric
     * @param <T>
     *            the type of the value returned by the metric
     * @return {@code metric}
     */
    public static <T> Gauge<T> newGauge(MetricName metricName, Gauge<T> metric) {
        return DEFAULT_REGISTRY.newGauge(metricName, metric);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.Counter} and registers it
     * under the given class and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @return a new {@link com.yammer.metrics.core.Counter}
     */
    public static Counter newCounter(String url, Class<?> klass, String name) {
        return DEFAULT_REGISTRY.newCounter(url, klass, name);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.Counter} and registers it
     * under the given class and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @return a new {@link com.yammer.metrics.core.Counter}
     */
    public static Counter newCounter(String url, Class<?> klass, String name,
            String scope) {
        return DEFAULT_REGISTRY.newCounter(url, klass, name, scope);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.Counter} and registers it
     * under the given metric name.
     *
     * @param metricName
     *            the name of the metric
     * @return a new {@link com.yammer.metrics.core.Counter}
     */
    public static Counter newCounter(String url, MetricName metricName) {
        return DEFAULT_REGISTRY.newCounter(url, metricName);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.Histogram} and registers it
     * under the given class and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param biased
     *            whether or not the histogram should be biased
     * @return a new {@link com.yammer.metrics.core.Histogram}
     */
    public static Histogram newHistogram(String url, Class<?> klass,
            String name, boolean biased) {
        return DEFAULT_REGISTRY.newHistogram(url, klass, name, biased);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.Histogram} and registers it
     * under the given class, name, and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @param biased
     *            whether or not the histogram should be biased
     * @return a new {@link com.yammer.metrics.core.Histogram}
     */
    public static Histogram newHistogram(String url, Class<?> klass,
            String name, String scope, boolean biased) {
        return DEFAULT_REGISTRY.newHistogram(url, klass, name, scope, biased);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.Histogram} and registers it
     * under the given metric name.
     *
     * @param metricName
     *            the name of the metric
     * @param biased
     *            whether or not the histogram should be biased
     * @return a new {@link com.yammer.metrics.core.Histogram}
     */
    public static Histogram newHistogram(String url, MetricName metricName,
            boolean biased) {
        return DEFAULT_REGISTRY.newHistogram(url, metricName, biased);
    }

    /**
     * Creates a new non-biased {@link com.yammer.metrics.core.Histogram} and
     * registers it under the given class and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @return a new {@link com.yammer.metrics.core.Histogram}
     */
    public static Histogram newHistogram(String url, Class<?> klass, String name) {
        return DEFAULT_REGISTRY.newHistogram(url, klass, name);
    }

    /**
     * Creates a new non-biased {@link com.yammer.metrics.core.Histogram} and
     * registers it under the given class, name, and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @return a new {@link com.yammer.metrics.core.Histogram}
     */
    public static Histogram newHistogram(String url, Class<?> klass,
            String name, String scope) {
        return DEFAULT_REGISTRY.newHistogram(url, klass, name, scope);
    }

    /**
     * Creates a new non-biased {@link com.yammer.metrics.core.Histogram} and
     * registers it under the given metric name.
     *
     * @param metricName
     *            the name of the metric
     * @return a new {@link com.yammer.metrics.core.Histogram}
     */
    public static Histogram newHistogram(String url, MetricName metricName) {
        return newHistogram(url, metricName, false);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.Meter} and registers it
     * under the given class and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param eventType
     *            the plural name of the type of events the meter is measuring
     *            (e.g., {@code "requests"})
     * @param unit
     *            the rate unit of the new meter
     * @return a new {@link com.yammer.metrics.core.Meter}
     */
    public static APIMeter newMeter(String url, Class<?> klass, String name,
            String eventType, TimeUnit unit) {
        return DEFAULT_REGISTRY.newMeter(url, klass, name, eventType, unit);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.Meter} and registers it
     * under the given class, name, and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @param eventType
     *            the plural name of the type of events the meter is measuring
     *            (e.g., {@code "requests"})
     * @param unit
     *            the rate unit of the new meter
     * @return a new {@link com.yammer.metrics.core.Meter}
     */
    public static APIMeter newMeter(String url, Class<?> klass, String name,
            String scope, String eventType, TimeUnit unit) {
        return DEFAULT_REGISTRY.newMeter(url, klass, name, scope, eventType,
                unit);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.Meter} and registers it
     * under the given metric name.
     *
     * @param metricName
     *            the name of the metric
     * @param eventType
     *            the plural name of the type of events the meter is measuring
     *            (e.g., {@code "requests"})
     * @param unit
     *            the rate unit of the new meter
     * @return a new {@link com.yammer.metrics.core.Meter}
     */
    public static APIMeter newMeter(String url, MetricName metricName,
            String eventType, TimeUnit unit) {
        return DEFAULT_REGISTRY.newMeter(url, metricName, eventType, unit);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it
     * under the given class and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param durationUnit
     *            the duration scale unit of the new timer
     * @param rateUnit
     *            the rate scale unit of the new timer
     * @return a new {@link com.yammer.metrics.core.APITimer}
     */
    public static Timer newTimer(String url, Class<?> klass, String name,
            TimeUnit durationUnit, TimeUnit rateUnit) {
        return DEFAULT_REGISTRY.newTimer(url, klass, name, durationUnit, rateUnit);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it
     * under the given class and name, measuring elapsed time in milliseconds
     * and invocations per second.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @return a new {@link com.yammer.metrics.core.APITimer}
     */
    public static Timer newTimer(String url, Class<?> klass, String name) {
        return DEFAULT_REGISTRY.newTimer(url, klass, name);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it
     * under the given class, name, and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @param durationUnit
     *            the duration scale unit of the new timer
     * @param rateUnit
     *            the rate scale unit of the new timer
     * @return a new {@link com.yammer.metrics.core.APITimer}
     */
    public static Timer newTimer(String url, Class<?> klass, String name, String scope,
            TimeUnit durationUnit, TimeUnit rateUnit) {
        return DEFAULT_REGISTRY.newTimer(url, klass, name, scope, durationUnit,
                rateUnit);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it
     * under the given class, name, and scope, measuring elapsed time in
     * milliseconds and invocations per second.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @return a new {@link com.yammer.metrics.core.APITimer}
     */
    public static Timer newTimer(String url, Class<?> klass, String name, String scope) {
        return DEFAULT_REGISTRY.newTimer(url, klass, name, scope);
    }

    /**
     * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it
     * under the given metric name.
     *
     * @param metricName
     *            the name of the metric
     * @param durationUnit
     *            the duration scale unit of the new timer
     * @param rateUnit
     *            the rate scale unit of the new timer
     * @return a new {@link com.yammer.metrics.core.APITimer}
     */
    public static Timer newTimer(String url, MetricName metricName, TimeUnit durationUnit,
            TimeUnit rateUnit) {
        return DEFAULT_REGISTRY.newTimer(url, metricName, durationUnit, rateUnit);
    }

    /**
     * Returns the (static) default registry.
     *
     * @return the metrics registry
     */
    public static APIMetricsRegistry defaultRegistry() {
        return DEFAULT_REGISTRY;
    }

    /**
     * Shuts down all thread pools for the default registry.
     */
    public static void shutdown() {
        DEFAULT_REGISTRY.shutdown();
        JmxReporter.shutdownDefault();
        Runtime.getRuntime().removeShutdownHook(SHUTDOWN_HOOK);
    }

}
src/main/java/com/scylladb/jmx/metrics/MetricsMBean.java | 91 (Normal file)
@@ -0,0 +1,91 @@
package com.scylladb.jmx.metrics;

import static java.util.Arrays.asList;

import java.util.Collection;
import java.util.function.Predicate;
import java.util.function.Supplier;

import javax.management.InstanceNotFoundException;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

import org.apache.cassandra.metrics.Metrics;
import org.apache.cassandra.metrics.MetricsRegistry;

import com.scylladb.jmx.api.APIClient;

/**
 * Base type for MBeans containing {@link Metrics}.
 *
 * @author calle
 *
 */
public abstract class MetricsMBean extends APIMBean {
    private final Collection<Metrics> metrics;

    public MetricsMBean(APIClient client, Metrics... metrics) {
        this(null, client, metrics);
    }

    public MetricsMBean(String mbeanName, APIClient client, Metrics... metrics) {
        this(mbeanName, client, asList(metrics));
    }

    public MetricsMBean(String mbeanName, APIClient client, Collection<Metrics> metrics) {
        super(mbeanName, client);
        this.metrics = metrics;
    }

    protected Predicate<ObjectName> getTypePredicate() {
        String domain = name.getDomain();
        String type = name.getKeyProperty("type");
        return n -> {
            return domain.equals(n.getDomain()) && type.equals(n.getKeyProperty("type"));
        };
    }

    private void register(MetricsRegistry registry, MBeanServer server) throws MalformedObjectNameException {
        // Check if we're the first/last of our type bound/removed.
        boolean empty = queryNames(server, getTypePredicate()).isEmpty();
        for (Metrics m : metrics) {
            if (empty) {
                m.registerGlobals(registry);
            }
            m.register(registry);
        }
    }

    @Override
    public ObjectName preRegister(MBeanServer server, ObjectName name) throws Exception {
        // Get name etc.
        name = super.preRegister(server, name);
        // Register all metrics in server
        register(new MetricsRegistry(client, server), server);
        return name;
    }

    @Override
    public void postDeregister() {
        // We're officially unbound. Remove all metrics we added.
        try {
            register(new MetricsRegistry(client, server) {
                // Unbind instead of bind. Yes.
                @Override
                public void register(Supplier<MetricMBean> s, ObjectName... objectNames) {
                    for (ObjectName name : objectNames) {
                        try {
                            server.unregisterMBean(name);
                        } catch (MBeanRegistrationException | InstanceNotFoundException e) {
                        }
                    }
                }
            }, server);
        } catch (MalformedObjectNameException e) {
            // TODO : log?
        }
        super.postDeregister();
    }
}
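Note: getTypePredicate() groups MBeans by domain plus the "type" key property, which is how register() decides whether this bean is the first (or last) of its type and should also bind the global metrics. A standalone sketch of the predicate's behaviour (the object names are illustrative, not from this commit):

    import java.util.function.Predicate;
    import javax.management.ObjectName;

    public class PredicateDemo {
        public static void main(String[] args) throws Exception {
            ObjectName bound = new ObjectName("org.apache.cassandra.metrics:type=Cache,name=HitRate");
            String domain = bound.getDomain();
            String type = bound.getKeyProperty("type");
            Predicate<ObjectName> p = n -> domain.equals(n.getDomain()) && type.equals(n.getKeyProperty("type"));
            // true: same domain and type, different name
            System.out.println(p.test(new ObjectName("org.apache.cassandra.metrics:type=Cache,name=Size")));
            // false: different type
            System.out.println(p.test(new ObjectName("org.apache.cassandra.metrics:type=Storage")));
        }
    }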
@ -3,6 +3,8 @@ package com.scylladb.jmx.utils;
 * Copyright 2016 ScyllaDB
 */

import static com.scylladb.jmx.main.Main.client;

/*
 * This file is part of Scylla.
 *
@ -21,21 +23,13 @@ package com.scylladb.jmx.utils;
 */

import javax.management.MBeanServer;
import javax.management.MBeanServerBuilder;
import javax.management.MBeanServerDelegate;

import mx4j.server.ChainedMBeanServerBuilder;

public class APIBuilder extends ChainedMBeanServerBuilder {
    public APIBuilder() {
        super(new mx4j.server.MX4JMBeanServerBuilder());
    }

    public MBeanServer newMBeanServer(String defaultDomain, MBeanServer outer,
            MBeanServerDelegate delegate) {
        APIMBeanServer extern = new APIMBeanServer();
        MBeanServer nested = getMBeanServerBuilder().newMBeanServer(
                defaultDomain, outer == null ? extern : outer, delegate);
        extern.setMBeanServer(nested);
        return extern;
public class APIBuilder extends MBeanServerBuilder {
    @Override
    public MBeanServer newMBeanServer(String defaultDomain, MBeanServer outer, MBeanServerDelegate delegate) {
        MBeanServer nested = super.newMBeanServer(defaultDomain, outer, delegate);
        return new APIMBeanServer(client, nested);
    }
}
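A custom MBeanServerBuilder such as APIBuilder only takes effect if it is named in the javax.management.builder.initial system property before the first MBeanServer is created; this is standard JMX behavior. A small sketch (it assumes APIBuilder and its dependencies are on the classpath):

import javax.management.MBeanServer;
import javax.management.MBeanServerFactory;

public class BuilderDemo {
    public static void main(String[] args) {
        // Usually passed on the command line instead:
        // -Djavax.management.builder.initial=com.scylladb.jmx.utils.APIBuilder
        System.setProperty("javax.management.builder.initial", "com.scylladb.jmx.utils.APIBuilder");
        MBeanServer server = MBeanServerFactory.createMBeanServer();
        System.out.println(server.getClass().getName()); // expect APIMBeanServer
    }
}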
@ -1,103 +0,0 @@
package com.scylladb.jmx.utils;
/**
 * Copyright (C) The MX4J Contributors.
 * All rights reserved.
 *
 * This software is distributed under the terms of the MX4J License version 1.0.
 * See the terms of the MX4J License in the documentation provided with this software.
 */

/**
 * Modified by ScyllaDB
 * Copyright 2016 ScyllaDB
 */
/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import javax.management.MBeanInfo;

import mx4j.server.MBeanIntrospector;
import mx4j.server.MBeanMetaData;

public class APIMBeanIntrospector extends MBeanIntrospector {
    private static final java.util.logging.Logger logger = java.util.logging.Logger
            .getLogger(APIMBeanIntrospector.class.getName());

    public boolean isMBeanCompliant(MBeanMetaData metadata) {
        Class info = metadata.getMBeanInterface();
        if (info != null) {
            String cn = info.getName();
            if (cn != null) {
                if (cn.endsWith("MXBean")) {
                    return true;
                }
            }
        }
        return super.isMBeanCompliant(metadata);
    }

    public void apiIntrospectStandardMBean(MBeanMetaData metadata) {
        try {
            Class[] cArg = new Class[1];
            cArg[0] = MBeanMetaData.class;
            Method met = MBeanIntrospector.class
                    .getDeclaredMethod("introspectStandardMBean", cArg);
            met.setAccessible(true);
            met.invoke((MBeanIntrospector) this, metadata);
        } catch (NoSuchMethodException | SecurityException
                | IllegalAccessException | IllegalArgumentException
                | InvocationTargetException e) {
            logger.warning("Failed setting mbean info " + e.getMessage());
        }
    }

    public void apiIntrospect(MBeanMetaData metadata) {
        apiIntrospectStandardMBean(metadata);
        Class[] cArg = new Class[1];
        cArg[0] = MBeanMetaData.class;
        try {
            Method met = MBeanIntrospector.class
                    .getDeclaredMethod("createStandardMBeanInfo", cArg);
            met.setAccessible(true);
            Object info = met.invoke((MBeanIntrospector) this, metadata);
            metadata.setMBeanInfo((MBeanInfo) info);
        } catch (IllegalAccessException | NoSuchMethodException
                | SecurityException | IllegalArgumentException
                | InvocationTargetException e) {
            logger.warning("Failed setting mbean info" + e.getMessage());
        }
    }

    public void introspect(MBeanMetaData metadata) {
        Class<?> mx_mbean = null;
        for (Class<?> it : metadata.getMBean().getClass().getInterfaces()) {
            if (it.getName().endsWith("MXBean")) {
                mx_mbean = it;
                break;
            }
        }
        if (mx_mbean != null) {
            metadata.setMBeanInterface(mx_mbean);
            apiIntrospect(metadata);
            return;
        }
        super.introspect(metadata);
    }
}
@ -1,132 +1,290 @@
package com.scylladb.jmx.utils;

/**
 * Copyright 2016 ScyllaDB
 */
/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */
import java.lang.reflect.Field;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.HashSet;
import java.util.Hashtable;
import java.util.Iterator;
import java.util.Map;
import java.io.ObjectInputStream;
import java.net.UnknownHostException;
import java.util.Set;
import java.util.logging.Logger;
import java.util.regex.Pattern;

import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.IntrospectionException;
import javax.management.InvalidAttributeValueException;
import javax.management.ListenerNotFoundException;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.NotCompliantMBeanException;
import javax.management.NotificationFilter;
import javax.management.NotificationListener;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.OperationsException;
import javax.management.QueryExp;
import javax.management.ReflectionException;
import javax.management.loading.ClassLoaderRepository;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.metrics.StreamingMetrics;

import mx4j.server.ChainedMBeanServer;
import mx4j.server.MX4JMBeanServer;
import mx4j.util.Utils;
import com.scylladb.jmx.api.APIClient;

public class APIMBeanServer extends ChainedMBeanServer {
    private static final java.util.logging.Logger logger = java.util.logging.Logger
            .getLogger(APIMBeanServer.class.getName());
public class APIMBeanServer implements MBeanServer {
    @SuppressWarnings("unused")
    private static final Logger logger = Logger.getLogger(APIMBeanServer.class.getName());

    public static void log(String str) {
        logger.finest(str);
    private final APIClient client;
    private final MBeanServer server;

    public APIMBeanServer(APIClient client, MBeanServer server) {
        this.client = client;
        this.server = server;
    }

    public void setMBeanServer(MBeanServer server) {
        if (server != null) {
            try {
                Field f = server.getClass().getDeclaredField("introspector");
                f.setAccessible(true);
                f.set(server, new APIMBeanIntrospector());
            } catch (Exception e) {
                logger.warning(
                        "Failed setting new interceptor" + e.getMessage());
            }
        }
        super.setMBeanServer(server);
    @Override
    public ObjectInstance createMBean(String className, ObjectName name) throws ReflectionException,
            InstanceAlreadyExistsException, MBeanRegistrationException, MBeanException, NotCompliantMBeanException {
        return server.createMBean(className, name);
    }

    public ObjectName apiNormalizeObjectName(ObjectName name) {
        try {
            Class[] cArg = new Class[1];
            cArg[0] = ObjectName.class;
            Method met = MX4JMBeanServer.class
                    .getDeclaredMethod("normalizeObjectName", cArg);
            met.setAccessible(true);
            return (ObjectName) met.invoke((MX4JMBeanServer) getMBeanServer(),
                    name);
        } catch (NoSuchMethodException | SecurityException | IllegalAccessException | IllegalArgumentException | InvocationTargetException e) {
            // TODO Auto-generated catch block
            return null;
        }
    @Override
    public ObjectInstance createMBean(String className, ObjectName name, ObjectName loaderName)
            throws ReflectionException, InstanceAlreadyExistsException, MBeanRegistrationException, MBeanException,
            NotCompliantMBeanException, InstanceNotFoundException {
        return server.createMBean(className, name, loaderName);
    }

    @Override
    public ObjectInstance createMBean(String className, ObjectName name, Object[] params, String[] signature)
            throws ReflectionException, InstanceAlreadyExistsException, MBeanRegistrationException, MBeanException,
            NotCompliantMBeanException {
        return server.createMBean(className, name, params, signature);
    }

    @Override
    public ObjectInstance createMBean(String className, ObjectName name, ObjectName loaderName, Object[] params,
            String[] signature) throws ReflectionException, InstanceAlreadyExistsException, MBeanRegistrationException,
            MBeanException, NotCompliantMBeanException, InstanceNotFoundException {
        return server.createMBean(className, name, loaderName, params, signature);
    }

    @Override
    public ObjectInstance registerMBean(Object object, ObjectName name)
            throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {
        return server.registerMBean(object, name);
    }

    @Override
    public void unregisterMBean(ObjectName name) throws InstanceNotFoundException, MBeanRegistrationException {
        server.unregisterMBean(name);
    }

    @Override
    public ObjectInstance getObjectInstance(ObjectName name) throws InstanceNotFoundException {
        checkRegistrations(name);
        return server.getObjectInstance(name);
    }

    @Override
    public Set<ObjectName> queryNames(ObjectName name, QueryExp query) {
        if (name == null) {
            return super.queryNames(name, query);
        checkRegistrations(name);
        return server.queryNames(name, query);
    }

    @Override
    public Set<ObjectInstance> queryMBeans(ObjectName name, QueryExp query) {
        checkRegistrations(name);
        return server.queryMBeans(name, query);
    }

    @Override
    public boolean isRegistered(ObjectName name) {
        checkRegistrations(name);
        return server.isRegistered(name);
    }

    @Override
    public Integer getMBeanCount() {
        return server.getMBeanCount();
    }

    @Override
    public Object getAttribute(ObjectName name, String attribute)
            throws MBeanException, AttributeNotFoundException, InstanceNotFoundException, ReflectionException {
        checkRegistrations(name);
        return server.getAttribute(name, attribute);
    }

    @Override
    public AttributeList getAttributes(ObjectName name, String[] attributes)
            throws InstanceNotFoundException, ReflectionException {
        checkRegistrations(name);
        return server.getAttributes(name, attributes);
    }

    @Override
    public void setAttribute(ObjectName name, Attribute attribute) throws InstanceNotFoundException,
            AttributeNotFoundException, InvalidAttributeValueException, MBeanException, ReflectionException {
        checkRegistrations(name);
        server.setAttribute(name, attribute);
    }

    @Override
    public AttributeList setAttributes(ObjectName name, AttributeList attributes)
            throws InstanceNotFoundException, ReflectionException {
        checkRegistrations(name);
        return server.setAttributes(name, attributes);
    }

    @Override
    public Object invoke(ObjectName name, String operationName, Object[] params, String[] signature)
            throws InstanceNotFoundException, MBeanException, ReflectionException {
        checkRegistrations(name);
        return server.invoke(name, operationName, params, signature);
    }

    @Override
    public String getDefaultDomain() {
        return server.getDefaultDomain();
    }

    @Override
    public String[] getDomains() {
        return server.getDomains();
    }

    @Override
    public void addNotificationListener(ObjectName name, NotificationListener listener, NotificationFilter filter,
            Object handback) throws InstanceNotFoundException {
        server.addNotificationListener(name, listener, filter, handback);
    }

    @Override
    public void addNotificationListener(ObjectName name, ObjectName listener, NotificationFilter filter,
            Object handback) throws InstanceNotFoundException {
        server.addNotificationListener(name, listener, filter, handback);
    }

    @Override
    public void removeNotificationListener(ObjectName name, ObjectName listener)
            throws InstanceNotFoundException, ListenerNotFoundException {
        server.removeNotificationListener(name, listener);
    }

    @Override
    public void removeNotificationListener(ObjectName name, ObjectName listener, NotificationFilter filter,
            Object handback) throws InstanceNotFoundException, ListenerNotFoundException {
        server.removeNotificationListener(name, listener, filter, handback);
    }

    @Override
    public void removeNotificationListener(ObjectName name, NotificationListener listener)
            throws InstanceNotFoundException, ListenerNotFoundException {
        server.removeNotificationListener(name, listener);
    }

    @Override
    public void removeNotificationListener(ObjectName name, NotificationListener listener, NotificationFilter filter,
            Object handback) throws InstanceNotFoundException, ListenerNotFoundException {
        server.removeNotificationListener(name, listener, filter, handback);
    }

    @Override
    public MBeanInfo getMBeanInfo(ObjectName name)
            throws InstanceNotFoundException, IntrospectionException, ReflectionException {
        checkRegistrations(name);
        return server.getMBeanInfo(name);
    }

    @Override
    public boolean isInstanceOf(ObjectName name, String className) throws InstanceNotFoundException {
        return server.isInstanceOf(name, className);
    }

    @Override
    public Object instantiate(String className) throws ReflectionException, MBeanException {
        return server.instantiate(className);
    }

    @Override
    public Object instantiate(String className, ObjectName loaderName)
            throws ReflectionException, MBeanException, InstanceNotFoundException {
        return server.instantiate(className, loaderName);
    }

    @Override
    public Object instantiate(String className, Object[] params, String[] signature)
            throws ReflectionException, MBeanException {
        return server.instantiate(className, params, signature);
    }

    @Override
    public Object instantiate(String className, ObjectName loaderName, Object[] params, String[] signature)
            throws ReflectionException, MBeanException, InstanceNotFoundException {
        return server.instantiate(className, loaderName, params, signature);
    }

    @Override
    @Deprecated
    public ObjectInputStream deserialize(ObjectName name, byte[] data)
            throws InstanceNotFoundException, OperationsException {
        return server.deserialize(name, data);
    }

    @Override
    @Deprecated
    public ObjectInputStream deserialize(String className, byte[] data)
            throws OperationsException, ReflectionException {
        return server.deserialize(className, data);
    }

    @Override
    @Deprecated
    public ObjectInputStream deserialize(String className, ObjectName loaderName, byte[] data)
            throws InstanceNotFoundException, OperationsException, ReflectionException {
        return server.deserialize(className, loaderName, data);
    }

    @Override
    public ClassLoader getClassLoaderFor(ObjectName mbeanName) throws InstanceNotFoundException {
        return server.getClassLoaderFor(mbeanName);
    }

    @Override
    public ClassLoader getClassLoader(ObjectName loaderName) throws InstanceNotFoundException {
        return server.getClassLoader(loaderName);
    }

    @Override
    public ClassLoaderRepository getClassLoaderRepository() {
        return server.getClassLoaderRepository();
    }

    static final Pattern tables = Pattern.compile("^(ColumnFamil(ies|y)|(Index)?Tables?)$");

    private boolean checkRegistrations(ObjectName name) {
        if (name != null && server.isRegistered(name)) {
            return false;
        }
        if (name.getCanonicalKeyPropertyListString().contains("ColumnFamilies")) {
            ColumnFamilyStore.checkRegistration();
        } else if (name.getCanonicalKeyPropertyListString().contains("Stream")) {
            StreamingMetrics.checkRegistration();
        }
        ObjectName no = apiNormalizeObjectName(name);
        Hashtable patternProps = no.getKeyPropertyList();
        boolean paternFound = false;
        for (Iterator j = patternProps.entrySet().iterator(); j.hasNext();) {
            Map.Entry entry = (Map.Entry) j.next();
            String patternValue = (String) entry.getValue();
            if (patternValue.contains("*")) {
                paternFound = true;
                break;

        boolean result = false;

        try {
            String type = name != null ? name.getKeyProperty("type") : null;
            if (type == null || tables.matcher(type).matches()) {
                result |= ColumnFamilyStore.checkRegistration(client, server);
            }
        }
        if (paternFound) {
            Set<ObjectName> res = new HashSet<ObjectName>();
            for (ObjectName q : (Set<ObjectName>) super.queryNames(null, query)) {
                if (Utils.wildcardMatch(name.getDomain(), q.getDomain())) {
                    Hashtable props = q.getKeyPropertyList();
                    boolean found = true;
                    for (Iterator j = patternProps.entrySet().iterator(); j
                            .hasNext();) {
                        Map.Entry entry = (Map.Entry) j.next();
                        String patternKey = (String) entry.getKey();
                        String patternValue = (String) entry.getValue();
                        if (props.containsKey(patternKey)) {
                            if (!Utils.wildcardMatch(patternValue,
                                    props.get(patternKey).toString())) {
                                found = false;
                                break;
                            }
                        } else {
                            found = false;
                            break;
                        }
                    }
                    if (found) {
                        res.add(q);
                    }
                }
            if (type == null || StreamingMetrics.TYPE_NAME.equals(type)) {
                result |= StreamingMetrics.checkRegistration(client, server);
            }
            return res;
        } catch (MalformedObjectNameException | UnknownHostException e) {
            // TODO: log
        }
        return super.queryNames(name, query);
        return result;
    }
}
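Since every lookup above funnels through checkRegistrations(name) before delegating, an ordinary JMX query is enough to fault in lazily registered table or streaming MBeans. A standard client-side query sketch (the domain and type strings are illustrative):

import java.lang.management.ManagementFactory;
import java.util.Set;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

public class QueryDemo {
    public static void main(String[] args) throws MalformedObjectNameException {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        // On an APIMBeanServer, this pattern query first triggers checkRegistrations.
        ObjectName pattern = new ObjectName("org.apache.cassandra.db:type=ColumnFamilies,*");
        Set<ObjectName> names = server.queryNames(pattern, null);
        names.forEach(System.out::println);
    }
}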
@ -1,315 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */

package com.scylladb.jmx.utils;

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLongArray;

import com.google.common.base.Objects;

import org.slf4j.Logger;

public class EstimatedHistogram {
    /**
     * The series of values to which the counts in `buckets` correspond: 1, 2,
     * 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc. Thus, a `buckets` of [0, 0, 1,
     * 10] would mean we had seen one value of 3 and 10 values of 4.
     *
     * The series starts at 1 and grows by 1.2 each time (rounding and removing
     * duplicates). It goes from 1 to around 36M by default (creating 90+1
     * buckets), which will give us timing resolution from microseconds to 36
     * seconds, with less precision as the numbers get larger.
     *
     * Each bucket represents values from (previous bucket offset, current
     * offset].
     */
    private final long[] bucketOffsets;

    // buckets is one element longer than bucketOffsets -- the last element is
    // values greater than the last offset
    final AtomicLongArray buckets;

    public EstimatedHistogram() {
        this(90);
    }

    public EstimatedHistogram(int bucketCount) {
        bucketOffsets = newOffsets(bucketCount);
        buckets = new AtomicLongArray(bucketOffsets.length + 1);
    }

    public EstimatedHistogram(long[] offsets, long[] bucketData) {
        assert bucketData.length == offsets.length + 1;
        bucketOffsets = offsets;
        buckets = new AtomicLongArray(bucketData);
    }

    public EstimatedHistogram(long[] bucketData) {
        bucketOffsets = newOffsets(bucketData.length - 1);
        buckets = new AtomicLongArray(bucketData);
    }

    private static long[] newOffsets(int size) {
        if (size <= 0) {
            return new long[0];
        }
        long[] result = new long[size];
        long last = 1;
        result[0] = last;
        for (int i = 1; i < size; i++) {
            long next = Math.round(last * 1.2);
            if (next == last)
                next++;
            result[i] = next;
            last = next;
        }

        return result;
    }

    /**
     * @return the histogram values corresponding to each bucket index
     */
    public long[] getBucketOffsets() {
        return bucketOffsets;
    }

    /**
     * Increments the count of the bucket closest to n, rounding UP.
     *
     * @param n
     */
    public void add(long n) {
        int index = Arrays.binarySearch(bucketOffsets, n);
        if (index < 0) {
            // inexact match, take the first bucket higher than n
            index = -index - 1;
        }
        // else exact match; we're good
        buckets.incrementAndGet(index);
    }

    /**
     * @return the count in the given bucket
     */
    long get(int bucket) {
        return buckets.get(bucket);
    }

    /**
     * @param reset
     *            zero out buckets afterwards if true
     * @return a long[] containing the current histogram buckets
     */
    public long[] getBuckets(boolean reset) {
        final int len = buckets.length();
        long[] rv = new long[len];

        if (reset)
            for (int i = 0; i < len; i++)
                rv[i] = buckets.getAndSet(i, 0L);
        else
            for (int i = 0; i < len; i++)
                rv[i] = buckets.get(i);

        return rv;
    }

    /**
     * @return the smallest value that could have been added to this histogram
     */
    public long min() {
        for (int i = 0; i < buckets.length(); i++) {
            if (buckets.get(i) > 0)
                return i == 0 ? 0 : 1 + bucketOffsets[i - 1];
        }
        return 0;
    }

    /**
     * @return the largest value that could have been added to this histogram.
     *         If the histogram overflowed, returns Long.MAX_VALUE.
     */
    public long max() {
        int lastBucket = buckets.length() - 1;
        if (buckets.get(lastBucket) > 0)
            return Long.MAX_VALUE;

        for (int i = lastBucket - 1; i >= 0; i--) {
            if (buckets.get(i) > 0)
                return bucketOffsets[i];
        }
        return 0;
    }

    /**
     * @param percentile
     * @return estimated value at given percentile
     */
    public long percentile(double percentile) {
        assert percentile >= 0 && percentile <= 1.0;
        int lastBucket = buckets.length() - 1;
        if (buckets.get(lastBucket) > 0)
            throw new IllegalStateException(
                    "Unable to compute when histogram overflowed");

        long pcount = (long) Math.floor(count() * percentile);
        if (pcount == 0)
            return 0;

        long elements = 0;
        for (int i = 0; i < lastBucket; i++) {
            elements += buckets.get(i);
            if (elements >= pcount)
                return bucketOffsets[i];
        }
        return 0;
    }

    /**
     * @return the mean histogram value (average of bucket offsets, weighted by
     *         count)
     * @throws IllegalStateException
     *             if any values were greater than the largest bucket threshold
     */
    public long mean() {
        int lastBucket = buckets.length() - 1;
        if (buckets.get(lastBucket) > 0)
            throw new IllegalStateException(
                    "Unable to compute ceiling for max when histogram overflowed");

        long elements = 0;
        long sum = 0;
        for (int i = 0; i < lastBucket; i++) {
            long bCount = buckets.get(i);
            elements += bCount;
            sum += bCount * bucketOffsets[i];
        }

        return (long) Math.ceil((double) sum / elements);
    }

    /**
     * @return the total number of non-zero values
     */
    public long count() {
        long sum = 0L;
        for (int i = 0; i < buckets.length(); i++)
            sum += buckets.get(i);
        return sum;
    }

    /**
     * @return true if this histogram has overflowed -- that is, a value larger
     *         than our largest bucket could bound was added
     */
    public boolean isOverflowed() {
        return buckets.get(buckets.length() - 1) > 0;
    }

    /**
     * log.debug() every record in the histogram
     *
     * @param log
     */
    public void log(Logger log) {
        // only print overflow if there is any
        int nameCount;
        if (buckets.get(buckets.length() - 1) == 0)
            nameCount = buckets.length() - 1;
        else
            nameCount = buckets.length();
        String[] names = new String[nameCount];

        int maxNameLength = 0;
        for (int i = 0; i < nameCount; i++) {
            names[i] = nameOfRange(bucketOffsets, i);
            maxNameLength = Math.max(maxNameLength, names[i].length());
        }

        // emit log records
        String formatstr = "%" + maxNameLength + "s: %d";
        for (int i = 0; i < nameCount; i++) {
            long count = buckets.get(i);
            // sort-of-hack to not print empty ranges at the start that are
            // only used to demarcate the first populated range. for code
            // clarity we don't omit this record from the maxNameLength
            // calculation, and accept the unnecessary whitespace prefixes
            // that will occasionally occur
            if (i == 0 && count == 0)
                continue;
            log.debug(String.format(formatstr, names[i], count));
        }
    }

    private static String nameOfRange(long[] bucketOffsets, int index) {
        StringBuilder sb = new StringBuilder();
        appendRange(sb, bucketOffsets, index);
        return sb.toString();
    }

    private static void appendRange(StringBuilder sb, long[] bucketOffsets,
            int index) {
        sb.append("[");
        if (index == 0)
            if (bucketOffsets[0] > 0)
                // by original definition, this histogram is for values greater
                // than zero only; if values of 0 or less are required, an
                // entry of lb-1 must be inserted at the start
                sb.append("1");
            else
                sb.append("-Inf");
        else
            sb.append(bucketOffsets[index - 1] + 1);
        sb.append("..");
        if (index == bucketOffsets.length)
            sb.append("Inf");
        else
            sb.append(bucketOffsets[index]);
        sb.append("]");
    }

    @Override
    public boolean equals(Object o) {
        if (this == o)
            return true;

        if (!(o instanceof EstimatedHistogram))
            return false;

        EstimatedHistogram that = (EstimatedHistogram) o;
        return Arrays.equals(getBucketOffsets(), that.getBucketOffsets())
                && Arrays.equals(getBuckets(false), that.getBuckets(false));
    }

    @Override
    public int hashCode() {
        return Objects.hashCode(getBucketOffsets(), getBuckets(false));
    }
}
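The bucket series described in the class comment is easy to verify on its own: start at 1, multiply by 1.2, round, and bump by one whenever rounding produces a duplicate. A standalone sketch of that rule:

public class OffsetsDemo {
    // Same growth rule as EstimatedHistogram.newOffsets.
    static long[] newOffsets(int size) {
        long[] result = new long[size];
        long last = 1;
        result[0] = last;
        for (int i = 1; i < size; i++) {
            long next = Math.round(last * 1.2);
            if (next == last) {
                next++; // skip duplicates so offsets stay strictly increasing
            }
            result[i] = next;
            last = next;
        }
        return result;
    }

    public static void main(String[] args) {
        // Prints 1 2 3 4 5 6 7 8 10 12 14 17 20, matching the comment above.
        for (long offset : newOffsets(13)) {
            System.out.print(offset + " ");
        }
        System.out.println();
    }
}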
@ -24,69 +24,57 @@

package com.scylladb.jmx.utils;

import java.io.*;
import java.io.File;
import java.text.DecimalFormat;

public class FileUtils
{
public class FileUtils {
    private static final double KB = 1024d;
    private static final double MB = 1024*1024d;
    private static final double GB = 1024*1024*1024d;
    private static final double TB = 1024*1024*1024*1024d;
    private static final double MB = 1024 * 1024d;
    private static final double GB = 1024 * 1024 * 1024d;
    private static final double TB = 1024 * 1024 * 1024 * 1024d;

    private static final DecimalFormat df = new DecimalFormat("#.##");

    public static String stringifyFileSize(double value)
    {
    public static String stringifyFileSize(double value) {
        double d;
        if ( value >= TB )
        {
        if (value >= TB) {
            d = value / TB;
            String val = df.format(d);
            return val + " TB";
        }
        else if ( value >= GB )
        {
        } else if (value >= GB) {
            d = value / GB;
            String val = df.format(d);
            return val + " GB";
        }
        else if ( value >= MB )
        {
        } else if (value >= MB) {
            d = value / MB;
            String val = df.format(d);
            return val + " MB";
        }
        else if ( value >= KB )
        {
        } else if (value >= KB) {
            d = value / KB;
            String val = df.format(d);
            return val + " KB";
        }
        else
        {
        } else {
            String val = df.format(value);
            return val + " bytes";
        }
    }

    /**
     * Get the size of a directory in bytes
     * @param directory The directory for which we need size.
     *
     * @param directory
     *            The directory for which we need size.
     * @return The size of the directory
     */
    public static long folderSize(File directory)
    {
    public static long folderSize(File directory) {
        long length = 0;
        for (File file : directory.listFiles())
        {
            if (file.isFile())
        for (File file : directory.listFiles()) {
            if (file.isFile()) {
                length += file.length();
            else
            } else {
                length += folderSize(file);
            }
        }
        return length;
    }
}
}
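stringifyFileSize picks the largest unit whose threshold the value meets, so the interesting cases sit at the unit boundaries. A usage sketch (it assumes this FileUtils class is on the classpath):

import com.scylladb.jmx.utils.FileUtils;

public class SizeDemo {
    public static void main(String[] args) {
        System.out.println(FileUtils.stringifyFileSize(1023));            // "1023 bytes"
        System.out.println(FileUtils.stringifyFileSize(1024));            // "1 KB"
        System.out.println(FileUtils.stringifyFileSize(1536));            // "1.5 KB"
        System.out.println(FileUtils.stringifyFileSize(3 * 1024 * 1024)); // "3 MB"
    }
}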
@ -26,43 +26,38 @@ package com.scylladb.jmx.utils;

import com.google.common.base.Objects;

public class Pair<T1, T2>
{
public class Pair<T1, T2> {
    public final T1 left;
    public final T2 right;

    protected Pair(T1 left, T2 right)
    {
    protected Pair(T1 left, T2 right) {
        this.left = left;
        this.right = right;
    }

    @Override
    public final int hashCode()
    {
    public final int hashCode() {
        int hashCode = 31 + (left == null ? 0 : left.hashCode());
        return 31*hashCode + (right == null ? 0 : right.hashCode());
        return 31 * hashCode + (right == null ? 0 : right.hashCode());
    }

    @Override
    public final boolean equals(Object o)
    {
        if(!(o instanceof Pair))
    public final boolean equals(Object o) {
        if (!(o instanceof Pair)) {
            return false;
        }
        @SuppressWarnings("rawtypes")
        Pair that = (Pair)o;
        Pair that = (Pair) o;
        // handles nulls properly
        return Objects.equal(left, that.left) && Objects.equal(right, that.right);
    }

    @Override
    public String toString()
    {
    public String toString() {
        return "(" + left + "," + right + ")";
    }

    public static <X, Y> Pair<X, Y> create(X x, Y y)
    {
    public static <X, Y> Pair<X, Y> create(X x, Y y) {
        return new Pair<X, Y>(x, y);
    }
}
@ -1,121 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2016 ScyllaDB
 *
 * Modified by ScyllaDB
 */

package com.scylladb.jmx.utils;

import java.io.IOException;
import java.lang.management.ManagementFactory;
import java.net.*;
import java.rmi.registry.LocateRegistry;
import java.rmi.server.RMIServerSocketFactory;
import java.util.HashMap;
import java.util.Map;

import javax.management.remote.JMXConnectorServer;
import javax.management.remote.JMXServiceURL;
import javax.management.remote.rmi.RMIConnectorServer;
import javax.net.ServerSocketFactory;

public class RMIServerSocketFactoryImpl implements RMIServerSocketFactory {
    public static JMXConnectorServer jmxServer = null;

    public static void maybeInitJmx() {
        System.setProperty("javax.management.builder.initial", "com.scylladb.jmx.utils.APIBuilder");
        System.setProperty("mx4j.strict.mbean.interface", "no");

        String jmxPort = System
                .getProperty("com.sun.management.jmxremote.port");

        if (jmxPort == null) {
            System.out.println(
                    "JMX is not enabled to receive remote connections.");

            jmxPort = System.getProperty("cassandra.jmx.local.port", "7199");
            String address = System.getProperty("jmx.address", "localhost");
            if (address.equals("localhost")) {
                System.setProperty("java.rmi.server.hostname",
                        InetAddress.getLoopbackAddress().getHostAddress());
            } else {
                try {
                    System.setProperty("java.rmi.server.hostname",
                            InetAddress.getByName(address).getHostAddress());
                } catch (UnknownHostException e) {
                    // TODO Auto-generated catch block
                    e.printStackTrace();
                }
            }
            try {
                RMIServerSocketFactory serverFactory = new RMIServerSocketFactoryImpl();
                LocateRegistry.createRegistry(Integer.valueOf(jmxPort), null,
                        serverFactory);

                StringBuffer url = new StringBuffer();
                url.append("service:jmx:");
                url.append("rmi://").append(address).append("/jndi/");
                url.append("rmi://").append(address).append(":").append(jmxPort)
                        .append("/jmxrmi");
                System.out.println(url);
                Map env = new HashMap();
                env.put(RMIConnectorServer.RMI_SERVER_SOCKET_FACTORY_ATTRIBUTE,
                        serverFactory);

                jmxServer = new RMIConnectorServer(
                        new JMXServiceURL(url.toString()), env,
                        ManagementFactory.getPlatformMBeanServer());

                jmxServer.start();
            } catch (IOException e) {
                System.out.println(
                        "Error starting local jmx server: " + e.toString());
            }

        } else {
            System.out.println(
                    "JMX is enabled to receive remote connections on port: "
                            + jmxPort);
        }
    }

    public ServerSocket createServerSocket(final int pPort) throws IOException {
        return ServerSocketFactory.getDefault().createServerSocket(pPort, 0,
                InetAddress.getLoopbackAddress());
    }

    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (obj == this) {
            return true;
        }

        return obj.getClass().equals(getClass());
    }

    public int hashCode() {
        return RMIServerSocketFactoryImpl.class.hashCode();
    }

}
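The removed code published a connector at service:jmx:rmi://<host>/jndi/rmi://<host>:<port>/jmxrmi; a client reaches it with the standard JMX remote API. A connect sketch (host and port assume the defaults in the code above, localhost:7199):

import javax.management.MBeanServerConnection;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class ConnectDemo {
    public static void main(String[] args) throws Exception {
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi://localhost/jndi/rmi://localhost:7199/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection connection = connector.getMBeanServerConnection();
            System.out.println("MBean count: " + connection.getMBeanCount());
        }
    }
}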
@ -1,65 +0,0 @@
package com.scylladb.jmx.utils;
/*
 * Copyright (C) 2015 ScyllaDB
 */

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

/**
 * RecentEstimatedHistogram: in the (deprecated) 'recent' functionality, each
 * call that reads the values also clears them.
 *
 * RecentEstimatedHistogram supports such 'recent' calls on top of
 * EstimatedHistogram. It holds the latest totals, and a call to getBuckets
 * returns the delta since the previous call.
 */
public class RecentEstimatedHistogram extends EstimatedHistogram {
    public RecentEstimatedHistogram() {
    }

    public RecentEstimatedHistogram(int bucketCount) {
        super(bucketCount);
    }

    public RecentEstimatedHistogram(long[] offsets, long[] bucketData) {
        super(offsets, bucketData);
    }

    /**
     * Set the current buckets to a new value and return the delta from the
     * last getBuckets call
     *
     * @param bucketData
     *            - new bucket values
     * @return a long[] containing the current histogram difference buckets
     */
    public long[] getBuckets(long[] bucketData) {
        if (bucketData.length == 0) {
            return new long[0];
        }
        final int len = buckets.length();
        long[] rv = new long[len];

        for (int i = 0; i < len; i++) {
            rv[i] = bucketData[i];
            rv[i] -= buckets.getAndSet(i, bucketData[i]);
        }
        return rv;
    }
}
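getBuckets(long[]) converts cumulative totals into per-interval counts: it subtracts the stored totals from the new ones and remembers the new totals for the next call. The same step, written out with plain arrays:

import java.util.Arrays;

public class DeltaDemo {
    public static void main(String[] args) {
        long[] previous = { 3, 1, 0 }; // totals remembered from the last call
        long[] current = { 5, 2, 0 };  // new cumulative bucket values
        long[] delta = new long[current.length];
        for (int i = 0; i < current.length; i++) {
            delta[i] = current[i] - previous[i];
            previous[i] = current[i]; // store totals for the next delta
        }
        System.out.println(Arrays.toString(delta)); // [2, 1, 0]
    }
}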
@ -23,18 +23,24 @@
package com.scylladb.jmx.utils;

import java.util.Map;
import javax.management.openmbean.*;

import javax.management.openmbean.CompositeDataSupport;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.OpenDataException;
import javax.management.openmbean.OpenType;
import javax.management.openmbean.SimpleType;
import javax.management.openmbean.TabularDataSupport;
import javax.management.openmbean.TabularType;

import com.google.common.base.Throwables;

public class SnapshotDetailsTabularData {

    private static final String[] ITEM_NAMES = new String[] { "Snapshot name",
            "Keyspace name", "Column family name", "True size", "Size on disk" };
    private static final String[] ITEM_NAMES = new String[] { "Snapshot name", "Keyspace name", "Column family name",
            "True size", "Size on disk" };

    private static final String[] ITEM_DESCS = new String[] { "snapshot_name",
            "keyspace_name", "columnfamily_name", "TrueDiskSpaceUsed",
            "TotalDiskSpaceUsed" };
    private static final String[] ITEM_DESCS = new String[] { "snapshot_name", "keyspace_name", "columnfamily_name",
            "TrueDiskSpaceUsed", "TotalDiskSpaceUsed" };

    private static final String TYPE_NAME = "SnapshotDetails";

@ -48,28 +54,22 @@ public class SnapshotDetailsTabularData {

    static {
        try {
            ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING,
                    SimpleType.STRING, SimpleType.STRING, SimpleType.STRING };
            ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING,
                    SimpleType.STRING };

            COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES,
                    ITEM_DESCS, ITEM_TYPES);
            COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES, ITEM_DESCS, ITEM_TYPES);

            TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE,
                    ITEM_NAMES);
            TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE, ITEM_NAMES);
        } catch (OpenDataException e) {
            throw Throwables.propagate(e);
        }
    }

    public static void from(final String snapshot, final String ks,
            final String cf,
            Map.Entry<String, Pair<Long, Long>> snapshotDetail,
            TabularDataSupport result) {
    public static void from(final String snapshot, final String ks, final String cf,
            Map.Entry<String, Pair<Long, Long>> snapshotDetail, TabularDataSupport result) {
        try {
            final String totalSize = FileUtils.stringifyFileSize(snapshotDetail
                    .getValue().left);
            final String liveSize = FileUtils.stringifyFileSize(snapshotDetail
                    .getValue().right);
            final String totalSize = FileUtils.stringifyFileSize(snapshotDetail.getValue().left);
            final String liveSize = FileUtils.stringifyFileSize(snapshotDetail.getValue().right);
            result.put(new CompositeDataSupport(COMPOSITE_TYPE, ITEM_NAMES,
                    new Object[] { snapshot, ks, cf, liveSize, totalSize }));
        } catch (OpenDataException e) {
@ -77,8 +77,8 @@ public class SnapshotDetailsTabularData {
        }
    }

    public static void from(final String snapshot, final String ks,
            final String cf, long total, long live, TabularDataSupport result) {
    public static void from(final String snapshot, final String ks, final String cf, long total, long live,
            TabularDataSupport result) {
        try {
            final String totalSize = FileUtils.stringifyFileSize(total);
            final String liveSize = FileUtils.stringifyFileSize(live);
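Rows produced by from() land in a TabularDataSupport keyed by ITEM_NAMES and read back as CompositeData. A self-contained sketch with a reduced two-column row type (illustrative only, not the class's full five columns):

import javax.management.openmbean.CompositeData;
import javax.management.openmbean.CompositeDataSupport;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.OpenType;
import javax.management.openmbean.SimpleType;
import javax.management.openmbean.TabularDataSupport;
import javax.management.openmbean.TabularType;

public class TabularDemo {
    public static void main(String[] args) throws Exception {
        String[] names = { "Snapshot name", "Keyspace name" };
        OpenType<?>[] types = { SimpleType.STRING, SimpleType.STRING };
        CompositeType row = new CompositeType("Row", "snapshot row", names, names, types);
        TabularType tabularType = new TabularType("Rows", "snapshot rows", row, names);

        TabularDataSupport result = new TabularDataSupport(tabularType);
        result.put(new CompositeDataSupport(row, names, new Object[] { "snap1", "ks1" }));

        for (Object value : result.values()) {
            CompositeData data = (CompositeData) value;
            System.out.println(data.get("Snapshot name") + " / " + data.get("Keyspace name"));
        }
    }
}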
@ -1,29 +0,0 @@
package com.yammer.metrics.core;
/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */

import com.scylladb.jmx.api.APIClient;
import com.yammer.metrics.core.Counter;

public class APICounter extends Counter {
    String url;
    private APIClient c = new APIClient();

    public APICounter(String _url) {
        super();
        url = _url;
    }

    /**
     * Returns the counter's current value.
     *
     * @return the counter's current value
     */
    public long count() {
        return c.getLongValue(url);
    }
}
@ -1,215 +0,0 @@
package com.yammer.metrics.core;

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */

import java.lang.reflect.Field;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

import javax.json.JsonObject;

import com.scylladb.jmx.api.APIClient;
import com.yammer.metrics.stats.Sample;
import com.yammer.metrics.stats.Snapshot;

public class APIHistogram extends Histogram {
    Field countField;
    Field minField;
    Field maxField;
    Field sumField;
    Field varianceField;
    Field sampleField;

    long last_update = 0;
    static final long UPDATE_INTERVAL = 50;
    long updateInterval;
    String url;
    private APIClient c = new APIClient();

    private void setFields() {
        try {
            minField = Histogram.class.getDeclaredField("min");
            minField.setAccessible(true);
            maxField = Histogram.class.getDeclaredField("max");
            maxField.setAccessible(true);
            sumField = Histogram.class.getDeclaredField("sum");
            sumField.setAccessible(true);
            varianceField = Histogram.class.getDeclaredField("variance");
            varianceField.setAccessible(true);
            sampleField = Histogram.class.getDeclaredField("sample");
            sampleField.setAccessible(true);
            countField = Histogram.class.getDeclaredField("count");
            countField.setAccessible(true);
            try {
                getCount().set(0);
            } catch (IllegalArgumentException | IllegalAccessException e) {
                // There's no reason to get here
                // and there's nothing we can do even if we would
            }
        } catch (NoSuchFieldException | SecurityException e) {
            e.printStackTrace();
        }
    }

    public AtomicLong getMin() throws IllegalArgumentException,
            IllegalAccessException {
        return (AtomicLong) minField.get(this);
    }

    public AtomicLong getMax() throws IllegalArgumentException,
            IllegalAccessException {
        return (AtomicLong) maxField.get(this);
    }

    public AtomicLong getSum() throws IllegalArgumentException,
            IllegalAccessException {
        return (AtomicLong) sumField.get(this);
    }

    public AtomicLong getCount() throws IllegalArgumentException,
            IllegalAccessException {
        return (AtomicLong) countField.get(this);
    }

    @SuppressWarnings("unchecked")
    public AtomicReference<double[]> getVariance()
            throws IllegalArgumentException, IllegalAccessException {
        return (AtomicReference<double[]>) varianceField.get(this);
    }

    public Sample getSample() throws IllegalArgumentException,
            IllegalAccessException {
        return (Sample) sampleField.get(this);
    }

    public APIHistogram(String url, Sample sample) {
        super(sample);
        setFields();
        this.url = url;
    }

    public APIHistogram(String url, SampleType type, long updateInterval) {
        super(type);
        setFields();
        this.url = url;
        this.updateInterval = updateInterval;
    }

    public APIHistogram(String url, SampleType type) {
        this(url, type, UPDATE_INTERVAL);
    }

    public void updateValue(HistogramValues vals) {
        try {
            if (vals.sample != null) {
                for (long v : vals.sample) {
                    getSample().update(v);
                }
            }
            getCount().set(vals.count);
            getMax().set(vals.max);
            getMin().set(vals.min);
            getSum().set(vals.sum);
            double[] newValue = new double[2];
            newValue[0] = vals.mean;
            newValue[1] = vals.variance;
            getVariance().getAndSet(newValue);
        } catch (IllegalArgumentException | IllegalAccessException e) {
            e.printStackTrace();
        }
    }

    public void update() {
        if (url == null) {
            return;
        }
        long now = System.currentTimeMillis();
        if (now - last_update < UPDATE_INTERVAL) {
            return;
        }
        last_update = now;
        clear();
        JsonObject obj = c.getJsonObj(url, null);
        if (obj.containsKey("hist")) {
            updateValue(APIClient.json2histogram(obj.getJsonObject("hist")));
        } else {
            updateValue(APIClient.json2histogram(obj));
        }
    }

    /**
     * Returns the number of values recorded.
     *
     * @return the number of values recorded
     */
    public long count() {
        update();
        return super.count();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.yammer.metrics.core.Summarizable#max()
     */
    @Override
    public double max() {
        update();
        return super.max();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.yammer.metrics.core.Summarizable#min()
     */
    @Override
    public double min() {
        update();
        return super.min();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.yammer.metrics.core.Summarizable#mean()
     */
    @Override
    public double mean() {
        update();
        return super.mean();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.yammer.metrics.core.Summarizable#stdDev()
     */
    @Override
    public double stdDev() {
        update();
        return super.stdDev();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.yammer.metrics.core.Summarizable#sum()
     */
    @Override
    public double sum() {
        update();
        return super.sum();
    }

    @Override
    public Snapshot getSnapshot() {
        update();
        return super.getSnapshot();
    }
}
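APIHistogram works by prying open the private counters of the yammer Histogram with Field.setAccessible and writing REST-fetched values into them. The core reflection move, reduced to a standalone sketch:

import java.lang.reflect.Field;
import java.util.concurrent.atomic.AtomicLong;

public class ReflectDemo {
    static class Holder {
        private final AtomicLong count = new AtomicLong(); // private, like Histogram's fields
    }

    public static void main(String[] args) throws Exception {
        Holder holder = new Holder();
        // Same pattern as setFields() above: look the field up once, open it,
        // then read and write through it.
        Field field = Holder.class.getDeclaredField("count");
        field.setAccessible(true);
        ((AtomicLong) field.get(holder)).set(42);
        System.out.println(holder.count.get()); // 42
    }
}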
@ -1,113 +0,0 @@
package com.yammer.metrics.core;
/*
 * Copyright (C) 2015 ScyllaDB
 */

import java.util.concurrent.ScheduledExecutorService;

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Modified by ScyllaDB
 */
import java.util.concurrent.TimeUnit;

import javax.json.JsonArray;
import javax.json.JsonObject;

import com.scylladb.jmx.api.APIClient;

public class APIMeter extends Meter {
    public final static long CACHE_DURATION = 1000;

    String url;
    String eventType;
    TimeUnit rateUnit;
    APIClient c = new APIClient();
    long count;
    double oneMinuteRate;
    double fiveMinuteRate;
    double fifteenMinuteRate;
    double meanRate;

    public APIMeter(String url, ScheduledExecutorService tickThread,
            String eventType, TimeUnit rateUnit) {
        super(tickThread, eventType, rateUnit, Clock.defaultClock());
        super.stop();
        this.url = url;
        this.eventType = eventType;
        this.rateUnit = rateUnit;
    }

    public void fromJson(JsonObject obj) {
        JsonArray rates = obj.getJsonArray("rates");
        int i = 0;
        oneMinuteRate = rates.getJsonNumber(i++).doubleValue();
        fiveMinuteRate = rates.getJsonNumber(i++).doubleValue();
        fifteenMinuteRate = rates.getJsonNumber(i++).doubleValue();
        meanRate = obj.getJsonNumber("mean_rate").doubleValue();
        count = obj.getJsonNumber("count").longValue();
    }

    public void update_fields() {
        if (url != null) {
            fromJson(c.getJsonObj(url, null, CACHE_DURATION));
        }
    }

    @Override
    public TimeUnit rateUnit() {
        return rateUnit;
    }

    @Override
    public String eventType() {
        return eventType;
    }

    @Override
    public long count() {
        update_fields();
        return count;
    }

    @Override
    public double fifteenMinuteRate() {
        update_fields();
        return fifteenMinuteRate;
    }

    @Override
    public double fiveMinuteRate() {
        update_fields();
        return fiveMinuteRate;
    }

    @Override
    public double meanRate() {
        update_fields();
        return meanRate;
    }

    @Override
    public double oneMinuteRate() {
        update_fields();
        return oneMinuteRate;
    }
}
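Each accessor above refreshes all five statistics with a single REST fetch, rate-limited by the client's JSON cache (CACHE_DURATION). The refresh-before-read shape, sketched with a stubbed fetch instead of the HTTP call:

public class CachedValue {
    private static final long CACHE_DURATION_MS = 1000;
    private long lastFetch = 0;
    private double value;

    // Stand-in for c.getJsonObj(url, null, CACHE_DURATION).
    private double fetch() {
        return Math.random();
    }

    // At most one fetch per CACHE_DURATION_MS; reads in between reuse the value.
    public double get() {
        long now = System.currentTimeMillis();
        if (now - lastFetch >= CACHE_DURATION_MS) {
            value = fetch();
            lastFetch = now;
        }
        return value;
    }

    public static void main(String[] args) {
        CachedValue v = new CachedValue();
        System.out.println(v.get()); // fetches
        System.out.println(v.get()); // cached within one second
    }
}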
@ -1,384 +0,0 @@
package com.yammer.metrics.core;

import java.lang.reflect.Field;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import com.yammer.metrics.core.APICounter;
import com.yammer.metrics.core.APIMeter;
import com.yammer.metrics.core.Clock;
import com.yammer.metrics.core.Counter;
import com.yammer.metrics.core.Meter;
import com.yammer.metrics.core.Metric;
import com.yammer.metrics.core.MetricName;
import com.yammer.metrics.core.MetricsRegistry;
import com.yammer.metrics.core.ThreadPools;
import com.yammer.metrics.core.Histogram.SampleType;

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */

public class APIMetricsRegistry extends MetricsRegistry {
    Field fieldMetrics;
    Field fieldClock;
    Field fieldThreadPool;

    public APIMetricsRegistry() {
        try {
            fieldMetrics = MetricsRegistry.class.getDeclaredField("metrics");
            fieldMetrics.setAccessible(true);
            fieldClock = MetricsRegistry.class.getDeclaredField("clock");
            fieldClock.setAccessible(true);
            fieldThreadPool = MetricsRegistry.class.getDeclaredField("threadPools");
            fieldThreadPool.setAccessible(true);
        } catch (NoSuchFieldException | SecurityException e) {
            // TODO Auto-generated catch block
            e.printStackTrace();
        }
    }

    public ThreadPools getThreadPools() {
        try {
            return (ThreadPools) fieldThreadPool.get(this);
        } catch (IllegalArgumentException | IllegalAccessException e) {
            e.printStackTrace();
        }
        return null;
    }

    public Clock getClock() {
        try {
            return (Clock) fieldClock.get(this);
        } catch (IllegalArgumentException | IllegalAccessException e) {
            e.printStackTrace();
        }
        return null;
    }

    @SuppressWarnings("unchecked")
    public ConcurrentMap<MetricName, Metric> getMetrics() {
        try {
            return (ConcurrentMap<MetricName, Metric>) fieldMetrics.get(this);
        } catch (IllegalArgumentException | IllegalAccessException e) {
            e.printStackTrace();
        }
        return null;
    }

    /**
     * Creates a new {@link Counter} and registers it under the given class and
     * name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @return a new {@link Counter}
     */
    public Counter newCounter(String url, Class<?> klass, String name) {
        return newCounter(url, klass, name, null);
    }

    /**
     * Creates a new {@link Counter} and registers it under the given class and
     * name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @return a new {@link Counter}
     */
    public Counter newCounter(String url, Class<?> klass, String name, String scope) {
        return newCounter(url, createName(klass, name, scope));
    }

    /**
     * Creates a new {@link Counter} and registers it under the given metric
     * name.
     *
     * @param metricName
     *            the name of the metric
     * @return a new {@link Counter}
     */
    public Counter newCounter(String url, MetricName metricName) {
        return getOrAdd(metricName, new APICounter(url));
    }

    /**
     * Creates a new {@link Meter} and registers it under the given class and
     * name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param eventType
     *            the plural name of the type of events the meter is measuring
     *            (e.g., {@code "requests"})
     * @param unit
     *            the rate unit of the new meter
     * @return a new {@link Meter}
     */
    public APIMeter newMeter(String url, Class<?> klass, String name, String eventType, TimeUnit unit) {
        return newMeter(url, klass, name, null, eventType, unit);
    }

    /**
     * Creates a new {@link Meter} and registers it under the given class, name,
     * and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @param eventType
     *            the plural name of the type of events the meter is measuring
     *            (e.g., {@code "requests"})
     * @param unit
     *            the rate unit of the new meter
     * @return a new {@link Meter}
     */
    public APIMeter newMeter(String url, Class<?> klass, String name, String scope, String eventType, TimeUnit unit) {
        return newMeter(url, createName(klass, name, scope), eventType, unit);
    }

    private ScheduledExecutorService newMeterTickThreadPool() {
        return getThreadPools().newScheduledThreadPool(2, "meter-tick");
    }

    /**
     * Creates a new {@link Meter} and registers it under the given metric name.
     *
     * @param metricName
     *            the name of the metric
     * @param eventType
     *            the plural name of the type of events the meter is measuring
     *            (e.g., {@code "requests"})
     * @param unit
     *            the rate unit of the new meter
     * @return a new {@link Meter}
     */
    public APIMeter newMeter(String url, MetricName metricName, String eventType, TimeUnit unit) {
        final Metric existingMetric = getMetrics().get(metricName);
        if (existingMetric != null) {
            return (APIMeter) existingMetric;
        }
        return getOrAdd(metricName, new APIMeter(url, newMeterTickThreadPool(), eventType, unit));
    }

    /**
     * Creates a new {@link APISettableMeter} and registers it under the given metric name.
     *
     * @param metricName
     *            the name of the metric
     * @param eventType
     *            the plural name of the type of events the meter is measuring
     *            (e.g., {@code "requests"})
     * @param unit
     *            the rate unit of the new meter
     * @return a new {@link Meter}
     */
    public Meter newSettableMeter(MetricName metricName, String eventType, TimeUnit unit) {
        final Metric existingMetric = getMetrics().get(metricName);
        if (existingMetric != null) {
            return (Meter) existingMetric;
        }
        return getOrAdd(metricName, new APISettableMeter(newMeterTickThreadPool(), eventType, unit, getClock()));
    }

    /**
     * Creates a new {@link Histogram} and registers it under the given class
     * and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param biased
     *            whether or not the histogram should be biased
     * @return a new {@link Histogram}
     */
    public Histogram newHistogram(String url, Class<?> klass, String name, boolean biased) {
        return newHistogram(url, klass, name, null, biased);
    }

    /**
     * Creates a new {@link Histogram} and registers it under the given class,
     * name, and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @param biased
     *            whether or not the histogram should be biased
     * @return a new {@link Histogram}
     */
    public Histogram newHistogram(String url, Class<?> klass, String name, String scope, boolean biased) {
        return newHistogram(url, createName(klass, name, scope), biased);
    }

    /**
     * Creates a new non-biased {@link Histogram} and registers it under the
     * given class and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @return a new {@link Histogram}
     */
    public Histogram newHistogram(String url, Class<?> klass, String name) {
        return newHistogram(url, klass, name, false);
    }

    /**
     * Creates a new non-biased {@link Histogram} and registers it under the
     * given class, name, and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @return a new {@link Histogram}
     */
    public Histogram newHistogram(String url, Class<?> klass, String name, String scope) {
        return newHistogram(url, klass, name, scope, false);
    }

    /**
     * Creates a new {@link Histogram} and registers it under the given metric
     * name.
     *
     * @param metricName
     *            the name of the metric
     * @param biased
     *            whether or not the histogram should be biased
     * @return a new {@link Histogram}
     */
    public Histogram newHistogram(String url, MetricName metricName, boolean biased) {
        return getOrAdd(metricName, new APIHistogram(url, biased ? SampleType.BIASED : SampleType.UNIFORM));
    }

    /**
     * Creates a new {@link Timer} and registers it under the given class and
     * name, measuring elapsed time in milliseconds and invocations per second.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @return a new {@link Timer}
     */
    public Timer newTimer(String url, Class<?> klass, String name) {
        return newTimer(url, klass, name, null, TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
    }

    /**
     * Creates a new {@link Timer} and registers it under the given class and
     * name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param durationUnit
     *            the duration scale unit of the new timer
     * @param rateUnit
     *            the rate scale unit of the new timer
     * @return a new {@link Timer}
     */
    public Timer newTimer(String url, Class<?> klass, String name, TimeUnit durationUnit, TimeUnit rateUnit) {
        return newTimer(url, klass, name, null, durationUnit, rateUnit);
    }

    /**
     * Creates a new {@link Timer} and registers it under the given class, name,
     * and scope, measuring elapsed time in milliseconds and invocations per
     * second.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @return a new {@link Timer}
     */
    public Timer newTimer(String url, Class<?> klass, String name, String scope) {
        return newTimer(url, klass, name, scope, TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
    }

    /**
     * Creates a new {@link Timer} and registers it under the given class, name,
     * and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @param durationUnit
     *            the duration scale unit of the new timer
     * @param rateUnit
     *            the rate scale unit of the new timer
     * @return a new {@link Timer}
     */
    public Timer newTimer(String url, Class<?> klass, String name, String scope, TimeUnit durationUnit, TimeUnit rateUnit) {
        return newTimer(url, createName(klass, name, scope), durationUnit, rateUnit);
    }

    /**
     * Creates a new {@link Timer} and registers it under the given metric name.
     *
     * @param metricName
     *            the name of the metric
     * @param durationUnit
     *            the duration scale unit of the new timer
     * @param rateUnit
     *            the rate scale unit of the new timer
     * @return a new {@link Timer}
     */
    public Timer newTimer(String url, MetricName metricName, TimeUnit durationUnit, TimeUnit rateUnit) {
        final Metric existingMetric = getMetrics().get(metricName);
        if (existingMetric != null) {
            return (Timer) existingMetric;
        }
        return getOrAdd(metricName, new APITimer(url, newMeterTickThreadPool(), durationUnit, rateUnit));
    }
}
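The registry above pries open the private fields of its parent MetricsRegistry via reflection. A minimal, self-contained sketch of that pattern; Base and its "state" field are hypothetical stand-ins, not part of the metrics library:

import java.lang.reflect.Field;

class Base {
    private String state = "hidden";
}

class Derived extends Base {
    String peek() throws ReflectiveOperationException {
        Field f = Base.class.getDeclaredField("state");
        f.setAccessible(true); // bypass private access; may fail under a SecurityManager
        return (String) f.get(this);
    }

    public static void main(String[] args) throws ReflectiveOperationException {
        System.out.println(new Derived().peek()); // prints "hidden"
    }
}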
@ -1,49 +0,0 @@
package com.yammer.metrics.core;

import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/*
 * Copyright 2015 ScyllaDB
 *
 */
/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

public class APISettableMeter extends Meter {

    public APISettableMeter(ScheduledExecutorService tickThread,
            String eventType, TimeUnit rateUnit, Clock clock) {
        super(tickThread, eventType, rateUnit, clock);
    }

    // Meter doesn't have a set-value method. To mimic one, we clear the old
    // value and mark the new one. This is safe because this method is the
    // only path used to update the value.
    public long set(long new_value) {
        long res = super.count();
        mark(-res);
        mark(new_value);
        return res;
    }

    @Override
    public void tick() {
        super.tick();
    }
}
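A hedged usage sketch of the settable meter above: the registry and cast are from the surrounding code, while the MetricName group/type/name strings are invented for illustration:

import java.util.concurrent.TimeUnit;

public class SettableMeterExample {
    public static void main(String[] args) {
        APIMetricsRegistry registry = new APIMetricsRegistry();
        MetricName name = new MetricName("com.example", "demo", "events"); // invented name
        APISettableMeter m = (APISettableMeter) registry.newSettableMeter(name, "events", TimeUnit.SECONDS);
        long previous = m.set(42); // overwrites the count wholesale; returns the old value
        System.out.println(previous + " -> " + m.count()); // 0 -> 42
    }
}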
@ -1,134 +0,0 @@
/*
 * Copyright 2015 Cloudius Systems
 *
 */
package com.yammer.metrics.core;

import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import javax.json.JsonObject;

import com.scylladb.jmx.api.APIClient;
import com.yammer.metrics.core.Histogram.SampleType;
import com.yammer.metrics.stats.Snapshot;

/**
 * A timer metric which aggregates timing durations and provides duration
 * statistics, plus throughput statistics via {@link Meter}.
 */
public class APITimer extends Timer {
    public final static long CACHE_DURATION = 1000;

    final TimeUnit durationUnit, rateUnit;
    final APIMeter meter;
    final APIHistogram histogram;
    APIClient c = new APIClient();

    private double convertFromNS(double ns) {
        return ns / TimeUnit.NANOSECONDS.convert(1, durationUnit);
    }

    String url;

    public APITimer(String url, ScheduledExecutorService tickThread,
            TimeUnit durationUnit, TimeUnit rateUnit) {
        super(tickThread, durationUnit, rateUnit);
        super.stop();
        this.url = url;
        this.durationUnit = durationUnit;
        this.rateUnit = rateUnit;
        meter = new APIMeter(null, tickThread, "calls", rateUnit);
        histogram = new APIHistogram(null, SampleType.BIASED);
    }

    public void fromJson(JsonObject obj) {
        meter.fromJson(obj.getJsonObject("meter"));
        histogram.updateValue(APIClient.json2histogram(obj.getJsonObject("hist")));
    }

    public void update_fields() {
        if (url != null) {
            fromJson(c.getJsonObj(url, null, CACHE_DURATION));
        }
    }

    @Override
    public double max() {
        update_fields();
        return convertFromNS(histogram.max());
    }

    @Override
    public double min() {
        update_fields();
        return convertFromNS(histogram.min());
    }

    @Override
    public double mean() {
        update_fields();
        return convertFromNS(histogram.mean());
    }

    @Override
    public double stdDev() {
        update_fields();
        return convertFromNS(histogram.stdDev());
    }

    @Override
    public double sum() {
        update_fields();
        return convertFromNS(histogram.sum());
    }

    @Override
    public Snapshot getSnapshot() {
        update_fields();
        return histogram.getSnapshot();
    }

    @Override
    public TimeUnit rateUnit() {
        update_fields();
        return meter.rateUnit();
    }

    @Override
    public String eventType() {
        update_fields();
        return meter.eventType();
    }

    @Override
    public long count() {
        update_fields();
        return meter.count();
    }

    @Override
    public double fifteenMinuteRate() {
        update_fields();
        return meter.fifteenMinuteRate();
    }

    @Override
    public double fiveMinuteRate() {
        update_fields();
        return meter.fiveMinuteRate();
    }

    @Override
    public double meanRate() {
        update_fields();
        return meter.meanRate();
    }

    @Override
    public double oneMinuteRate() {
        update_fields();
        return meter.oneMinuteRate();
    }
}
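A quick check of the convertFromNS() arithmetic above: the REST API reports durations in nanoseconds, and dividing by NANOSECONDS.convert(1, unit) rescales them into the timer's configured duration unit:

import java.util.concurrent.TimeUnit;

public class ConvertCheck {
    public static void main(String[] args) {
        double ns = 2_500_000.0; // 2.5 ms reported by the API, in nanoseconds
        double inMillis = ns / TimeUnit.NANOSECONDS.convert(1, TimeUnit.MILLISECONDS);
        System.out.println(inMillis); // 2.5
    }
}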
@ -1,11 +0,0 @@
package com.yammer.metrics.core;

public class HistogramValues {
    public long count;
    public long min;
    public long max;
    public long sum;
    public double variance;
    public double mean;
    public long sample[];
}
@ -23,62 +23,61 @@
 */
package org.apache.cassandra.db;

import java.lang.management.ManagementFactory;
import java.net.ConnectException;
import java.util.*;
import java.util.concurrent.*;
import static java.lang.String.valueOf;
import static java.util.Arrays.asList;
import static java.util.stream.Collectors.toMap;
import static javax.json.Json.createObjectBuilder;

import java.io.StringReader;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.logging.Logger;

import javax.json.Json;
import javax.json.JsonArray;
import javax.json.JsonObject;
import javax.management.*;
import javax.json.JsonObjectBuilder;
import javax.json.JsonReader;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeData;
import javax.management.openmbean.OpenDataException;
import javax.ws.rs.ProcessingException;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;

import org.apache.cassandra.metrics.ColumnFamilyMetrics;
import org.apache.cassandra.metrics.TableMetrics;

import com.google.common.base.Throwables;
import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.MetricsMBean;

public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
    private static final java.util.logging.Logger logger = java.util.logging.Logger
            .getLogger(ColumnFamilyStore.class.getName());
    private APIClient c = new APIClient();
    private String type;
    private String keyspace;
    private String name;
    private String mbeanName;
    private static APIClient s_c = new APIClient();
    static final int INTERVAL = 1000; // update every 1 second
    public final ColumnFamilyMetrics metric;
public class ColumnFamilyStore extends MetricsMBean implements ColumnFamilyStoreMBean {
    private static final Logger logger = Logger.getLogger(ColumnFamilyStore.class.getName());
    @SuppressWarnings("unused")
    private final String type;
    private final String keyspace;
    private final String name;

    private static Map<String, ColumnFamilyStore> cf = new HashMap<String, ColumnFamilyStore>();
    private static Timer timer = new Timer("Column Family");
    public static final Set<String> TYPE_NAMES = new HashSet<>(asList("ColumnFamilies", "IndexTables", "Tables"));

    public void log(String str) {
        logger.finest(str);
    }

    public static void register_mbeans() {
        TimerTask taskToExecute = new CheckRegistration();
        timer.schedule(taskToExecute, 100, INTERVAL);
    }

    public ColumnFamilyStore(String type, String keyspace, String name) {
    public ColumnFamilyStore(APIClient client, String type, String keyspace, String name) {
        super(client,
                new TableMetrics(keyspace, name, false /* hardcoded for now */));
        this.type = type;
        this.keyspace = keyspace;
        this.name = name;
        mbeanName = getName(type, keyspace, name);
        try {
            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
            ObjectName nameObj = new ObjectName(mbeanName);
            mbs.registerMBean(this, nameObj);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
        metric = new ColumnFamilyMetrics(this);
    }

    public ColumnFamilyStore(APIClient client, ObjectName name) {
        this(client, name.getKeyProperty("type"), name.getKeyProperty("keyspace"), name.getKeyProperty("columnfamily"));
    }

    /** true if this CFS contains secondary index data */
@ -98,427 +97,89 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
        return keyspace + ":" + name;
    }

    private static String getName(String type, String keyspace, String name) {
        return "org.apache.cassandra.db:type=" + type + ",keyspace=" + keyspace
                + ",columnfamily=" + name;
    private static ObjectName getName(String type, String keyspace, String name) throws MalformedObjectNameException {
        return new ObjectName(
                "org.apache.cassandra.db:type=" + type + ",keyspace=" + keyspace + ",columnfamily=" + name);
    }

    public static boolean checkRegistration() {
        try {
            JsonArray mbeans = s_c.getJsonArray("/column_family/");
            Set<String> all_cf = new HashSet<String>();
            for (int i = 0; i < mbeans.size(); i++) {
                JsonObject mbean = mbeans.getJsonObject(i);
                String name = getName(mbean.getString("type"),
                        mbean.getString("ks"), mbean.getString("cf"));
                if (!cf.containsKey(name)) {
                    ColumnFamilyStore cfs = new ColumnFamilyStore(
                            mbean.getString("type"), mbean.getString("ks"),
                            mbean.getString("cf"));
                    cf.put(name, cfs);
                }
                all_cf.add(name);
            }
            // remove column families that have been deleted
            for (String n : cf.keySet()) {
                if (!all_cf.contains(n)) {
                    cf.remove(n);
                }
            }
        } catch (IllegalStateException e) {
            return false;
        }
        return true;
    }

    private static final class CheckRegistration extends TimerTask {
        private int missed_response = 0;
        // After MAX_RETRY retries we assume the API is not available
        // and the JMX server will shut down
        private static final int MAX_RETRY = 30;

        @Override
        public void run() {
            try {
                if (checkRegistration()) {
                    missed_response = 0;
                } else {
                    if (missed_response++ > MAX_RETRY) {
                        System.err.println("API is not available, JMX is shutting down");
                        System.exit(-1);
                    }
                }
            } catch (Exception e) {
                // ignore exceptions; we will retry on the next interval
            }
    public static boolean checkRegistration(APIClient client, MBeanServer server) throws MalformedObjectNameException {
        JsonArray mbeans = client.getJsonArray("/column_family/");
        Set<ObjectName> all = new HashSet<ObjectName>();
        for (int i = 0; i < mbeans.size(); i++) {
            JsonObject mbean = mbeans.getJsonObject(i);
            all.add(getName(mbean.getString("type"), mbean.getString("ks"), mbean.getString("cf")));
        }
        return checkRegistration(server, all, n -> TYPE_NAMES.contains(n.getKeyProperty("type")), n -> new ColumnFamilyStore(client, n));
    }
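A minimal sketch of the poll-and-sync pattern the new checkRegistration() above implements: compare the set of ObjectNames the REST API reports with what the MBeanServer currently holds, register the missing ones, and unregister the stale ones. The helper below is a hypothetical stand-in for the inherited MetricsMBean machinery, not the actual implementation:

import java.util.Set;
import java.util.function.Function;
import java.util.function.Predicate;
import javax.management.*;

final class RegistrationSync {
    static void sync(MBeanServer server, Set<ObjectName> wanted,
            Predicate<ObjectName> ours, Function<ObjectName, Object> factory) throws JMException {
        // Unregister beans we own that the API no longer reports.
        for (ObjectName n : server.queryNames(null, null)) {
            if (ours.test(n) && !wanted.contains(n)) {
                server.unregisterMBean(n);
            }
        }
        // Register beans the API reports that we do not have yet.
        for (ObjectName n : wanted) {
            if (!server.isRegistered(n)) {
                server.registerMBean(factory.apply(n), n);
            }
        }
    }
}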

    /**
     * @return the name of the column family
     */
    @Override
    public String getColumnFamilyName() {
        log(" getColumnFamilyName()");
        return name;
    }

    /**
     * Returns the total amount of data stored in the memtable, including column
     * related overhead.
     *
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableOnHeapSize
     * @return The size in bytes.
     * @deprecated
     */
    @Deprecated
    public long getMemtableDataSize() {
        log(" getMemtableDataSize()");
        return c.getLongValue("/column_family/metrics/memtable_on_heap_size/" + getCFName());
    }

    /**
     * Returns the total number of columns present in the memtable.
     *
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableColumnsCount
     * @return The number of columns.
     */
    @Deprecated
    public long getMemtableColumnsCount() {
        log(" getMemtableColumnsCount()");
        return metric.memtableColumnsCount.value();
    }

    /**
     * Returns the number of times that a flush has resulted in the memtable
     * being switched out.
     *
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableSwitchCount
     * @return the number of memtable switches
     */
    @Deprecated
    public int getMemtableSwitchCount() {
        log(" getMemtableSwitchCount()");
        return c.getIntValue("/column_family/metrics/memtable_switch_count/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentSSTablesPerRead
     * @return a histogram of the number of sstable data files accessed per
     *         read: reading this property resets it
     */
    @Deprecated
    public long[] getRecentSSTablesPerReadHistogram() {
        log(" getRecentSSTablesPerReadHistogram()");
        return metric.getRecentSSTablesPerRead();
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#sstablesPerReadHistogram
     * @return a histogram of the number of sstable data files accessed per read
     */
    @Deprecated
    public long[] getSSTablesPerReadHistogram() {
        log(" getSSTablesPerReadHistogram()");
        return metric.sstablesPerRead.getBuckets(false);
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
     * @return the number of read operations on this column family
     */
    @Deprecated
    public long getReadCount() {
        log(" getReadCount()");
        return c.getIntValue("/column_family/metrics/read/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
     * @return total read latency (divide by getReadCount() for average)
     */
    @Deprecated
    public long getTotalReadLatencyMicros() {
        log(" getTotalReadLatencyMicros()");
        return c.getLongValue("/column_family/metrics/read_latency/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
     * @return an array representing the latency histogram
     */
    @Deprecated
    public long[] getLifetimeReadLatencyHistogramMicros() {
        log(" getLifetimeReadLatencyHistogramMicros()");
        return metric.readLatency.totalLatencyHistogram.getBuckets(false);
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
     * @return an array representing the latency histogram
     */
    @Deprecated
    public long[] getRecentReadLatencyHistogramMicros() {
        log(" getRecentReadLatencyHistogramMicros()");
        return metric.readLatency.getRecentLatencyHistogram();
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
     * @return average latency per read operation since the last call
     */
    @Deprecated
    public double getRecentReadLatencyMicros() {
        log(" getRecentReadLatencyMicros()");
        return metric.readLatency.getRecentLatency();
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
     * @return the number of write operations on this column family
     */
    @Deprecated
    public long getWriteCount() {
        log(" getWriteCount()");
        return c.getLongValue("/column_family/metrics/write/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
     * @return total write latency (divide by getReadCount() for average)
     */
    @Deprecated
    public long getTotalWriteLatencyMicros() {
        log(" getTotalWriteLatencyMicros()");
        return c.getLongValue("/column_family/metrics/write_latency/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
     * @return an array representing the latency histogram
     */
    @Deprecated
    public long[] getLifetimeWriteLatencyHistogramMicros() {
        log(" getLifetimeWriteLatencyHistogramMicros()");
        return metric.writeLatency.totalLatencyHistogram.getBuckets(false);
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
     * @return an array representing the latency histogram
     */
    @Deprecated
    public long[] getRecentWriteLatencyHistogramMicros() {
        log(" getRecentWriteLatencyHistogramMicros()");
        return metric.writeLatency.getRecentLatencyHistogram();
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
     * @return average latency per write operation since the last call
     */
    @Deprecated
    public double getRecentWriteLatencyMicros() {
        log(" getRecentWriteLatencyMicros()");
        return metric.writeLatency.getRecentLatency();
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#pendingFlushes
     * @return the estimated number of tasks pending for this column family
     */
    @Deprecated
    public int getPendingTasks() {
        log(" getPendingTasks()");
        return c.getIntValue("/column_family/metrics/pending_flushes/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveSSTableCount
     * @return the number of SSTables on disk for this CF
     */
    @Deprecated
    public int getLiveSSTableCount() {
        log(" getLiveSSTableCount()");
        return c.getIntValue("/column_family/metrics/live_ss_table_count/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveDiskSpaceUsed
     * @return disk space used by SSTables belonging to this CF
     */
    @Deprecated
    public long getLiveDiskSpaceUsed() {
        log(" getLiveDiskSpaceUsed()");
        return c.getLongValue("/column_family/metrics/live_disk_space_used/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#totalDiskSpaceUsed
     * @return total disk space used by SSTables belonging to this CF, including
     *         obsolete ones waiting to be GC'd
     */
    @Deprecated
    public long getTotalDiskSpaceUsed() {
        log(" getTotalDiskSpaceUsed()");
        return c.getLongValue("/column_family/metrics/total_disk_space_used/" + getCFName());
    }

    /**
     * force a major compaction of this column family
     */
    public void forceMajorCompaction()
            throws ExecutionException, InterruptedException {
    public void forceMajorCompaction() throws ExecutionException, InterruptedException {
        log(" forceMajorCompaction() throws ExecutionException, InterruptedException");
        c.post("column_family/major_compaction/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#minRowSize
     * @return the size of the smallest compacted row
     */
    @Deprecated
    public long getMinRowSize() {
        log(" getMinRowSize()");
        return c.getLongValue("/column_family/metrics/min_row_size/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#maxRowSize
     * @return the size of the largest compacted row
     */
    @Deprecated
    public long getMaxRowSize() {
        log(" getMaxRowSize()");
        return c.getLongValue("/column_family/metrics/max_row_size/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#meanRowSize
     * @return the average row size across all the sstables
     */
    @Deprecated
    public long getMeanRowSize() {
        log(" getMeanRowSize()");
        return c.getLongValue("/column_family/metrics/mean_row_size/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalsePositives
     */
    @Deprecated
    public long getBloomFilterFalsePositives() {
        log(" getBloomFilterFalsePositives()");
        return c.getLongValue("/column_family/metrics/bloom_filter_false_positives/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalsePositives
     */
    @Deprecated
    public long getRecentBloomFilterFalsePositives() {
        log(" getRecentBloomFilterFalsePositives()");
        return c.getLongValue("/column_family/metrics/recent_bloom_filter_false_positives/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalseRatio
     */
    @Deprecated
    public double getBloomFilterFalseRatio() {
        log(" getBloomFilterFalseRatio()");
        return c.getDoubleValue("/column_family/metrics/bloom_filter_false_ratio/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalseRatio
     */
    @Deprecated
    public double getRecentBloomFilterFalseRatio() {
        log(" getRecentBloomFilterFalseRatio()");
        return c.getDoubleValue("/column_family/metrics/recent_bloom_filter_false_ratio/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterDiskSpaceUsed
     */
    @Deprecated
    public long getBloomFilterDiskSpaceUsed() {
        log(" getBloomFilterDiskSpaceUsed()");
        return c.getLongValue("/column_family/metrics/bloom_filter_disk_space_used/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterOffHeapMemoryUsed
     */
    @Deprecated
    public long getBloomFilterOffHeapMemoryUsed() {
        log(" getBloomFilterOffHeapMemoryUsed()");
        return c.getLongValue("/column_family/metrics/bloom_filter_off_heap_memory_used/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#indexSummaryOffHeapMemoryUsed
     */
    @Deprecated
    public long getIndexSummaryOffHeapMemoryUsed() {
        log(" getIndexSummaryOffHeapMemoryUsed()");
        return c.getLongValue("/column_family/metrics/index_summary_off_heap_memory_used/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionMetadataOffHeapMemoryUsed
     */
    @Deprecated
    public long getCompressionMetadataOffHeapMemoryUsed() {
        log(" getCompressionMetadataOffHeapMemoryUsed()");
        return c.getLongValue("/column_family/metrics/compression_metadata_off_heap_memory_used/" + getCFName());
        client.post("column_family/major_compaction/" + getCFName());
    }

    /**
     * Gets the minimum number of sstables in queue before compaction kicks off
     */
    @Override
    public int getMinimumCompactionThreshold() {
        log(" getMinimumCompactionThreshold()");
        return c.getIntValue("column_family/minimum_compaction/" + getCFName());
        return client.getIntValue("column_family/minimum_compaction/" + getCFName());
    }

    /**
     * Sets the minimum number of sstables in queue before compaction kicks off
     */
    @Override
    public void setMinimumCompactionThreshold(int threshold) {
        log(" setMinimumCompactionThreshold(int threshold)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("value", Integer.toString(threshold));
        c.post("column_family/minimum_compaction/" + getCFName(), queryParams);
        client.post("column_family/minimum_compaction/" + getCFName(), queryParams);
    }

    /**
     * Gets the maximum number of sstables in queue before compaction kicks off
     */
    @Override
    public int getMaximumCompactionThreshold() {
        log(" getMaximumCompactionThreshold()");
        return c.getIntValue("column_family/maximum_compaction/" + getCFName());
        return client.getIntValue("column_family/maximum_compaction/" + getCFName());
    }

    /**
     * Sets the minimum and maximum number of SSTables in queue before
     * compaction kicks off
     */
    @Override
    public void setCompactionThresholds(int minThreshold, int maxThreshold) {
        log(" setCompactionThresholds(int minThreshold, int maxThreshold)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("minimum", Integer.toString(minThreshold));
        queryParams.add("maximum", Integer.toString(maxThreshold));
        c.post("column_family/compaction" + getCFName(), queryParams);
        client.post("column_family/compaction" + getCFName(), queryParams);
    }

    /**
     * Sets the maximum number of sstables in queue before compaction kicks off
     */
    @Override
    public void setMaximumCompactionThreshold(int threshold) {
        log(" setMaximumCompactionThreshold(int threshold)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("value", Integer.toString(threshold));
        c.post("column_family/maximum_compaction/" + getCFName(), queryParams);
        client.post("column_family/maximum_compaction/" + getCFName(), queryParams);
    }

    /**
@ -531,7 +192,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
        log(" setCompactionStrategyClass(String className)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("class_name", className);
        c.post("column_family/compaction_strategy/" + getCFName(), queryParams);
        client.post("column_family/compaction_strategy/" + getCFName(), queryParams);
    }

    /**
@ -539,17 +200,16 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     */
    public String getCompactionStrategyClass() {
        log(" getCompactionStrategyClass()");
        return c.getStringValue(
                "column_family/compaction_strategy/" + getCFName());
        return client.getStringValue("column_family/compaction_strategy/" + getCFName());
    }

    /**
     * Get the compression parameters
     */
    @Override
    public Map<String, String> getCompressionParameters() {
        log(" getCompressionParameters()");
        return c.getMapStrValue(
                "column_family/compression_parameters/" + getCFName());
        return client.getMapStrValue("column_family/compression_parameters/" + getCFName());
    }

    /**
@ -558,73 +218,49 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     * @param opts
     *            map of string names to values
     */
    @Override
    public void setCompressionParameters(Map<String, String> opts) {
        log(" setCompressionParameters(Map<String,String> opts)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("opts", APIClient.mapToString(opts));
        c.post("column_family/compression_parameters/" + getCFName(),
                queryParams);
        client.post("column_family/compression_parameters/" + getCFName(), queryParams);
    }

    /**
     * Set new crc check chance
     */
    @Override
    public void setCrcCheckChance(double crcCheckChance) {
        log(" setCrcCheckChance(double crcCheckChance)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("check_chance", Double.toString(crcCheckChance));
        c.post("column_family/crc_check_chance/" + getCFName(), queryParams);
        client.post("column_family/crc_check_chance/" + getCFName(), queryParams);
    }

    @Override
    public boolean isAutoCompactionDisabled() {
        log(" isAutoCompactionDisabled()");
        return c.getBooleanValue("column_family/autocompaction/" + getCFName());
        return client.getBooleanValue("column_family/autocompaction/" + getCFName());
    }

    /** Number of tombstoned cells retrieved during the last slice query */
    @Deprecated
    public double getTombstonesPerSlice() {
        log(" getTombstonesPerSlice()");
        return c.getDoubleValue("");
        return client.getDoubleValue("");
    }

    /** Number of live cells retrieved during the last slice query */
    @Deprecated
    public double getLiveCellsPerSlice() {
        log(" getLiveCellsPerSlice()");
        return c.getDoubleValue("");
        return client.getDoubleValue("");
    }

    @Override
    public long estimateKeys() {
        log(" estimateKeys()");
        return c.getLongValue("column_family/estimate_keys/" + getCFName());
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedRowSizeHistogram
     */
    @Deprecated
    public long[] getEstimatedRowSizeHistogram() {
        log(" getEstimatedRowSizeHistogram()");
        return metric.estimatedRowSizeHistogram.value();
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedColumnCountHistogram
     */
    @Deprecated
    public long[] getEstimatedColumnCountHistogram() {
        log(" getEstimatedColumnCountHistogram()");
        return metric.estimatedColumnCountHistogram.value();
    }

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionRatio
     */
    @Deprecated
    public double getCompressionRatio() {
        log(" getCompressionRatio()");
        return c.getDoubleValue("/column_family/metrics/compression_ratio/" + getCFName());
        return client.getLongValue("column_family/estimate_keys/" + getCFName());
    }

    /**
@ -632,9 +268,10 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     *
     * @return list of the index names
     */
    @Override
    public List<String> getBuiltIndexes() {
        log(" getBuiltIndexes()");
        return c.getListStrValue("column_family/built_indexes/" + getCFName());
        return client.getListStrValue("column_family/built_indexes/" + getCFName());
    }

    /**
@ -643,30 +280,32 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     * @param key
     * @return list of filenames containing the key
     */
    @Override
    public List<String> getSSTablesForKey(String key) {
        log(" getSSTablesForKey(String key)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("key", key);
        return c.getListStrValue("column_family/sstables/by_key/" + getCFName(),
                queryParams);
        return client.getListStrValue("column_family/sstables/by_key/" + getCFName(), queryParams);
    }

    /**
     * Scan through Keyspace/ColumnFamily's data directory to determine which
     * SSTables should be loaded, and load them
     */
    @Override
    public void loadNewSSTables() {
        log(" loadNewSSTables()");
        c.post("column_family/sstable/" + getCFName());
        client.post("column_family/sstable/" + getCFName());
    }

    /**
     * @return the number of SSTables in L0. Always returns 0 if Leveled
     *         compaction is not enabled.
     */
    @Override
    public int getUnleveledSSTables() {
        log(" getUnleveledSSTables()");
        return c.getIntValue("column_family/sstables/unleveled/" + getCFName());
        return client.getIntValue("column_family/sstables/unleveled/" + getCFName());
    }

    /**
@ -674,10 +313,10 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     *         used. array index corresponds to level (int[0] is for level 0,
     *         ...).
     */
    @Override
    public int[] getSSTableCountPerLevel() {
        log(" getSSTableCountPerLevel()");
        int[] res = c.getIntArrValue(
                "column_family/sstables/per_level/" + getCFName());
        int[] res = client.getIntArrValue("column_family/sstables/per_level/" + getCFName());
        if (res.length == 0) {
            // no sstable count
            // should return null
@ -692,18 +331,20 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     *
     * @return ratio
     */
    @Override
    public double getDroppableTombstoneRatio() {
        log(" getDroppableTombstoneRatio()");
        return c.getDoubleValue("column_family/droppable_ratio/" + getCFName());
        return client.getDoubleValue("column_family/droppable_ratio/" + getCFName());
    }

    /**
     * @return the size of SSTables in "snapshots" subdirectory which aren't
     *         live anymore
     */
    @Override
    public long trueSnapshotsSize() {
        log(" trueSnapshotsSize()");
        return c.getLongValue("column_family/metrics/snapshots_size/" + getCFName());
        return client.getLongValue("column_family/metrics/snapshots_size/" + getCFName());
    }

    public String getKeyspace() {
@ -711,48 +352,79 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
    }

    @Override
    public long getRangeCount() {
        log("getRangeCount()");
        return metric.rangeLatency.latency.count();
    public String getTableName() {
        log(" getTableName()");
        return name;
    }

    @Override
    public long getTotalRangeLatencyMicros() {
        log("getTotalRangeLatencyMicros()");
        return metric.rangeLatency.totalLatency.count();
    public void forceMajorCompaction(boolean splitOutput) throws ExecutionException, InterruptedException {
        log(" forceMajorCompaction(boolean) throws ExecutionException, InterruptedException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.putSingle("value", valueOf(splitOutput));
        client.post("column_family/major_compaction/" + getCFName(), queryParams);
    }

    @Override
    public long[] getLifetimeRangeLatencyHistogramMicros() {
        log("getLifetimeRangeLatencyHistogramMicros()");
        return metric.rangeLatency.totalLatencyHistogram.getBuckets(false);
    public void setCompactionParametersJson(String options) {
        log(" setCompactionParametersJson");
        JsonReader reader = Json.createReaderFactory(null).createReader(new StringReader(options));
        setCompactionParameters(
                reader.readObject().entrySet().stream().collect(toMap(Map.Entry::getKey, e -> e.getValue().toString())));
    }

    @Override
    public long[] getRecentRangeLatencyHistogramMicros() {
        log("getRecentRangeLatencyHistogramMicros()");
        return metric.rangeLatency.getRecentLatencyHistogram();
    public String getCompactionParametersJson() {
        log(" getCompactionParametersJson");
        JsonObjectBuilder b = createObjectBuilder();
        getCompactionParameters().forEach(b::add);
        return b.build().toString();
    }

    @Override
    public double getRecentRangeLatencyMicros() {
        log("getRecentRangeLatencyMicros()");
        return metric.rangeLatency.getRecentLatency();
    public void setCompactionParameters(Map<String, String> options) {
        for (Map.Entry<String, String> e : options.entrySet()) {
            // See below
            if ("class".equals(e.getKey())) {
                setCompactionStrategyClass(e.getValue());
            } else {
                throw new IllegalArgumentException(e.getKey());
            }
        }
    }

    @Override
    public Map<String, String> getCompactionParameters() {
        // We currently support only "class". This could have been a call that
        // is expanded only on the server side, but that raises controversy.
        // Let's add some technical debt instead.
        return Collections.singletonMap("class", getCompactionStrategyClass());
    }

    @Override
    public boolean isCompactionDiskSpaceCheckEnabled() {
        // TODO Auto-generated method stub
        log(" isCompactionDiskSpaceCheckEnabled()");
        return false;
    }

    @Override
    public void compactionDiskSpaceCheck(boolean enable) {
        // TODO Auto-generated method stub
        log(" compactionDiskSpaceCheck()");
    }

    @Override
    public void beginLocalSampling(String sampler, int capacity) {
        // TODO Auto-generated method stub
        log("beginLocalSampling()");
        log(" beginLocalSampling()");

    }

    @Override
    public CompositeData finishLocalSampling(String sampler, int count)
            throws OpenDataException {
    public CompositeData finishLocalSampling(String sampler, int count) throws OpenDataException {
        // TODO Auto-generated method stub
        log("finishLocalSampling()");
        log(" finishLocalSampling()");
        return null;
    }

}
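A hedged sketch of the JSON round trip implemented above: compaction parameters go out as a one-entry JSON object and come back in through a JsonReader, with only the "class" key accepted (matching setCompactionParameters() above). The strategy name used here is an invented example:

import java.io.StringReader;
import javax.json.Json;
import javax.json.JsonObject;

public class CompactionParamsRoundTrip {
    public static void main(String[] args) {
        // What getCompactionParametersJson() would produce:
        String json = Json.createObjectBuilder()
                .add("class", "SizeTieredCompactionStrategy")
                .build().toString();
        // What setCompactionParametersJson() does with it:
        JsonObject obj = Json.createReader(new StringReader(json)).readObject();
        System.out.println(obj.getString("class")); // SizeTieredCompactionStrategy
    }
}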
@ -27,263 +27,23 @@ import javax.management.openmbean.OpenDataException;
/**
 * The MBean interface for ColumnFamilyStore
 */
public interface ColumnFamilyStoreMBean
{
public interface ColumnFamilyStoreMBean {
    /**
     * @return the name of the column family
     */
    @Deprecated
    public String getColumnFamilyName();

    /**
     * Returns the total amount of data stored in the memtable, including
     * column related overhead.
     *
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableOnHeapSize
     * @return The size in bytes.
     * @deprecated
     */
    @Deprecated
    public long getMemtableDataSize();

    /**
     * Returns the total number of columns present in the memtable.
     *
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableColumnsCount
     * @return The number of columns.
     */
    @Deprecated
    public long getMemtableColumnsCount();

    /**
     * Returns the number of times that a flush has resulted in the
     * memtable being switched out.
     *
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableSwitchCount
     * @return the number of memtable switches
     */
    @Deprecated
    public int getMemtableSwitchCount();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentSSTablesPerRead
     * @return a histogram of the number of sstable data files accessed per read: reading this property resets it
     */
    @Deprecated
    public long[] getRecentSSTablesPerReadHistogram();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#sstablesPerReadHistogram
     * @return a histogram of the number of sstable data files accessed per read
     */
    @Deprecated
    public long[] getSSTablesPerReadHistogram();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
     * @return the number of read operations on this column family
     */
    @Deprecated
    public long getReadCount();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
     * @return total read latency (divide by getReadCount() for average)
     */
    @Deprecated
    public long getTotalReadLatencyMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
     * @return an array representing the latency histogram
     */
    @Deprecated
    public long[] getLifetimeReadLatencyHistogramMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
     * @return an array representing the latency histogram
     */
    @Deprecated
    public long[] getRecentReadLatencyHistogramMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
     * @return average latency per read operation since the last call
     */
    @Deprecated
    public double getRecentReadLatencyMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
     * @return the number of write operations on this column family
     */
    @Deprecated
    public long getWriteCount();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
     * @return total write latency (divide by getReadCount() for average)
     */
    @Deprecated
    public long getTotalWriteLatencyMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
     * @return an array representing the latency histogram
     */
    @Deprecated
    public long[] getLifetimeWriteLatencyHistogramMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
     * @return an array representing the latency histogram
     */
    @Deprecated
    public long[] getRecentWriteLatencyHistogramMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
     * @return average latency per write operation since the last call
     */
    @Deprecated
    public double getRecentWriteLatencyMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency
     * @return the number of range slice operations on this column family
     */
    @Deprecated
    public long getRangeCount();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency
     * @return total range slice latency (divide by getRangeCount() for average)
     */
    @Deprecated
    public long getTotalRangeLatencyMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency
     * @return an array representing the latency histogram
     */
    @Deprecated
    public long[] getLifetimeRangeLatencyHistogramMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency
     * @return an array representing the latency histogram
     */
    @Deprecated
    public long[] getRecentRangeLatencyHistogramMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency
     * @return average latency per range slice operation since the last call
     */
    @Deprecated
    public double getRecentRangeLatencyMicros();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#pendingFlushes
     * @return the estimated number of tasks pending for this column family
     */
    @Deprecated
    public int getPendingTasks();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveSSTableCount
     * @return the number of SSTables on disk for this CF
     */
    @Deprecated
    public int getLiveSSTableCount();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveDiskSpaceUsed
     * @return disk space used by SSTables belonging to this CF
     */
    @Deprecated
    public long getLiveDiskSpaceUsed();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#totalDiskSpaceUsed
     * @return total disk space used by SSTables belonging to this CF, including obsolete ones waiting to be GC'd
     */
    @Deprecated
    public long getTotalDiskSpaceUsed();
    public String getTableName();

    /**
     * force a major compaction of this column family
     *
     * @param splitOutput
     *            true if the output of the major compaction should be split in
     *            several sstables
     */
    public void forceMajorCompaction() throws ExecutionException, InterruptedException;

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#minRowSize
     * @return the size of the smallest compacted row
     */
    @Deprecated
    public long getMinRowSize();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#maxRowSize
     * @return the size of the largest compacted row
     */
    @Deprecated
    public long getMaxRowSize();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#meanRowSize
     * @return the average row size across all the sstables
     */
    @Deprecated
    public long getMeanRowSize();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalsePositives
     */
    @Deprecated
    public long getBloomFilterFalsePositives();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalsePositives
     */
    @Deprecated
    public long getRecentBloomFilterFalsePositives();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalseRatio
     */
    @Deprecated
    public double getBloomFilterFalseRatio();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalseRatio
     */
    @Deprecated
    public double getRecentBloomFilterFalseRatio();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterDiskSpaceUsed
     */
    @Deprecated
    public long getBloomFilterDiskSpaceUsed();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterOffHeapMemoryUsed
     */
    @Deprecated
    public long getBloomFilterOffHeapMemoryUsed();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#indexSummaryOffHeapMemoryUsed
     */
    @Deprecated
    public long getIndexSummaryOffHeapMemoryUsed();

    /**
     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionMetadataOffHeapMemoryUsed
     */
    @Deprecated
    public long getCompressionMetadataOffHeapMemoryUsed();
    public void forceMajorCompaction(boolean splitOutput) throws ExecutionException, InterruptedException;

    /**
     * Gets the minimum number of sstables in queue before compaction kicks off
@ -301,7 +61,8 @@ public interface ColumnFamilyStoreMBean
|
||||
public int getMaximumCompactionThreshold();
|
||||
|
||||
/**
|
||||
* Sets the maximum and maximum number of SSTables in queue before compaction kicks off
|
||||
* Sets the maximum and maximum number of SSTables in queue before
|
||||
* compaction kicks off
|
||||
*/
|
||||
public void setCompactionThresholds(int minThreshold, int maxThreshold);
|
||||
|
||||
@ -311,26 +72,44 @@ public interface ColumnFamilyStoreMBean
|
||||
public void setMaximumCompactionThreshold(int threshold);
|
||||
|
||||
/**
|
||||
* Sets the compaction strategy by class name
|
||||
* @param className the name of the compaction strategy class
|
||||
* Sets the compaction parameters locally for this node
|
||||
*
|
||||
* Note that this will be set until an ALTER with compaction = {..} is
|
||||
* executed or the node is restarted
|
||||
*
|
||||
* @param options
|
||||
* compaction options with the same syntax as when doing ALTER
|
||||
* ... WITH compaction = {..}
|
||||
*/
|
||||
public void setCompactionStrategyClass(String className);
|
||||
public void setCompactionParametersJson(String options);
|
||||
|
||||
public String getCompactionParametersJson();
|
||||
|
||||
/**
|
||||
* Gets the compaction strategy class name
|
||||
* Sets the compaction parameters locally for this node
|
||||
*
|
||||
* Note that this will be set until an ALTER with compaction = {..} is
|
||||
* executed or the node is restarted
|
||||
*
|
||||
* @param options
|
||||
* compaction options map
|
||||
*/
|
||||
public String getCompactionStrategyClass();
|
||||
public void setCompactionParameters(Map<String, String> options);
|
||||
|
||||
public Map<String, String> getCompactionParameters();
|
||||
|
||||
/**
|
||||
* Get the compression parameters
|
||||
*/
|
||||
public Map<String,String> getCompressionParameters();
|
||||
public Map<String, String> getCompressionParameters();
|
||||
|
||||
/**
|
||||
* Set the compression parameters
|
||||
* @param opts map of string names to values
|
||||
*
|
||||
* @param opts
|
||||
* map of string names to values
|
||||
*/
|
||||
public void setCompressionParameters(Map<String,String> opts);
|
||||
public void setCompressionParameters(Map<String, String> opts);
|
||||
|
||||
/**
|
||||
* Set new crc check chance
|
||||
@ -339,81 +118,76 @@ public interface ColumnFamilyStoreMBean
|
||||
|
||||
public boolean isAutoCompactionDisabled();
|
||||
|
||||
/** Number of tombstoned cells retreived during the last slicequery */
|
||||
@Deprecated
|
||||
public double getTombstonesPerSlice();
|
||||
|
||||
/** Number of live cells retreived during the last slicequery */
|
||||
@Deprecated
|
||||
public double getLiveCellsPerSlice();
|
||||
|
||||
public long estimateKeys();
|
||||
|
||||
/**
|
||||
* @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedRowSizeHistogram
|
||||
*/
|
||||
@Deprecated
|
||||
public long[] getEstimatedRowSizeHistogram();
|
||||
/**
|
||||
* @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedColumnCountHistogram
|
||||
*/
|
||||
@Deprecated
|
||||
public long[] getEstimatedColumnCountHistogram();
|
||||
/**
|
||||
* @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionRatio
|
||||
*/
|
||||
@Deprecated
|
||||
public double getCompressionRatio();
|
||||
|
||||
/**
|
||||
* Returns a list of the names of the built column indexes for current store
|
||||
*
|
||||
* @return list of the index names
|
||||
*/
|
||||
public List<String> getBuiltIndexes();
|
||||
|
||||
/**
|
||||
* Returns a list of filenames that contain the given key on this node
|
||||
*
|
||||
* @param key
|
||||
* @return list of filenames containing the key
|
||||
*/
|
||||
public List<String> getSSTablesForKey(String key);
|
||||
|
||||
/**
|
||||
* Scan through Keyspace/ColumnFamily's data directory
|
||||
* determine which SSTables should be loaded and load them
|
||||
* Scan through Keyspace/ColumnFamily's data directory determine which
|
||||
* SSTables should be loaded and load them
|
||||
*/
|
||||
public void loadNewSSTables();
|
||||
|
||||
/**
|
||||
* @return the number of SSTables in L0. Always return 0 if Leveled compaction is not enabled.
|
||||
* @return the number of SSTables in L0. Always return 0 if Leveled
|
||||
* compaction is not enabled.
|
||||
*/
|
||||
public int getUnleveledSSTables();
|
||||
|
||||
/**
|
||||
* @return sstable count for each level. null unless leveled compaction is used.
|
||||
* array index corresponds to level(int[0] is for level 0, ...).
|
||||
* @return sstable count for each level. null unless leveled compaction is
|
||||
* used. array index corresponds to level(int[0] is for level 0,
|
||||
* ...).
|
||||
*/
|
||||
public int[] getSSTableCountPerLevel();
|
||||
|
||||
/**
|
||||
* Get the ratio of droppable tombstones to real columns (and non-droppable tombstones)
|
||||
* Get the ratio of droppable tombstones to real columns (and non-droppable
|
||||
* tombstones)
|
||||
*
|
||||
* @return ratio
|
||||
*/
|
||||
public double getDroppableTombstoneRatio();
|
||||
|
||||
/**
|
||||
* @return the size of SSTables in "snapshots" subdirectory which aren't live anymore
|
||||
* @return the size of SSTables in "snapshots" subdirectory which aren't
|
||||
* live anymore
|
||||
*/
|
||||
public long trueSnapshotsSize();
|
||||
|
||||
/**
|
||||
* begin sampling for a specific sampler with a given capacity. The cardinality may
|
||||
* be larger than the capacity, but depending on the use case it may affect its accuracy
|
||||
* begin sampling for a specific sampler with a given capacity. The
|
||||
* cardinality may be larger than the capacity, but depending on the use
|
||||
* case it may affect its accuracy
|
||||
*/
|
||||
public void beginLocalSampling(String sampler, int capacity);
|
||||
|
||||
/**
|
||||
* @return top <i>count</i> items for the sampler since beginLocalSampling was called
|
||||
* @return top <i>count</i> items for the sampler since beginLocalSampling
|
||||
* was called
|
||||
*/
|
||||
public CompositeData finishLocalSampling(String sampler, int count) throws OpenDataException;
|
||||
|
||||
/*
|
||||
* Is Compaction space check enabled
|
||||
*/
|
||||
public boolean isCompactionDiskSpaceCheckEnabled();
|
||||
|
||||
/*
|
||||
* Enable/Disable compaction space check
|
||||
*/
|
||||
public void compactionDiskSpaceCheck(boolean enable);
|
||||
}
|
||||
|
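For illustration, a minimal JMX client sketch (not part of this commit) showing how the new compaction-parameter operations above could be driven. The service URL, keyspace and table names are assumptions; the ObjectName pattern follows Cassandra's usual naming for per-table MBeans.

import java.util.HashMap;
import java.util.Map;
import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class CompactionParamsExample {
    public static void main(String[] args) throws Exception {
        // Hypothetical endpoint; 7199 is the conventional Cassandra/Scylla JMX port.
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
        try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
            MBeanServerConnection conn = jmxc.getMBeanServerConnection();
            // Hypothetical keyspace "ks1" and table "t1".
            ObjectName name = new ObjectName(
                    "org.apache.cassandra.db:type=ColumnFamilies,keyspace=ks1,columnfamily=t1");
            ColumnFamilyStoreMBean cfs = JMX.newMBeanProxy(conn, name, ColumnFamilyStoreMBean.class);
            // Per the javadoc above, this only holds until an ALTER ... WITH compaction = {..}
            // is executed or the node restarts.
            Map<String, String> options = new HashMap<>();
            options.put("class", "SizeTieredCompactionStrategy");
            options.put("min_threshold", "6");
            cfs.setCompactionParameters(options);
            System.out.println(cfs.getCompactionParametersJson());
        }
    }
}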
@ -22,85 +22,39 @@
*/
package org.apache.cassandra.db.commitlog;

import java.io.*;
import java.lang.management.ManagementFactory;
import java.util.*;

import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.cassandra.metrics.CommitLogMetrics;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.MetricsMBean;

/*
* Commit Log tracks every write operation into the system. The aim of the commit log is to be able to
* successfully recover data that was not stored to disk via the Memtable.
*/
public class CommitLog implements CommitLogMBean {

CommitLogMetrics metrics = new CommitLogMetrics();
public class CommitLog extends MetricsMBean implements CommitLogMBean {
private static final java.util.logging.Logger logger = java.util.logging.Logger
.getLogger(CommitLog.class.getName());

private APIClient c = new APIClient();

public void log(String str) {
logger.finest(str);
}

private static final CommitLog instance = new CommitLog();

public static CommitLog getInstance() {
return instance;
}

private CommitLog() {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
try {
mbs.registerMBean(this,
new ObjectName("org.apache.cassandra.db:type=Commitlog"));
} catch (Exception e) {
throw new RuntimeException(e);
}
}

/**
* Get the number of completed tasks
*
* @see org.apache.cassandra.metrics.CommitLogMetrics#completedTasks
*/
@Deprecated
public long getCompletedTasks() {
log(" getCompletedTasks()");
return metrics.completedTasks.value();
}

/**
* Get the number of tasks waiting to be executed
*
* @see org.apache.cassandra.metrics.CommitLogMetrics#pendingTasks
*/
@Deprecated
public long getPendingTasks() {
log(" getPendingTasks()");
return metrics.pendingTasks.value();
}

/**
* Get the current size used by all the commitlog segments.
*
* @see org.apache.cassandra.metrics.CommitLogMetrics#totalCommitLogSize
*/
@Deprecated
public long getTotalCommitlogSize() {
log(" getTotalCommitlogSize()");
return metrics.totalCommitLogSize.value();
public CommitLog(APIClient client) {
super("org.apache.cassandra.db:type=Commitlog", client, new CommitLogMetrics());
}

/**
* Recover a single file.
*/
@Override
public void recover(String path) throws IOException {
log(" recover(String path) throws IOException");
}
@ -109,9 +63,10 @@ public class CommitLog implements CommitLogMBean {
* @return file names (not full paths) of active commit log segments
* (segments containing unflushed data)
*/
@Override
public List<String> getActiveSegmentNames() {
log(" getActiveSegmentNames()");
List<String> lst = c.getListStrValue("/commitlog/segments/active");
List<String> lst = client.getListStrValue("/commitlog/segments/active");
Set<String> set = new HashSet<String>();
for (String l : lst) {
String name = l.substring(l.lastIndexOf("/") + 1, l.length());
@ -124,9 +79,10 @@ public class CommitLog implements CommitLogMBean {
* @return Files which are pending for archival attempt. Does NOT include
* failed archive attempts.
*/
@Override
public List<String> getArchivingSegmentNames() {
log(" getArchivingSegmentNames()");
List<String> lst = c.getListStrValue("/commitlog/segments/archiving");
List<String> lst = client.getListStrValue("/commitlog/segments/archiving");
Set<String> set = new HashSet<String>();
for (String l : lst) {
String name = l.substring(l.lastIndexOf("/") + 1, l.length());
@ -139,35 +95,54 @@ public class CommitLog implements CommitLogMBean {
public String getArchiveCommand() {
// TODO Auto-generated method stub
log(" getArchiveCommand()");
return c.getStringValue("");
return client.getStringValue("");
}

@Override
public String getRestoreCommand() {
// TODO Auto-generated method stub
log(" getRestoreCommand()");
return c.getStringValue("");
return client.getStringValue("");
}

@Override
public String getRestoreDirectories() {
// TODO Auto-generated method stub
log(" getRestoreDirectories()");
return c.getStringValue("");
return client.getStringValue("");
}

@Override
public long getRestorePointInTime() {
// TODO Auto-generated method stub
log(" getRestorePointInTime()");
return c.getLongValue("");
return client.getLongValue("");
}

@Override
public String getRestorePrecision() {
// TODO Auto-generated method stub
log(" getRestorePrecision()");
return c.getStringValue("");
return client.getStringValue("");
}

@Override
public long getActiveContentSize() {
// scylla does not compress commit log, so this is equivalent
return getActiveOnDiskSize();
}

@Override
public long getActiveOnDiskSize() {
return client.getLongValue("/commitlog/metrics/total_commit_log_size");
}

@Override
public Map<String, Double> getActiveSegmentCompressionRatios() {
HashMap<String, Double> res = new HashMap<>();
for (String name : getActiveSegmentNames()) {
res.put(name, 1.0);
}
return res;
}
}
@ -19,32 +19,9 @@ package org.apache.cassandra.db.commitlog;

import java.io.IOException;
import java.util.List;
import java.util.Map;

public interface CommitLogMBean {
/**
* Get the number of completed tasks
*
* @see org.apache.cassandra.metrics.CommitLogMetrics#completedTasks
*/
@Deprecated
public long getCompletedTasks();

/**
* Get the number of tasks waiting to be executed
*
* @see org.apache.cassandra.metrics.CommitLogMetrics#pendingTasks
*/
@Deprecated
public long getPendingTasks();

/**
* Get the current size used by all the commitlog segments.
*
* @see org.apache.cassandra.metrics.CommitLogMetrics#totalCommitLogSize
*/
@Deprecated
public long getTotalCommitlogSize();

/**
* Command to execute to archive a commitlog segment. Blank to disable.
*/
@ -92,4 +69,21 @@ public interface CommitLogMBean {
* failed archive attempts.
*/
public List<String> getArchivingSegmentNames();

/**
* @return The size of the mutations in all active commit log segments
* (uncompressed).
*/
public long getActiveContentSize();

/**
* @return The space taken on disk by the commit log (compressed).
*/
public long getActiveOnDiskSize();

/**
* @return A map between active log segments and the compression ratio
* achieved for each.
*/
public Map<String, Double> getActiveSegmentCompressionRatios();
}
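For illustration, a short client-side sketch (not part of this commit) reading the new size attributes through a CommitLogMBean proxy; "conn" is an MBeanServerConnection obtained as in the earlier sketch. In the Scylla implementation above the two sizes are equal, since the commit log is not compressed and every segment's ratio is reported as 1.0.

import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;

public class CommitLogSizeExample {
    static void printSizes(MBeanServerConnection conn) throws Exception {
        ObjectName name = new ObjectName("org.apache.cassandra.db:type=Commitlog");
        CommitLogMBean commitLog = JMX.newMBeanProxy(conn, name, CommitLogMBean.class);
        long onDisk = commitLog.getActiveOnDiskSize();   // compressed (on-disk) size
        long content = commitLog.getActiveContentSize(); // uncompressed mutation size
        System.out.printf("on disk: %d, content: %d%n", onDisk, content);
    }
}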
@ -36,13 +36,11 @@ import javax.management.openmbean.TabularType;
import com.google.common.base.Throwables;

public class CompactionHistoryTabularData {
private static final String[] ITEM_NAMES = new String[] { "id",
"keyspace_name", "columnfamily_name", "compacted_at", "bytes_in",
"bytes_out", "rows_merged" };
private static final String[] ITEM_NAMES = new String[] { "id", "keyspace_name", "columnfamily_name",
"compacted_at", "bytes_in", "bytes_out", "rows_merged" };

private static final String[] ITEM_DESCS = new String[] { "time uuid",
"keyspace name", "column family name", "compaction finished at",
"total bytes in", "total bytes out", "total rows merged" };
private static final String[] ITEM_DESCS = new String[] { "time uuid", "keyspace name", "column family name",
"compaction finished at", "total bytes in", "total bytes out", "total rows merged" };

private static final String TYPE_NAME = "CompactionHistory";

@ -56,22 +54,18 @@ public class CompactionHistoryTabularData {

static {
try {
ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING,
SimpleType.STRING, SimpleType.LONG, SimpleType.LONG,
SimpleType.LONG, SimpleType.STRING };
ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.LONG,
SimpleType.LONG, SimpleType.LONG, SimpleType.STRING };

COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES,
ITEM_DESCS, ITEM_TYPES);
COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES, ITEM_DESCS, ITEM_TYPES);

TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE,
ITEM_NAMES);
TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE, ITEM_NAMES);
} catch (OpenDataException e) {
throw Throwables.propagate(e);
}
}

public static TabularData from(JsonArray resultSet)
throws OpenDataException {
public static TabularData from(JsonArray resultSet) throws OpenDataException {
TabularDataSupport result = new TabularDataSupport(TABULAR_TYPE);
for (int i = 0; i < resultSet.size(); i++) {
JsonObject row = resultSet.getJsonObject(i);
@ -91,15 +85,13 @@ public class CompactionHistoryTabularData {
if (m > 0) {
sb.append(',');
}
sb.append(entry.getString("key")).append(':')
.append(entry.getString("value"));
sb.append(entry.getString("key")).append(':').append(entry.getString("value"));

}
sb.append('}');
}
result.put(new CompositeDataSupport(COMPOSITE_TYPE, ITEM_NAMES,
new Object[] { id, ksName, cfName, compactedAt, bytesIn,
bytesOut, sb.toString() }));
new Object[] { id, ksName, cfName, compactedAt, bytesIn, bytesOut, sb.toString() }));
}
return result;
}
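For illustration, a small consumer sketch (not part of this commit) iterating the TabularData that from(...) builds; each row is a CompositeData keyed by the ITEM_NAMES above.

import javax.management.openmbean.CompositeData;
import javax.management.openmbean.TabularData;

public class CompactionHistoryPrinter {
    static void printHistory(TabularData history) {
        for (Object row : history.values()) {
            CompositeData cd = (CompositeData) row;
            System.out.printf("%s/%s: %d -> %d bytes, merged %s%n",
                    cd.get("keyspace_name"), cd.get("columnfamily_name"),
                    (Long) cd.get("bytes_in"), (Long) cd.get("bytes_out"),
                    cd.get("rows_merged"));
        }
    }
}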
@ -17,16 +17,14 @@
*/
package org.apache.cassandra.db.compaction;

import java.lang.management.ManagementFactory;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Logger;

import javax.json.JsonArray;
import javax.json.JsonObject;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.openmbean.OpenDataException;
import javax.management.openmbean.TabularData;
import javax.ws.rs.core.MultivaluedHashMap;
@ -35,6 +33,7 @@ import javax.ws.rs.core.MultivaluedMap;
import org.apache.cassandra.metrics.CompactionMetrics;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.MetricsMBean;

/**
* A singleton which manages a private executor of ongoing compactions.
@ -49,37 +48,24 @@ import com.scylladb.jmx.api.APIClient;
*
* Modified by Cloudius Systems
*/
public class CompactionManager implements CompactionManagerMBean {
public class CompactionManager extends MetricsMBean implements CompactionManagerMBean {
public static final String MBEAN_OBJECT_NAME = "org.apache.cassandra.db:type=CompactionManager";
private static final java.util.logging.Logger logger = java.util.logging.Logger
.getLogger(CompactionManager.class.getName());
public static final CompactionManager instance;
private APIClient c = new APIClient();
CompactionMetrics metrics = new CompactionMetrics();
private static final Logger logger = Logger.getLogger(CompactionManager.class.getName());

public void log(String str) {
logger.finest(str);
}

static {
instance = new CompactionManager();
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
try {
mbs.registerMBean(instance, new ObjectName(MBEAN_OBJECT_NAME));
} catch (Exception e) {
throw new RuntimeException(e);
}
}

public static CompactionManager getInstance() {
return instance;
public CompactionManager(APIClient client) {
super(MBEAN_OBJECT_NAME, client, new CompactionMetrics());
}

/** List of running compaction objects. */
@Override
public List<Map<String, String>> getCompactions() {
log(" getCompactions()");
List<Map<String, String>> results = new ArrayList<Map<String, String>>();
JsonArray compactions = c.getJsonArray("compaction_manager/compactions");
JsonArray compactions = client.getJsonArray("compaction_manager/compactions");
for (int i = 0; i < compactions.size(); i++) {
JsonObject compaction = compactions.getJsonObject(i);
Map<String, String> result = new HashMap<String, String>();
@ -95,61 +81,23 @@ public class CompactionManager implements CompactionManagerMBean {
}

/** List of running compaction summary strings. */
@Override
public List<String> getCompactionSummary() {
log(" getCompactionSummary()");
return c.getListStrValue("compaction_manager/compaction_summary");
return client.getListStrValue("compaction_manager/compaction_summary");
}

/** compaction history **/
@Override
public TabularData getCompactionHistory() {
log(" getCompactionHistory()");
try {
return CompactionHistoryTabularData.from(c.getJsonArray("/compaction_manager/compaction_history"));
return CompactionHistoryTabularData.from(client.getJsonArray("/compaction_manager/compaction_history"));
} catch (OpenDataException e) {
return null;
}
}

/**
* @see org.apache.cassandra.metrics.CompactionMetrics#pendingTasks
* @return estimated number of compactions remaining to perform
*/
@Deprecated
public int getPendingTasks() {
log(" getPendingTasks()");
return metrics.pendingTasks.value();
}

/**
* @see org.apache.cassandra.metrics.CompactionMetrics#completedTasks
* @return number of completed compactions since server [re]start
*/
@Deprecated
public long getCompletedTasks() {
log(" getCompletedTasks()");
return metrics.completedTasks.value();
}

/**
* @see org.apache.cassandra.metrics.CompactionMetrics#bytesCompacted
* @return total number of bytes compacted since server [re]start
*/
@Deprecated
public long getTotalBytesCompacted() {
log(" getTotalBytesCompacted()");
return metrics.bytesCompacted.count();
}

/**
* @see org.apache.cassandra.metrics.CompactionMetrics#totalCompactionsCompleted
* @return total number of compactions since server [re]start
*/
@Deprecated
public long getTotalCompactionsCompleted() {
log(" getTotalCompactionsCompleted()");
return metrics.totalCompactionsCompleted.count();
}

/**
* Triggers the compaction of user specified sstables. You can specify files
* from various keyspaces and columnfamilies. If you do so, user defined
@ -161,11 +109,12 @@ public class CompactionManager implements CompactionManagerMBean {
* contain keyspace and columnfamily name in path(for 2.1+) or
* file name itself.
*/
@Override
public void forceUserDefinedCompaction(String dataFiles) {
log(" forceUserDefinedCompaction(String dataFiles)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("dataFiles", dataFiles);
c.post("compaction_manager/force_user_defined_compaction", queryParams);
client.post("compaction_manager/force_user_defined_compaction", queryParams);
}

/**
@ -175,23 +124,21 @@ public class CompactionManager implements CompactionManagerMBean {
* the type of compaction to stop. Can be one of: - COMPACTION -
* VALIDATION - CLEANUP - SCRUB - INDEX_BUILD
*/
@Override
public void stopCompaction(String type) {
log(" stopCompaction(String type)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("type", type);
c.post("compaction_manager/stop_compaction", queryParams);
client.post("compaction_manager/stop_compaction", queryParams);
}

/**
* Returns core size of compaction thread pool
*/
@Override
public int getCoreCompactorThreads() {
log(" getCoreCompactorThreads()");
/**
* Core size pool is meaningless, we still wants to return a valid reponse,
* just in case someone will try to call this method.
*/
return 1;
return client.getIntValue("");
}

/**
@ -200,6 +147,7 @@ public class CompactionManager implements CompactionManagerMBean {
* @param number
* New maximum of compaction threads
*/
@Override
public void setCoreCompactorThreads(int number) {
log(" setCoreCompactorThreads(int number)");
}
@ -207,13 +155,10 @@ public class CompactionManager implements CompactionManagerMBean {
/**
* Returns maximum size of compaction thread pool
*/
@Override
public int getMaximumCompactorThreads() {
log(" getMaximumCompactorThreads()");
/**
* Core size pool is meaningless, we still wants to return a valid reponse,
* just in case someone will try to call this method.
*/
return 1;
return client.getIntValue("");
}

/**
@ -222,6 +167,7 @@ public class CompactionManager implements CompactionManagerMBean {
* @param number
* New maximum of compaction threads
*/
@Override
public void setMaximumCompactorThreads(int number) {
log(" setMaximumCompactorThreads(int number)");
}
@ -229,13 +175,10 @@ public class CompactionManager implements CompactionManagerMBean {
/**
* Returns core size of validation thread pool
*/
@Override
public int getCoreValidationThreads() {
log(" getCoreValidationThreads()");
/**
* Core validation size pool is meaningless, we still wants to return a valid reponse,
* just in case someone will try to call this method.
*/
return 1;
return client.getIntValue("");
}

/**
@ -244,6 +187,7 @@ public class CompactionManager implements CompactionManagerMBean {
* @param number
* New maximum of compaction threads
*/
@Override
public void setCoreValidationThreads(int number) {
log(" setCoreValidationThreads(int number)");
}
@ -251,13 +195,10 @@ public class CompactionManager implements CompactionManagerMBean {
/**
* Returns size of validator thread pool
*/
@Override
public int getMaximumValidatorThreads() {
log(" getMaximumValidatorThreads()");
/**
* Core validation size pool is meaningless, we still wants to return a valid reponse,
* just in case someone will try to call this method.
*/
return 1;
return client.getIntValue("");
}

/**
@ -266,8 +207,19 @@ public class CompactionManager implements CompactionManagerMBean {
* @param number
* New maximum of validator threads
*/
@Override
public void setMaximumValidatorThreads(int number) {
log(" setMaximumValidatorThreads(int number)");
}

@Override
public void stopCompactionById(String compactionId) {
// scylla has neither compaction ids nor the file described
// in:
// "Ids can be found in the transaction log files whose name starts with
// compaction_, located in the table transactions folder"
// (nodetool)
// TODO: throw?
log(" stopCompactionById");
}
}
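For illustration, a minimal sketch (not part of this commit) stopping all running validations through the CompactionManager MBean; "conn" is an MBeanServerConnection as in the earlier sketches.

import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;

public class StopValidationExample {
    static void stopValidations(MBeanServerConnection conn) throws Exception {
        ObjectName name = new ObjectName(CompactionManager.MBEAN_OBJECT_NAME);
        CompactionManagerMBean cm = JMX.newMBeanProxy(conn, name, CompactionManagerMBean.class);
        // Valid types per the javadoc: COMPACTION, VALIDATION, CLEANUP, SCRUB, INDEX_BUILD.
        cm.stopCompaction("VALIDATION");
    }
}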
@ -19,6 +19,7 @@ package org.apache.cassandra.db.compaction;

import java.util.List;
import java.util.Map;

import javax.management.openmbean.TabularData;

public interface CompactionManagerMBean {
@ -31,34 +32,6 @@ public interface CompactionManagerMBean {
/** compaction history **/
public TabularData getCompactionHistory();

/**
* @see org.apache.cassandra.metrics.CompactionMetrics#pendingTasks
* @return estimated number of compactions remaining to perform
*/
@Deprecated
public int getPendingTasks();

/**
* @see org.apache.cassandra.metrics.CompactionMetrics#completedTasks
* @return number of completed compactions since server [re]start
*/
@Deprecated
public long getCompletedTasks();

/**
* @see org.apache.cassandra.metrics.CompactionMetrics#bytesCompacted
* @return total number of bytes compacted since server [re]start
*/
@Deprecated
public long getTotalBytesCompacted();

/**
* @see org.apache.cassandra.metrics.CompactionMetrics#totalCompactionsCompleted
* @return total number of compactions since server [re]start
*/
@Deprecated
public long getTotalCompactionsCompleted();

/**
* Triggers the compaction of user specified sstables. You can specify files
* from various keyspaces and columnfamilies. If you do so, user defined
@ -72,13 +45,23 @@ public interface CompactionManagerMBean {

/**
* Stop all running compaction-like tasks having the provided {@code type}.
*
*
* @param type
* the type of compaction to stop. Can be one of: - COMPACTION -
* VALIDATION - CLEANUP - SCRUB - INDEX_BUILD
*/
public void stopCompaction(String type);

/**
* Stop an individual running compaction using the compactionId.
*
* @param compactionId
* Compaction ID of compaction to stop. Such IDs can be found in
* the transaction log files whose name starts with compaction_,
* located in the table transactions folder.
*/
public void stopCompactionById(String compactionId);

/**
* Returns core size of compaction thread pool
*/
@ -86,7 +69,7 @@ public interface CompactionManagerMBean {

/**
* Allows user to resize maximum size of the compaction thread pool.
*
*
* @param number
* New maximum of compaction threads
*/
@ -99,7 +82,7 @@ public interface CompactionManagerMBean {

/**
* Allows user to resize maximum size of the compaction thread pool.
*
*
* @param number
* New maximum of compaction threads
*/
@ -112,7 +95,7 @@ public interface CompactionManagerMBean {

/**
* Allows user to resize maximum size of the compaction thread pool.
*
*
* @param number
* New maximum of compaction threads
*/
@ -125,7 +108,7 @@ public interface CompactionManagerMBean {

/**
* Allows user to resize maximum size of the validator thread pool.
*
*
* @param number
* New maximum of validator threads
*/
@ -24,31 +24,12 @@

package org.apache.cassandra.gms;

public enum ApplicationState
{
STATUS,
LOAD,
SCHEMA,
DC,
RACK,
RELEASE_VERSION,
REMOVAL_COORDINATOR,
INTERNAL_IP,
RPC_ADDRESS,
X_11_PADDING, // padding specifically for 1.1
SEVERITY,
NET_VERSION,
HOST_ID,
TOKENS,
public enum ApplicationState {
STATUS, LOAD, SCHEMA, DC, RACK, RELEASE_VERSION, REMOVAL_COORDINATOR, INTERNAL_IP, RPC_ADDRESS, X_11_PADDING, // padding
// specifically
// for
// 1.1
SEVERITY, NET_VERSION, HOST_ID, TOKENS,
// pad to allow adding new states to existing cluster
X1,
X2,
X3,
X4,
X5,
X6,
X7,
X8,
X9,
X10,
X1, X2, X3, X4, X5, X6, X7, X8, X9, X10,
}
@ -42,6 +42,7 @@ public class EndpointState {
ApplicationState[] applicationValues;
private static final java.util.logging.Logger logger = java.util.logging.Logger
.getLogger(EndpointState.class.getName());

EndpointState(HeartBeatState initialHbState) {
applicationValues = ApplicationState.values();
hbState = initialHbState;
@ -101,8 +102,8 @@ public class EndpointState {
isAlive = alive;
}

@Override
public String toString() {
return "EndpointState: HeartBeatState = " + hbState + ", AppStateMap = "
+ applicationState;
return "EndpointState: HeartBeatState = " + hbState + ", AppStateMap = " + applicationState;
}
}
@ -24,84 +24,81 @@

package org.apache.cassandra.gms;

import java.lang.management.ManagementFactory;
import java.net.UnknownHostException;
import java.util.*;
import java.util.HashMap;
import java.util.Map;

import javax.json.JsonArray;
import javax.json.JsonObject;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.json.JsonValue;
import javax.management.openmbean.CompositeData;
import javax.management.openmbean.CompositeDataSupport;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.OpenDataException;
import javax.management.openmbean.OpenType;
import javax.management.openmbean.SimpleType;
import javax.management.openmbean.TabularData;
import javax.management.openmbean.TabularDataSupport;
import javax.management.openmbean.TabularType;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMBean;

public class FailureDetector implements FailureDetectorMBean {
public class FailureDetector extends APIMBean implements FailureDetectorMBean {
public static final String MBEAN_NAME = "org.apache.cassandra.net:type=FailureDetector";
private static final java.util.logging.Logger logger = java.util.logging.Logger
.getLogger(FailureDetector.class.getName());

private APIClient c = new APIClient();
public FailureDetector(APIClient c) {
super(c);
}

public void log(String str) {
logger.finest(str);
}

private static final FailureDetector instance = new FailureDetector();

public static FailureDetector getInstance() {
return instance;
}

private FailureDetector() {
// Register this instance with JMX
try {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
mbs.registerMBean(this, new ObjectName(MBEAN_NAME));
} catch (Exception e) {
throw new RuntimeException(e);
}
}

@Override
public void dumpInterArrivalTimes() {
log(" dumpInterArrivalTimes()");
}

@Override
public void setPhiConvictThreshold(double phi) {
log(" setPhiConvictThreshold(double phi)");
}

@Override
public double getPhiConvictThreshold() {
log(" getPhiConvictThreshold()");
return c.getDoubleValue("/failure_detector/phi");
return client.getDoubleValue("/failure_detector/phi");
}

@Override
public String getAllEndpointStates() {
log(" getAllEndpointStates()");

StringBuilder sb = new StringBuilder();
for (Map.Entry<String, EndpointState> entry : getEndpointStateMap().entrySet())
{
for (Map.Entry<String, EndpointState> entry : getEndpointStateMap().entrySet()) {
sb.append('/').append(entry.getKey()).append("\n");
appendEndpointState(sb, entry.getValue());
}
return sb.toString();
}

private void appendEndpointState(StringBuilder sb, EndpointState endpointState)
{
private void appendEndpointState(StringBuilder sb, EndpointState endpointState) {
sb.append(" generation:").append(endpointState.getHeartBeatState().getGeneration()).append("\n");
sb.append(" heartbeat:").append(endpointState.getHeartBeatState().getHeartBeatVersion()).append("\n");
for (Map.Entry<ApplicationState, String> state : endpointState.applicationState.entrySet())
{
if (state.getKey() == ApplicationState.TOKENS)
for (Map.Entry<ApplicationState, String> state : endpointState.applicationState.entrySet()) {
if (state.getKey() == ApplicationState.TOKENS) {
continue;
}
sb.append(" ").append(state.getKey()).append(":").append(state.getValue()).append("\n");
}
}

public Map<String, EndpointState> getEndpointStateMap() {
Map<String, EndpointState> res = new HashMap<String, EndpointState>();
JsonArray arr = c.getJsonArray("/failure_detector/endpoints");
JsonArray arr = client.getJsonArray("/failure_detector/endpoints");
for (int i = 0; i < arr.size(); i++) {
JsonObject obj = arr.getJsonObject(i);
EndpointState ep = new EndpointState(new HeartBeatState(obj.getInt("generation"), obj.getInt("version")));
@ -117,24 +114,63 @@ public class FailureDetector implements FailureDetectorMBean {
return res;
}

@Override
public String getEndpointState(String address) throws UnknownHostException {
log(" getEndpointState(String address) throws UnknownHostException");
return c.getStringValue("/failure_detector/endpoints/states/" + address);
return client.getStringValue("/failure_detector/endpoints/states/" + address);
}

@Override
public Map<String, String> getSimpleStates() {
log(" getSimpleStates()");
return c.getMapStrValue("/failure_detector/simple_states");
return client.getMapStrValue("/failure_detector/simple_states");
}

@Override
public int getDownEndpointCount() {
log(" getDownEndpointCount()");
return c.getIntValue("/failure_detector/count/endpoint/down");
return client.getIntValue("/failure_detector/count/endpoint/down");
}

@Override
public int getUpEndpointCount() {
log(" getUpEndpointCount()");
return c.getIntValue("/failure_detector/count/endpoint/up");
return client.getIntValue("/failure_detector/count/endpoint/up");
}

// From origin:
// this is useless except to provide backwards compatibility in
// phi_convict_threshold,
// because everyone seems pretty accustomed to the default of 8, and users
// who have
// already tuned their phi_convict_threshold for their own environments
// won't need to
// change.
private final double PHI_FACTOR = 1.0 / Math.log(10.0); // 0.434...

@Override
public TabularData getPhiValues() throws OpenDataException {
final CompositeType ct = new CompositeType("Node", "Node", new String[] { "Endpoint", "PHI" },
new String[] { "IP of the endpoint", "PHI value" },
new OpenType[] { SimpleType.STRING, SimpleType.DOUBLE });
final TabularDataSupport results = new TabularDataSupport(
new TabularType("PhiList", "PhiList", ct, new String[] { "Endpoint" }));
final JsonArray arr = client.getJsonArray("/failure_detector/endpoint_phi_values");

for (JsonValue v : arr) {
JsonObject o = (JsonObject) v;
String endpoint = o.getString("endpoint");
double phi = Double.parseDouble(o.getString("phi"));

if (phi != Double.MIN_VALUE) {
// returned values are scaled by PHI_FACTOR so that they are on
// the same scale as PhiConvictThreshold
final CompositeData data = new CompositeDataSupport(ct, new String[] { "Endpoint", "PHI" },
new Object[] { endpoint, phi * PHI_FACTOR });
results.put(data);
}
}

return results;
}
}
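For illustration, how the PHI_FACTOR scaling above plays out numerically (a sketch with a hypothetical raw value): the REST API reports phi on a natural-log scale, and multiplying by 1/ln(10) rebases it to log10 so it is directly comparable to getPhiConvictThreshold().

public class PhiScalingExample {
    public static void main(String[] args) {
        double rawPhi = 18.4; // hypothetical value from the API
        double reported = rawPhi * (1.0 / Math.log(10.0));
        // Prints ~7.99, i.e. just under the default conviction threshold of 8.
        System.out.printf("reported phi = %.2f%n", reported);
    }
}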
@ -20,8 +20,10 @@ package org.apache.cassandra.gms;
import java.net.UnknownHostException;
import java.util.Map;

public interface FailureDetectorMBean
{
import javax.management.openmbean.OpenDataException;
import javax.management.openmbean.TabularData;

public interface FailureDetectorMBean {
public void dumpInterArrivalTimes();

public void setPhiConvictThreshold(double phi);
@ -37,4 +39,6 @@ public interface FailureDetectorMBean
public int getDownEndpointCount();

public int getUpEndpointCount();

public TabularData getPhiValues() throws OpenDataException;
}
@ -23,15 +23,14 @@
*/
package org.apache.cassandra.gms;

import java.lang.management.ManagementFactory;
import java.net.UnknownHostException;
import java.util.logging.Logger;

import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMBean;

/**
* This module is responsible for Gossiping information for the local endpoint.
@ -48,57 +47,43 @@ import com.scylladb.jmx.api.APIClient;
* node as down in the Failure Detector.
*/

public class Gossiper implements GossiperMBean {
public class Gossiper extends APIMBean implements GossiperMBean {
public static final String MBEAN_NAME = "org.apache.cassandra.net:type=Gossiper";

private static final java.util.logging.Logger logger = java.util.logging.Logger
.getLogger(Gossiper.class.getName());
private static final Logger logger = Logger.getLogger(Gossiper.class.getName());

private APIClient c = new APIClient();
public Gossiper(APIClient c) {
super(c);
}

public void log(String str) {
logger.finest(str);
}

private static final Gossiper instance = new Gossiper();

public static Gossiper getInstance() {
return instance;
}

private Gossiper() {

// Register this instance with JMX
try {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
mbs.registerMBean(this, new ObjectName(MBEAN_NAME));
} catch (Exception e) {
throw new RuntimeException(e);
}
}

@Override
public long getEndpointDowntime(String address) throws UnknownHostException {
log(" getEndpointDowntime(String address) throws UnknownHostException");
return c.getLongValue("gossiper/downtime/" + address);
return client.getLongValue("gossiper/downtime/" + address);
}

public int getCurrentGenerationNumber(String address)
throws UnknownHostException {
@Override
public int getCurrentGenerationNumber(String address) throws UnknownHostException {
log(" getCurrentGenerationNumber(String address) throws UnknownHostException");
return c.getIntValue("gossiper/generation_number/" + address);
return client.getIntValue("gossiper/generation_number/" + address);
}

public void unsafeAssassinateEndpoint(String address)
throws UnknownHostException {
@Override
public void unsafeAssassinateEndpoint(String address) throws UnknownHostException {
log(" unsafeAssassinateEndpoint(String address) throws UnknownHostException");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("unsafe", "True");
c.post("gossiper/assassinate/" + address, queryParams);
client.post("gossiper/assassinate/" + address, queryParams);
}

@Override
public void assassinateEndpoint(String address) throws UnknownHostException {
log(" assassinateEndpoint(String address) throws UnknownHostException");
c.post("gossiper/assassinate/" + address, null);
client.post("gossiper/assassinate/" + address, null);
}

}
@ -19,12 +19,13 @@ package org.apache.cassandra.gms;

import java.net.UnknownHostException;

public interface GossiperMBean
{
public interface GossiperMBean {
public long getEndpointDowntime(String address) throws UnknownHostException;

public int getCurrentGenerationNumber(String address) throws UnknownHostException;

public void unsafeAssassinateEndpoint(String address) throws UnknownHostException;

public void assassinateEndpoint(String address) throws UnknownHostException;

}
@ -58,8 +58,8 @@ class HeartBeatState {
version = Integer.MAX_VALUE;
}

@Override
public String toString() {
return String.format("HeartBeat: generation = %d, version = %d",
generation, version);
return String.format("HeartBeat: generation = %d, version = %d", generation, version);
}
}
@ -17,43 +17,30 @@
*/
package org.apache.cassandra.locator;

import java.lang.management.ManagementFactory;
import static java.util.Collections.singletonMap;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.logging.Logger;

import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMBean;

public class EndpointSnitchInfo implements EndpointSnitchInfoMBean {
private static final java.util.logging.Logger logger = java.util.logging.Logger
.getLogger(EndpointSnitchInfo.class.getName());
public class EndpointSnitchInfo extends APIMBean implements EndpointSnitchInfoMBean {
public static final String MBEAN_NAME = "org.apache.cassandra.db:type=EndpointSnitchInfo";
private static final Logger logger = Logger.getLogger(EndpointSnitchInfo.class.getName());

private APIClient c = new APIClient();
public EndpointSnitchInfo(APIClient c) {
super(c);
}

public void log(String str) {
logger.finest(str);
}

private static final EndpointSnitchInfo instance = new EndpointSnitchInfo();

public static EndpointSnitchInfo getInstance() {
return instance;
}

private EndpointSnitchInfo() {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
try {
mbs.registerMBean(this, new ObjectName(
"org.apache.cassandra.db:type=EndpointSnitchInfo"));
} catch (Exception e) {
throw new RuntimeException(e);
}
}

/**
* Provides the Rack name depending on the respective snitch used, given the
* host name/ip
@ -64,12 +51,9 @@ public class EndpointSnitchInfo implements EndpointSnitchInfoMBean {
@Override
public String getRack(String host) throws UnknownHostException {
log("getRack(String host) throws UnknownHostException");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
if (host == null) {
host = InetAddress.getLoopbackAddress().getHostAddress();
}
queryParams.add("host", host);
return c.getStringValue("/snitch/rack", queryParams, 10000);
MultivaluedMap<String, String> queryParams = host != null ? new MultivaluedHashMap<String, String>(
singletonMap("host", InetAddress.getByName(host).getHostAddress())) : null;
return client.getStringValue("/snitch/rack", queryParams, 10000);
}

/**
@ -82,12 +66,9 @@ public class EndpointSnitchInfo implements EndpointSnitchInfoMBean {
@Override
public String getDatacenter(String host) throws UnknownHostException {
log(" getDatacenter(String host) throws UnknownHostException");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
if (host == null) {
host = InetAddress.getLoopbackAddress().getHostAddress();
}
queryParams.add("host", host);
return c.getStringValue("/snitch/datacenter", queryParams, 10000);
MultivaluedMap<String, String> queryParams = host != null ? new MultivaluedHashMap<String, String>(
singletonMap("host", InetAddress.getByName(host).getHostAddress())) : null;
return client.getStringValue("/snitch/datacenter", queryParams, 10000);
}

/**
@ -98,7 +79,16 @@ public class EndpointSnitchInfo implements EndpointSnitchInfoMBean {
@Override
public String getSnitchName() {
log(" getSnitchName()");
return c.getStringValue("/snitch/name");
return client.getStringValue("/snitch/name");
}

@Override
public String getRack() {
return client.getStringValue("/snitch/rack", null, 10000);
}

@Override
public String getDatacenter() {
return client.getStringValue("/snitch/datacenter", null, 10000);
}
}
@ -22,25 +22,40 @@ import java.net.UnknownHostException;
/**
* MBean exposing standard Snitch info
*/
public interface EndpointSnitchInfoMBean
{
public interface EndpointSnitchInfoMBean {
/**
* Provides the Rack name depending on the respective snitch used, given the host name/ip
* Provides the Rack name depending on the respective snitch used, given the
* host name/ip
*
* @param host
* @throws UnknownHostException
*/
public String getRack(String host) throws UnknownHostException;

/**
* Provides the Datacenter name depending on the respective snitch used, given the hostname/ip
* Provides the Datacenter name depending on the respective snitch used,
* given the hostname/ip
*
* @param host
* @throws UnknownHostException
*/
public String getDatacenter(String host) throws UnknownHostException;

/**
* Provides the Rack name depending on the respective snitch used for this
* node
*/
public String getRack();

/**
* Provides the Datacenter name depending on the respective snitch used for
* this node
*/
public String getDatacenter();

/**
* Provides the snitch name of the cluster
*
* @return Snitch name
*/
public String getSnitchName();
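For illustration, a short client sketch (not part of this commit) exercising both the host-specific and the new no-argument variants; "conn" is an MBeanServerConnection as in the earlier sketches, and the peer address is hypothetical.

import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;

public class SnitchInfoExample {
    static void printTopology(MBeanServerConnection conn) throws Exception {
        EndpointSnitchInfoMBean snitch = JMX.newMBeanProxy(conn,
                new ObjectName("org.apache.cassandra.db:type=EndpointSnitchInfo"),
                EndpointSnitchInfoMBean.class);
        System.out.println("local DC: " + snitch.getDatacenter());         // no argument: this node
        System.out.println("peer DC: " + snitch.getDatacenter("10.0.0.2")); // hypothetical peer
    }
}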
@ -25,34 +25,20 @@

package org.apache.cassandra.metrics;

import com.scylladb.jmx.metrics.APIMetrics;
import com.yammer.metrics.core.*;
import javax.management.MalformedObjectNameException;

// TODO: In StorageProxy
public class CASClientRequestMetrics extends ClientRequestMetrics {

public final Histogram contention;
/* Used only for write */
public final Counter conditionNotMet;

public final Counter unfinishedCommit;

public CASClientRequestMetrics(String url, String scope) {
super(url, scope);
contention = APIMetrics.newHistogram(url + "contention",
factory.createMetricName("ContentionHistogram"), true);
conditionNotMet = APIMetrics.newCounter(url + "condition_not_met",
factory.createMetricName("ConditionNotMet"));
unfinishedCommit = APIMetrics.newCounter(url + "unfinished_commit",
factory.createMetricName("UnfinishedCommit"));
public CASClientRequestMetrics(String scope, String url) {
super(scope, url);
}

public void release() {
super.release();
APIMetrics.defaultRegistry().removeMetric(
factory.createMetricName("ContentionHistogram"));
APIMetrics.defaultRegistry().removeMetric(
factory.createMetricName("ConditionNotMet"));
APIMetrics.defaultRegistry().removeMetric(
factory.createMetricName("UnfinishedCommit"));
@Override
public void register(MetricsRegistry registry) throws MalformedObjectNameException {
super.register(registry);
registry.register(() -> registry.histogram(uri + "/contention", true), names("ContentionHistogram"));
registry.register(() -> registry.counter(uri + "/condition_not_met"), names("ConditionNotMet"));
registry.register(() -> registry.counter(uri + "/unfinished_commit"), names("UnfinishedCommit"));
}
}
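For illustration, the registration idiom used above reduced to a stand-alone sketch. The real MetricsRegistry is project-internal; this stand-in only shows the shape of the pattern: each metric is registered as a factory lambda plus the name it should be published under, so a base class can materialize or remove the whole set generically.

import java.util.HashMap;
import java.util.Map;
import java.util.function.Supplier;

public class TinyRegistry {
    // name -> metric factory; the factory runs only when the MBean is actually registered
    private final Map<String, Supplier<Object>> metrics = new HashMap<>();

    public void register(Supplier<Object> factory, String name) {
        metrics.put(name, factory);
    }

    public Object materialize(String name) {
        return metrics.get(name).get();
    }
}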
@@ -23,44 +23,20 @@
 */
package org.apache.cassandra.metrics;

import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMetrics;
import com.scylladb.jmx.metrics.DefaultNameFactory;
import com.scylladb.jmx.metrics.MetricNameFactory;
import com.yammer.metrics.core.Gauge;
import com.yammer.metrics.core.APIMeter;
import javax.management.MalformedObjectNameException;

/**
 * Metrics for {@code ICache}.
 */
public class CacheMetrics {
    /** Cache capacity in bytes */
    public final Gauge<Long> capacity;
    /** Total number of cache hits */
    public final APIMeter hits;
    /** Total number of cache requests */
    public final APIMeter requests;
    /** Cache hit rate */
    public final Gauge<Double> hitRate;
    /** Total size of cache, in bytes */
    public final Gauge<Long> size;
    /** Total number of cache entries */
    public final Gauge<Integer> entries;
public class CacheMetrics implements Metrics {

    private final AtomicLong lastRequests = new AtomicLong(0);
    private final AtomicLong lastHits = new AtomicLong(0);
    private final String type;
    private final String url;

    private APIClient c = new APIClient();

    private String getURL(String url, String value) {
        if (url == null || value == null) {
            return null;
        }
        return "/cache_service/metrics/" + url + value;
    private String compose(String value) {
        return "/cache_service/metrics/" + url + "/" + value;
    }

    /**
     * Create metrics for given cache.
     *
@@ -70,70 +46,21 @@ public class CacheMetrics {
     *            Cache to measure metrics
     */
    public CacheMetrics(String type, final String url) {
        this.type = type;
        this.url = url;
    }

    @Override
    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
        MetricNameFactory factory = new DefaultNameFactory("Cache", type);

        registry.register(() -> registry.gauge(compose("capacity")), factory.createMetricName("Capacity"));
        registry.register(() -> registry.meter(compose("hits_moving_avrage")), factory.createMetricName("Hits"));
        registry.register(() -> registry.meter(compose("requests_moving_avrage")),
                factory.createMetricName("Requests"));

        capacity = APIMetrics.newGauge(factory.createMetricName("Capacity"),
                new Gauge<Long>() {
                    String u = getURL(url, "/capacity");
                    public Long value() {
                        if (u == null) {
                            return 0L;
                        }
                        return c.getLongValue(u);
                    }
                });
        hits = APIMetrics.newMeter(getURL(url, "/hits_moving_avrage"), factory.createMetricName("Hits"), "hits",
                TimeUnit.SECONDS);
        requests = APIMetrics.newMeter(getURL(url, "/requests_moving_avrage"), factory.createMetricName("Requests"),
                "requests", TimeUnit.SECONDS);
        hitRate = APIMetrics.newGauge(factory.createMetricName("HitRate"),
                new Gauge<Double>() {
                    String u = getURL(url, "/hit_rate");
                    @Override
                    public Double value() {
                        if (u == null) {
                            return 0.0;
                        }
                        return c.getDoubleValue(u);
                    }
                });
        size = APIMetrics.newGauge(factory.createMetricName("Size"),
                new Gauge<Long>() {
                    String u = getURL(url, "/size");
                    public Long value() {
                        if (u == null) {
                            return 0L;
                        }
                        return c.getLongValue(u);
                    }
                });
        entries = APIMetrics.newGauge(factory.createMetricName("Entries"),
                new Gauge<Integer>() {
                    String u = getURL(url, "/entries");
                    public Integer value() {
                        if (u == null) {
                            return 0;
                        }
                        return c.getIntValue(u);
                    }
                });
        registry.register(() -> registry.gauge(Double.class, compose("hit_rate")), factory.createMetricName("HitRate"));
        registry.register(() -> registry.gauge(compose("size")), factory.createMetricName("Size"));
        registry.register(() -> registry.gauge(Integer.class, compose("entries")), factory.createMetricName("Entries"));
    }

    // for backward compatibility
    @Deprecated
    public double getRecentHitRate() {
        long r = requests.count();
        long h = hits.count();
        try {
            return ((double) (h - lastHits.get())) / (r - lastRequests.get());
        } finally {
            lastRequests.set(r);
            lastHits.set(h);
        }
    }

}
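One caveat worth noting about the deprecated getRecentHitRate() kept above: it divides by the request delta since the previous call, so under Java double arithmetic it quietly returns NaN (or infinity) when no requests arrived in between. A sketch of a guarded caller (hypothetical, not part of this commit):

    // Hypothetical caller-side guard for the deprecated API.
    double rate = cacheMetrics.getRecentHitRate();
    if (Double.isNaN(rate) || Double.isInfinite(rate)) {
        rate = 0.0; // no requests since the previous call
    }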
@@ -27,52 +27,17 @@

package org.apache.cassandra.metrics;

import java.util.concurrent.TimeUnit;

import com.scylladb.jmx.metrics.APIMetrics;
import com.scylladb.jmx.metrics.DefaultNameFactory;
import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.Counter;
import com.yammer.metrics.core.Meter;
import javax.management.MalformedObjectNameException;

public class ClientRequestMetrics extends LatencyMetrics {
    @Deprecated
    public static final Counter readTimeouts = Metrics
            .newCounter(DefaultNameFactory.createMetricName(
                    "ClientRequestMetrics", "ReadTimeouts", null));
    @Deprecated
    public static final Counter writeTimeouts = Metrics
            .newCounter(DefaultNameFactory.createMetricName(
                    "ClientRequestMetrics", "WriteTimeouts", null));
    @Deprecated
    public static final Counter readUnavailables = Metrics
            .newCounter(DefaultNameFactory.createMetricName(
                    "ClientRequestMetrics", "ReadUnavailables", null));
    @Deprecated
    public static final Counter writeUnavailables = Metrics
            .newCounter(DefaultNameFactory.createMetricName(
                    "ClientRequestMetrics", "WriteUnavailables", null));

    public final Meter timeouts;
    public final Meter unavailables;

    public ClientRequestMetrics(String url, String scope) {
        super(url, "ClientRequest", scope);

        timeouts = APIMetrics.newMeter(url + "/timeouts_rates",
                factory.createMetricName("Timeouts"), "timeouts",
                TimeUnit.SECONDS);
        unavailables = APIMetrics.newMeter(url + "/unavailables_rates",
                factory.createMetricName("Unavailables"), "unavailables",
                TimeUnit.SECONDS);

    public ClientRequestMetrics(String scope, String url) {
        super("ClientRequest", scope, url);
    }

    public void release() {
        super.release();
        APIMetrics.defaultRegistry().removeMetric(
                factory.createMetricName("Timeouts"));
        APIMetrics.defaultRegistry().removeMetric(
                factory.createMetricName("Unavailables"));
    @Override
    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
        super.register(registry);
        registry.register(() -> registry.meter(uri + "/timeouts_rates"), names("Timeouts"));
        registry.register(() -> registry.meter(uri + "/unavailables_rates"), names("Unavailables"));
    }
}
@@ -1,576 +0,0 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */
package org.apache.cassandra.metrics;

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

import org.apache.cassandra.db.ColumnFamilyStore;

import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMetrics;
import com.scylladb.jmx.metrics.MetricNameFactory;
import com.scylladb.jmx.utils.RecentEstimatedHistogram;
import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.*;

/**
 * Metrics for {@link ColumnFamilyStore}.
 */
public class ColumnFamilyMetrics {
    private APIClient c = new APIClient();
    /**
     * Total amount of data stored in the memtable that resides on-heap,
     * including column related overhead and overwritten rows.
     */
    public final Gauge<Long> memtableOnHeapSize;
    /**
     * Total amount of data stored in the memtable that resides off-heap,
     * including column related overhead and overwritten rows.
     */
    public final Gauge<Long> memtableOffHeapSize;
    /**
     * Total amount of live data stored in the memtable, excluding any data
     * structure overhead
     */
    public final Gauge<Long> memtableLiveDataSize;
    /**
     * Total amount of data stored in the memtables (2i and pending flush
     * memtables included) that resides on-heap.
     */
    public final Gauge<Long> allMemtablesOnHeapSize;
    /**
     * Total amount of data stored in the memtables (2i and pending flush
     * memtables included) that resides off-heap.
     */
    public final Gauge<Long> allMemtablesOffHeapSize;
    /**
     * Total amount of live data stored in the memtables (2i and pending flush
     * memtables included) that resides off-heap, excluding any data structure
     * overhead
     */
    public final Gauge<Long> allMemtablesLiveDataSize;
    /** Total number of columns present in the memtable. */
    public final Gauge<Long> memtableColumnsCount;
    /** Number of times flush has resulted in the memtable being switched out. */
    public final Counter memtableSwitchCount;
    /** Current compression ratio for all SSTables */
    public final Gauge<Double> compressionRatio;
    /** Histogram of estimated row size (in bytes). */
    public final Gauge<long[]> estimatedRowSizeHistogram;
    /** Approximate number of keys in table. */
    public final Gauge<Long> estimatedRowCount;
    /** Histogram of estimated number of columns. */
    public final Gauge<long[]> estimatedColumnCountHistogram;
    /** Histogram of the number of sstable data files accessed per read */
    public final ColumnFamilyHistogram sstablesPerReadHistogram;
    /** (Local) read metrics */
    public final LatencyMetrics readLatency;
    /** (Local) range slice metrics */
    public final LatencyMetrics rangeLatency;
    /** (Local) write metrics */
    public final LatencyMetrics writeLatency;
    /** Estimated number of tasks pending for this column family */
    public final Counter pendingFlushes;
    /** Estimate of number of pending compactions for this CF */
    public final Gauge<Integer> pendingCompactions;
    /** Number of SSTables on disk for this CF */
    public final Gauge<Integer> liveSSTableCount;
    /** Disk space used by SSTables belonging to this CF */
    public final Counter liveDiskSpaceUsed;
    /**
     * Total disk space used by SSTables belonging to this CF, including
     * obsolete ones waiting to be GC'd
     */
    public final Counter totalDiskSpaceUsed;
    /** Size of the smallest compacted row */
    public final Gauge<Long> minRowSize;
    /** Size of the largest compacted row */
    public final Gauge<Long> maxRowSize;
    /** Size of the average compacted row */
    public final Gauge<Long> meanRowSize;
    /** Number of false positives in bloom filter */
    public final Gauge<Long> bloomFilterFalsePositives;
    /** Number of false positives in bloom filter from last read */
    public final Gauge<Long> recentBloomFilterFalsePositives;
    /** False positive ratio of bloom filter */
    public final Gauge<Double> bloomFilterFalseRatio;
    /** False positive ratio of bloom filter from last read */
    public final Gauge<Double> recentBloomFilterFalseRatio;
    /** Disk space used by bloom filter */
    public final Gauge<Long> bloomFilterDiskSpaceUsed;
    /** Off heap memory used by bloom filter */
    public final Gauge<Long> bloomFilterOffHeapMemoryUsed;
    /** Off heap memory used by index summary */
    public final Gauge<Long> indexSummaryOffHeapMemoryUsed;
    /** Off heap memory used by compression meta data */
    public final Gauge<Long> compressionMetadataOffHeapMemoryUsed;
    /** Key cache hit rate for this CF */
    public final Gauge<Double> keyCacheHitRate;
    /** Tombstones scanned in queries on this CF */
    public final ColumnFamilyHistogram tombstoneScannedHistogram;
    /** Live cells scanned in queries on this CF */
    public final ColumnFamilyHistogram liveScannedHistogram;
    /** Column update time delta on this CF */
    public final ColumnFamilyHistogram colUpdateTimeDeltaHistogram;
    /** Disk space used by snapshot files which */
    public final Gauge<Long> trueSnapshotsSize;
    /** Row cache hits, but result out of range */
    public final Counter rowCacheHitOutOfRange;
    /** Number of row cache hits */
    public final Counter rowCacheHit;
    /** Number of row cache misses */
    public final Counter rowCacheMiss;
    /** CAS Prepare metrics */
    public final LatencyMetrics casPrepare;
    /** CAS Propose metrics */
    public final LatencyMetrics casPropose;
    /** CAS Commit metrics */
    public final LatencyMetrics casCommit;

    public final Timer coordinatorReadLatency;
    public final Timer coordinatorScanLatency;

    /** Time spent waiting for free memtable space, either on- or off-heap */
    public final Timer waitingOnFreeMemtableSpace;

    private final MetricNameFactory factory;
    private static final MetricNameFactory globalNameFactory = new AllColumnFamilyMetricNameFactory();

    public final Counter speculativeRetries;

    // for backward compatibility
    @Deprecated
    public final EstimatedHistogramWrapper sstablesPerRead;
    // it should not be called directly
    @Deprecated
    protected final RecentEstimatedHistogram recentSSTablesPerRead = new RecentEstimatedHistogram(35);
    private String cfName;

    public final static LatencyMetrics globalReadLatency = new LatencyMetrics(
            "/column_family/metrics/read_latency", globalNameFactory, "Read");
    public final static LatencyMetrics globalWriteLatency = new LatencyMetrics(
            "/column_family/metrics/write_latency", globalNameFactory, "Write");
    public final static LatencyMetrics globalRangeLatency = new LatencyMetrics(
            "/column_family/metrics/range_latency", globalNameFactory, "Range");

    /**
     * stores metrics that will be rolled into a single global metric
     */
    public final static ConcurrentMap<String, Set<Metric>> allColumnFamilyMetrics = Maps
            .newConcurrentMap();

    /**
     * Stores all metric names created that can be used when unregistering
     */
    public final static Set<String> all = Sets.newHashSet();

    /**
     * Creates metrics for given {@link ColumnFamilyStore}.
     *
     * @param cfs
     *            ColumnFamilyStore to measure metrics
     */
    public ColumnFamilyMetrics(final ColumnFamilyStore cfs) {
        factory = new ColumnFamilyMetricNameFactory(cfs);
        cfName = cfs.getCFName();
        memtableColumnsCount = createColumnFamilyGauge(
                "/column_family/metrics/memtable_columns_count",
                "MemtableColumnsCount");
        memtableOnHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/memtable_on_heap_size",
                "MemtableOnHeapSize");
        memtableOffHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/memtable_off_heap_size",
                "MemtableOffHeapSize");
        memtableLiveDataSize = createColumnFamilyGauge(
                "/column_family/metrics/memtable_live_data_size",
                "MemtableLiveDataSize");
        allMemtablesOnHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/all_memtables_on_heap_size",
                "AllMemtablesHeapSize");
        allMemtablesOffHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/all_memtables_off_heap_size",
                "AllMemtablesOffHeapSize");
        allMemtablesLiveDataSize = createColumnFamilyGauge(
                "/column_family/metrics/all_memtables_live_data_size",
                "AllMemtablesLiveDataSize");
        memtableSwitchCount = createColumnFamilyCounter(
                "/column_family/metrics/memtable_switch_count",
                "MemtableSwitchCount");
        estimatedRowSizeHistogram = Metrics.newGauge(
                factory.createMetricName("EstimatedRowSizeHistogram"),
                new Gauge<long[]>() {
                    public long[] value() {
                        return c.getEstimatedHistogramAsLongArrValue("/column_family/metrics/estimated_row_size_histogram/"
                                + cfName);
                    }
                });
        estimatedRowCount = Metrics.newGauge(
                factory.createMetricName("EstimatedRowCount"),
                new Gauge<Long>() {
                    public Long value() {
                        return c.getLongValue("/column_family/metrics/estimated_row_count/"
                                + cfName);
                    }
                });

        estimatedColumnCountHistogram = Metrics.newGauge(
                factory.createMetricName("EstimatedColumnCountHistogram"),
                new Gauge<long[]>() {
                    public long[] value() {
                        return c.getEstimatedHistogramAsLongArrValue("/column_family/metrics/estimated_column_count_histogram/"
                                + cfName);
                    }
                });
        sstablesPerReadHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/sstables_per_read_histogram",
                "SSTablesPerReadHistogram");
        compressionRatio = createColumnFamilyGauge("CompressionRatio",
                new Gauge<Double>() {
                    public Double value() {
                        return c.getDoubleValue("/column_family/metrics/compression_ratio/"
                                + cfName);
                    }
                }, new Gauge<Double>() // global gauge
                {
                    public Double value() {
                        return c.getDoubleValue("/column_family/metrics/compression_ratio/");
                    }
                });
        readLatency = new LatencyMetrics("/column_family/metrics/read_latency",
                cfName, factory, "Read");
        writeLatency = new LatencyMetrics(
                "/column_family/metrics/write_latency", cfName, factory,
                "Write");
        rangeLatency = new LatencyMetrics(
                "/column_family/metrics/range_latency", cfName, factory,
                "Range");
        pendingFlushes = createColumnFamilyCounter(
                "/column_family/metrics/pending_flushes", "PendingFlushes");
        pendingCompactions = createColumnFamilyGaugeInt(
                "/column_family/metrics/pending_compactions",
                "PendingCompactions");
        liveSSTableCount = createColumnFamilyGaugeInt(
                "/column_family/metrics/live_ss_table_count",
                "LiveSSTableCount");
        liveDiskSpaceUsed = createColumnFamilyCounter(
                "/column_family/metrics/live_disk_space_used",
                "LiveDiskSpaceUsed");
        totalDiskSpaceUsed = createColumnFamilyCounter(
                "/column_family/metrics/total_disk_space_used",
                "TotalDiskSpaceUsed");
        minRowSize = createColumnFamilyGauge(
                "/column_family/metrics/min_row_size", "MinRowSize");
        maxRowSize = createColumnFamilyGauge(
                "/column_family/metrics/max_row_size", "MaxRowSize");
        meanRowSize = createColumnFamilyGauge(
                "/column_family/metrics/mean_row_size", "MeanRowSize");
        bloomFilterFalsePositives = createColumnFamilyGauge(
                "/column_family/metrics/bloom_filter_false_positives",
                "BloomFilterFalsePositives");
        recentBloomFilterFalsePositives = createColumnFamilyGauge(
                "/column_family/metrics/recent_bloom_filter_false_positives",
                "RecentBloomFilterFalsePositives");
        bloomFilterFalseRatio = createColumnFamilyGaugeDouble(
                "/column_family/metrics/bloom_filter_false_ratio",
                "BloomFilterFalseRatio");
        recentBloomFilterFalseRatio = createColumnFamilyGaugeDouble(
                "/column_family/metrics/recent_bloom_filter_false_ratio",
                "RecentBloomFilterFalseRatio");
        bloomFilterDiskSpaceUsed = createColumnFamilyGauge(
                "/column_family/metrics/bloom_filter_disk_space_used",
                "BloomFilterDiskSpaceUsed");
        bloomFilterOffHeapMemoryUsed = createColumnFamilyGauge(
                "/column_family/metrics/bloom_filter_off_heap_memory_used",
                "BloomFilterOffHeapMemoryUsed");
        indexSummaryOffHeapMemoryUsed = createColumnFamilyGauge(
                "/column_family/metrics/index_summary_off_heap_memory_used",
                "IndexSummaryOffHeapMemoryUsed");
        compressionMetadataOffHeapMemoryUsed = createColumnFamilyGauge(
                "/column_family/metrics/compression_metadata_off_heap_memory_used",
                "CompressionMetadataOffHeapMemoryUsed");
        speculativeRetries = createColumnFamilyCounter(
                "/column_family/metrics/speculative_retries",
                "SpeculativeRetries");
        keyCacheHitRate = Metrics.newGauge(
                factory.createMetricName("KeyCacheHitRate"),
                new Gauge<Double>() {
                    @Override
                    public Double value() {
                        return c.getDoubleValue("/column_family/metrics/key_cache_hit_rate/"
                                + cfName);
                    }
                });
        tombstoneScannedHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/tombstone_scanned_histogram",
                "TombstoneScannedHistogram");
        liveScannedHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/live_scanned_histogram",
                "LiveScannedHistogram");
        colUpdateTimeDeltaHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/col_update_time_delta_histogram",
                "ColUpdateTimeDeltaHistogram");
        coordinatorReadLatency = APIMetrics.newTimer("/column_family/metrics/coordinator/read/" + cfName,
                factory.createMetricName("CoordinatorReadLatency"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
        coordinatorScanLatency = APIMetrics.newTimer("/column_family/metrics/coordinator/scan/" + cfName,
                factory.createMetricName("CoordinatorScanLatency"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
        waitingOnFreeMemtableSpace = APIMetrics.newTimer("/column_family/metrics/waiting_on_free_memtable/" + cfName,
                factory.createMetricName("WaitingOnFreeMemtableSpace"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);

        trueSnapshotsSize = createColumnFamilyGauge(
                "/column_family/metrics/snapshots_size", "SnapshotsSize");
        rowCacheHitOutOfRange = createColumnFamilyCounter(
                "/column_family/metrics/row_cache_hit_out_of_range",
                "RowCacheHitOutOfRange");
        rowCacheHit = createColumnFamilyCounter(
                "/column_family/metrics/row_cache_hit", "RowCacheHit");
        rowCacheMiss = createColumnFamilyCounter(
                "/column_family/metrics/row_cache_miss", "RowCacheMiss");

        casPrepare = new LatencyMetrics("/column_family/metrics/cas_prepare/"
                + cfName, factory, "CasPrepare");
        casPropose = new LatencyMetrics("/column_family/metrics/cas_propose/"
                + cfName, factory, "CasPropose");
        casCommit = new LatencyMetrics("/column_family/metrics/cas_commit/"
                + cfName, factory, "CasCommit");
        sstablesPerRead = new EstimatedHistogramWrapper("/column_family/metrics/sstables_per_read_histogram/" + cfName);
    }

    /**
     * Release all associated metrics.
     */
    public void release() {
        for (String name : all) {
            allColumnFamilyMetrics.get(name).remove(
                    Metrics.defaultRegistry().allMetrics()
                            .get(factory.createMetricName(name)));
            Metrics.defaultRegistry().removeMetric(
                    factory.createMetricName(name));
        }
        readLatency.release();
        writeLatency.release();
        rangeLatency.release();
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("EstimatedRowSizeHistogram"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("EstimatedColumnCountHistogram"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("KeyCacheHitRate"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("CoordinatorReadLatency"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("CoordinatorScanLatency"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("WaitingOnFreeMemtableSpace"));
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected Gauge<Double> createColumnFamilyGaugeDouble(final String url,
            final String name) {
        Gauge<Double> gauge = new Gauge<Double>() {
            public Double value() {
                return c.getDoubleValue(url + "/" + cfName);
            }
        };
        return createColumnFamilyGauge(url, name, gauge);
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected Gauge<Long> createColumnFamilyGauge(final String url, final String name) {
        Gauge<Long> gauge = new Gauge<Long>() {
            public Long value() {
                return (long) c.getDoubleValue(url + "/" + cfName);
            }
        };
        return createColumnFamilyGauge(url, name, gauge);
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected Gauge<Integer> createColumnFamilyGaugeInt(final String url,
            final String name) {
        Gauge<Integer> gauge = new Gauge<Integer>() {
            public Integer value() {
                return (int) c.getDoubleValue(url + "/" + cfName);
            }
        };
        return createColumnFamilyGauge(url, name, gauge);
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected <T extends Number> Gauge<T> createColumnFamilyGauge(final String url,
            final String name, Gauge<T> gauge) {
        return createColumnFamilyGauge(name, gauge, new Gauge<Long>() {
            public Long value() {
                // This is an optimization: call once for all column families
                // instead of iterating over all of them
                return c.getLongValue(url);
            }
        });
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge is defined as the globalGauge parameter
     */
    protected <G, T> Gauge<T> createColumnFamilyGauge(String name,
            Gauge<T> gauge, Gauge<G> globalGauge) {
        Gauge<T> cfGauge = APIMetrics.newGauge(factory.createMetricName(name),
                gauge);
        if (register(name, cfGauge)) {
            Metrics.newGauge(globalNameFactory.createMetricName(name),
                    globalGauge);
        }
        return cfGauge;
    }

    /**
     * Creates a counter that will also have a global counter that's the sum of
     * all counters across different column families
     */
    protected Counter createColumnFamilyCounter(final String url, final String name) {
        Counter cfCounter = APIMetrics.newCounter(url + "/" + cfName,
                factory.createMetricName(name));
        if (register(name, cfCounter)) {
            Metrics.newGauge(globalNameFactory.createMetricName(name),
                    new Gauge<Long>() {
                        public Long value() {
                            // This is an optimization: call once for all
                            // column families instead of iterating over all
                            // of them
                            return c.getLongValue(url);
                        }
                    });
        }
        return cfCounter;
    }

    /**
     * Create a histogram-like interface that will register both a CF, keyspace
     * and global level histogram and forward any updates to both
     */
    protected ColumnFamilyHistogram createColumnFamilyHistogram(String url,
            String name) {
        Histogram cfHistogram = APIMetrics.newHistogram(url + "/" + cfName,
                factory.createMetricName(name), true);
        register(name, cfHistogram);

        // TBD add keyspace and global histograms
        // keyspaceHistogram,
        // Metrics.newHistogram(globalNameFactory.createMetricName(name),
        // true));
        return new ColumnFamilyHistogram(cfHistogram, null, null);
    }

    /**
     * Registers a metric to be removed when unloading CF.
     *
     * @return true if first time metric with that name has been registered
     */
    private boolean register(String name, Metric metric) {
        boolean ret = allColumnFamilyMetrics.putIfAbsent(name,
                new HashSet<Metric>()) == null;
        allColumnFamilyMetrics.get(name).add(metric);
        all.add(name);
        return ret;
    }

    public long[] getRecentSSTablesPerRead() {
        return recentSSTablesPerRead
                .getBuckets(sstablesPerRead.getBuckets(false));
    }

    public class ColumnFamilyHistogram {
        public final Histogram[] all;
        public final Histogram cf;

        private ColumnFamilyHistogram(Histogram cf, Histogram keyspace,
                Histogram global) {
            this.cf = cf;
            this.all = new Histogram[] { cf, keyspace, global };
        }
    }

    class ColumnFamilyMetricNameFactory implements MetricNameFactory {
        private final String keyspaceName;
        private final String columnFamilyName;
        private final boolean isIndex;

        ColumnFamilyMetricNameFactory(ColumnFamilyStore cfs) {
            this.keyspaceName = cfs.getKeyspace();
            this.columnFamilyName = cfs.getColumnFamilyName();
            isIndex = cfs.isIndex();
        }

        public MetricName createMetricName(String metricName) {
            String groupName = ColumnFamilyMetrics.class.getPackage().getName();
            String type = isIndex ? "IndexColumnFamily" : "ColumnFamily";

            StringBuilder mbeanName = new StringBuilder();
            mbeanName.append(groupName).append(":");
            mbeanName.append("type=").append(type);
            mbeanName.append(",keyspace=").append(keyspaceName);
            mbeanName.append(",scope=").append(columnFamilyName);
            mbeanName.append(",name=").append(metricName);
            return new MetricName(groupName, type, metricName, keyspaceName
                    + "." + columnFamilyName, mbeanName.toString());
        }
    }

    static class AllColumnFamilyMetricNameFactory implements MetricNameFactory {
        public MetricName createMetricName(String metricName) {
            String groupName = ColumnFamilyMetrics.class.getPackage().getName();
            StringBuilder mbeanName = new StringBuilder();
            mbeanName.append(groupName).append(":");
            mbeanName.append("type=ColumnFamily");
            mbeanName.append(",name=").append(metricName);
            return new MetricName(groupName, "ColumnFamily", metricName, "all",
                    mbeanName.toString());
        }
    }
}
@@ -23,65 +23,38 @@
 */
package org.apache.cassandra.metrics;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMetrics;
import com.scylladb.jmx.metrics.DefaultNameFactory;
import com.scylladb.jmx.metrics.MetricNameFactory;
import com.yammer.metrics.core.Gauge;
import com.yammer.metrics.core.Timer;

import java.util.concurrent.TimeUnit;
import javax.management.MalformedObjectNameException;

/**
 * Metrics for commit log
 */
public class CommitLogMetrics {
    public static final MetricNameFactory factory = new DefaultNameFactory(
            "CommitLog");
    private APIClient c = new APIClient();

    /** Number of completed tasks */
    public final Gauge<Long> completedTasks;
    /** Number of pending tasks */
    public final Gauge<Long> pendingTasks;
    /** Current size used by all the commit log segments */
    public final Gauge<Long> totalCommitLogSize;
    /**
     * Time spent waiting for a CLS to be allocated - under normal conditions
     * this should be zero
     */
    public final Timer waitingOnSegmentAllocation;
    /**
     * The time spent waiting on CL sync; for Periodic this only occurs when
     * the sync is lagging its sync interval
     */
    public final Timer waitingOnCommit;

public class CommitLogMetrics implements Metrics {
    public CommitLogMetrics() {
        completedTasks = APIMetrics.newGauge(
                factory.createMetricName("CompletedTasks"), new Gauge<Long>() {
                    public Long value() {
                        return c.getLongValue("/commitlog/metrics/completed_tasks");
                    }
                });
        pendingTasks = APIMetrics.newGauge(
                factory.createMetricName("PendingTasks"), new Gauge<Long>() {
                    public Long value() {
                        return c.getLongValue("/commitlog/metrics/pending_tasks");
                    }
                });
        totalCommitLogSize = APIMetrics.newGauge(
                factory.createMetricName("TotalCommitLogSize"),
                new Gauge<Long>() {
                    public Long value() {
                        return c.getLongValue("/commitlog/metrics/total_commit_log_size");
                    }
                });
        waitingOnSegmentAllocation = APIMetrics.newTimer("/commit_log/metrics/waiting_on_segment_allocation",
                factory.createMetricName("WaitingOnSegmentAllocation"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
        waitingOnCommit = APIMetrics.newTimer("/commit_log/metrics/waiting_on_commit",
                factory.createMetricName("WaitingOnCommit"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
    }

    @Override
    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
        MetricNameFactory factory = new DefaultNameFactory("CommitLog");
        /** Number of completed tasks */
        registry.register(() -> registry.gauge("/commitlog/metrics/completed_tasks"),
                factory.createMetricName("CompletedTasks"));
        /** Number of pending tasks */
        registry.register(() -> registry.gauge("/commitlog/metrics/pending_tasks"),
                factory.createMetricName("PendingTasks"));
        /** Current size used by all the commit log segments */
        registry.register(() -> registry.gauge("/commitlog/metrics/total_commit_log_size"),
                factory.createMetricName("TotalCommitLogSize"));
        /**
         * Time spent waiting for a CLS to be allocated - under normal
         * conditions this should be zero
         */
        registry.register(() -> registry.timer("/commitlog/metrics/waiting_on_segment_allocation"),
                factory.createMetricName("WaitingOnSegmentAllocation"));
        /**
         * The time spent waiting on CL sync; for Periodic this only occurs
         * when the sync is lagging its sync interval
         */
        registry.register(() -> registry.timer("/commitlog/metrics/waiting_on_commit"),
                factory.createMetricName("WaitingOnCommit"));
    }
}
@@ -23,52 +23,30 @@
 */
package org.apache.cassandra.metrics;

import java.util.concurrent.TimeUnit;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMetrics;
import com.scylladb.jmx.metrics.DefaultNameFactory;
import com.scylladb.jmx.metrics.MetricNameFactory;
import com.yammer.metrics.core.Counter;
import com.yammer.metrics.core.Gauge;
import com.yammer.metrics.core.APIMeter;
import javax.management.MalformedObjectNameException;

/**
 * Metrics for compaction.
 */
public class CompactionMetrics {
    public static final MetricNameFactory factory = new DefaultNameFactory(
            "Compaction");
    private APIClient c = new APIClient();
    /** Estimated number of compactions remaining to perform */
    public final Gauge<Integer> pendingTasks;
    /** Number of completed compactions since server [re]start */
    public final Gauge<Long> completedTasks;
    /** Total number of compactions since server [re]start */
    public final APIMeter totalCompactionsCompleted;
    /** Total number of bytes compacted since server [re]start */
    public final Counter bytesCompacted;

public class CompactionMetrics implements Metrics {
    public CompactionMetrics() {
    }

        pendingTasks = APIMetrics.newGauge(
                factory.createMetricName("PendingTasks"), new Gauge<Integer>() {
                    public Integer value() {
                        return c.getIntValue("/compaction_manager/metrics/pending_tasks");
                    }
                });
        completedTasks = APIMetrics.newGauge(
                factory.createMetricName("CompletedTasks"), new Gauge<Long>() {
                    public Long value() {
                        return c.getLongValue("/compaction_manager/metrics/completed_tasks");
                    }
                });
        totalCompactionsCompleted = APIMetrics.newMeter(
                "/compaction_manager/metrics/total_compactions_completed",
                factory.createMetricName("TotalCompactionsCompleted"),
                "compaction completed", TimeUnit.SECONDS);
        bytesCompacted = APIMetrics.newCounter(
                "/compaction_manager/metrics/bytes_compacted",
    @Override
    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
        MetricNameFactory factory = new DefaultNameFactory("Compaction");
        /** Estimated number of compactions remaining to perform */
        registry.register(() -> registry.gauge(Integer.class, "/compaction_manager/metrics/pending_tasks"),
                factory.createMetricName("PendingTasks"));
        /** Number of completed compactions since server [re]start */
        registry.register(() -> registry.gauge("/compaction_manager/metrics/completed_tasks"),
                factory.createMetricName("CompletedTasks"));
        /** Total number of compactions since server [re]start */
        registry.register(() -> registry.meter("/compaction_manager/metrics/total_compactions_completed"),
                factory.createMetricName("TotalCompactionsCompleted"));
        /** Total number of bytes compacted since server [re]start */
        registry.register(() -> registry.meter("/compaction_manager/metrics/bytes_compacted"),
                factory.createMetricName("BytesCompacted"));
    }

}
@@ -15,15 +15,10 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.cassandra.metrics;

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */
package com.scylladb.jmx.metrics;

import com.yammer.metrics.core.MetricName;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

/**
 * MetricNameFactory that generates default MetricName of metrics.
@@ -43,19 +38,14 @@ public class DefaultNameFactory implements MetricNameFactory {
        this.scope = scope;
    }

    public MetricName createMetricName(String metricName) {
    @Override
    public ObjectName createMetricName(String metricName) throws MalformedObjectNameException {
        return createMetricName(type, metricName, scope);
    }

    public static MetricName createMetricName(String type, String metricName,
            String scope) {
        return new MetricName(GROUP_NAME, type, metricName, scope,
                createDefaultMBeanName(type, metricName, scope));
    }

    protected static String createDefaultMBeanName(String type, String name,
            String scope) {
        final StringBuilder nameBuilder = new StringBuilder();
    public static ObjectName createMetricName(String type, String name, String scope)
            throws MalformedObjectNameException {
        StringBuilder nameBuilder = new StringBuilder();
        nameBuilder.append(GROUP_NAME);
        nameBuilder.append(":type=");
        nameBuilder.append(type);
@@ -67,6 +57,6 @@ public class DefaultNameFactory implements MetricNameFactory {
            nameBuilder.append(",name=");
            nameBuilder.append(name);
        }
        return nameBuilder.toString();
        return new ObjectName(nameBuilder.toString());
    }
}
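The net effect of the rework above is that DefaultNameFactory now yields a ready-to-use ObjectName instead of a Yammer MetricName. For illustration (the resulting name string assumes the GROUP_NAME constant declared earlier in this class and the scope handling elided between the hunks):

    // Hypothetical usage of the reworked factory;
    // createMetricName may throw MalformedObjectNameException.
    MetricNameFactory factory = new DefaultNameFactory("Cache", "KeyCache");
    ObjectName name = factory.createMetricName("Hits");
    // name is built as "<GROUP_NAME>:type=Cache,scope=KeyCache,name=Hits"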
@@ -24,42 +24,27 @@

package org.apache.cassandra.metrics;

import java.util.concurrent.TimeUnit;
import javax.management.MalformedObjectNameException;

import org.apache.cassandra.net.MessagingService;

import com.scylladb.jmx.metrics.APIMetrics;
import com.scylladb.jmx.metrics.DefaultNameFactory;
import com.scylladb.jmx.metrics.MetricNameFactory;
import com.yammer.metrics.core.APIMeter;

/**
 * Metrics for dropped messages by verb.
 */
public class DroppedMessageMetrics {
    /** Number of dropped messages */
    public final APIMeter dropped;

    private long lastDropped = 0;
public class DroppedMessageMetrics implements Metrics {
    private final MessagingService.Verb verb;

    public DroppedMessageMetrics(MessagingService.Verb verb) {
        MetricNameFactory factory = new DefaultNameFactory("DroppedMessage",
                verb.toString());
        dropped = (APIMeter) APIMetrics.newMeter(null,
                factory.createMetricName("Dropped"), "dropped",
                TimeUnit.SECONDS);
        dropped.stop();
        this.verb = verb;
    }

    @Deprecated
    public int getRecentlyDropped() {
        long currentDropped = dropped.count();
        long recentlyDropped = currentDropped - lastDropped;
        lastDropped = currentDropped;
        return (int) recentlyDropped;
    }
    @Override
    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
        MetricNameFactory factory = new DefaultNameFactory("DroppedMessage", verb.toString());
        /** Number of dropped messages */
        // TODO: this API url does not exist. Add meter calls for verbs.
        registry.register(() -> registry.meter("/messaging_service/messages/dropped/" + verb),
                factory.createMetricName("Dropped"));

    public APIMeter getMeter() {
        return dropped;
    }
}
@@ -1,55 +0,0 @@
package org.apache.cassandra.metrics;
/*
 * Copyright (C) 2015 ScyllaDB
 */

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

import javax.ws.rs.core.MultivaluedMap;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.utils.EstimatedHistogram;

public class EstimatedHistogramWrapper {
    private APIClient c = new APIClient();
    private String url;
    private MultivaluedMap<String, String> queryParams;
    private static final int DURATION = 50;
    private int duration;

    public EstimatedHistogramWrapper(String url, MultivaluedMap<String, String> queryParams, int duration) {
        this.url = url;
        this.queryParams = queryParams;
        this.duration = duration;
    }

    public EstimatedHistogramWrapper(String url) {
        this(url, null, DURATION);
    }

    public EstimatedHistogramWrapper(String url, int duration) {
        this(url, null, duration);
    }

    public EstimatedHistogram get() {
        return c.getEstimatedHistogram(url, queryParams, duration);
    }

    public long[] getBuckets(boolean reset) {
        return get().getBuckets(reset);
    }
}
@@ -23,41 +23,19 @@
 */
package org.apache.cassandra.metrics;

import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.Arrays;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.scylladb.jmx.metrics.APIMetrics;
import com.scylladb.jmx.metrics.DefaultNameFactory;
import com.scylladb.jmx.metrics.MetricNameFactory;
import com.scylladb.jmx.utils.RecentEstimatedHistogram;
import com.yammer.metrics.core.Counter;
import com.yammer.metrics.core.Timer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

/**
 * Metrics about latencies
 */
public class LatencyMetrics {
    /** Latency */
    public final Timer latency;
    /** Total latency in micro sec */
    public final Counter totalLatency;

    /** parent metrics to replicate any updates to **/
    private List<LatencyMetrics> parents = Lists.newArrayList();

    protected final MetricNameFactory factory;
public class LatencyMetrics implements Metrics {
    protected final MetricNameFactory[] factories;
    protected final String namePrefix;

    @Deprecated public EstimatedHistogramWrapper totalLatencyHistogram;
    /*
     * It should not be called directly, use the getRecentLatencyHistogram
     */
    @Deprecated protected final RecentEstimatedHistogram recentLatencyHistogram = new RecentEstimatedHistogram();

    protected long lastLatency;
    protected long lastOpCount;
    protected final String uri;
    protected final String param;

    /**
     * Create LatencyMetrics with given group, type, and scope. Name prefix for
@@ -68,8 +46,8 @@ public class LatencyMetrics {
     * @param scope
     *            Scope
     */
    public LatencyMetrics(String url, String type, String scope) {
        this(url, type, "", scope);
    public LatencyMetrics(String type, String scope, String uri) {
        this(type, "", scope, uri, null);
    }

    /**
@@ -83,83 +61,35 @@ public class LatencyMetrics {
     * @param scope
     *            Scope of metrics
     */
    public LatencyMetrics(String url, String type, String namePrefix,
            String scope) {
        this(url, new DefaultNameFactory(type, scope), namePrefix);
    public LatencyMetrics(String type, String namePrefix, String scope, String uri, String param) {
        this(namePrefix, uri, param, new DefaultNameFactory(type, scope));
    }

    /**
     * Create LatencyMetrics with given group, type, prefix to append to each
     * metric name, and scope.
     *
     * @param factory
     *            MetricName factory to use
     * @param namePrefix
     *            Prefix to append to each metric name
     */
    public LatencyMetrics(String url, MetricNameFactory factory,
            String namePrefix) {
        this(url, null, factory, namePrefix);
    public LatencyMetrics(String namePrefix, String uri, MetricNameFactory... factories) {
        this(namePrefix, uri, null, factories);
    }

    public LatencyMetrics(String url, String paramName,
            MetricNameFactory factory, String namePrefix) {
        this.factory = factory;
    public LatencyMetrics(String namePrefix, String uri, String param, MetricNameFactory... factories) {
        this.factories = factories;
        this.namePrefix = namePrefix;

        paramName = (paramName == null) ? "" : "/" + paramName;
        latency = APIMetrics.newTimer(url + "/moving_average_histogram" + paramName,
                factory.createMetricName(namePrefix + "Latency"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
        totalLatency = APIMetrics.newCounter(url + paramName,
                factory.createMetricName(namePrefix + "TotalLatency"));
        totalLatencyHistogram = new EstimatedHistogramWrapper(url + "/estimated_histogram" + paramName);
        this.uri = uri;
        this.param = param;
    }

    /**
     * Create LatencyMetrics with given group, type, prefix to append to each
     * metric name, and scope. Any updates to this will also run on parent
     *
     * @param factory
     *            MetricName factory to use
     * @param namePrefix
     *            Prefix to append to each metric name
     * @param parents
     *            any amount of parents to replicate updates to
     */
    public LatencyMetrics(String url, MetricNameFactory factory,
            String namePrefix, LatencyMetrics... parents) {
        this(url, factory, namePrefix);
        this.parents.addAll(ImmutableList.copyOf(parents));
    protected ObjectName[] names(String suffix) throws MalformedObjectNameException {
        return Arrays.stream(factories).map(f -> {
            try {
                return f.createMetricName(namePrefix + suffix);
            } catch (MalformedObjectNameException e) {
                throw new RuntimeException(e); // dung...
            }
        }).toArray(size -> new ObjectName[size]);
    }

    /** takes nanoseconds **/
    public void addNano(long nanos) {
        // the object is only updated from the API
    }

    public void release() {
        APIMetrics.defaultRegistry()
                .removeMetric(factory.createMetricName(namePrefix + "Latency"));
        APIMetrics.defaultRegistry().removeMetric(
                factory.createMetricName(namePrefix + "TotalLatency"));
    }

    @Deprecated
    public synchronized double getRecentLatency() {
        long ops = latency.count();
        long n = totalLatency.count();
        if (ops == lastOpCount)
            return 0;
        try {
            return ((double) n - lastLatency) / (ops - lastOpCount);
        } finally {
            lastLatency = n;
            lastOpCount = ops;
        }
    }

    public long[] getRecentLatencyHistogram() {
        return recentLatencyHistogram.getBuckets(totalLatencyHistogram.getBuckets(false));
    @Override
    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
        String paramName = (param == null) ? "" : "/" + param;
        registry.register(() -> registry.timer(uri + "/moving_average_histogram" + paramName), names("Latency"));
        registry.register(() -> registry.counter(uri + paramName), names("TotalLatency"));
    }
}
@@ -15,23 +15,26 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Copyright 2015 Cloudius Systems
package org.apache.cassandra.metrics;

import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

/**
 * Simplified version of the {@link Metrics} naming factory paradigm, simply
 * generating {@link ObjectName} and nothing more.
 *
 * @author calle
 *
 * Modified by Cloudius Systems
 */

package com.scylladb.jmx.metrics;

import com.yammer.metrics.core.MetricName;

public interface MetricNameFactory
{
public interface MetricNameFactory {
    /**
     * Create a qualified name from given metric name.
     *
     * @param metricName part of qualified name.
     * @param metricName
     *            part of qualified name.
     * @return new String with given metric name.
     * @throws MalformedObjectNameException
     */
    MetricName createMetricName(String metricName);
    ObjectName createMetricName(String metricName) throws MalformedObjectNameException;
}
38
src/main/java/org/apache/cassandra/metrics/Metrics.java
Normal file
@@ -0,0 +1,38 @@
package org.apache.cassandra.metrics;

import java.util.function.Function;

import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;

/**
 * Action interface for any type that encapsulates a number of metrics.
 *
 * @author calle
 *
 */
public interface Metrics {
    /**
     * Implementors should issue
     * {@link MetricsRegistry#register(java.util.function.Supplier, javax.management.ObjectName...)}
     * for every {@link Metrics} they generate. This method is called in both
     * the bind (create) and unbind (remove) phases, so an appropriate use of
     * {@link Function} binding is advisable.
     *
     * @param registry
     * @throws MalformedObjectNameException
     */
    void register(MetricsRegistry registry) throws MalformedObjectNameException;

    /**
     * Same as {@link #register(MetricsRegistry)}, but for {@link Metric}s that
     * are "global" (i.e. static, not bound to an individual bean instance).
     * This method is called whenever the first encapsulating MBean is
     * added/removed from a {@link MBeanServer}.
     *
     * @param registry
     * @throws MalformedObjectNameException
     */
    default void registerGlobals(MetricsRegistry registry) throws MalformedObjectNameException {
    }
}
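To make the bind/unbind contract concrete, here is a minimal sketch of an implementor that carries both per-instance and "global" metrics (names hypothetical; only the interface above is assumed):

    // Hypothetical implementor, not part of this commit.
    public class ExampleTableMetrics implements Metrics {
        @Override
        public void register(MetricsRegistry registry) throws MalformedObjectNameException {
            // per-instance metrics: invoked on every bind and unbind
        }

        @Override
        public void registerGlobals(MetricsRegistry registry) throws MalformedObjectNameException {
            // static metrics: invoked only when the first/last encapsulating
            // MBean is added to or removed from the MBeanServer
        }
    }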
792
src/main/java/org/apache/cassandra/metrics/MetricsRegistry.java
Normal file
@@ -0,0 +1,792 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.cassandra.metrics;

import static com.scylladb.jmx.api.APIClient.getReader;
import static java.lang.Math.floor;
import static java.util.logging.Level.SEVERE;

import java.util.Arrays;
import java.util.Locale;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.logging.Logger;

import javax.json.JsonArray;
import javax.json.JsonNumber;
import javax.json.JsonObject;
import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;

import com.scylladb.jmx.api.APIClient;

/**
 * Helps integrate the 3.0 metrics API with 2.0.
 * <p>
 * The 3.0 API comes with poor JMX integration.
 * </p>
 */
public class MetricsRegistry {
    private static final long CACHE_DURATION = 1000;
    private static final long UPDATE_INTERVAL = 50;

    private static final Logger logger = Logger.getLogger(MetricsRegistry.class.getName());

    private final APIClient client;
    private final MBeanServer mBeanServer;

    public MetricsRegistry(APIClient client, MBeanServer mBeanServer) {
        this.client = client;
        this.mBeanServer = mBeanServer;
    }

    public MetricsRegistry(MetricsRegistry other) {
        this(other.client, other.mBeanServer);
    }

    public MetricMBean gauge(String url) {
        return gauge(Long.class, url);
    }

    public <T> MetricMBean gauge(Class<T> type, final String url) {
        return gauge(getReader(type), url);
    }

    public <T> MetricMBean gauge(final BiFunction<APIClient, String, T> function, final String url) {
        return gauge(c -> function.apply(c, url));
    }

    public <T> MetricMBean gauge(final Function<APIClient, T> function) {
        return gauge(() -> function.apply(client));
    }

    private class JmxGauge implements JmxGaugeMBean {
        private final Supplier<?> function;

        public JmxGauge(Supplier<?> function) {
            this.function = function;
        }

        @Override
        public Object getValue() {
            return function.get();
        }
    }

    public <T> MetricMBean gauge(final Supplier<T> function) {
        return new JmxGauge(function);
    }

    /**
     * Default approach to register is to actually register/add to
     * {@link MBeanServer}. For the unbind phase, override here.
     *
     * @param bean
     * @param objectNames
     */
    public void register(Supplier<MetricMBean> f, ObjectName... objectNames) {
        MetricMBean bean = f.get();
        for (ObjectName name : objectNames) {
            try {
                mBeanServer.registerMBean(bean, name);
            } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) {
                logger.log(SEVERE, "Could not register mbean", e);
            }
        }
    }
|
||||
|
||||
private class JmxCounter implements JmxCounterMBean {
|
||||
private final String url;
|
||||
|
||||
public JmxCounter(String url) {
|
||||
super();
|
||||
this.url = url;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getCount() {
|
||||
return client.getLongValue(url);
|
||||
}
|
||||
}
|
||||
|
||||
public MetricMBean counter(final String url) {
|
||||
return new JmxCounter(url);
|
||||
}
|
||||
|
||||
private abstract class IntermediatelyUpdated {
|
||||
private final long interval;
|
||||
private final Supplier<JsonObject> supplier;
|
||||
private long lastUpdate;
|
||||
|
||||
public IntermediatelyUpdated(String url, long interval) {
|
||||
this.supplier = () -> client.getJsonObj(url, null);
|
||||
this.interval = interval;
|
||||
}
|
||||
|
||||
public IntermediatelyUpdated(Supplier<JsonObject> supplier, long interval) {
|
||||
this.supplier = supplier;
|
||||
this.interval = interval;
|
||||
}
|
||||
|
||||
public abstract void update(JsonObject obj);
|
||||
|
||||
public final void update() {
|
||||
long now = System.currentTimeMillis();
|
||||
if (now - lastUpdate < interval) {
|
||||
return;
|
||||
}
|
||||
try {
|
||||
JsonObject obj = supplier.get();
|
||||
update(obj);
|
||||
} finally {
|
||||
lastUpdate = now;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static class Meter {
|
||||
public final long count;
|
||||
public final double oneMinuteRate;
|
||||
public final double fiveMinuteRate;
|
||||
public final double fifteenMinuteRate;
|
||||
public final double meanRate;
|
||||
|
||||
public Meter(long count, double oneMinuteRate, double fiveMinuteRate, double fifteenMinuteRate,
|
||||
double meanRate) {
|
||||
this.count = count;
|
||||
this.oneMinuteRate = oneMinuteRate;
|
||||
this.fiveMinuteRate = fiveMinuteRate;
|
||||
this.fifteenMinuteRate = fifteenMinuteRate;
|
||||
this.meanRate = meanRate;
|
||||
}
|
||||
|
||||
public Meter() {
|
||||
this(0, 0, 0, 0, 0);
|
||||
}
|
||||
|
||||
public Meter(JsonObject obj) {
|
||||
JsonArray rates = obj.getJsonArray("rates");
|
||||
oneMinuteRate = rates.getJsonNumber(0).doubleValue();
|
||||
fiveMinuteRate = rates.getJsonNumber(1).doubleValue();
|
||||
fifteenMinuteRate = rates.getJsonNumber(2).doubleValue();
|
||||
meanRate = obj.getJsonNumber("mean_rate").doubleValue();
|
||||
count = obj.getJsonNumber("count").longValue();
|
||||
}
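        // Example payload (illustrative): {"rates": [0.1, 0.5, 0.9],
        // "mean_rate": 0.4, "count": 42} fills the one/five/fifteen-minute
        // rates, the mean rate and the event count above.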
    }

    private static final TimeUnit RATE_UNIT = TimeUnit.SECONDS;
    private static final TimeUnit DURATION_UNIT = TimeUnit.MICROSECONDS;
    private static final TimeUnit API_DURATION_UNIT = TimeUnit.NANOSECONDS;
    private static final double DURATION_FACTOR = 1.0 / API_DURATION_UNIT.convert(1, DURATION_UNIT);

    private static double toDuration(double nanos) {
        return nanos * DURATION_FACTOR;
    }
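    // Worked example (illustrative): API_DURATION_UNIT.convert(1, DURATION_UNIT)
    // is 1000 (nanoseconds per microsecond), so DURATION_FACTOR is 1/1000 and
    // toDuration(2500) turns 2500 ns into 2.5 us.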

    private static String unitString(TimeUnit u) {
        String s = u.toString().toLowerCase(Locale.US);
        return s.substring(0, s.length() - 1);
    }

    private class JmxMeter extends IntermediatelyUpdated implements JmxMeterMBean {
        private Meter meter = new Meter();

        public JmxMeter(String url, long interval) {
            super(url, interval);
        }

        public JmxMeter(Supplier<JsonObject> supplier, long interval) {
            super(supplier, interval);
        }

        @Override
        public void update(JsonObject obj) {
            meter = new Meter(obj);
        }

        @Override
        public long getCount() {
            update();
            return meter.count;
        }

        @Override
        public double getMeanRate() {
            update();
            return meter.meanRate;
        }

        @Override
        public double getOneMinuteRate() {
            update();
            return meter.oneMinuteRate;
        }

        @Override
        public double getFiveMinuteRate() {
            update();
            return meter.fiveMinuteRate;
        }

        @Override
        public double getFifteenMinuteRate() {
            update();
            return meter.fifteenMinuteRate;
        }

        @Override
        public String getRateUnit() {
            return "event/" + unitString(RATE_UNIT);
        }
    }

    public MetricMBean meter(String url) {
        return new JmxMeter(url, CACHE_DURATION);
    }

    private static long[] asLongArray(JsonArray a) {
        return a.getValuesAs(JsonNumber.class).stream().mapToLong(n -> n.longValue()).toArray();
    }

    private static interface Samples {
        default double getValue(double quantile) {
            return 0;
        }

        default long[] getValues() {
            return new long[0];
        }
    }

    private static class BufferSamples implements Samples {
        private final long[] samples;

        public BufferSamples(long[] samples) {
            this.samples = samples;
            Arrays.sort(this.samples);
        }

        @Override
        public long[] getValues() {
            return samples;
        }

        @Override
        public double getValue(double quantile) {
            if (quantile < 0.0 || quantile > 1.0) {
                throw new IllegalArgumentException(quantile + " is not in [0..1]");
            }

            if (samples.length == 0) {
                return 0.0;
            }

            final double pos = quantile * (samples.length + 1);

            if (pos < 1) {
                return samples[0];
            }

            if (pos >= samples.length) {
                return samples[samples.length - 1];
            }

            final double lower = samples[(int) pos - 1];
            final double upper = samples[(int) pos];
            return lower + (pos - floor(pos)) * (upper - lower);
        }
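        // Worked example (illustrative): for sorted samples [10, 20, 30, 40]
        // and quantile 0.5, pos = 0.5 * 5 = 2.5, so the result interpolates
        // halfway between samples[1] = 20 and samples[2] = 30, giving 25.0.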
    }

    private static class Histogram {
        private final long count;
        private final long min;
        private final long max;
        private final double mean;
        private final double stdDev;

        private final Samples samples;

        public Histogram(long count, long min, long max, double mean, double stdDev, Samples samples) {
            this.count = count;
            this.min = min;
            this.max = max;
            this.mean = mean;
            this.stdDev = stdDev;
            this.samples = samples;
        }

        public Histogram() {
            this(0, 0, 0, 0, 0, new Samples() {
            });
        }

        public Histogram(JsonObject obj) {
            this(obj.getJsonNumber("count").longValue(), obj.getJsonNumber("min").longValue(),
                    obj.getJsonNumber("max").longValue(), obj.getJsonNumber("mean").doubleValue(),
                    obj.getJsonNumber("variance").doubleValue(), new BufferSamples(getValues(obj)));
        }

        public Histogram(EstimatedHistogram h) {
            this(h.count(), h.min(), h.max(), h.mean(), 0, h);
        }

        private static long[] getValues(JsonObject obj) {
            JsonArray arr = obj.getJsonArray("sample");
            if (arr != null) {
                return asLongArray(arr);
            }
            return new long[0];
        }

        public long[] getValues() {
            return samples.getValues();
        }

        // Origin (and previous iterations of scylla-jmx) uses
        // biased/ExponentiallyDecaying measurements for the history and
        // quantile resolution. For our use that is just gobbledygook: when
        // asked, and once enough time has passed, we fetch a "values" buffer
        // from the actual scylla server -- a buffer with no information
        // whatsoever on how those values correlate to actual sampling time.
        // So applying time weights at this level is just wrong. We can just
        // as well treat this as a uniform distribution.
        // Obvious improvement: send time/value tuples instead.
        public double getValue(double quantile) {
            return samples.getValue(quantile);
        }

        public long getCount() {
            return count;
        }

        public long getMin() {
            return min;
        }

        public long getMax() {
            return max;
        }

        public double getMean() {
            return mean;
        }

        public double getStdDev() {
            return stdDev;
        }
    }

    private static class EstimatedHistogram implements Samples {
        /**
         * The series of values to which the counts in `buckets` correspond: 1,
         * 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc. Thus, a `buckets` of
         * [0, 0, 1, 10] would mean we had seen one value of 3 and 10 values of
         * 4.
         *
         * The series starts at 1 and grows by 1.2 each time (rounding and
         * removing duplicates). It goes from 1 to around 36M by default
         * (creating 90+1 buckets), which will give us timing resolution from
         * microseconds to 36 seconds, with less precision as the numbers get
         * larger.
         *
         * Each bucket represents values from (previous bucket offset, current
         * offset].
         */
        private final long[] bucketOffsets;
        // `buckets` is one element longer than `bucketOffsets` -- the last
        // element holds the count of values greater than the last offset.
        private long[] buckets;

        public EstimatedHistogram(JsonObject obj) {
            this(asLongArray(obj.getJsonArray("bucket_offsets")), asLongArray(obj.getJsonArray("buckets")));
        }

        public EstimatedHistogram(long[] offsets, long[] bucketData) {
            assert bucketData.length == offsets.length + 1;
            bucketOffsets = offsets;
            buckets = bucketData;
        }

        /**
         * @return the smallest value that could have been added to this
         *         histogram
         */
        public long min() {
            for (int i = 0; i < buckets.length; i++) {
                if (buckets[i] > 0) {
                    return i == 0 ? 0 : 1 + bucketOffsets[i - 1];
                }
            }
            return 0;
        }

        /**
         * @return the largest value that could have been added to this
         *         histogram. If the histogram overflowed, returns
         *         Long.MAX_VALUE.
         */
        public long max() {
            int lastBucket = buckets.length - 1;
            if (buckets[lastBucket] > 0) {
                return Long.MAX_VALUE;
            }

            for (int i = lastBucket - 1; i >= 0; i--) {
                if (buckets[i] > 0) {
                    return bucketOffsets[i];
                }
            }
            return 0;
        }

        @Override
        public long[] getValues() {
            return buckets;
        }

        /**
         * @param percentile
         * @return estimated value at given percentile
         */
        @Override
        public double getValue(double percentile) {
            assert percentile >= 0 && percentile <= 1.0;
            int lastBucket = buckets.length - 1;
            if (buckets[lastBucket] > 0) {
                throw new IllegalStateException("Unable to compute when histogram overflowed");
            }

            long pcount = (long) Math.floor(count() * percentile);
            if (pcount == 0) {
                return 0;
            }

            long elements = 0;
            for (int i = 0; i < lastBucket; i++) {
                elements += buckets[i];
                if (elements >= pcount) {
                    return bucketOffsets[i];
                }
            }
            return 0;
        }
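        // Worked example (illustrative): with bucketOffsets [1, 2, 3] and
        // buckets [0, 4, 6, 0], count() is 10 and the 0.5 percentile needs
        // floor(10 * 0.5) = 5 elements; the running total reaches 5 inside
        // the third bucket, so the estimate is bucketOffsets[2] = 3.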

        /**
         * @return the mean histogram value (average of bucket offsets, weighted
         *         by count)
         * @throws IllegalStateException
         *             if any values were greater than the largest bucket
         *             threshold
         */
        public long mean() {
            int lastBucket = buckets.length - 1;
            if (buckets[lastBucket] > 0) {
                throw new IllegalStateException("Unable to compute ceiling for max when histogram overflowed");
            }

            long elements = 0;
            long sum = 0;
            for (int i = 0; i < lastBucket; i++) {
                long bCount = buckets[i];
                elements += bCount;
                sum += bCount * bucketOffsets[i];
            }

            return (long) Math.ceil((double) sum / elements);
        }

        /**
         * @return the total number of non-zero values
         */
        public long count() {
            return Arrays.stream(buckets).sum();
        }

        /**
         * @return true if this histogram has overflowed -- that is, a value
         *         larger than our largest bucket could bound was added
         */
        @SuppressWarnings("unused")
        public boolean isOverflowed() {
            return buckets[buckets.length - 1] > 0;
        }

    }

    private class JmxHistogram extends IntermediatelyUpdated implements JmxHistogramMBean {
        private Histogram histogram = new Histogram();

        public JmxHistogram(String url, long interval) {
            super(url, interval);
        }

        @Override
        public void update(JsonObject obj) {
            if (obj.containsKey("hist")) {
                obj = obj.getJsonObject("hist");
            }
            if (obj.containsKey("buckets")) {
                histogram = new Histogram(new EstimatedHistogram(obj));
            } else {
                histogram = new Histogram(obj);
            }
        }
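        // Note: the REST API may return either a pre-digested histogram
        // (count/min/max/mean/variance/sample) or a raw estimated histogram
        // with bucket_offsets/buckets; the branch above normalizes both.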

        @Override
        public long getCount() {
            update();
            return histogram.getCount();
        }

        @Override
        public long getMin() {
            update();
            return histogram.getMin();
        }

        @Override
        public long getMax() {
            update();
            return histogram.getMax();
        }

        @Override
        public double getMean() {
            update();
            return histogram.getMean();
        }

        @Override
        public double getStdDev() {
            update();
            return histogram.getStdDev();
        }

        @Override
        public double get50thPercentile() {
            update();
            return histogram.getValue(.5);
        }

        @Override
        public double get75thPercentile() {
            update();
            return histogram.getValue(.75);
        }

        @Override
        public double get95thPercentile() {
            update();
            return histogram.getValue(.95);
        }

        @Override
        public double get98thPercentile() {
            update();
            return histogram.getValue(.98);
        }

        @Override
        public double get99thPercentile() {
            update();
            return histogram.getValue(.99);
        }

        @Override
        public double get999thPercentile() {
            update();
            return histogram.getValue(.999);
        }

        @Override
        public long[] values() {
            update();
            return histogram.getValues();
        }
    }

    public MetricMBean histogram(String url, boolean considerZeroes) {
        return new JmxHistogram(url, UPDATE_INTERVAL);
    }

    private class JmxTimer extends JmxMeter implements JmxTimerMBean {
        private Histogram histogram = new Histogram();

        public JmxTimer(String url, long interval) {
            super(url, interval);
        }

        @Override
        public void update(JsonObject obj) {
            // TODO: this is not atomic.
            super.update(obj.getJsonObject("meter"));
            histogram = new Histogram(obj.getJsonObject("hist"));
        }

        @Override
        public double getMin() {
            return toDuration(histogram.getMin());
        }

        @Override
        public double getMax() {
            return toDuration(histogram.getMax());
        }

        @Override
        public double getMean() {
            return toDuration(histogram.getMean());
        }

        @Override
        public double getStdDev() {
            return toDuration(histogram.getStdDev());
        }

        @Override
        public double get50thPercentile() {
            return toDuration(histogram.getValue(.5));
        }

        @Override
        public double get75thPercentile() {
            return toDuration(histogram.getValue(.75));
        }

        @Override
        public double get95thPercentile() {
            return toDuration(histogram.getValue(.95));
        }

        @Override
        public double get98thPercentile() {
            return toDuration(histogram.getValue(.98));
        }

        @Override
        public double get99thPercentile() {
            return toDuration(histogram.getValue(.99));
        }

        @Override
        public double get999thPercentile() {
            return toDuration(histogram.getValue(.999));
        }

        @Override
        public long[] values() {
            return histogram.getValues();
        }

        @Override
        public String getDurationUnit() {
            return DURATION_UNIT.toString().toLowerCase(Locale.US);
        }
    }

    public MetricMBean timer(String url) {
        return new JmxTimer(url, UPDATE_INTERVAL);
    }

    public interface MetricMBean {
    }

    public static interface JmxGaugeMBean extends MetricMBean {
        Object getValue();
    }

    public interface JmxHistogramMBean extends MetricMBean {
        long getCount();

        long getMin();

        long getMax();

        double getMean();

        double getStdDev();

        double get50thPercentile();

        double get75thPercentile();

        double get95thPercentile();

        double get98thPercentile();

        double get99thPercentile();

        double get999thPercentile();

        long[] values();
    }

    public interface JmxCounterMBean extends MetricMBean {
        long getCount();
    }

    public interface JmxMeterMBean extends MetricMBean {
        long getCount();

        double getMeanRate();

        double getOneMinuteRate();

        double getFiveMinuteRate();

        double getFifteenMinuteRate();

        String getRateUnit();
    }

    public interface JmxTimerMBean extends JmxMeterMBean {
        double getMin();

        double getMax();

        double getMean();

        double getStdDev();

        double get50thPercentile();

        double get75thPercentile();

        double get95thPercentile();

        double get98thPercentile();

        double get99thPercentile();

        double get999thPercentile();

        long[] values();

        String getDurationUnit();
    }
}
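
For orientation, a minimal sketch of how a caller could wire the registry above (illustrative only; the endpoint URLs and ObjectNames here are hypothetical, not part of this commit):

    void registerExampleMetrics(APIClient client, MBeanServer server) throws MalformedObjectNameException {
        MetricsRegistry registry = new MetricsRegistry(client, server);
        // Gauges poll their REST endpoint lazily on each JMX read.
        registry.register(() -> registry.gauge("/example/metrics/value"),
                new ObjectName("org.apache.cassandra.metrics:type=Example,name=Value"));
        // Counters expose getCount() backed by the same API client.
        registry.register(() -> registry.counter("/example/metrics/count"),
                new ObjectName("org.apache.cassandra.metrics:type=Example,name=Count"));
    }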
@@ -23,27 +23,21 @@
 */
package org.apache.cassandra.metrics;

import com.scylladb.jmx.metrics.APIMetrics;
import com.scylladb.jmx.metrics.DefaultNameFactory;
import com.scylladb.jmx.metrics.MetricNameFactory;
import com.yammer.metrics.core.Counter;
import javax.management.MalformedObjectNameException;

/**
 * Metrics related to Storage.
 */
public class StorageMetrics {
    private static final MetricNameFactory factory = new DefaultNameFactory(
            "Storage");

    public static final Counter load = APIMetrics.newCounter(
            "/storage_service/metrics/load", factory.createMetricName("Load"));
    public static final Counter exceptions = APIMetrics.newCounter(
            "/storage_service/metrics/exceptions",
            factory.createMetricName("Exceptions"));
    public static final Counter totalHintsInProgress = APIMetrics.newCounter(
            "/storage_service/metrics/hints_in_progress",
            factory.createMetricName("TotalHintsInProgress"));
    public static final Counter totalHints = APIMetrics.newCounter(
            "/storage_service/metrics/total_hints",
            factory.createMetricName("TotalHints"));
public class StorageMetrics implements Metrics {
    @Override
    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
        MetricNameFactory factory = new DefaultNameFactory("Storage");
        registry.register(() -> registry.counter("/storage_service/metrics/load"), factory.createMetricName("Load"));
        registry.register(() -> registry.counter("/storage_service/metrics/exceptions"),
                factory.createMetricName("Exceptions"));
        registry.register(() -> registry.counter("/storage_service/metrics/hints_in_progress"),
                factory.createMetricName("TotalHintsInProgress"));
        registry.register(() -> registry.counter("/storage_service/metrics/total_hints"),
                factory.createMetricName("TotalHints"));
    }
}

@@ -23,84 +23,87 @@
 */
package org.apache.cassandra.metrics;

import static java.util.Arrays.asList;
import static java.util.Collections.emptySet;
import static org.apache.cassandra.metrics.DefaultNameFactory.createMetricName;

import java.net.InetAddress;
import java.util.HashMap;
import java.net.UnknownHostException;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.Timer;
import java.util.TimerTask;

import javax.json.JsonArray;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMetrics;
import com.scylladb.jmx.metrics.DefaultNameFactory;
import com.scylladb.jmx.metrics.MetricNameFactory;
import com.yammer.metrics.core.Counter;
import com.scylladb.jmx.metrics.APIMBean;

/**
 * Metrics for streaming.
 */
public class StreamingMetrics
{
public class StreamingMetrics {
    public static final String TYPE_NAME = "Streaming";
    private static final Map<String, StreamingMetrics> instances = new HashMap<String, StreamingMetrics>();
    static final int INTERVAL = 1000; // update every 1 second

    private static Timer timer = new Timer("Streaming Metrics");
    private static final HashSet<ObjectName> globalNames;

    public static final Counter activeStreamsOutbound = APIMetrics.newCounter("/stream_manager/metrics/outbound",
            DefaultNameFactory.createMetricName(TYPE_NAME, "ActiveOutboundStreams", null));
    public static final Counter totalIncomingBytes = APIMetrics.newCounter("/stream_manager/metrics/incoming",
            DefaultNameFactory.createMetricName(TYPE_NAME, "TotalIncomingBytes", null));
    public static final Counter totalOutgoingBytes = APIMetrics.newCounter("/stream_manager/metrics/outgoing",
            DefaultNameFactory.createMetricName(TYPE_NAME, "TotalOutgoingBytes", null));
    public final Counter incomingBytes;
    public final Counter outgoingBytes;
    private static APIClient s_c = new APIClient();

    public static void register_mbeans() {
        TimerTask taskToExecute = new CheckRegistration();
        timer.scheduleAtFixedRate(taskToExecute, 100, INTERVAL);
    }

    public StreamingMetrics(final InetAddress peer)
    {
        MetricNameFactory factory = new DefaultNameFactory("Streaming", peer.getHostAddress().replaceAll(":", "."));
        incomingBytes = APIMetrics.newCounter("/stream_manager/metrics/incoming/" + peer,
                factory.createMetricName("IncomingBytes"));
        outgoingBytes = APIMetrics.newCounter("/stream_manager/metrics/outgoing/" + peer,
                factory.createMetricName("OutgoingBytes"));
    }

    public static boolean checkRegistration() {
    static {
        try {
            JsonArray streams = s_c.getJsonArray("/stream_manager/");
            Set<String> all = new HashSet<String>();
            for (int i = 0; i < streams.size(); i++) {
                JsonArray sessions = streams.getJsonObject(i).getJsonArray("sessions");
                for (int j = 0; j < sessions.size(); j++) {
                    String name = sessions.getJsonObject(j).getString("peer");
                    if (!instances.containsKey(name)) {
                        StreamingMetrics metrics = new StreamingMetrics(InetAddress.getByName(name));
                        instances.put(name, metrics);
                    }
                    all.add(name);
                }
            }
            // removing deleted streams
            for (String n : instances.keySet()) {
                if (!all.contains(n)) {
                    instances.remove(n);
                }
            }
        } catch (Exception e) {
            // ignoring exceptions, will retry on the next interval
            return false;
            globalNames = new HashSet<ObjectName>(asList(createMetricName(TYPE_NAME, "ActiveOutboundStreams", null),
                    createMetricName(TYPE_NAME, "TotalIncomingBytes", null),
                    createMetricName(TYPE_NAME, "TotalOutgoingBytes", null)));
        } catch (MalformedObjectNameException e) {
            throw new Error(e);
        }
        return true;
    };

    private StreamingMetrics() {
    }

    private static final class CheckRegistration extends TimerTask {
        @Override
        public void run() {
            checkRegistration();
    private static boolean isStreamingName(ObjectName n) {
        return TYPE_NAME.equals(n.getKeyProperty("type"));
    }

    public static void unregister(APIClient client, MBeanServer server) throws MalformedObjectNameException {
        APIMBean.checkRegistration(server, emptySet(), StreamingMetrics::isStreamingName, (n) -> null);
    }

    public static boolean checkRegistration(APIClient client, MBeanServer server)
            throws MalformedObjectNameException, UnknownHostException {

        Set<ObjectName> all = new HashSet<ObjectName>(globalNames);
        JsonArray streams = client.getJsonArray("/stream_manager/");
        for (int i = 0; i < streams.size(); i++) {
            JsonArray sessions = streams.getJsonObject(i).getJsonArray("sessions");
            for (int j = 0; j < sessions.size(); j++) {
                String peer = sessions.getJsonObject(j).getString("peer");
                String scope = InetAddress.getByName(peer).getHostAddress().replaceAll(":", ".");
                all.add(createMetricName(TYPE_NAME, "IncomingBytes", scope));
                all.add(createMetricName(TYPE_NAME, "OutgoingBytes", scope));
            }
        }

        MetricsRegistry registry = new MetricsRegistry(client, server);
        return APIMBean.checkRegistration(server, all, StreamingMetrics::isStreamingName, n -> {
            String scope = n.getKeyProperty("scope");
            String name = n.getKeyProperty("name");

            String url = null;
            if ("ActiveOutboundStreams".equals(name)) {
                url = "/stream_manager/metrics/outbound";
            } else if ("IncomingBytes".equals(name) || "TotalIncomingBytes".equals(name)) {
                url = "/stream_manager/metrics/incoming";
            } else if ("OutgoingBytes".equals(name) || "TotalOutgoingBytes".equals(name)) {
                url = "/stream_manager/metrics/outgoing";
            }
            if (url == null) {
                throw new IllegalArgumentException();
            }
            if (scope != null) {
                url = url + "/" + scope;
            }
            return registry.counter(url);
        });
    }
}
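
A minimal driver sketch for the new registration hook (illustrative; the fixed-rate scheduling below mirrors the TimerTask the removed code used and is assumed, not part of this change):

    Timer timer = new Timer("Streaming Metrics", true);
    timer.scheduleAtFixedRate(new TimerTask() {
        @Override
        public void run() {
            try {
                StreamingMetrics.checkRegistration(client, server);
            } catch (Exception e) {
                // ignore and retry on the next interval
            }
        }
    }, 100, 1000);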

310 src/main/java/org/apache/cassandra/metrics/TableMetrics.java (new file)
@@ -0,0 +1,310 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.cassandra.metrics;

import static com.scylladb.jmx.api.APIClient.getReader;

import java.util.function.BiFunction;
import java.util.function.Function;

import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

import org.apache.cassandra.db.ColumnFamilyStore;

import com.scylladb.jmx.api.APIClient;

/**
 * Metrics for {@link ColumnFamilyStore}.
 */
public class TableMetrics implements Metrics {
    private final MetricNameFactory factory;
    private final MetricNameFactory aliasFactory;
    private static final MetricNameFactory globalFactory = new AllTableMetricNameFactory("Table");
    private static final MetricNameFactory globalAliasFactory = new AllTableMetricNameFactory("ColumnFamily");
    private static final LatencyMetrics globalLatency[] = new LatencyMetrics[] {
            new LatencyMetrics("Read", compose("read_latency"), globalFactory, globalAliasFactory),
            new LatencyMetrics("Write", compose("write_latency"), globalFactory, globalAliasFactory),
            new LatencyMetrics("Range", compose("range_latency"), globalFactory, globalAliasFactory), };

    private final String cfName;
    private final LatencyMetrics latencyMetrics[];

    public TableMetrics(String keyspace, String columnFamily, boolean isIndex) {
        this.factory = new TableMetricNameFactory(keyspace, columnFamily, isIndex, "Table");
        this.aliasFactory = new TableMetricNameFactory(keyspace, columnFamily, isIndex, "ColumnFamily");
        this.cfName = keyspace + ":" + columnFamily;

        latencyMetrics = new LatencyMetrics[] {
                new LatencyMetrics("Read", compose("read_latency"), cfName, factory, aliasFactory),
                new LatencyMetrics("Write", compose("write_latency"), cfName, factory, aliasFactory),
                new LatencyMetrics("Range", compose("range_latency"), cfName, factory, aliasFactory),

                new LatencyMetrics("CasPrepare", compose("cas_prepare"), cfName, factory, aliasFactory),
                new LatencyMetrics("CasPropose", compose("cas_propose"), cfName, factory, aliasFactory),
                new LatencyMetrics("CasCommit", compose("cas_commit"), cfName, factory, aliasFactory), };
    }

    @Override
    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
        Registry r = new Registry(registry, factory, aliasFactory, cfName);
        registerCommon(r);
        registerLocal(r);
    }

    @Override
    public void registerGlobals(MetricsRegistry registry) throws MalformedObjectNameException {
        Registry r = new Registry(registry, globalFactory, globalAliasFactory, null);
        registerCommon(r);
        for (LatencyMetrics l : globalLatency) {
            l.register(registry);
        }
    }

    private static String compose(String base, String name) {
        String s = "/column_family/metrics/" + base;
        return name != null ? s + "/" + name : s;
    }

    private static String compose(String base) {
        return compose(base, null);
    }
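    // Worked example (illustrative): compose("read_latency", "ks:cf") yields
    // "/column_family/metrics/read_latency/ks:cf"; the single-argument form
    // omits the per-table suffix for the global metrics.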

    /**
     * Creates metrics for given {@link ColumnFamilyStore}.
     *
     * @param cfs
     *            ColumnFamilyStore to measure metrics
     */
    static class Registry extends MetricsRegistry {
        @SuppressWarnings("unused")
        private Function<APIClient, Long> newGauge(final String url) {
            return newGauge(Long.class, url);
        }

        public <T> Function<APIClient, T> newGauge(BiFunction<APIClient, String, T> function, String url) {
            return c -> {
                return function.apply(c, url);
            };
        }

        private <T> Function<APIClient, T> newGauge(Class<T> type, final String url) {
            return newGauge(getReader(type), url);
        }

        final MetricNameFactory factory;
        final MetricNameFactory aliasFactory;
        final String cfName;

        public Registry(MetricsRegistry other, MetricNameFactory factory, MetricNameFactory aliasFactory,
                String cfName) {
            super(other);
            this.cfName = cfName;
            this.factory = factory;
            this.aliasFactory = aliasFactory;
        }

        public void createTableGauge(String name, String uri) throws MalformedObjectNameException {
            createTableGauge(name, name, uri);
        }

        public void createTableGauge(String name, String alias, String uri) throws MalformedObjectNameException {
            createTableGauge(Long.class, name, alias, uri);
        }

        public <T> void createTableGauge(Class<T> c, String name, String uri) throws MalformedObjectNameException {
            createTableGauge(c, c, name, name, uri);
        }

        public <T> void createTableGauge(Class<T> c, String name, String alias, String uri)
                throws MalformedObjectNameException {
            createTableGauge(c, name, alias, uri, getReader(c));
        }

        public <T> void createTableGauge(Class<T> c, String name, String uri, BiFunction<APIClient, String, T> f)
                throws MalformedObjectNameException {
            createTableGauge(c, name, name, uri, f);
        }

        public <T> void createTableGauge(Class<T> c, String name, String alias, String uri,
                BiFunction<APIClient, String, T> f) throws MalformedObjectNameException {
            register(() -> gauge(newGauge(f, compose(uri, cfName))), factory.createMetricName(name),
                    aliasFactory.createMetricName(alias));
        }

        public <L, G> void createTableGauge(Class<L> c1, Class<G> c2, String name, String alias, String uri)
                throws MalformedObjectNameException {
            if (cfName != null) {
                createTableGauge(c1, name, alias, uri, getReader(c1));
            } else { // global case
                createTableGauge(c2, name, alias, uri, getReader(c2));
            }
        }

        public void createTableCounter(String name, String uri) throws MalformedObjectNameException {
            createTableCounter(name, name, uri);
        }

        public void createTableCounter(String name, String alias, String uri) throws MalformedObjectNameException {
            register(() -> counter(compose(uri, cfName)), factory.createMetricName(name),
                    aliasFactory.createMetricName(alias));
        }

        public void createTableHistogram(String name, String uri, boolean considerZeros)
                throws MalformedObjectNameException {
            createTableHistogram(name, name, uri, considerZeros);
        }

        public void createTableHistogram(String name, String alias, String uri, boolean considerZeros)
                throws MalformedObjectNameException {
            register(() -> histogram(compose(uri, cfName), considerZeros), factory.createMetricName(name),
                    aliasFactory.createMetricName(alias));
        }

        public void createTimer(String name, String uri) throws MalformedObjectNameException {
            register(() -> timer(compose(uri, cfName)), factory.createMetricName(name));
        }
    }

    private void registerLocal(Registry registry) throws MalformedObjectNameException {
        registry.createTableGauge(long[].class, "EstimatedPartitionSizeHistogram", "EstimatedRowSizeHistogram",
                "estimated_row_size_histogram", APIClient::getEstimatedHistogramAsLongArrValue);
        registry.createTableGauge("EstimatedPartitionCount", "EstimatedRowCount", "estimated_row_count");

        registry.createTableGauge(long[].class, "EstimatedColumnCountHistogram", "estimated_column_count_histogram",
                APIClient::getEstimatedHistogramAsLongArrValue);
        registry.createTableGauge(Double.class, "KeyCacheHitRate", "key_cache_hit_rate");

        registry.createTimer("CoordinatorReadLatency", "coordinator/read");
        registry.createTimer("CoordinatorScanLatency", "coordinator/scan");
        registry.createTimer("WaitingOnFreeMemtableSpace", "waiting_on_free_memtable");

        for (LatencyMetrics l : latencyMetrics) {
            l.register(registry);
        }
    }

    private static void registerCommon(Registry registry) throws MalformedObjectNameException {
        registry.createTableGauge("MemtableColumnsCount", "memtable_columns_count");
        registry.createTableGauge("MemtableOnHeapSize", "memtable_on_heap_size");
        registry.createTableGauge("MemtableOffHeapSize", "memtable_off_heap_size");
        registry.createTableGauge("MemtableLiveDataSize", "memtable_live_data_size");
        registry.createTableGauge("AllMemtablesHeapSize", "all_memtables_on_heap_size");
        registry.createTableGauge("AllMemtablesOffHeapSize", "all_memtables_off_heap_size");
        registry.createTableGauge("AllMemtablesLiveDataSize", "all_memtables_live_data_size");

        registry.createTableCounter("MemtableSwitchCount", "memtable_switch_count");

        registry.createTableHistogram("SSTablesPerReadHistogram", "sstables_per_read_histogram", true);
        registry.createTableGauge(Double.class, "CompressionRatio", "compression_ratio");

        registry.createTableCounter("PendingFlushes", "pending_flushes");

        registry.createTableGauge(Integer.class, Long.class, "PendingCompactions", "PendingCompactions",
                "pending_compactions");
        registry.createTableGauge(Integer.class, Long.class, "LiveSSTableCount", "LiveSSTableCount",
                "live_ss_table_count");

        registry.createTableCounter("LiveDiskSpaceUsed", "live_disk_space_used");
        registry.createTableCounter("TotalDiskSpaceUsed", "total_disk_space_used");
        registry.createTableGauge("MinPartitionSize", "MinRowSize", "min_row_size");
        registry.createTableGauge("MaxPartitionSize", "MaxRowSize", "max_row_size");
        registry.createTableGauge("MeanPartitionSize", "MeanRowSize", "mean_row_size");

        registry.createTableGauge("BloomFilterFalsePositives", "bloom_filter_false_positives");
        registry.createTableGauge("RecentBloomFilterFalsePositives", "recent_bloom_filter_false_positives");
        registry.createTableGauge(Double.class, "BloomFilterFalseRatio", "bloom_filter_false_ratio");
        registry.createTableGauge(Double.class, "RecentBloomFilterFalseRatio", "recent_bloom_filter_false_ratio");

        registry.createTableGauge("BloomFilterDiskSpaceUsed", "bloom_filter_disk_space_used");
        registry.createTableGauge("BloomFilterOffHeapMemoryUsed", "bloom_filter_off_heap_memory_used");
        registry.createTableGauge("IndexSummaryOffHeapMemoryUsed", "index_summary_off_heap_memory_used");
        registry.createTableGauge("CompressionMetadataOffHeapMemoryUsed", "compression_metadata_off_heap_memory_used");
        registry.createTableGauge("SpeculativeRetries", "speculative_retries");

        registry.createTableHistogram("TombstoneScannedHistogram", "tombstone_scanned_histogram", false);
        registry.createTableHistogram("LiveScannedHistogram", "live_scanned_histogram", false);
        registry.createTableHistogram("ColUpdateTimeDeltaHistogram", "col_update_time_delta_histogram", false);

        // We do not want to capture view mutation specific metrics for a view;
        // they only make sense to capture on the base table.
        // TODO: views
        // if (!cfs.metadata.isView())
        // {
        // viewLockAcquireTime = createTableTimer("ViewLockAcquireTime",
        // cfs.keyspace.metric.viewLockAcquireTime);
        // viewReadTime = createTableTimer("ViewReadTime",
        // cfs.keyspace.metric.viewReadTime);
        // }

        registry.createTableGauge("SnapshotsSize", "snapshots_size");
        registry.createTableCounter("RowCacheHitOutOfRange", "row_cache_hit_out_of_range");
        registry.createTableCounter("RowCacheHit", "row_cache_hit");
        registry.createTableCounter("RowCacheMiss", "row_cache_miss");
    }

    static class TableMetricNameFactory implements MetricNameFactory {
        private final String keyspaceName;
        private final String tableName;
        private final boolean isIndex;
        private final String type;

        public TableMetricNameFactory(String keyspaceName, String tableName, boolean isIndex, String type) {
            this.keyspaceName = keyspaceName;
            this.tableName = tableName;
            this.isIndex = isIndex;
            this.type = type;
        }

        @Override
        public ObjectName createMetricName(String metricName) throws MalformedObjectNameException {
            String groupName = TableMetrics.class.getPackage().getName();
            String type = isIndex ? "Index" + this.type : this.type;

            StringBuilder mbeanName = new StringBuilder();
            mbeanName.append(groupName).append(":");
            mbeanName.append("type=").append(type);
            mbeanName.append(",keyspace=").append(keyspaceName);
            mbeanName.append(",scope=").append(tableName);
            mbeanName.append(",name=").append(metricName);

            return new ObjectName(mbeanName.toString());
        }
    }

    static class AllTableMetricNameFactory implements MetricNameFactory {
        private final String type;

        public AllTableMetricNameFactory(String type) {
            this.type = type;
        }

        @Override
        public ObjectName createMetricName(String metricName) throws MalformedObjectNameException {
            String groupName = TableMetrics.class.getPackage().getName();
            StringBuilder mbeanName = new StringBuilder();
            mbeanName.append(groupName).append(":");
            mbeanName.append("type=" + type);
            mbeanName.append(",name=").append(metricName);
            return new ObjectName(mbeanName.toString());
        }
    }

    public enum Sampler {
        READS, WRITES
    }
}
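
A minimal registration sketch for the class above (illustrative; the keyspace and table names are made up):

    MetricsRegistry registry = new MetricsRegistry(client, server);
    TableMetrics metrics = new TableMetrics("my_keyspace", "my_table", false);
    metrics.register(registry);        // per-table Table/ColumnFamily MBeans
    metrics.registerGlobals(registry); // node-wide aggregates across all tables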
@@ -22,157 +22,118 @@
 */
package org.apache.cassandra.net;

import java.lang.management.ManagementFactory;
import java.net.*;
import java.util.*;
import static java.util.Collections.emptyMap;

import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Map;
import java.util.Map.Entry;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import javax.json.JsonArray;
import javax.json.JsonObject;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;

import org.apache.cassandra.metrics.DroppedMessageMetrics;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.MetricsMBean;

public final class MessagingService implements MessagingServiceMBean {
    static final int INTERVAL = 1000; // update every 1 second
public final class MessagingService extends MetricsMBean implements MessagingServiceMBean {
    public static final String MBEAN_NAME = "org.apache.cassandra.net:type=MessagingService";
    private static final java.util.logging.Logger logger = java.util.logging.Logger
            .getLogger(MessagingService.class.getName());
    Map<String, DroppedMessageMetrics> dropped;
    private APIClient c = new APIClient();
    Map<String, Long> resent_timeout = new HashMap<String, Long>();
    private final ObjectName jmxObjectName;
    private static final Logger logger = Logger.getLogger(MessagingService.class.getName());

    private Map<String, Long> resentTimeouts = new HashMap<String, Long>();
    private long recentTimeoutCount;

    /* All verb handler identifiers */
    public enum Verb
    {
        MUTATION,
        @Deprecated BINARY,
        READ_REPAIR,
        READ,
        REQUEST_RESPONSE, // client-initiated reads and writes
        @Deprecated STREAM_INITIATE,
        @Deprecated STREAM_INITIATE_DONE,
        @Deprecated STREAM_REPLY,
        @Deprecated STREAM_REQUEST,
        RANGE_SLICE,
        @Deprecated BOOTSTRAP_TOKEN,
        @Deprecated TREE_REQUEST,
        @Deprecated TREE_RESPONSE,
        @Deprecated JOIN,
        GOSSIP_DIGEST_SYN,
        GOSSIP_DIGEST_ACK,
        GOSSIP_DIGEST_ACK2,
        @Deprecated DEFINITIONS_ANNOUNCE,
        DEFINITIONS_UPDATE,
        TRUNCATE,
        SCHEMA_CHECK,
        @Deprecated INDEX_SCAN,
        REPLICATION_FINISHED,
        INTERNAL_RESPONSE, // responses to internal calls
        COUNTER_MUTATION,
        @Deprecated STREAMING_REPAIR_REQUEST,
        @Deprecated STREAMING_REPAIR_RESPONSE,
        SNAPSHOT, // Similar to nt snapshot
        MIGRATION_REQUEST,
        GOSSIP_SHUTDOWN,
        _TRACE, // dummy verb so we can use MS.droppedMessages
        ECHO,
        REPAIR_MESSAGE,
        // used as padding for backwards compatibility where a previous version needs to validate a verb from the future.
        PAXOS_PREPARE,
        PAXOS_PROPOSE,
        PAXOS_COMMIT,
        PAGED_RANGE,
    public enum Verb {
        MUTATION, @Deprecated BINARY, READ_REPAIR, READ,
        REQUEST_RESPONSE, // client-initiated reads and writes
        @Deprecated STREAM_INITIATE, @Deprecated STREAM_INITIATE_DONE, @Deprecated STREAM_REPLY,
        @Deprecated STREAM_REQUEST, RANGE_SLICE, @Deprecated BOOTSTRAP_TOKEN, @Deprecated TREE_REQUEST,
        @Deprecated TREE_RESPONSE, @Deprecated JOIN, GOSSIP_DIGEST_SYN, GOSSIP_DIGEST_ACK, GOSSIP_DIGEST_ACK2,
        @Deprecated DEFINITIONS_ANNOUNCE, DEFINITIONS_UPDATE, TRUNCATE, SCHEMA_CHECK, @Deprecated INDEX_SCAN,
        REPLICATION_FINISHED,
        INTERNAL_RESPONSE, // responses to internal calls
        COUNTER_MUTATION, @Deprecated STREAMING_REPAIR_REQUEST, @Deprecated STREAMING_REPAIR_RESPONSE,
        SNAPSHOT, // similar to nt snapshot
        MIGRATION_REQUEST, GOSSIP_SHUTDOWN,
        _TRACE, // dummy verb so we can use MS.droppedMessages
        ECHO, REPAIR_MESSAGE,
        // used as padding for backwards compatibility where a previous version
        // needs to validate a verb from the future.
        PAXOS_PREPARE, PAXOS_PROPOSE, PAXOS_COMMIT, PAGED_RANGE,
        // remember to add new verbs at the end, since we serialize by ordinal
        UNUSED_1,
        UNUSED_2,
        UNUSED_3,
        ;
        UNUSED_1, UNUSED_2, UNUSED_3,;
    }

    public void log(String str) {
        logger.finest(str);
    }

    public MessagingService() {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        try {
            jmxObjectName = new ObjectName(MBEAN_NAME);
            mbs.registerMBean(this, jmxObjectName);
            dropped = new HashMap<String, DroppedMessageMetrics>();
            for (Verb v : Verb.values()) {
                dropped.put(v.name(), new DroppedMessageMetrics(v));
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    static MessagingService instance;

    public static MessagingService getInstance() {
        if (instance == null) {
            instance = new MessagingService();
        }
        return instance;
    public MessagingService(APIClient client) {
        super(MBEAN_NAME, client,
                Stream.of(Verb.values()).map(v -> new DroppedMessageMetrics(v)).collect(Collectors.toList()));
    }

    /**
     * Pending tasks for Command(Mutations, Read etc) TCP Connections
     */
    @Override
    public Map<String, Integer> getCommandPendingTasks() {
        log(" getCommandPendingTasks()");
        return c.getMapStringIntegerValue("/messaging_service/messages/pending");
        return client.getMapStringIntegerValue("/messaging_service/messages/pending");
    }

    /**
     * Completed tasks for Command(Mutations, Read etc) TCP Connections
     */
    @Override
    public Map<String, Long> getCommandCompletedTasks() {
        log("getCommandCompletedTasks()");
        Map<String, Long> res = c
                .getListMapStringLongValue("/messaging_service/messages/sent");
        Map<String, Long> res = client.getListMapStringLongValue("/messaging_service/messages/sent");
        return res;
    }

    /**
     * Dropped tasks for Command(Mutations, Read etc) TCP Connections
     */
    @Override
    public Map<String, Long> getCommandDroppedTasks() {
        log(" getCommandDroppedTasks()");
        return c.getMapStringLongValue("/messaging_service/messages/dropped");
        return client.getMapStringLongValue("/messaging_service/messages/dropped");
    }

    /**
     * Pending tasks for Response(GOSSIP & RESPONSE) TCP Connections
     */
    @Override
    public Map<String, Integer> getResponsePendingTasks() {
        log(" getResponsePendingTasks()");
        return c.getMapStringIntegerValue("/messaging_service/messages/respond_pending");
        return client.getMapStringIntegerValue("/messaging_service/messages/respond_pending");
    }

    /**
     * Completed tasks for Response(GOSSIP & RESPONSE) TCP Connections
     */
    @Override
    public Map<String, Long> getResponseCompletedTasks() {
        log(" getResponseCompletedTasks()");
        return c.getMapStringLongValue("/messaging_service/messages/respond_completed");
        return client.getMapStringLongValue("/messaging_service/messages/respond_completed");
    }

    /**
     * dropped message counts for server lifetime
     */
    @Override
    public Map<String, Integer> getDroppedMessages() {
        log(" getDroppedMessages()");
        Map<String, Integer> res = new HashMap<String, Integer>();
        JsonArray arr = c.getJsonArray("/messaging_service/messages/dropped_by_ver");
        JsonArray arr = client.getJsonArray("/messaging_service/messages/dropped_by_ver");
        for (int i = 0; i < arr.size(); i++) {
            JsonObject obj = arr.getJsonObject(i);
            res.put(obj.getString("verb"), obj.getInt("count"));
@@ -180,20 +141,32 @@ public final class MessagingService implements MessagingServiceMBean {
        return res;
    }

    private Map<String, Integer> recent;

    /**
     * dropped message counts since last called
     */
    @Override
    public Map<String, Integer> getRecentlyDroppedMessages() {
        log(" getRecentlyDroppedMessages()");
        Map<String, Integer> map = new HashMap<String, Integer>();
        for (Map.Entry<String, DroppedMessageMetrics> entry : dropped.entrySet())
            map.put(entry.getKey(), entry.getValue().getRecentlyDropped());
        return map;

        Map<String, Integer> dropped = getDroppedMessages(), result = new HashMap<>(dropped), old = recent;

        recent = dropped;

        if (old != null) {
            for (Map.Entry<String, Integer> e : old.entrySet()) {
                result.put(e.getKey(), result.get(e.getKey()) - e.getValue());
            }
        }

        return result;
    }
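    // Worked example (illustrative): if the previous snapshot held MUTATION=5
    // and the current totals hold MUTATION=8, the returned map reports
    // MUTATION=3, i.e. the drops since the last call.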

    /**
     * Total number of timeouts happened on this node
     */
    @Override
    public long getTotalTimeouts() {
        log(" getTotalTimeouts()");
        Map<String, Long> timeouts = getTimeoutsPerHost();
@@ -207,14 +180,16 @@ public final class MessagingService implements MessagingServiceMBean {
    /**
     * Number of timeouts per host
     */
    @Override
    public Map<String, Long> getTimeoutsPerHost() {
        log(" getTimeoutsPerHost()");
        return c.getMapStringLongValue("/messaging_service/messages/timeout");
        return client.getMapStringLongValue("/messaging_service/messages/timeout");
    }

    /**
     * Number of timeouts since last check.
     */
    @Override
    public long getRecentTotalTimouts() {
        log(" getRecentTotalTimouts()");
        long timeoutCount = getTotalTimeouts();
@@ -226,25 +201,77 @@ public final class MessagingService implements MessagingServiceMBean {
    /**
     * Number of timeouts since last check per host.
     */
    @Override
    public Map<String, Long> getRecentTimeoutsPerHost() {
        log(" getRecentTimeoutsPerHost()");
        Map<String, Long> timeouts = getTimeoutsPerHost();
        Map<String, Long> result = new HashMap<String, Long>();
        for (Entry<String, Long> e : timeouts.entrySet()) {
            long res = e.getValue().longValue() -
                    ((resent_timeout.containsKey(e.getKey())) ? (resent_timeout.get(e.getKey())).longValue()
                            : 0);
            resent_timeout.put(e.getKey(), e.getValue());
            result.put(e.getKey(), res);
        for (Entry<String, Long> e : timeouts.entrySet()) {
            long res = e.getValue().longValue()
                    - ((resentTimeouts.containsKey(e.getKey())) ? (resentTimeouts.get(e.getKey())).longValue() : 0);
            resentTimeouts.put(e.getKey(), e.getValue());
            result.put(e.getKey(), res);
        }
        return result;
    }

    @Override
    public int getVersion(String address) throws UnknownHostException {
        log(" getVersion(String address) throws UnknownHostException");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("addr", address);
        return c.getIntValue("/messaging_service/version", queryParams);
        return client.getIntValue("/messaging_service/version", queryParams);
    }

    @Override
    public Map<String, Integer> getLargeMessagePendingTasks() {
        // TODO: implement for realsies
        return getCommandPendingTasks();
    }

    @Override
    public Map<String, Long> getLargeMessageCompletedTasks() {
        // TODO: implement for realsies
        return getCommandCompletedTasks();
    }

    @Override
    public Map<String, Long> getLargeMessageDroppedTasks() {
        // TODO: implement for realsies
        return getCommandDroppedTasks();
    }

    @Override
    public Map<String, Integer> getSmallMessagePendingTasks() {
        // TODO: implement for realsies
        return getResponsePendingTasks();
    }

    @Override
    public Map<String, Long> getSmallMessageCompletedTasks() {
        // TODO: implement for realsies
        return getResponseCompletedTasks();
    }

    @Override
    public Map<String, Long> getSmallMessageDroppedTasks() {
        // TODO: implement for realsies
        return emptyMap();
    }

    @Override
    public Map<String, Integer> getGossipMessagePendingTasks() {
        // TODO: implement for realsies
        return emptyMap();
    }

    @Override
    public Map<String, Long> getGossipMessageCompletedTasks() {
        // TODO: implement for realsies
        return emptyMap();
    }

    @Override
    public Map<String, Long> getGossipMessageDroppedTasks() {
        // TODO: implement for realsies
        return emptyMap();
    }
}

@@ -15,6 +15,13 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */

package org.apache.cassandra.net;

import java.net.UnknownHostException;
@@ -25,6 +32,51 @@ import java.util.Map;
 * Command/Response - Pending/Completed Tasks
 */
public interface MessagingServiceMBean {
    /**
     * Pending tasks for large message TCP Connections
     */
    public Map<String, Integer> getLargeMessagePendingTasks();

    /**
     * Completed tasks for large message TCP Connections
     */
    public Map<String, Long> getLargeMessageCompletedTasks();
|
||||
/**
|
||||
* Dropped tasks for large message TCP Connections
|
||||
*/
|
||||
public Map<String, Long> getLargeMessageDroppedTasks();
|
||||
|
||||
/**
|
||||
* Pending tasks for small message TCP Connections
|
||||
*/
|
||||
public Map<String, Integer> getSmallMessagePendingTasks();
|
||||
|
||||
/**
|
||||
* Completed tasks for small message TCP Connections
|
||||
*/
|
||||
public Map<String, Long> getSmallMessageCompletedTasks();
|
||||
|
||||
/**
|
||||
* Dropped tasks for small message TCP Connections
|
||||
*/
|
||||
public Map<String, Long> getSmallMessageDroppedTasks();
|
||||
|
||||
/**
|
||||
* Pending tasks for gossip message TCP Connections
|
||||
*/
|
||||
public Map<String, Integer> getGossipMessagePendingTasks();
|
||||
|
||||
/**
|
||||
* Completed tasks for gossip message TCP Connections
|
||||
*/
|
||||
public Map<String, Long> getGossipMessageCompletedTasks();
|
||||
|
||||
/**
|
||||
* Dropped tasks for gossip message TCP Connections
|
||||
*/
|
||||
public Map<String, Long> getGossipMessageDroppedTasks();
|
||||
|
||||
/**
|
||||
* Pending tasks for Command(Mutations, Read etc) TCP Connections
|
||||
*/
|
||||
|
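In scylla-jmx these interface methods are backed by HTTP calls into the Scylla REST API rather than by in-VM state. A rough sketch of that delegation style using the JAX-RS client API that Jersey provides; the base URL, the port 10000, and the plain-text response format are assumptions for illustration only:

import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;

public final class RestBackedVersion {
    private final Client http = ClientBuilder.newClient();

    /** Fetch the messaging protocol version of one endpoint over REST. */
    public int getVersion(String addr) {
        // Assumption: the Scylla REST API listens on localhost:10000.
        String body = http.target("http://localhost:10000")
                .path("/messaging_service/version")
                .queryParam("addr", addr)
                .request()
                .get(String.class);
        return Integer.parseInt(body.trim());
    }
}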
@ -24,22 +24,19 @@

package org.apache.cassandra.service;

import java.lang.management.ManagementFactory;
import java.util.concurrent.ExecutionException;
import java.util.logging.Logger;

import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;

import org.apache.cassandra.metrics.CacheMetrics;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.MetricsMBean;

public class CacheService implements CacheServiceMBean {
private static final java.util.logging.Logger logger = java.util.logging.Logger
.getLogger(CacheService.class.getName());
private APIClient c = new APIClient();
public class CacheService extends MetricsMBean implements CacheServiceMBean {
private static final Logger logger = Logger.getLogger(CacheService.class.getName());

public void log(String str) {
logger.finest(str);
@ -47,141 +44,141 @@ public class CacheService implements CacheServiceMBean {

public static final String MBEAN_NAME = "org.apache.cassandra.db:type=Caches";

public final CacheMetrics keyCache;
public final CacheMetrics rowCache;
public final CacheMetrics counterCache;
public final static CacheService instance = new CacheService();

public static CacheService getInstance() {
return instance;
}

private CacheService() {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();

try {
mbs.registerMBean(this, new ObjectName(MBEAN_NAME));
} catch (Exception e) {
throw new RuntimeException(e);
}

keyCache = new CacheMetrics("KeyCache", null);
rowCache = new CacheMetrics("RowCache", "row");
counterCache = new CacheMetrics("CounterCache", null);
public CacheService(APIClient client) {
super(MBEAN_NAME, client, new CacheMetrics("KeyCache", "key"), new CacheMetrics("RowCache", "row"),
new CacheMetrics("CounterCache", "counter"));
}

@Override
public int getRowCacheSavePeriodInSeconds() {
log(" getRowCacheSavePeriodInSeconds()");
return c.getIntValue("cache_service/row_cache_save_period");
return client.getIntValue("cache_service/row_cache_save_period");
}

@Override
public void setRowCacheSavePeriodInSeconds(int rcspis) {
log(" setRowCacheSavePeriodInSeconds(int rcspis)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("period", Integer.toString(rcspis));
c.post("cache_service/row_cache_save_period", queryParams);
client.post("cache_service/row_cache_save_period", queryParams);
}

@Override
public int getKeyCacheSavePeriodInSeconds() {
log(" getKeyCacheSavePeriodInSeconds()");
return c.getIntValue("cache_service/key_cache_save_period");
return client.getIntValue("cache_service/key_cache_save_period");
}

@Override
public void setKeyCacheSavePeriodInSeconds(int kcspis) {
log(" setKeyCacheSavePeriodInSeconds(int kcspis)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("period", Integer.toString(kcspis));
c.post("cache_service/key_cache_save_period", queryParams);
client.post("cache_service/key_cache_save_period", queryParams);
}

@Override
public int getCounterCacheSavePeriodInSeconds() {
log(" getCounterCacheSavePeriodInSeconds()");
return c.getIntValue("cache_service/counter_cache_save_period");
return client.getIntValue("cache_service/counter_cache_save_period");
}

@Override
public void setCounterCacheSavePeriodInSeconds(int ccspis) {
log(" setCounterCacheSavePeriodInSeconds(int ccspis)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("ccspis", Integer.toString(ccspis));
c.post("cache_service/counter_cache_save_period", queryParams);
client.post("cache_service/counter_cache_save_period", queryParams);
}

@Override
public int getRowCacheKeysToSave() {
log(" getRowCacheKeysToSave()");
return c.getIntValue("cache_service/row_cache_keys_to_save");
return client.getIntValue("cache_service/row_cache_keys_to_save");
}

@Override
public void setRowCacheKeysToSave(int rckts) {
log(" setRowCacheKeysToSave(int rckts)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("rckts", Integer.toString(rckts));
c.post("cache_service/row_cache_keys_to_save", queryParams);
client.post("cache_service/row_cache_keys_to_save", queryParams);
}

@Override
public int getKeyCacheKeysToSave() {
log(" getKeyCacheKeysToSave()");
return c.getIntValue("cache_service/key_cache_keys_to_save");
return client.getIntValue("cache_service/key_cache_keys_to_save");
}

@Override
public void setKeyCacheKeysToSave(int kckts) {
log(" setKeyCacheKeysToSave(int kckts)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("kckts", Integer.toString(kckts));
c.post("cache_service/key_cache_keys_to_save", queryParams);
client.post("cache_service/key_cache_keys_to_save", queryParams);
}

@Override
public int getCounterCacheKeysToSave() {
log(" getCounterCacheKeysToSave()");
return c.getIntValue("cache_service/counter_cache_keys_to_save");
return client.getIntValue("cache_service/counter_cache_keys_to_save");
}

@Override
public void setCounterCacheKeysToSave(int cckts) {
log(" setCounterCacheKeysToSave(int cckts)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("cckts", Integer.toString(cckts));
c.post("cache_service/counter_cache_keys_to_save", queryParams);
client.post("cache_service/counter_cache_keys_to_save", queryParams);
}

/**
* invalidate the key cache; for use after invalidating row cache
*/
@Override
public void invalidateKeyCache() {
log(" invalidateKeyCache()");
c.post("cache_service/invalidate_key_cache");
client.post("cache_service/invalidate_key_cache");
}

/**
* invalidate the row cache; for use after bulk loading via BinaryMemtable
*/
@Override
public void invalidateRowCache() {
log(" invalidateRowCache()");
c.post("cache_service/invalidate_row_cache");
client.post("cache_service/invalidate_row_cache");
}

@Override
public void invalidateCounterCache() {
log(" invalidateCounterCache()");
c.post("cache_service/invalidate_counter_cache");
client.post("cache_service/invalidate_counter_cache");
}

@Override
public void setRowCacheCapacityInMB(long capacity) {
log(" setRowCacheCapacityInMB(long capacity)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("capacity", Long.toString(capacity));
c.post("cache_service/row_cache_capacity", queryParams);
client.post("cache_service/row_cache_capacity", queryParams);
}

@Override
public void setKeyCacheCapacityInMB(long capacity) {
log(" setKeyCacheCapacityInMB(long capacity)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("capacity", Long.toString(capacity));
c.post("cache_service/key_cache_capacity", queryParams);
client.post("cache_service/key_cache_capacity", queryParams);
}

@Override
public void setCounterCacheCapacityInMB(long capacity) {
log(" setCounterCacheCapacityInMB(long capacity)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("capacity", Long.toString(capacity));
c.post("cache_service/counter_cache_capacity_in_mb", queryParams);
client.post("cache_service/counter_cache_capacity_in_mb", queryParams);
}

/**
@ -195,139 +192,9 @@ public class CacheService implements CacheServiceMBean {
* and the thread is interrupted, either before or during the
* activity.
*/
@Override
public void saveCaches() throws ExecutionException, InterruptedException {
log(" saveCaches() throws ExecutionException, InterruptedException");
c.post("cache_service/save_caches");
}

//
// remaining methods are provided for backwards compatibility; modern
// clients should use CacheMetrics instead
//

/**
* @see org.apache.cassandra.metrics.CacheMetrics#hits
*/
@Deprecated
public long getKeyCacheHits() {
log(" getKeyCacheHits()");
return keyCache.hits.count();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#hits
*/
@Deprecated
public long getRowCacheHits() {
log(" getRowCacheHits()");
return rowCache.hits.count();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#requests
*/
@Deprecated
public long getKeyCacheRequests() {
log(" getKeyCacheRequests()");
return keyCache.requests.count();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#requests
*/
@Deprecated
public long getRowCacheRequests() {
log(" getRowCacheRequests()");
return rowCache.requests.count();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#hitRate
*/
@Deprecated
public double getKeyCacheRecentHitRate() {
log(" getKeyCacheRecentHitRate()");
return keyCache.getRecentHitRate();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#hitRate
*/
@Deprecated
public double getRowCacheRecentHitRate() {
log(" getRowCacheRecentHitRate()");
return rowCache.getRecentHitRate();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#capacity
*/
@Deprecated
public long getRowCacheCapacityInMB() {
log(" getRowCacheCapacityInMB()");
return getRowCacheCapacityInBytes() / 1024 / 1024;
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#capacity
*/
@Deprecated
public long getRowCacheCapacityInBytes() {
log(" getRowCacheCapacityInBytes()");
return rowCache.capacity.value();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#capacity
*/
@Deprecated
public long getKeyCacheCapacityInMB() {
log(" getKeyCacheCapacityInMB()");
return getKeyCacheCapacityInBytes() / 1024 / 1024;
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#capacity
*/
@Deprecated
public long getKeyCacheCapacityInBytes() {
log(" getKeyCacheCapacityInBytes()");
return keyCache.capacity.value();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#size
*/
@Deprecated
public long getRowCacheSize() {
log(" getRowCacheSize()");
return rowCache.size.value();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#entries
*/
@Deprecated
public long getRowCacheEntries() {
log(" getRowCacheEntries()");
return rowCache.size.value();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#size
*/
@Deprecated
public long getKeyCacheSize() {
log(" getKeyCacheSize()");
return keyCache.size.value();
}

/**
* @see org.apache.cassandra.metrics.CacheMetrics#entries
*/
@Deprecated
public long getKeyCacheEntries() {
log(" getKeyCacheEntries()");
return keyCache.size.value();
client.post("cache_service/save_caches");
}
}
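All the setters above share one shape: log, collect query parameters into a MultivaluedMap, POST to the matching cache_service path. The same effect can be had directly against the REST API; a sketch, with the base URL an assumption for illustration:

import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;
import javax.ws.rs.core.Response;

public final class CacheTuning {
    public static void main(String[] args) {
        // Assumption: Scylla's REST API is reachable at localhost:10000.
        Response r = ClientBuilder.newClient()
                .target("http://localhost:10000")
                .path("cache_service/row_cache_save_period")
                .queryParam("period", 3600) // save the row cache every hour
                .request()
                .post(Entity.text(""));
        System.out.println("HTTP " + r.getStatus());
        r.close();
    }
}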
@ -15,19 +15,28 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* Copyright 2015 Cloudius Systems
*
* Modified by Cloudius Systems
*/

package org.apache.cassandra.service;

import java.util.concurrent.ExecutionException;

public interface CacheServiceMBean
{
public interface CacheServiceMBean {
public int getRowCacheSavePeriodInSeconds();

public void setRowCacheSavePeriodInSeconds(int rcspis);

public int getKeyCacheSavePeriodInSeconds();

public void setKeyCacheSavePeriodInSeconds(int kcspis);

public int getCounterCacheSavePeriodInSeconds();

public void setCounterCacheSavePeriodInSeconds(int ccspis);

public int getRowCacheKeysToSave();
@ -35,9 +44,11 @@ public interface CacheServiceMBean
public void setRowCacheKeysToSave(int rckts);

public int getKeyCacheKeysToSave();

public void setKeyCacheKeysToSave(int kckts);

public int getCounterCacheKeysToSave();

public void setCounterCacheKeysToSave(int cckts);

/**
@ -61,94 +72,13 @@ public interface CacheServiceMBean
/**
* save row and key caches
*
* @throws ExecutionException when attempting to retrieve the result of a task that aborted by throwing an exception
* @throws InterruptedException when a thread is waiting, sleeping, or otherwise occupied, and the thread is interrupted, either before or during the activity.
* @throws ExecutionException
* when attempting to retrieve the result of a task that aborted
* by throwing an exception
* @throws InterruptedException
* when a thread is waiting, sleeping, or otherwise occupied,
* and the thread is interrupted, either before or during the
* activity.
*/
public void saveCaches() throws ExecutionException, InterruptedException;

//
// remaining methods are provided for backwards compatibility; modern clients should use CacheMetrics instead
//

/**
* @see org.apache.cassandra.metrics.CacheMetrics#hits
*/
@Deprecated
public long getKeyCacheHits();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#hits
*/
@Deprecated
public long getRowCacheHits();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#requests
*/
@Deprecated
public long getKeyCacheRequests();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#requests
*/
@Deprecated
public long getRowCacheRequests();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#hitRate
*/
@Deprecated
public double getKeyCacheRecentHitRate();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#hitRate
*/
@Deprecated
public double getRowCacheRecentHitRate();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#capacity
*/
@Deprecated
public long getRowCacheCapacityInMB();
/**
* @see org.apache.cassandra.metrics.CacheMetrics#capacity
*/
@Deprecated
public long getRowCacheCapacityInBytes();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#capacity
*/
@Deprecated
public long getKeyCacheCapacityInMB();
/**
* @see org.apache.cassandra.metrics.CacheMetrics#capacity
*/
@Deprecated
public long getKeyCacheCapacityInBytes();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#size
*/
@Deprecated
public long getRowCacheSize();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#entries
*/
@Deprecated
public long getRowCacheEntries();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#size
*/
@Deprecated
public long getKeyCacheSize();

/**
* @see org.apache.cassandra.metrics.CacheMetrics#entries
*/
@Deprecated
public long getKeyCacheEntries();
}
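From a management client's point of view the refactoring is invisible: the bean keeps the org.apache.cassandra.db:type=Caches name, so existing tooling continues to work. A short proxy-based sketch (standard JMX; only the connection URL is an assumption):

import javax.management.JMX;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

import org.apache.cassandra.service.CacheServiceMBean;

public class CacheAdmin {
    public static void main(String[] args) throws Exception {
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            CacheServiceMBean caches = JMX.newMBeanProxy(connector.getMBeanServerConnection(),
                    new ObjectName("org.apache.cassandra.db:type=Caches"), CacheServiceMBean.class);
            System.out.println("key cache save period: " + caches.getKeyCacheSavePeriodInSeconds() + "s");
            caches.invalidateKeyCache(); // drop the key cache, e.g. before a benchmark run
        }
    }
}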
@ -24,257 +24,18 @@

package org.apache.cassandra.service;

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;
import javax.management.MBeanServer;
import javax.management.Notification;
import javax.management.NotificationListener;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeData;
import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMBean;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.sun.management.GarbageCollectionNotificationInfo;
import com.sun.management.GcInfo;

public class GCInspector implements NotificationListener, GCInspectorMXBean
{
public class GCInspector extends APIMBean implements GCInspectorMXBean {
public static final String MBEAN_NAME = "org.apache.cassandra.service:type=GCInspector";
private static final Logger logger = LoggerFactory.getLogger(GCInspector.class);
final static long MIN_LOG_DURATION = 200;
final static long MIN_LOG_DURATION_TPSTATS = 1000;

static final class State
{
final double maxRealTimeElapsed;
final double totalRealTimeElapsed;
final double sumSquaresRealTimeElapsed;
final double totalBytesReclaimed;
final double count;
final long startNanos;

State(double extraElapsed, double extraBytes, State prev)
{
this.totalRealTimeElapsed = prev.totalRealTimeElapsed + extraElapsed;
this.totalBytesReclaimed = prev.totalBytesReclaimed + extraBytes;
this.sumSquaresRealTimeElapsed = prev.sumSquaresRealTimeElapsed + (extraElapsed * extraElapsed);
this.startNanos = prev.startNanos;
this.count = prev.count + 1;
this.maxRealTimeElapsed = Math.max(prev.maxRealTimeElapsed, extraElapsed);
}

State()
{
count = maxRealTimeElapsed = sumSquaresRealTimeElapsed = totalRealTimeElapsed = totalBytesReclaimed = 0;
startNanos = System.nanoTime();
}
public GCInspector(APIClient client) {
super(client);
}

static final class GCState
{
final GarbageCollectorMXBean gcBean;
final boolean assumeGCIsPartiallyConcurrent;
final boolean assumeGCIsOldGen;
private String[] keys;
long lastGcTotalDuration = 0;


GCState(GarbageCollectorMXBean gcBean, boolean assumeGCIsPartiallyConcurrent, boolean assumeGCIsOldGen)
{
this.gcBean = gcBean;
this.assumeGCIsPartiallyConcurrent = assumeGCIsPartiallyConcurrent;
this.assumeGCIsOldGen = assumeGCIsOldGen;
}

String[] keys(GarbageCollectionNotificationInfo info)
{
if (keys != null)
return keys;

keys = info.getGcInfo().getMemoryUsageBeforeGc().keySet().toArray(new String[0]);
Arrays.sort(keys);

return keys;
}
}

final AtomicReference<State> state = new AtomicReference<>(new State());

final Map<String, GCState> gcStates = new HashMap<>();

public GCInspector()
{
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();

try
{
ObjectName gcName = new ObjectName(ManagementFactory.GARBAGE_COLLECTOR_MXBEAN_DOMAIN_TYPE + ",*");
for (ObjectName name : mbs.queryNames(gcName, null))
{
GarbageCollectorMXBean gc = ManagementFactory.newPlatformMXBeanProxy(mbs, name.getCanonicalName(), GarbageCollectorMXBean.class);
gcStates.put(gc.getName(), new GCState(gc, assumeGCIsPartiallyConcurrent(gc), assumeGCIsOldGen(gc)));
}

mbs.registerMBean(this, new ObjectName(MBEAN_NAME));
}
catch (Exception e)
{
throw new RuntimeException(e);
}
}

public static void register() throws Exception
{
GCInspector inspector = new GCInspector();
MBeanServer server = ManagementFactory.getPlatformMBeanServer();
ObjectName gcName = new ObjectName(ManagementFactory.GARBAGE_COLLECTOR_MXBEAN_DOMAIN_TYPE + ",*");
for (ObjectName name : server.queryNames(gcName, null))
{
server.addNotificationListener(name, inspector, null, null);
}
}

/*
* Assume that a GC type is at least partially concurrent and so a side channel method
* should be used to calculate application stopped time due to the GC.
*
* If the GC isn't recognized then assume that it is concurrent and we need to do our own calculation
* via the side channel.
*/
private static boolean assumeGCIsPartiallyConcurrent(GarbageCollectorMXBean gc)
{
switch (gc.getName())
{
//First two are from the serial collector
case "Copy":
case "MarkSweepCompact":
//Parallel collector
case "PS MarkSweep":
case "PS Scavenge":
case "G1 Young Generation":
//CMS young generation collector
case "ParNew":
return false;
case "ConcurrentMarkSweep":
case "G1 Old Generation":
return true;
default:
//Assume possibly concurrent if unsure
return true;
}
}

/*
* Assume that a GC type is an old generation collection so SSTableDeletingTask.rescheduleFailedTasks()
* should be invoked.
*
* Defaults to not invoking SSTableDeletingTask.rescheduleFailedTasks() on unrecognized GC names
*/
private static boolean assumeGCIsOldGen(GarbageCollectorMXBean gc)
{
switch (gc.getName())
{
case "Copy":
case "PS Scavenge":
case "G1 Young Generation":
case "ParNew":
return false;
case "MarkSweepCompact":
case "PS MarkSweep":
case "ConcurrentMarkSweep":
case "G1 Old Generation":
return true;
default:
//Assume not old gen otherwise, don't call
//SSTableDeletingTask.rescheduleFailedTasks()
return false;
}
}

public void handleNotification(final Notification notification, final Object handback)
{
String type = notification.getType();
if (type.equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION))
{
// retrieve the garbage collection notification information
CompositeData cd = (CompositeData) notification.getUserData();
GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo.from(cd);
String gcName = info.getGcName();
GcInfo gcInfo = info.getGcInfo();

long duration = gcInfo.getDuration();

/*
* The duration supplied in the notification info includes more than just
* application stopped time for concurrent GCs. Try and do a better job coming up with a good stopped time
* value by asking for and tracking cumulative time spent blocked in GC.
*/
GCState gcState = gcStates.get(gcName);
if (gcState.assumeGCIsPartiallyConcurrent)
{
long previousTotal = gcState.lastGcTotalDuration;
long total = gcState.gcBean.getCollectionTime();
gcState.lastGcTotalDuration = total;
duration = total - previousTotal; // may be zero for a really fast collection
}

StringBuilder sb = new StringBuilder();
sb.append(info.getGcName()).append(" GC in ").append(duration).append("ms. ");
long bytes = 0;
Map<String, MemoryUsage> beforeMemoryUsage = gcInfo.getMemoryUsageBeforeGc();
Map<String, MemoryUsage> afterMemoryUsage = gcInfo.getMemoryUsageAfterGc();
for (String key : gcState.keys(info))
{
MemoryUsage before = beforeMemoryUsage.get(key);
MemoryUsage after = afterMemoryUsage.get(key);
if (after != null && after.getUsed() != before.getUsed())
{
sb.append(key).append(": ").append(before.getUsed());
sb.append(" -> ");
sb.append(after.getUsed());
if (!key.equals(gcState.keys[gcState.keys.length - 1]))
sb.append("; ");
bytes += before.getUsed() - after.getUsed();
}
}

while (true)
{
State prev = state.get();
if (state.compareAndSet(prev, new State(duration, bytes, prev)))
break;
}

String st = sb.toString();
if (duration > MIN_LOG_DURATION)
logger.trace(st);
else if (logger.isDebugEnabled())
logger.debug(st);
}
}

public State getTotalSinceLastCheck()
{
return state.getAndSet(new State());
}

public double[] getAndResetStats()
{
State state = getTotalSinceLastCheck();
double[] r = new double[6];
r[0] = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - state.startNanos);
r[1] = state.maxRealTimeElapsed;
r[2] = state.totalRealTimeElapsed;
r[3] = state.sumSquaresRealTimeElapsed;
r[4] = state.totalBytesReclaimed;
r[5] = state.count;
return r;
@Override
public double[] getAndResetStats() {
return new double[6];
}
}
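The commit reduces GCInspector to a stub, which is reasonable: the interesting GC activity happens in the Scylla server process, not in this helper JVM. The deleted code remains a good reference for the JDK notification mechanism it used; a self-contained sketch of that mechanism (com.sun.management is JDK-specific, exactly as in the original):

import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import javax.management.NotificationEmitter;
import javax.management.openmbean.CompositeData;

import com.sun.management.GarbageCollectionNotificationInfo;

public class GcWatcher {
    public static void main(String[] args) throws Exception {
        for (GarbageCollectorMXBean gc : ManagementFactory.getGarbageCollectorMXBeans()) {
            // Every platform GC bean is also a NotificationEmitter.
            ((NotificationEmitter) gc).addNotificationListener((notification, handback) -> {
                if (GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION
                        .equals(notification.getType())) {
                    GarbageCollectionNotificationInfo info = GarbageCollectionNotificationInfo
                            .from((CompositeData) notification.getUserData());
                    System.out.println(info.getGcName() + " took " + info.getGcInfo().getDuration() + "ms");
                }
            }, null, null);
        }
        Thread.sleep(60_000); // keep the JVM alive long enough to observe a few collections
    }
}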
@ -18,8 +18,8 @@
*/
package org.apache.cassandra.service;

public interface GCInspectorMXBean
{
// returns { interval (ms), max(gc real time (ms)), sum(gc real time (ms)), sum((gc real time (ms))^2), sum(gc bytes), count(gc) }
public interface GCInspectorMXBean {
// returns { interval (ms), max(gc real time (ms)), sum(gc real time (ms)),
// sum((gc real time (ms))^2), sum(gc bytes), count(gc) }
public double[] getAndResetStats();
}
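Given the array contract in the comment, simple derived statistics fall out directly; a sketch under the assumption of that six-element layout (note that the scylla-jmx stub above currently returns all zeros):

public class GcStats {
    // stats layout per the interface comment:
    // { intervalMs, maxGcMs, sumGcMs, sumSquaresGcMs, sumBytes, count }
    static void summarize(double[] stats) {
        double count = stats[5];
        if (count == 0) {
            System.out.println("no collections in this interval");
            return;
        }
        double mean = stats[2] / count;
        // Var(X) = E[X^2] - (E[X])^2, computable from the running sum of squares.
        double variance = stats[3] / count - mean * mean;
        System.out.printf("%.0f GCs, mean %.1f ms, stddev %.1f ms, max %.1f ms, %.0f bytes reclaimed%n",
                count, mean, Math.sqrt(Math.max(0, variance)), stats[1], stats[4]);
    }

    public static void main(String[] args) {
        // Example: four pauses of 120, 100, 70 and 50 ms over one minute.
        summarize(new double[] { 60000, 120, 340, 31800, 5.0e8, 4 });
    }
}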
@ -23,340 +23,239 @@
*/
package org.apache.cassandra.service;

import java.lang.management.ManagementFactory;
import java.util.*;
import static java.util.Collections.emptySet;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.logging.Logger;

import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.ws.rs.core.MultivaluedHashMap;
import javax.ws.rs.core.MultivaluedMap;

import org.apache.cassandra.metrics.CASClientRequestMetrics;
import org.apache.cassandra.metrics.ClientRequestMetrics;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.MetricsMBean;

import org.apache.cassandra.metrics.*;

public class StorageProxy implements StorageProxyMBean {
public class StorageProxy extends MetricsMBean implements StorageProxyMBean {
public static final String MBEAN_NAME = "org.apache.cassandra.db:type=StorageProxy";
private static final java.util.logging.Logger logger = java.util.logging.Logger
.getLogger(StorageProxy.class.getName());

private APIClient c = new APIClient();
private static final Logger logger = Logger.getLogger(StorageProxy.class.getName());

public void log(String str) {
logger.finest(str);
}

private static final StorageProxy instance = new StorageProxy();

public static StorageProxy getInstance() {
return instance;
}

public static final String UNREACHABLE = "UNREACHABLE";

private static final ClientRequestMetrics readMetrics = new ClientRequestMetrics(
"storage_proxy/metrics/read", "Read");
private static final ClientRequestMetrics rangeMetrics = new ClientRequestMetrics(
"storage_proxy/metrics/range", "RangeSlice");
private static final ClientRequestMetrics writeMetrics = new ClientRequestMetrics(
"storage_proxy/metrics/write", "Write");
private static final CASClientRequestMetrics casWriteMetrics = new CASClientRequestMetrics(
"storage_proxy/metrics/cas_write", "CASWrite");
private static final CASClientRequestMetrics casReadMetrics = new CASClientRequestMetrics(
"storage_proxy/metrics/cas_read", "CASRead");

private static final double CONCURRENT_SUBREQUESTS_MARGIN = 0.10;

private StorageProxy() {
}

static {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
try {
mbs.registerMBean(instance, new ObjectName(MBEAN_NAME));
} catch (Exception e) {
throw new RuntimeException(e);
}

}

/**
* @see org.apache.cassandra.metrics.LatencyMetrics#lastOpCount
*/
@Deprecated
public long getReadOperations() {
log(" getReadOperations()");
return readMetrics.latency.count();
}

/**
* @see org.apache.cassandra.metrics.LatencyMetrics#totalLatencyHistogram
*/
@Deprecated
public long getTotalReadLatencyMicros() {
log(" getTotalReadLatencyMicros()");
return readMetrics.totalLatency.count();
}

/**
* @see org.apache.cassandra.metrics.LatencyMetrics#recentLatencyHistogram
*/
@Deprecated
public double getRecentReadLatencyMicros() {
log(" getRecentReadLatencyMicros()");
return readMetrics.getRecentLatency();
}

/**
* @see org.apache.cassandra.metrics.LatencyMetrics#totalLatencyHistogram
*/
@Deprecated
public long[] getTotalReadLatencyHistogramMicros() {
log(" getTotalReadLatencyHistogramMicros()");
return readMetrics.totalLatencyHistogram.getBuckets(false);
}

/**
* @see org.apache.cassandra.metrics.LatencyMetrics#recentLatencyHistogram
*/
@Deprecated
public long[] getRecentReadLatencyHistogramMicros() {
log(" getRecentReadLatencyHistogramMicros()");
return readMetrics.getRecentLatencyHistogram();
}

@Deprecated
public long getRangeOperations() {
log(" getRangeOperations()");
return rangeMetrics.latency.count();
}

@Deprecated
public long getTotalRangeLatencyMicros() {
log(" getTotalRangeLatencyMicros()");
return rangeMetrics.totalLatency.count();
}

@Deprecated
public double getRecentRangeLatencyMicros() {
log(" getRecentRangeLatencyMicros()");
return rangeMetrics.getRecentLatency();
}

@Deprecated
public long[] getTotalRangeLatencyHistogramMicros() {
log(" getTotalRangeLatencyHistogramMicros()");
return rangeMetrics.totalLatencyHistogram.getBuckets(false);
}

@Deprecated
public long[] getRecentRangeLatencyHistogramMicros() {
log(" getRecentRangeLatencyHistogramMicros()");
return rangeMetrics.getRecentLatencyHistogram();
}

@Deprecated
public long getWriteOperations() {
log(" getWriteOperations()");
return writeMetrics.latency.count();
}

@Deprecated
public long getTotalWriteLatencyMicros() {
log(" getTotalWriteLatencyMicros()");
return writeMetrics.totalLatency.count();
}

@Deprecated
public double getRecentWriteLatencyMicros() {
log(" getRecentWriteLatencyMicros()");
return writeMetrics.getRecentLatency();
}

@Deprecated
public long[] getTotalWriteLatencyHistogramMicros() {
log(" getTotalWriteLatencyHistogramMicros()");
return writeMetrics.totalLatencyHistogram.getBuckets(false);
}

@Deprecated
public long[] getRecentWriteLatencyHistogramMicros() {
log(" getRecentWriteLatencyHistogramMicros()");
return writeMetrics.getRecentLatencyHistogram();
public StorageProxy(APIClient client) {
super(MBEAN_NAME, client, new ClientRequestMetrics("Read", "storage_proxy/metrics/read"),
new ClientRequestMetrics("RangeSlice", "/storage_proxy/metrics/range"),
new ClientRequestMetrics("Write", "storage_proxy/metrics/write"),
new CASClientRequestMetrics("CASWrite", "storage_proxy/metrics/cas_write"),
new CASClientRequestMetrics("CASRead", "storage_proxy/metrics/cas_read"));
}

@Override
public long getTotalHints() {
log(" getTotalHints()");
return c.getLongValue("storage_proxy/total_hints");
return client.getLongValue("storage_proxy/total_hints");
}

@Override
public boolean getHintedHandoffEnabled() {
log(" getHintedHandoffEnabled()");
return c.getBooleanValue("storage_proxy/hinted_handoff_enabled");
return client.getBooleanValue("storage_proxy/hinted_handoff_enabled");
}

@Override
public Set<String> getHintedHandoffEnabledByDC() {
log(" getHintedHandoffEnabledByDC()");
return c.getSetStringValue(
"storage_proxy/hinted_handoff_enabled_by_dc");
return client.getSetStringValue("storage_proxy/hinted_handoff_enabled_by_dc");
}

@Override
public void setHintedHandoffEnabled(boolean b) {
log(" setHintedHandoffEnabled(boolean b)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("enable", Boolean.toString(b));
c.post("storage_proxy/hinted_handoff_enabled", queryParams);
client.post("storage_proxy/hinted_handoff_enabled", queryParams);
}

@Override
public void setHintedHandoffEnabledByDCList(String dcs) {
log(" setHintedHandoffEnabledByDCList(String dcs)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("dcs", dcs);
c.post("storage_proxy/hinted_handoff_enabled_by_dc_list");
client.post("storage_proxy/hinted_handoff_enabled_by_dc_list");
}

@Override
public int getMaxHintWindow() {
log(" getMaxHintWindow()");
return c.getIntValue("storage_proxy/max_hint_window");
return client.getIntValue("storage_proxy/max_hint_window");
}

@Override
public void setMaxHintWindow(int ms) {
log(" setMaxHintWindow(int ms)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("ms", Integer.toString(ms));
c.post("storage_proxy/max_hint_window", queryParams);
client.post("storage_proxy/max_hint_window", queryParams);
}

@Override
public int getMaxHintsInProgress() {
log(" getMaxHintsInProgress()");
return c.getIntValue("storage_proxy/max_hints_in_progress");
return client.getIntValue("storage_proxy/max_hints_in_progress");
}

@Override
public void setMaxHintsInProgress(int qs) {
log(" setMaxHintsInProgress(int qs)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("qs", Integer.toString(qs));
c.post("storage_proxy/max_hints_in_progress", queryParams);
client.post("storage_proxy/max_hints_in_progress", queryParams);
}

@Override
public int getHintsInProgress() {
log(" getHintsInProgress()");
return c.getIntValue("storage_proxy/hints_in_progress");
return client.getIntValue("storage_proxy/hints_in_progress");
}

@Override
public Long getRpcTimeout() {
log(" getRpcTimeout()");
return c.getLongValue("storage_proxy/rpc_timeout");
return client.getLongValue("storage_proxy/rpc_timeout");
}

@Override
public void setRpcTimeout(Long timeoutInMillis) {
log(" setRpcTimeout(Long timeoutInMillis)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("timeout", Long.toString(timeoutInMillis));
c.post("storage_proxy/rpc_timeout", queryParams);
client.post("storage_proxy/rpc_timeout", queryParams);
}

@Override
public Long getReadRpcTimeout() {
log(" getReadRpcTimeout()");
return c.getLongValue("storage_proxy/read_rpc_timeout");
return client.getLongValue("storage_proxy/read_rpc_timeout");
}

@Override
public void setReadRpcTimeout(Long timeoutInMillis) {
log(" setReadRpcTimeout(Long timeoutInMillis)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("timeout", Long.toString(timeoutInMillis));
c.post("storage_proxy/read_rpc_timeout", queryParams);
client.post("storage_proxy/read_rpc_timeout", queryParams);
}

@Override
public Long getWriteRpcTimeout() {
log(" getWriteRpcTimeout()");
return c.getLongValue("storage_proxy/write_rpc_timeout");
return client.getLongValue("storage_proxy/write_rpc_timeout");
}

@Override
public void setWriteRpcTimeout(Long timeoutInMillis) {
log(" setWriteRpcTimeout(Long timeoutInMillis)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("timeout", Long.toString(timeoutInMillis));
c.post("storage_proxy/write_rpc_timeout", queryParams);
client.post("storage_proxy/write_rpc_timeout", queryParams);
}

@Override
public Long getCounterWriteRpcTimeout() {
log(" getCounterWriteRpcTimeout()");
return c.getLongValue("storage_proxy/counter_write_rpc_timeout");
return client.getLongValue("storage_proxy/counter_write_rpc_timeout");
}

@Override
public void setCounterWriteRpcTimeout(Long timeoutInMillis) {
log(" setCounterWriteRpcTimeout(Long timeoutInMillis)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("timeout", Long.toString(timeoutInMillis));
c.post("storage_proxy/counter_write_rpc_timeout", queryParams);
client.post("storage_proxy/counter_write_rpc_timeout", queryParams);
}

@Override
public Long getCasContentionTimeout() {
log(" getCasContentionTimeout()");
return c.getLongValue("storage_proxy/cas_contention_timeout");
return client.getLongValue("storage_proxy/cas_contention_timeout");
}

@Override
public void setCasContentionTimeout(Long timeoutInMillis) {
log(" setCasContentionTimeout(Long timeoutInMillis)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("timeout", Long.toString(timeoutInMillis));
c.post("storage_proxy/cas_contention_timeout", queryParams);
client.post("storage_proxy/cas_contention_timeout", queryParams);
}

@Override
public Long getRangeRpcTimeout() {
log(" getRangeRpcTimeout()");
return c.getLongValue("storage_proxy/range_rpc_timeout");
return client.getLongValue("storage_proxy/range_rpc_timeout");
}

@Override
public void setRangeRpcTimeout(Long timeoutInMillis) {
log(" setRangeRpcTimeout(Long timeoutInMillis)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("timeout", Long.toString(timeoutInMillis));
c.post("storage_proxy/range_rpc_timeout", queryParams);
client.post("storage_proxy/range_rpc_timeout", queryParams);
}

@Override
public Long getTruncateRpcTimeout() {
log(" getTruncateRpcTimeout()");
return c.getLongValue("storage_proxy/truncate_rpc_timeout");
return client.getLongValue("storage_proxy/truncate_rpc_timeout");
}

@Override
public void setTruncateRpcTimeout(Long timeoutInMillis) {
log(" setTruncateRpcTimeout(Long timeoutInMillis)");
MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
queryParams.add("timeout", Long.toString(timeoutInMillis));
c.post("storage_proxy/truncate_rpc_timeout", queryParams);
}

public void reloadTriggerClasses() {
log(" reloadTriggerClasses()");
c.post("storage_proxy/reload_trigger_classes");
}

public long getReadRepairAttempted() {
log(" getReadRepairAttempted()");
return c.getLongValue("storage_proxy/read_repair_attempted");
}

public long getReadRepairRepairedBlocking() {
log(" getReadRepairRepairedBlocking()");
return c.getLongValue("storage_proxy/read_repair_repaired_blocking");
}

public long getReadRepairRepairedBackground() {
log(" getReadRepairRepairedBackground()");
return c.getLongValue("storage_proxy/read_repair_repaired_background");
}

/** Returns each live node's schema version */
public Map<String, List<String>> getSchemaVersions() {
log(" getSchemaVersions()");
return c.getMapStringListStrValue("storage_proxy/schema_versions");
client.post("storage_proxy/truncate_rpc_timeout", queryParams);
}

@Override
public void setNativeTransportMaxConcurrentConnections(
Long nativeTransportMaxConcurrentConnections) {
public void reloadTriggerClasses() {
log(" reloadTriggerClasses()");
client.post("storage_proxy/reload_trigger_classes");
}

@Override
public long getReadRepairAttempted() {
log(" getReadRepairAttempted()");
return client.getLongValue("storage_proxy/read_repair_attempted");
}

@Override
public long getReadRepairRepairedBlocking() {
log(" getReadRepairRepairedBlocking()");
return client.getLongValue("storage_proxy/read_repair_repaired_blocking");
}

@Override
public long getReadRepairRepairedBackground() {
log(" getReadRepairRepairedBackground()");
return client.getLongValue("storage_proxy/read_repair_repaired_background");
}

/** Returns each live node's schema version */
@Override
public Map<String, List<String>> getSchemaVersions() {
log(" getSchemaVersions()");
return client.getMapStringListStrValue("storage_proxy/schema_versions");
}

@Override
public void setNativeTransportMaxConcurrentConnections(Long nativeTransportMaxConcurrentConnections) {
// TODO Auto-generated method stub
log(" setNativeTransportMaxConcurrentConnections()");

@ -366,7 +265,25 @@ public class StorageProxy implements StorageProxyMBean {
public Long getNativeTransportMaxConcurrentConnections() {
// TODO Auto-generated method stub
log(" getNativeTransportMaxConcurrentConnections()");
return c.getLongValue("");
return client.getLongValue("");
}

@Override
public void enableHintsForDC(String dc) {
// TODO if/when scylla uses hints
log(" enableHintsForDC()");
}

@Override
public void disableHintsForDC(String dc) {
// TODO if/when scylla uses hints
log(" disableHintsForDC()");
}

@Override
public Set<String> getHintedHandoffDisabledDCs() {
// TODO if/when scylla uses hints
log(" getHintedHandoffDisabledDCs()");
return emptySet();
}
}
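As with the cache service, the MBean name is unchanged, so the rewrite stays invisible to JMX clients. A sketch of reading and toggling hinted handoff through the published interface; connection details are assumed as before:

import javax.management.JMX;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

import org.apache.cassandra.service.StorageProxyMBean;

public class HintsAdmin {
    public static void main(String[] args) throws Exception {
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
        try (JMXConnector connector = JMXConnectorFactory.connect(url)) {
            StorageProxyMBean proxy = JMX.newMBeanProxy(connector.getMBeanServerConnection(),
                    new ObjectName("org.apache.cassandra.db:type=StorageProxy"), StorageProxyMBean.class);
            System.out.println("total hints: " + proxy.getTotalHints());
            proxy.setHintedHandoffEnabled(false); // e.g. during a planned decommission
        }
    }
}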
@ -15,6 +15,13 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/

/*
* Copyright 2015 Cloudius Systems
*
* Modified by Cloudius Systems
*/

package org.apache.cassandra.service;

import java.util.List;
@ -22,66 +29,6 @@ import java.util.Map;
import java.util.Set;

public interface StorageProxyMBean {
/**
* @see org.apache.cassandra.metrics.LatencyMetrics#lastOpCount
*/
@Deprecated
public long getReadOperations();

/**
* @see org.apache.cassandra.metrics.LatencyMetrics#totalLatencyHistogram
*/
@Deprecated
public long getTotalReadLatencyMicros();

/**
* @see org.apache.cassandra.metrics.LatencyMetrics#recentLatencyHistogram
*/
@Deprecated
public double getRecentReadLatencyMicros();

/**
* @see org.apache.cassandra.metrics.LatencyMetrics#totalLatencyHistogram
*/
@Deprecated
public long[] getTotalReadLatencyHistogramMicros();

/**
* @see org.apache.cassandra.metrics.LatencyMetrics#recentLatencyHistogram
*/
@Deprecated
public long[] getRecentReadLatencyHistogramMicros();

@Deprecated
public long getRangeOperations();

@Deprecated
public long getTotalRangeLatencyMicros();

@Deprecated
public double getRecentRangeLatencyMicros();

@Deprecated
public long[] getTotalRangeLatencyHistogramMicros();

@Deprecated
public long[] getRecentRangeLatencyHistogramMicros();

@Deprecated
public long getWriteOperations();

@Deprecated
public long getTotalWriteLatencyMicros();

@Deprecated
public double getRecentWriteLatencyMicros();

@Deprecated
public long[] getTotalWriteLatencyHistogramMicros();

@Deprecated
public long[] getRecentWriteLatencyHistogramMicros();

public long getTotalHints();

public boolean getHintedHandoffEnabled();
@ -92,6 +39,12 @@ public interface StorageProxyMBean {

public void setHintedHandoffEnabledByDCList(String dcs);

public void enableHintsForDC(String dc);

public void disableHintsForDC(String dc);

public Set<String> getHintedHandoffDisabledDCs();

public int getMaxHintWindow();

public void setMaxHintWindow(int ms);
@ -130,8 +83,7 @@ public interface StorageProxyMBean {

public void setTruncateRpcTimeout(Long timeoutInMillis);

public void setNativeTransportMaxConcurrentConnections(
Long nativeTransportMaxConcurrentConnections);
public void setNativeTransportMaxConcurrentConnections(Long nativeTransportMaxConcurrentConnections);

public Long getNativeTransportMaxConcurrentConnections();

File diff suppressed because it is too large
@ -15,6 +15,13 @@
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Copyright 2015 Cloudius Systems
|
||||
*
|
||||
* Modified by Cloudius Systems
|
||||
*/
|
||||
|
||||
package org.apache.cassandra.service;
|
||||
|
||||
import java.io.IOException;
|
||||
@ -125,8 +132,7 @@ public interface StorageServiceMBean extends NotificationEmitter {
|
||||
*
|
||||
* @return mapping of ranges to end points
|
||||
*/
|
||||
public Map<List<String>, List<String>> getRangeToEndpointMap(
|
||||
String keyspace);
|
||||
public Map<List<String>, List<String>> getRangeToEndpointMap(String keyspace);
|
||||
|
||||
/**
|
||||
* Retrieve a map of range to rpc addresses that describe the ring topology
|
||||
@ -134,8 +140,7 @@ public interface StorageServiceMBean extends NotificationEmitter {
|
||||
*
|
||||
* @return mapping of ranges to rpc addresses
|
||||
*/
|
||||
public Map<List<String>, List<String>> getRangeToRpcaddressMap(
|
||||
String keyspace);
|
||||
public Map<List<String>, List<String>> getRangeToRpcaddressMap(String keyspace);
|
||||
|
||||
/**
|
||||
* The same as {@code describeRing(String)} but converts TokenRange to the
|
||||
@ -157,8 +162,7 @@ public interface StorageServiceMBean extends NotificationEmitter {
|
||||
* the keyspace to get the pending range map for.
|
||||
* @return a map of pending ranges to endpoints
|
||||
*/
|
||||
public Map<List<String>, List<String>> getPendingRangeToEndpointMap(
|
||||
String keyspace);
|
||||
public Map<List<String>, List<String>> getPendingRangeToEndpointMap(String keyspace);
|
||||
|
||||
/**
|
||||
* Retrieve a map of tokens to endpoints, including the bootstrapping ones.
|
||||
@ -173,13 +177,11 @@ public interface StorageServiceMBean extends NotificationEmitter {
|
||||
/** Retrieve the mapping of endpoint to host ID */
|
||||
public Map<String, String> getHostIdMap();
|
||||
|
||||
/**
|
||||
* Numeric load value.
|
||||
*
|
||||
* @see org.apache.cassandra.metrics.StorageMetrics#load
|
||||
*/
|
||||
@Deprecated
|
||||
public double getLoad();
|
||||
/** Retrieve the mapping of endpoint to host ID */
|
||||
public Map<String, String> getEndpointToHostId();
|
||||
|
||||
/** Retrieve the mapping of host ID to endpoint */
|
||||
public Map<String, String> getHostIdToEndpoint();
|
||||
|
||||
/** Human-readable load value */
|
||||
public String getLoadString();
|
||||
@ -206,11 +208,9 @@ public interface StorageServiceMBean extends NotificationEmitter {
|
||||
* - key for which we need to find the endpoint return value -
|
||||
* the endpoint responsible for this key
|
||||
*/
|
||||
public List<InetAddress> getNaturalEndpoints(String keyspaceName, String cf,
|
||||
String key);
|
||||
public List<InetAddress> getNaturalEndpoints(String keyspaceName, String cf, String key);
|
||||
|
||||
public List<InetAddress> getNaturalEndpoints(String keyspaceName,
|
||||
ByteBuffer key);
|
||||
public List<InetAddress> getNaturalEndpoints(String keyspaceName, ByteBuffer key);
|
||||
|
||||
/**
|
||||
* Takes the snapshot for the given keyspaces. A snapshot name must be
|
||||
@ -221,8 +221,18 @@ public interface StorageServiceMBean extends NotificationEmitter {
|
||||
* @param keyspaceNames
|
||||
* the name of the keyspaces to snapshot; empty means "all."
|
||||
*/
|
||||
public void takeSnapshot(String tag, String... keyspaceNames)
|
||||
throws IOException;
|
||||
public void takeSnapshot(String tag, String... keyspaceNames) throws IOException;
|
||||
|
||||
/**
|
||||
* Takes the snapshot of a specific column family. A snapshot name must be specified.
|
||||
*
|
||||
* @param keyspaceName the keyspace which holds the specified column family
|
||||
* @param tableName the table to snapshot
|
||||
* @param tag the tag given to the snapshot; may not be null or empty
|
||||
*/
|
||||
default void takeTableSnapshot(String keyspaceName, String tableName, String tag) throws IOException {
|
||||
takeColumnFamilySnapshot(keyspaceName, tableName, tag);
|
||||
}
|
||||
|
||||
/**
|
||||
* Takes the snapshot of a specific column family. A snapshot name must be
|
||||
@ -235,8 +245,7 @@ public interface StorageServiceMBean extends NotificationEmitter {
|
||||
* @param tag
|
||||
* the tag given to the snapshot; may not be null or empty
|
||||
*/
|
||||
public void takeColumnFamilySnapshot(String keyspaceName,
|
||||
String columnFamilyName, String tag) throws IOException;
|
||||
public void takeColumnFamilySnapshot(String keyspaceName, String columnFamilyName, String tag) throws IOException;
|
||||
|
||||
/**
|
||||
* Takes the snapshot of a multiple column family from different keyspaces.
|
||||
@ -248,15 +257,13 @@ public interface StorageServiceMBean extends NotificationEmitter {
|
||||
* list of columnfamily from different keyspace in the form of
|
||||
* ks1.cf1 ks2.cf2
|
||||
*/
|
||||
public void takeMultipleColumnFamilySnapshot(String tag,
|
||||
String... columnFamilyList) throws IOException;
|
||||
public void takeMultipleColumnFamilySnapshot(String tag, String... columnFamilyList) throws IOException;
|
||||
|
||||
/**
|
||||
* Remove the snapshot with the given name from the given keyspaces. If no
|
||||
* tag is specified we will remove all snapshots.
|
||||
*/
|
||||
public void clearSnapshot(String tag, String... keyspaceNames)
|
||||
throws IOException;
|
||||
public void clearSnapshot(String tag, String... keyspaceNames) throws IOException;
|
||||
|
||||
/**
|
||||
* Get the details of all the snapshot
|
||||
@ -272,19 +279,27 @@ public interface StorageServiceMBean extends NotificationEmitter {
|
||||
*/
|
||||
public long trueSnapshotsSize();
|
||||
|
||||
/**
|
||||
* Forces refresh of values stored in system.size_estimates of all column
|
||||
* families.
|
||||
*/
|
||||
public void refreshSizeEstimates() throws ExecutionException;
|
||||
|
||||
/**
|
||||
* Forces major compaction of a single keyspace
|
||||
*/
|
||||
public void forceKeyspaceCompaction(String keyspaceName,
|
||||
String... columnFamilies) throws IOException, ExecutionException,
|
||||
InterruptedException;
|
||||
public void forceKeyspaceCompaction(boolean splitOutput, String keyspaceName, String... tableNames)
|
||||
throws IOException, ExecutionException, InterruptedException;
|
||||
|
||||
/**
|
||||
* Trigger a cleanup of keys on a single keyspace
|
||||
*/
|
||||
public int forceKeyspaceCleanup(String keyspaceName,
|
||||
String... columnFamilies) throws IOException, ExecutionException,
|
||||
InterruptedException;
|
||||
@Deprecated
|
||||
public int forceKeyspaceCleanup(String keyspaceName, String... tables)
|
||||
throws IOException, ExecutionException, InterruptedException;
|
||||
|
||||
public int forceKeyspaceCleanup(int jobs, String keyspaceName, String... tables)
|
||||
throws IOException, ExecutionException, InterruptedException;
|
||||

/**
 * Scrub (deserialize + reserialize at the latest version, skipping bad rows
@ -294,23 +309,36 @@ public interface StorageServiceMBean extends NotificationEmitter {
 * Scrubbed CFs will be snapshotted first, if disableSnapshot is false
 */
@Deprecated
public int scrub(boolean disableSnapshot, boolean skipCorrupted,
        String keyspaceName, String... columnFamilies) throws IOException,
        ExecutionException, InterruptedException;
public int scrub(boolean disableSnapshot, boolean skipCorrupted, String keyspaceName, String... tableNames)
        throws IOException, ExecutionException, InterruptedException;

public int scrub(boolean disableSnapshot, boolean skipCorrupted,
        boolean checkData, String keyspaceName, String... columnFamilies)
        throws IOException, ExecutionException,
        InterruptedException;
@Deprecated
public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, String keyspaceName,
        String... tableNames) throws IOException, ExecutionException, InterruptedException;

public int scrub(boolean disableSnapshot, boolean skipCorrupted, boolean checkData, int jobs, String keyspaceName,
        String... columnFamilies) throws IOException, ExecutionException, InterruptedException;

/**
 * Verify (checksums of) the given keyspace. If tableNames array is empty,
 * all CFs are verified.
 *
 * The entire sstable will be read to ensure each cell validates if
 * extendedVerify is true
 */
public int verify(boolean extendedVerify, String keyspaceName, String... tableNames)
        throws IOException, ExecutionException, InterruptedException;

/**
 * Rewrite all sstables to the latest version. Unlike scrub, it doesn't skip
 * bad rows and does not snapshot sstables first.
 */
public int upgradeSSTables(String keyspaceName,
        boolean excludeCurrentVersion, String... columnFamilies)
        throws IOException, ExecutionException,
        InterruptedException;
@Deprecated
public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, String... tableNames)
        throws IOException, ExecutionException, InterruptedException;

public int upgradeSSTables(String keyspaceName, boolean excludeCurrentVersion, int jobs, String... tableNames)
        throws IOException, ExecutionException, InterruptedException;

/**
 * Flush all memtables for the given column families, or all column families
@ -320,9 +348,8 @@ public interface StorageServiceMBean extends NotificationEmitter {
 * @param columnFamilies
 * @throws IOException
 */
public void forceKeyspaceFlush(String keyspaceName,
        String... columnFamilies) throws IOException, ExecutionException,
        InterruptedException;
public void forceKeyspaceFlush(String keyspaceName, String... columnFamilies)
        throws IOException, ExecutionException, InterruptedException;

/**
 * Invoke repair asynchronously. You can track repair progress by
@ -330,12 +357,22 @@ public interface StorageServiceMBean extends NotificationEmitter {
 * Notification format is: type: "repair" userObject: int array of length 2,
 * [0]=command number, [1]=ordinal of ActiveRepairService.Status
 *
 * @param keyspace
 *            Keyspace name to repair. Should not be null.
 * @param options
 *            repair options.
 * @return Repair command number, or 0 if nothing to repair
 */
public int forceRepairAsync(String keyspace, boolean isSequential,
        Collection<String> dataCenters, Collection<String> hosts,
        boolean primaryRange, boolean repairedAt, String... columnFamilies)
        throws IOException;
public int repairAsync(String keyspace, Map<String, String> options);
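Because the MBean extends NotificationEmitter, the progress of a repairAsync call can be observed without polling. A minimal sketch, reusing connection and proxy from the snapshot example above; the option key shown is illustrative, not an exhaustive list:

    import java.util.HashMap;
    import java.util.Map;
    import javax.management.Notification;
    import javax.management.NotificationListener;
    import javax.management.ObjectName;

    // Subscribe before starting the repair so no notification is missed.
    NotificationListener listener = (Notification notification, Object handback) -> {
        if ("repair".equals(notification.getType())) {
            // Per the javadoc: int array of length 2,
            // [0] = command number, [1] = ordinal of ActiveRepairService.Status.
            int[] userObject = (int[]) notification.getUserData();
            System.out.println("repair #" + userObject[0] + " status ordinal " + userObject[1]);
        }
    };
    connection.addNotificationListener(
            new ObjectName("org.apache.cassandra.db:type=StorageService"), listener, null, null);

    Map<String, String> options = new HashMap<>();
    options.put("primaryRange", "true"); // illustrative option key
    int command = proxy.repairAsync("ks1", options);
    if (command == 0) {
        System.out.println("nothing to repair");
    }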

/**
 * @deprecated use {@link #repairAsync(String keyspace, Map options)}
 *             instead.
 */
@Deprecated
public int forceRepairAsync(String keyspace, boolean isSequential, Collection<String> dataCenters,
        Collection<String> hosts, boolean primaryRange, boolean fullRepair, String... tableNames)
        throws IOException;

/**
 * Invoke repair asynchronously. You can track repair progress by
@ -343,52 +380,54 @@ public interface StorageServiceMBean extends NotificationEmitter {
 * Notification format is: type: "repair" userObject: int array of length 2,
 * [0]=command number, [1]=ordinal of ActiveRepairService.Status
 *
 * @deprecated use {@link #repairAsync(String keyspace, Map options)}
 *             instead.
 *
 * @param parallelismDegree
 *            0: sequential, 1: parallel, 2: DC parallel
 * @return Repair command number, or 0 if nothing to repair
 */
public int forceRepairAsync(String keyspace, int parallelismDegree,
        Collection<String> dataCenters, Collection<String> hosts,
        boolean primaryRange, boolean fullRepair, String... columnFamilies);
@Deprecated
public int forceRepairAsync(String keyspace, int parallelismDegree, Collection<String> dataCenters,
        Collection<String> hosts, boolean primaryRange, boolean fullRepair, String... tableNames);

public int forceRepairAsync(String keyspace);
/**
 * Same as forceRepairAsync, but handles a specified range
 * @deprecated use {@link #repairAsync(String keyspace, Map options)}
 *             instead.
 */
public int forceRepairRangeAsync(String beginToken, String endToken,
        String keyspaceName, boolean isSequential,
        Collection<String> dataCenters, Collection<String> hosts,
        boolean repairedAt, String... columnFamilies) throws IOException;
@Deprecated
public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential,
        Collection<String> dataCenters, Collection<String> hosts, boolean fullRepair, String... tableNames)
        throws IOException;

/**
 * Same as forceRepairAsync, but handles a specified range
 *
 * @deprecated use {@link #repairAsync(String keyspace, Map options)}
 *             instead.
 *
 * @param parallelismDegree
 *            0: sequential, 1: parallel, 2: DC parallel
 */
public int forceRepairRangeAsync(String beginToken, String endToken,
        String keyspaceName, int parallelismDegree,
        Collection<String> dataCenters, Collection<String> hosts,
        boolean fullRepair, String... columnFamilies);
@Deprecated
public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, int parallelismDegree,
        Collection<String> dataCenters, Collection<String> hosts, boolean fullRepair, String... tableNames);

/**
 * Invoke repair asynchronously. You can track repair progress by
 * subscribing to JMX notifications sent from this StorageServiceMBean.
 * Notification format is: type: "repair" userObject: int array of length 2,
 * [0]=command number, [1]=ordinal of ActiveRepairService.Status
 *
 * @return Repair command number, or 0 if nothing to repair
 * @deprecated use {@link #repairAsync(String keyspace, Map options)}
 *             instead.
 */
public int forceRepairAsync(String keyspace, boolean isSequential,
        boolean isLocal, boolean primaryRange, boolean fullRepair,
        String... columnFamilies);
@Deprecated
public int forceRepairAsync(String keyspace, boolean isSequential, boolean isLocal, boolean primaryRange,
        boolean fullRepair, String... tableNames);

/**
 * Same as forceRepairAsync, but handles a specified range
 * @deprecated use {@link #repairAsync(String keyspace, Map options)}
 *             instead.
 */
public int forceRepairRangeAsync(String beginToken, String endToken,
        String keyspaceName, boolean isSequential, boolean isLocal,
        boolean repairedAt, String... columnFamilies);
@Deprecated
public int forceRepairRangeAsync(String beginToken, String endToken, String keyspaceName, boolean isSequential,
        boolean isLocal, boolean fullRepair, String... tableNames);

public void forceTerminateAllRepairSessions();

@ -439,8 +478,7 @@ public interface StorageServiceMBean extends NotificationEmitter {
 *
 * @see ch.qos.logback.classic.Level#toLevel(String)
 */
public void setLoggingLevel(String classQualifier, String level)
        throws Exception;
public void setLoggingLevel(String classQualifier, String level) throws Exception;

/** get the runtime logging levels */
public Map<String, String> getLoggingLevels();
@ -461,8 +499,7 @@ public interface StorageServiceMBean extends NotificationEmitter {
 * makes node unavailable for writes, flushes memtables and replays
 * commitlog.
 */
public void drain()
        throws IOException, InterruptedException, ExecutionException;
public void drain() throws IOException, InterruptedException, ExecutionException;

/**
 * Truncates (deletes) the given columnFamily from the provided keyspace.
@ -476,8 +513,7 @@ public interface StorageServiceMBean extends NotificationEmitter {
 * @param columnFamily
 *            The column family to delete data from.
 */
public void truncate(String keyspace, String columnFamily)
        throws TimeoutException, IOException;
public void truncate(String keyspace, String columnFamily) throws TimeoutException, IOException;

/**
 * given a list of tokens (representing the nodes in the cluster), returns a
@ -492,11 +528,14 @@ public interface StorageServiceMBean extends NotificationEmitter {
 * the same replication strategies and if yes then we will use the first,
 * else an empty Map is returned.
 */
public Map<InetAddress, Float> effectiveOwnership(String keyspace)
        throws IllegalStateException;
public Map<InetAddress, Float> effectiveOwnership(String keyspace) throws IllegalStateException;

public List<String> getKeyspaces();

public List<String> getNonSystemKeyspaces();

public List<String> getNonLocalStrategyKeyspaces();

/**
 * Change endpointsnitch class and dynamic-ness (and dynamic attributes) at
 * runtime
@ -513,9 +552,8 @@ public interface StorageServiceMBean extends NotificationEmitter {
 * @param dynamicBadnessThreshold
 *            double, (default 0.0)
 */
public void updateSnitch(String epSnitchClassName, Boolean dynamic,
        Integer dynamicUpdateInterval, Integer dynamicResetInterval,
        Double dynamicBadnessThreshold) throws ClassNotFoundException;
public void updateSnitch(String epSnitchClassName, Boolean dynamic, Integer dynamicUpdateInterval,
        Integer dynamicResetInterval, Double dynamicBadnessThreshold) throws ClassNotFoundException;

// allows a user to forcibly 'kill' a sick node
public void stopGossiping();
@ -552,14 +590,13 @@ public interface StorageServiceMBean extends NotificationEmitter {

public boolean isJoined();

@Deprecated
public int getExceptionCount();

public void setStreamThroughputMbPerSec(int value);

public int getStreamThroughputMbPerSec();

public int getCompactionThroughputMbPerSec();
public void setInterDCStreamThroughputMbPerSec(int value);

public int getInterDCStreamThroughputMbPerSec();

public void setCompactionThroughputMbPerSec(int value);

@ -615,8 +652,7 @@ public interface StorageServiceMBean extends NotificationEmitter {
/**
 * rebuild the specified indexes
 */
public void rebuildSecondaryIndex(String ksName, String cfName,
        String... idxNames);
public void rebuildSecondaryIndex(String ksName, String cfName, String... idxNames);

public void resetLocalSchema() throws IOException;

@ -635,13 +671,11 @@ public interface StorageServiceMBean extends NotificationEmitter {
/**
 * Returns the configured tracing probability.
 */
public double getTracingProbability();
public double getTraceProbability();

void disableAutoCompaction(String ks, String... columnFamilies)
        throws IOException;
void disableAutoCompaction(String ks, String... columnFamilies) throws IOException;

void enableAutoCompaction(String ks, String... columnFamilies)
        throws IOException;
void enableAutoCompaction(String ks, String... columnFamilies) throws IOException;

public void deliverHints(String host) throws UnknownHostException;

@ -663,8 +697,19 @@ public interface StorageServiceMBean extends NotificationEmitter {
/** Sets the threshold for abandoning queries with many tombstones */
public void setTombstoneFailureThreshold(int tombstoneDebugThreshold);

/** Returns the threshold for rejecting queries due to a large batch size */
public int getBatchSizeFailureThreshold();

/** Sets the threshold for rejecting queries due to a large batch size */
public void setBatchSizeFailureThreshold(int batchSizeDebugThreshold);

/**
 * Sets the hinted handoff throttle in kb per second, per delivery thread.
 */
public void setHintedHandoffThrottleInKB(int throttleInKB);

/**
 * Resume bootstrap streaming when there is failed data streaming.
 */
public boolean resumeBootstrap();
}

@ -29,6 +29,7 @@ import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Map;

import javax.json.JsonArray;
import javax.json.JsonObject;

@ -37,25 +38,21 @@ import com.google.common.base.Objects;
/**
 * ProgressInfo contains file transfer progress.
 */
public class ProgressInfo implements Serializable
{
@SuppressWarnings("serial")
public class ProgressInfo implements Serializable {
    /**
     * Direction of the stream.
     */
    public static enum Direction
    {
        OUT(0),
        IN(1);
    public static enum Direction {
        OUT(0), IN(1);

        public final byte code;

        private Direction(int code)
        {
        private Direction(int code) {
            this.code = (byte) code;
        }

        public static Direction fromByte(byte direction)
        {
        public static Direction fromByte(byte direction) {
            return direction == 0 ? OUT : IN;
        }
    }
@ -67,8 +64,8 @@ public class ProgressInfo implements Serializable
    public final long currentBytes;
    public final long totalBytes;

    public ProgressInfo(InetAddress peer, int sessionIndex, String fileName, Direction direction, long currentBytes, long totalBytes)
    {
    public ProgressInfo(InetAddress peer, int sessionIndex, String fileName, Direction direction, long currentBytes,
            long totalBytes) {
        assert totalBytes > 0;

        this.peer = peer;
@ -81,12 +78,9 @@ public class ProgressInfo implements Serializable

    static public ProgressInfo fromJsonObject(JsonObject obj) {
        try {
            return new ProgressInfo(InetAddress.getByName(obj.getString("peer")),
                    obj.getInt("session_index"),
                    obj.getString("file_name"),
                    Direction.valueOf(obj.getString("direction")),
                    obj.getJsonNumber("current_bytes").longValue(),
                    obj.getJsonNumber("total_bytes").longValue());
            return new ProgressInfo(InetAddress.getByName(obj.getString("peer")), obj.getInt("session_index"),
                    obj.getString("file_name"), Direction.valueOf(obj.getString("direction")),
                    obj.getJsonNumber("current_bytes").longValue(), obj.getJsonNumber("total_bytes").longValue());
        } catch (UnknownHostException e) {
            // Not supposed to get here
        }
@ -104,45 +98,55 @@ public class ProgressInfo implements Serializable
        }
        return res;
    }

    /**
     * @return true if file transfer is completed
     */
    public boolean isCompleted()
    {
    public boolean isCompleted() {
        return currentBytes >= totalBytes;
    }

    /**
     * ProgressInfo is considered to be equal only when all attributes except currentBytes are equal.
     * ProgressInfo is considered to be equal only when all attributes except
     * currentBytes are equal.
     */
    @Override
    public boolean equals(Object o)
    {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }

        ProgressInfo that = (ProgressInfo) o;

        if (totalBytes != that.totalBytes) return false;
        if (direction != that.direction) return false;
        if (!fileName.equals(that.fileName)) return false;
        if (sessionIndex != that.sessionIndex) return false;
        if (totalBytes != that.totalBytes) {
            return false;
        }
        if (direction != that.direction) {
            return false;
        }
        if (!fileName.equals(that.fileName)) {
            return false;
        }
        if (sessionIndex != that.sessionIndex) {
            return false;
        }
        return peer.equals(that.peer);
    }

    @Override
    public int hashCode()
    {
    public int hashCode() {
        return Objects.hashCode(peer, sessionIndex, fileName, direction, totalBytes);
    }

    @Override
    public String toString()
    {
    public String toString() {
        StringBuilder sb = new StringBuilder(fileName);
        sb.append(" ").append(currentBytes);
        sb.append("/").append(totalBytes).append(" bytes");
        sb.append("(").append(currentBytes*100/totalBytes).append("%) ");
        sb.append("(").append(currentBytes * 100 / totalBytes).append("%) ");
        sb.append(direction == Direction.OUT ? "sent to " : "received from ");
        sb.append("idx:").append(sessionIndex);
        sb.append(peer);

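The equals() contract above deliberately ignores currentBytes, so a progress update compares equal to (and hashes like) the entry it supersedes. A small illustrative sketch; the peer address and file name are made up:

    import java.net.InetAddress;
    import java.util.HashSet;
    import java.util.Set;

    // Fragment: enclosing method and checked exceptions elided.
    InetAddress peer = InetAddress.getByName("127.0.0.1"); // placeholder peer
    ProgressInfo before = new ProgressInfo(peer, 0, "f.db", ProgressInfo.Direction.OUT, 10, 100);
    ProgressInfo after = new ProgressInfo(peer, 0, "f.db", ProgressInfo.Direction.OUT, 90, 100);

    Set<ProgressInfo> seen = new HashSet<>();
    seen.add(before);
    // Equal despite different currentBytes, so the set still has one element.
    assert before.equals(after) && seen.contains(after);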
@ -28,30 +28,26 @@ import java.io.Serializable;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import javax.json.JsonArray;
import javax.json.JsonObject;

import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Iterables;

/**
 * Stream session info.
 */
public final class SessionInfo implements Serializable
{
@SuppressWarnings("serial")
public final class SessionInfo implements Serializable {
    public final InetAddress peer;
    public final int sessionIndex;
    public final InetAddress connecting;
    /** Immutable collection of receiving summaries */
    public final Collection<StreamSummary> receivingSummaries;
    /** Immutable collection of sending summaries*/
    /** Immutable collection of sending summaries */
    public final Collection<StreamSummary> sendingSummaries;
    /** Current session state */
    public final StreamSession.State state;
@ -67,15 +63,10 @@ public final class SessionInfo implements Serializable
        return null;
    }

    public SessionInfo(InetAddress peer,
            int sessionIndex,
            InetAddress connecting,
            Collection<StreamSummary> receivingSummaries,
            Collection<StreamSummary> sendingSummaries,
            StreamSession.State state,
            Map<String, ProgressInfo> receivingFiles,
            Map<String, ProgressInfo> sendingFiles) {
    public SessionInfo(InetAddress peer, int sessionIndex, InetAddress connecting,
            Collection<StreamSummary> receivingSummaries, Collection<StreamSummary> sendingSummaries,
            StreamSession.State state, Map<String, ProgressInfo> receivingFiles,
            Map<String, ProgressInfo> sendingFiles) {
        this.peer = peer;
        this.sessionIndex = sessionIndex;
        this.connecting = connecting;
@ -86,24 +77,19 @@ public final class SessionInfo implements Serializable
        this.state = state;
    }

    public SessionInfo(String peer,
            int sessionIndex,
            String connecting,
            Collection<StreamSummary> receivingSummaries,
            Collection<StreamSummary> sendingSummaries,
            String state,
            Map<String, ProgressInfo> receivingFiles,
    public SessionInfo(String peer, int sessionIndex, String connecting, Collection<StreamSummary> receivingSummaries,
            Collection<StreamSummary> sendingSummaries, String state, Map<String, ProgressInfo> receivingFiles,
            Map<String, ProgressInfo> sendingFiles) {
        this(address(peer), sessionIndex, address(connecting), receivingSummaries, sendingSummaries,
                StreamSession.State.valueOf(state), receivingFiles, sendingFiles);
    }

    ProgressInfo in;

    public static SessionInfo fromJsonObject(JsonObject obj) {
        return new SessionInfo(obj.getString("peer"), obj.getInt("session_index"),
                obj.getString("connecting"),
        return new SessionInfo(obj.getString("peer"), obj.getInt("session_index"), obj.getString("connecting"),
                StreamSummary.fromJsonArr(obj.getJsonArray("receiving_summaries")),
                StreamSummary.fromJsonArr(obj.getJsonArray("sending_summaries")),
                obj.getString("state"),
                StreamSummary.fromJsonArr(obj.getJsonArray("sending_summaries")), obj.getString("state"),
                ProgressInfo.fromJArrray(obj.getJsonArray("receiving_files")),
                ProgressInfo.fromJArrray(obj.getJsonArray("sending_files")));
    }
@ -118,135 +104,117 @@ public final class SessionInfo implements Serializable
        return res;
    }

    public boolean isFailed()
    {
    public boolean isFailed() {
        return state == StreamSession.State.FAILED;
    }

    /**
     * Update progress of receiving/sending file.
     *
     * @param newProgress new progress info
     * @param newProgress
     *            new progress info
     */
    public void updateProgress(ProgressInfo newProgress)
    {
    public void updateProgress(ProgressInfo newProgress) {
        assert peer.equals(newProgress.peer);

        Map<String, ProgressInfo> currentFiles = newProgress.direction == ProgressInfo.Direction.IN
                ? receivingFiles : sendingFiles;
        Map<String, ProgressInfo> currentFiles = newProgress.direction == ProgressInfo.Direction.IN ? receivingFiles
                : sendingFiles;
        currentFiles.put(newProgress.fileName, newProgress);
    }

    public Collection<ProgressInfo> getReceivingFiles()
    {
    public Collection<ProgressInfo> getReceivingFiles() {
        return receivingFiles.values();
    }

    public Collection<ProgressInfo> getSendingFiles()
    {
    public Collection<ProgressInfo> getSendingFiles() {
        return sendingFiles.values();
    }

    /**
     * @return total number of files already received.
     */
    public long getTotalFilesReceived()
    {
    public long getTotalFilesReceived() {
        return getTotalFilesCompleted(receivingFiles.values());
    }

    /**
     * @return total number of files already sent.
     */
    public long getTotalFilesSent()
    {
    public long getTotalFilesSent() {
        return getTotalFilesCompleted(sendingFiles.values());
    }

    /**
     * @return total size(in bytes) already received.
     */
    public long getTotalSizeReceived()
    {
    public long getTotalSizeReceived() {
        return getTotalSizeInProgress(receivingFiles.values());
    }

    /**
     * @return total size(in bytes) already sent.
     */
    public long getTotalSizeSent()
    {
    public long getTotalSizeSent() {
        return getTotalSizeInProgress(sendingFiles.values());
    }

    /**
     * @return total number of files to receive in the session
     */
    public long getTotalFilesToReceive()
    {
    public long getTotalFilesToReceive() {
        return getTotalFiles(receivingSummaries);
    }

    /**
     * @return total number of files to send in the session
     */
    public long getTotalFilesToSend()
    {
    public long getTotalFilesToSend() {
        return getTotalFiles(sendingSummaries);
    }

    /**
     * @return total size(in bytes) to receive in the session
     */
    public long getTotalSizeToReceive()
    {
    public long getTotalSizeToReceive() {
        return getTotalSizes(receivingSummaries);
    }

    /**
     * @return total size(in bytes) to send in the session
     */
    public long getTotalSizeToSend()
    {
    public long getTotalSizeToSend() {
        return getTotalSizes(sendingSummaries);
    }

    private long getTotalSizeInProgress(Collection<ProgressInfo> files)
    {
    private long getTotalSizeInProgress(Collection<ProgressInfo> files) {
        long total = 0;
        for (ProgressInfo file : files)
        for (ProgressInfo file : files) {
            total += file.currentBytes;
        }
        return total;
    }

    private long getTotalFiles(Collection<StreamSummary> summaries)
    {
    private long getTotalFiles(Collection<StreamSummary> summaries) {
        long total = 0;
        for (StreamSummary summary : summaries)
        for (StreamSummary summary : summaries) {
            total += summary.files;
        }
        return total;
    }

    private long getTotalSizes(Collection<StreamSummary> summaries)
    {
    private long getTotalSizes(Collection<StreamSummary> summaries) {
        if (summaries == null) {
            return 0;
        }
        long total = 0;
        for (StreamSummary summary : summaries)
        for (StreamSummary summary : summaries) {
            total += summary.totalSize;
        }
        return total;
    }

    private long getTotalFilesCompleted(Collection<ProgressInfo> files)
    {
        Iterable<ProgressInfo> completed = Iterables.filter(files, new Predicate<ProgressInfo>()
        {
            public boolean apply(ProgressInfo input)
            {
                return input.isCompleted();
            }
        });
    private long getTotalFilesCompleted(Collection<ProgressInfo> files) {
        Iterable<ProgressInfo> completed = Iterables.filter(files, input -> input.isCompleted());
        return Iterables.size(completed);
    }
}

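The Guava Predicate above collapses to a lambda because Java 8 treats Guava's single-method interfaces as functional interfaces. The same count could also be expressed with java.util.stream; shown here only as an alternative sketch, not what the code actually does:

    // Equivalent count using java.util.stream instead of Guava (alternative sketch).
    private long getTotalFilesCompleted(Collection<ProgressInfo> files) {
        return files.stream().filter(ProgressInfo::isCompleted).count();
    }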
@ -26,21 +26,23 @@ package org.apache.cassandra.streaming;

import java.util.HashSet;
import java.util.Set;
import java.util.logging.Logger;

import javax.json.JsonArray;
import javax.json.JsonObject;
import javax.management.ListenerNotFoundException;
import javax.management.MBeanNotificationInfo;
import javax.management.NotificationBroadcasterSupport;
import javax.management.NotificationFilter;
import javax.management.NotificationListener;
import javax.management.openmbean.CompositeData;

import org.apache.cassandra.streaming.management.StreamStateCompositeData;

import com.google.common.base.Function;
import com.google.common.collect.Iterables;
import com.google.common.collect.Sets;
import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMBean;

/**
 * StreamManager manages currently running {@link StreamResultFuture}s and
@ -49,62 +51,51 @@ import com.scylladb.jmx.api.APIClient;
 * All stream operations should be created through this class to track streaming
 * status and progress.
 */
public class StreamManager implements StreamManagerMBean {
    public static final StreamManager instance = new StreamManager();
    private static final java.util.logging.Logger logger = java.util.logging.Logger
            .getLogger(StreamManager.class.getName());
    private APIClient c = new APIClient();
public class StreamManager extends APIMBean implements StreamManagerMBean {
    private static final Logger logger = Logger.getLogger(StreamManager.class.getName());

    private final NotificationBroadcasterSupport notifier = new NotificationBroadcasterSupport();

    public StreamManager(APIClient c) {
        super(c);
    }

    public Set<StreamState> getState() {
        JsonArray arr = c.getJsonArray("/stream_manager/");
        JsonArray arr = client.getJsonArray("/stream_manager/");
        Set<StreamState> res = new HashSet<StreamState>();
        for (int i = 0; i < arr.size(); i++) {
            JsonObject obj = arr.getJsonObject(i);
            res.add(new StreamState(obj.getString("plan_id"), obj.getString("description"), SessionInfo.fromJsonArr(obj.getJsonArray("sessions"))));
            res.add(new StreamState(obj.getString("plan_id"), obj.getString("description"),
                    SessionInfo.fromJsonArr(obj.getJsonArray("sessions"))));
        }
        return res;
    }

    public static StreamManager getInstance() {
        return instance;
    }
    @Override
    public Set<CompositeData> getCurrentStreams() {
        logger.finest("getCurrentStreams");
        return Sets.newHashSet(Iterables.transform(getState(), new Function<StreamState, CompositeData>()
        {
            public CompositeData apply(StreamState input)
            {
                return StreamStateCompositeData.toCompositeData(input);
            }
        }));
        return Sets
                .newHashSet(Iterables.transform(getState(), input -> StreamStateCompositeData.toCompositeData(input)));
    }

    @Override
    public void removeNotificationListener(NotificationListener arg0,
            NotificationFilter arg1, Object arg2)
            throws ListenerNotFoundException {
        // TODO Auto-generated method stub

    public void addNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback) {
        notifier.addNotificationListener(listener, filter, handback);
    }

    @Override
    public void addNotificationListener(NotificationListener arg0,
            NotificationFilter arg1, Object arg2)
            throws IllegalArgumentException {
        // TODO Auto-generated method stub
    public void removeNotificationListener(NotificationListener listener) throws ListenerNotFoundException {
        notifier.removeNotificationListener(listener);
    }

    @Override
    public void removeNotificationListener(NotificationListener listener, NotificationFilter filter, Object handback)
            throws ListenerNotFoundException {
        notifier.removeNotificationListener(listener, filter, handback);
    }

    @Override
    public MBeanNotificationInfo[] getNotificationInfo() {
        // TODO Auto-generated method stub
        return null;
    }

    @Override
    public void removeNotificationListener(NotificationListener arg0)
            throws ListenerNotFoundException {
        // TODO Auto-generated method stub

        return notifier.getNotificationInfo();
    }
}

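getState() above assumes a specific JSON shape from the /stream_manager/ REST endpoint. Reconstructed from the keys read here and in SessionInfo.fromJsonObject, one array element would look roughly like the object built below; every value is illustrative:

    import javax.json.Json;
    import javax.json.JsonObject;

    // Illustrative reconstruction of one element of the /stream_manager/ array,
    // based on the keys read by getState() and SessionInfo.fromJsonObject.
    JsonObject example = Json.createObjectBuilder()
            .add("plan_id", "f9cc9d10-0000-0000-0000-000000000000") // placeholder UUID
            .add("description", "Bootstrap")
            .add("sessions", Json.createArrayBuilder()
                    .add(Json.createObjectBuilder()
                            .add("peer", "127.0.0.1")
                            .add("session_index", 0)
                            .add("connecting", "127.0.0.1")
                            .add("receiving_summaries", Json.createArrayBuilder())
                            .add("sending_summaries", Json.createArrayBuilder())
                            .add("state", "STREAMING")
                            .add("receiving_files", Json.createArrayBuilder())
                            .add("sending_files", Json.createArrayBuilder())))
            .build();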
@ -25,6 +25,7 @@
package org.apache.cassandra.streaming;

import java.util.Set;

import javax.management.NotificationEmitter;
import javax.management.openmbean.CompositeData;

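A JMX client consumes getCurrentStreams() as CompositeData keyed by the item names defined later in StreamStateCompositeData. A minimal sketch, reusing connection from the snapshot example; the org.apache.cassandra.net:type=StreamManager object name is Cassandra's conventional one and should be treated as an assumption for Scylla:

    import javax.management.JMX;
    import javax.management.ObjectName;
    import javax.management.openmbean.CompositeData;

    StreamManagerMBean streams = JMX.newMBeanProxy(connection,
            new ObjectName("org.apache.cassandra.net:type=StreamManager"),
            StreamManagerMBean.class, true);
    for (CompositeData stream : streams.getCurrentStreams()) {
        // Item names come from StreamStateCompositeData.ITEM_NAMES below.
        System.out.println(stream.get("planId") + ": " + stream.get("description"));
    }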
@ -25,81 +25,80 @@
package org.apache.cassandra.streaming;

/**
 * Handles the streaming a one or more section of one of more sstables to and from a specific
 * remote node.
 * Handles the streaming of one or more sections of one or more sstables to and
 * from a specific remote node.
 *
 * Both this node and the remote one will create a similar symmetrical StreamSession. A streaming
 * session has the following life-cycle:
 * Both this node and the remote one will create a similar symmetrical
 * StreamSession. A streaming session has the following life-cycle:
 *
 * 1. Connections Initialization
 *
 * (a) A node (the initiator in the following) create a new StreamSession, initialize it (init())
 * and then start it (start()). Start will create a {@link ConnectionHandler} that will create
 * two connections to the remote node (the follower in the following) with whom to stream and send
 * a StreamInit message. The first connection will be the incoming connection for the
 * initiator, and the second connection will be the outgoing.
 * (b) Upon reception of that StreamInit message, the follower creates its own StreamSession,
 * initialize it if it still does not exist, and attach connecting socket to its ConnectionHandler
 * according to StreamInit message's isForOutgoing flag.
 * (d) When the both incoming and outgoing connections are established, StreamSession calls
 * StreamSession#onInitializationComplete method to start the streaming prepare phase
 * (StreamResultFuture.startStreaming()).
 * (a) A node (the initiator in the following) creates a new StreamSession,
 * initializes it (init()) and then starts it (start()). Start will create a
 * {@link ConnectionHandler} that will create two connections to the remote node
 * (the follower in the following) with whom to stream and send a StreamInit
 * message. The first connection will be the incoming connection for the
 * initiator, and the second connection will be the outgoing. (b) Upon reception
 * of that StreamInit message, the follower creates its own StreamSession,
 * initializes it if it does not exist yet, and attaches the connecting socket to
 * its ConnectionHandler according to StreamInit message's isForOutgoing flag. (d)
 * When both the incoming and outgoing connections are established,
 * StreamSession calls the StreamSession#onInitializationComplete method to start
 * the streaming prepare phase (StreamResultFuture.startStreaming()).
 *
 * 2. Streaming preparation phase
 *
 * (a) This phase is started when the initiator onInitializationComplete() method is called. This method sends a
 * PrepareMessage that includes what files/sections this node will stream to the follower
 * (stored in a StreamTransferTask, each column family has it's own transfer task) and what
 * the follower needs to stream back (StreamReceiveTask, same as above). If the initiator has
 * nothing to receive from the follower, it goes directly to its Streaming phase. Otherwise,
 * it waits for the follower PrepareMessage.
 * (b) Upon reception of the PrepareMessage, the follower records which files/sections it will receive
 * and send back its own PrepareMessage with a summary of the files/sections that will be sent to
 * the initiator (prepare()). After having sent that message, the follower goes to its Streamning
 * phase.
 * (c) When the initiator receives the follower PrepareMessage, it records which files/sections it will
 * receive and then goes to his own Streaming phase.
 * (a) This phase is started when the initiator onInitializationComplete()
 * method is called. This method sends a PrepareMessage that includes what
 * files/sections this node will stream to the follower (stored in a
 * StreamTransferTask, each column family has its own transfer task) and what
 * the follower needs to stream back (StreamReceiveTask, same as above). If the
 * initiator has nothing to receive from the follower, it goes directly to its
 * Streaming phase. Otherwise, it waits for the follower PrepareMessage. (b)
 * Upon reception of the PrepareMessage, the follower records which
 * files/sections it will receive and sends back its own PrepareMessage with a
 * summary of the files/sections that will be sent to the initiator (prepare()).
 * After having sent that message, the follower goes to its Streaming phase.
 * (c) When the initiator receives the follower PrepareMessage, it records which
 * files/sections it will receive and then goes to its own Streaming phase.
 *
 * 3. Streaming phase
 *
 * (a) The streaming phase is started by each node (the sender in the follower, but note that each side
 * of the StreamSession may be sender for some of the files) involved by calling startStreamingFiles().
 * This will sequentially send a FileMessage for each file of each SteamTransferTask. Each FileMessage
 * consists of a FileMessageHeader that indicates which file is coming and then start streaming the
 * content for that file (StreamWriter in FileMessage.serialize()). When a file is fully sent, the
 * fileSent() method is called for that file. If all the files for a StreamTransferTask are sent
 * (StreamTransferTask.complete()), the task is marked complete (taskCompleted()).
 * (b) On the receiving side, a SSTable will be written for the incoming file (StreamReader in
 * FileMessage.deserialize()) and once the FileMessage is fully received, the file will be marked as
 * complete (received()). When all files for the StreamReceiveTask have been received, the sstables
 * are added to the CFS (and 2ndary index are built, StreamReceiveTask.complete()) and the task
 * is marked complete (taskCompleted())
 * (b) If during the streaming of a particular file an I/O error occurs on the receiving end of a stream
 * (FileMessage.deserialize), the node will retry the file (up to DatabaseDescriptor.getMaxStreamingRetries())
 * by sending a RetryMessage to the sender. On receiving a RetryMessage, the sender simply issue a new
 * FileMessage for that file.
 * (c) When all transfer and receive tasks for a session are complete, the move to the Completion phase
 * (maybeCompleted()).
 * (a) The streaming phase is started by each node (the sender in the follower,
 * but note that each side of the StreamSession may be sender for some of the
 * files) involved by calling startStreamingFiles(). This will sequentially send
 * a FileMessage for each file of each StreamTransferTask. Each FileMessage
 * consists of a FileMessageHeader that indicates which file is coming and then
 * starts streaming the content for that file (StreamWriter in
 * FileMessage.serialize()). When a file is fully sent, the fileSent() method is
 * called for that file. If all the files for a StreamTransferTask are sent
 * (StreamTransferTask.complete()), the task is marked complete
 * (taskCompleted()). (b) On the receiving side, an SSTable will be written for
 * the incoming file (StreamReader in FileMessage.deserialize()) and once the
 * FileMessage is fully received, the file will be marked as complete
 * (received()). When all files for the StreamReceiveTask have been received,
 * the sstables are added to the CFS (and secondary indexes are built,
 * StreamReceiveTask.complete()) and the task is marked complete
 * (taskCompleted()). (b) If during the streaming of a particular file an I/O
 * error occurs on the receiving end of a stream (FileMessage.deserialize), the
 * node will retry the file (up to DatabaseDescriptor.getMaxStreamingRetries())
 * by sending a RetryMessage to the sender. On receiving a RetryMessage, the
 * sender simply issues a new FileMessage for that file. (c) When all transfer
 * and receive tasks for a session are complete, the session moves to the
 * Completion phase (maybeCompleted()).
 *
 * 4. Completion phase
 *
 * (a) When a node has finished all transfer and receive task, it enter the completion phase (maybeCompleted()).
 * If it had already received a CompleteMessage from the other side (it is in the WAIT_COMPLETE state), that
 * session is done is is closed (closeSession()). Otherwise, the node switch to the WAIT_COMPLETE state and
 * send a CompleteMessage to the other side.
 * (a) When a node has finished all transfer and receive tasks, it enters the
 * completion phase (maybeCompleted()). If it had already received a
 * CompleteMessage from the other side (it is in the WAIT_COMPLETE state), that
 * session is done and is closed (closeSession()). Otherwise, the node switches
 * to the WAIT_COMPLETE state and sends a CompleteMessage to the other side.
 */
public class StreamSession
{
public class StreamSession {

    public static enum State
    {
        INITIALIZED,
        PREPARING,
        STREAMING,
        WAIT_COMPLETE,
        COMPLETE,
        FAILED,
    public static enum State {
        INITIALIZED, PREPARING, STREAMING, WAIT_COMPLETE, COMPLETE, FAILED,
    }

}

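The life-cycle described in phases 1-4 maps onto the State enum as a simple progression. A hedged sketch of the happy path only; failure can interrupt at any point, which is what FAILED captures:

    // Happy-path progression implied by the javadoc; not actual transition code.
    StreamSession.State[] happyPath = {
            StreamSession.State.INITIALIZED,   // phase 1: connections established
            StreamSession.State.PREPARING,     // phase 2: PrepareMessages exchanged
            StreamSession.State.STREAMING,     // phase 3: FileMessages flowing
            StreamSession.State.WAIT_COMPLETE, // phase 4: waiting for CompleteMessage
            StreamSession.State.COMPLETE       // phase 4: session closed
    };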
@ -28,14 +28,12 @@ import java.io.Serializable;
import java.util.Set;
import java.util.UUID;

import com.google.common.base.Predicate;
import com.google.common.collect.Iterables;

/**
 * Current snapshot of streaming progress.
 */
public class StreamState implements Serializable
{
public class StreamState implements Serializable {
    /**
     *
     */
@ -49,19 +47,12 @@ public class StreamState implements Serializable
        this.description = description;
        this.sessions = sessions;
    }
    public StreamState(String planId, String description, Set<SessionInfo> sessions)
    {

    public StreamState(String planId, String description, Set<SessionInfo> sessions) {
        this(UUID.fromString(planId), description, sessions);
    }

    public boolean hasFailedSession()
    {
        return Iterables.any(sessions, new Predicate<SessionInfo>()
        {
            public boolean apply(SessionInfo session)
            {
                return session.isFailed();
            }
        });
    public boolean hasFailedSession() {
        return Iterables.any(sessions, session -> session.isFailed());
    }
}

@ -36,18 +36,17 @@ import com.google.common.base.Objects;
/**
 * Summary of streaming.
 */
public class StreamSummary
{
public class StreamSummary {
    public final UUID cfId;

    /**
     * Number of files to transfer. Can be 0 if nothing to transfer for some streaming request.
     * Number of files to transfer. Can be 0 if nothing to transfer for some
     * streaming request.
     */
    public final int files;
    public final long totalSize;

    public StreamSummary(UUID cfId, int files, long totalSize)
    {
    public StreamSummary(UUID cfId, int files, long totalSize) {
        this.cfId = cfId;
        this.files = files;
        this.totalSize = totalSize;
@ -58,7 +57,8 @@ public class StreamSummary
    }

    public static StreamSummary fromJsonObject(JsonObject obj) {
        return new StreamSummary(obj.getString("cf_id"), obj.getInt("files"), obj.getJsonNumber("total_size").longValue());
        return new StreamSummary(obj.getString("cf_id"), obj.getInt("files"),
                obj.getJsonNumber("total_size").longValue());
    }

    public static Collection<StreamSummary> fromJsonArr(JsonArray arr) {
@ -71,24 +71,26 @@ public class StreamSummary
        }
        return res;
    }

    @Override
    public boolean equals(Object o)
    {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        StreamSummary summary = (StreamSummary) o;
        return files == summary.files && totalSize == summary.totalSize && cfId.equals(summary.cfId);
    }

    @Override
    public int hashCode()
    {
    public int hashCode() {
        return Objects.hashCode(cfId, files, totalSize);
    }

    @Override
    public String toString()
    {
    public String toString() {
        final StringBuilder sb = new StringBuilder("StreamSummary{");
        sb.append("path=").append(cfId);
        sb.append(", files=").append(files);
@ -29,54 +29,38 @@ import java.net.UnknownHostException;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import javax.management.openmbean.*;

import com.google.common.base.Throwables;
import javax.management.openmbean.CompositeData;
import javax.management.openmbean.CompositeDataSupport;
import javax.management.openmbean.CompositeType;
import javax.management.openmbean.OpenDataException;
import javax.management.openmbean.OpenType;
import javax.management.openmbean.SimpleType;

import org.apache.cassandra.streaming.ProgressInfo;

public class ProgressInfoCompositeData
{
    private static final String[] ITEM_NAMES = new String[]{"planId",
            "peer",
            "sessionIndex",
            "fileName",
            "direction",
            "currentBytes",
            "totalBytes"};
    private static final String[] ITEM_DESCS = new String[]{"String representation of Plan ID",
            "Session peer",
            "Index of session",
            "Name of the file",
            "Direction('IN' or 'OUT')",
            "Current bytes transferred",
            "Total bytes to transfer"};
    private static final OpenType<?>[] ITEM_TYPES = new OpenType[]{SimpleType.STRING,
            SimpleType.STRING,
            SimpleType.INTEGER,
            SimpleType.STRING,
            SimpleType.STRING,
            SimpleType.LONG,
            SimpleType.LONG};
import com.google.common.base.Throwables;

public class ProgressInfoCompositeData {
    private static final String[] ITEM_NAMES = new String[] { "planId", "peer", "sessionIndex", "fileName", "direction",
            "currentBytes", "totalBytes" };
    private static final String[] ITEM_DESCS = new String[] { "String representation of Plan ID", "Session peer",
            "Index of session", "Name of the file", "Direction('IN' or 'OUT')", "Current bytes transferred",
            "Total bytes to transfer" };
    private static final OpenType<?>[] ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING,
            SimpleType.INTEGER, SimpleType.STRING, SimpleType.STRING, SimpleType.LONG, SimpleType.LONG };

    public static final CompositeType COMPOSITE_TYPE;
    static {
        try
        {
            COMPOSITE_TYPE = new CompositeType(ProgressInfo.class.getName(),
                    "ProgressInfo",
                    ITEM_NAMES,
                    ITEM_DESCS,
                    ITEM_TYPES);
        }
        catch (OpenDataException e)
        {
    static {
        try {
            COMPOSITE_TYPE = new CompositeType(ProgressInfo.class.getName(), "ProgressInfo", ITEM_NAMES, ITEM_DESCS,
                    ITEM_TYPES);
        } catch (OpenDataException e) {
            throw Throwables.propagate(e);
        }
    }

    public static CompositeData toCompositeData(UUID planId, ProgressInfo progressInfo)
    {
    public static CompositeData toCompositeData(UUID planId, ProgressInfo progressInfo) {
        Map<String, Object> valueMap = new HashMap<>();
        valueMap.put(ITEM_NAMES[0], planId.toString());
        valueMap.put(ITEM_NAMES[1], progressInfo.peer.getHostAddress());
@ -85,30 +69,19 @@ public class ProgressInfoCompositeData
        valueMap.put(ITEM_NAMES[4], progressInfo.direction.name());
        valueMap.put(ITEM_NAMES[5], progressInfo.currentBytes);
        valueMap.put(ITEM_NAMES[6], progressInfo.totalBytes);
        try
        {
        try {
            return new CompositeDataSupport(COMPOSITE_TYPE, valueMap);
        }
        catch (OpenDataException e)
        {
        } catch (OpenDataException e) {
            throw Throwables.propagate(e);
        }
    }

    public static ProgressInfo fromCompositeData(CompositeData cd)
    {
    public static ProgressInfo fromCompositeData(CompositeData cd) {
        Object[] values = cd.getAll(ITEM_NAMES);
        try
        {
            return new ProgressInfo(InetAddress.getByName((String) values[1]),
                    (int) values[2],
                    (String) values[3],
                    ProgressInfo.Direction.valueOf((String)values[4]),
                    (long) values[5],
                    (long) values[6]);
        }
        catch (UnknownHostException e)
        {
        try {
            return new ProgressInfo(InetAddress.getByName((String) values[1]), (int) values[2], (String) values[3],
                    ProgressInfo.Direction.valueOf((String) values[4]), (long) values[5], (long) values[6]);
        } catch (UnknownHostException e) {
            throw Throwables.propagate(e);
        }
    }
}

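toCompositeData and fromCompositeData form a round trip over the item names above. A small sketch showing the pair used together; the peer address and file name are placeholders:

    import java.net.InetAddress;
    import java.util.UUID;
    import javax.management.openmbean.CompositeData;

    // Fragment: enclosing method and checked exceptions elided.
    UUID planId = UUID.randomUUID();
    ProgressInfo original = new ProgressInfo(InetAddress.getByName("127.0.0.1"), 0, "f.db",
            ProgressInfo.Direction.OUT, 10, 100);
    CompositeData cd = ProgressInfoCompositeData.toCompositeData(planId, original);
    ProgressInfo decoded = ProgressInfoCompositeData.fromCompositeData(cd);
    // All attributes survive the round trip, so the two compare equal.
    assert original.equals(decoded);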
@ -26,8 +26,24 @@ package org.apache.cassandra.streaming.management;
|
||||
|
||||
import java.net.InetAddress;
|
||||
import java.net.UnknownHostException;
|
||||
import java.util.*;
|
||||
import javax.management.openmbean.*;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
import java.util.UUID;
|
||||
|
||||
import javax.management.openmbean.ArrayType;
|
||||
import javax.management.openmbean.CompositeData;
|
||||
import javax.management.openmbean.CompositeDataSupport;
|
||||
import javax.management.openmbean.CompositeType;
|
||||
import javax.management.openmbean.OpenDataException;
|
||||
import javax.management.openmbean.OpenType;
|
||||
import javax.management.openmbean.SimpleType;
|
||||
|
||||
import org.apache.cassandra.streaming.ProgressInfo;
|
||||
import org.apache.cassandra.streaming.SessionInfo;
|
||||
import org.apache.cassandra.streaming.StreamSession;
|
||||
import org.apache.cassandra.streaming.StreamSummary;
|
||||
|
||||
import com.google.common.base.Function;
|
||||
import com.google.common.base.Throwables;
|
||||
@ -35,149 +51,86 @@ import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.Lists;
|
||||
import com.google.common.collect.Sets;
|
||||
|
||||
import org.apache.cassandra.streaming.ProgressInfo;
|
||||
import org.apache.cassandra.streaming.SessionInfo;
|
||||
import org.apache.cassandra.streaming.StreamSession;
|
||||
import org.apache.cassandra.streaming.StreamSummary;
|
||||
import java.util.HashMap;
|
||||
|
||||
public class SessionInfoCompositeData
|
||||
{
|
||||
private static final String[] ITEM_NAMES = new String[]{"planId",
|
||||
"peer",
|
||||
"connecting",
|
||||
"receivingSummaries",
|
||||
"sendingSummaries",
|
||||
"state",
|
||||
"receivingFiles",
|
||||
"sendingFiles",
|
||||
"sessionIndex"};
|
||||
private static final String[] ITEM_DESCS = new String[]{"Plan ID",
|
||||
"Session peer",
|
||||
"Connecting address",
|
||||
"Summaries of receiving data",
|
||||
"Summaries of sending data",
|
||||
"Current session state",
|
||||
"Receiving files",
|
||||
"Sending files",
|
||||
"Session index"};
|
||||
public class SessionInfoCompositeData {
|
||||
private static final String[] ITEM_NAMES = new String[] { "planId", "peer", "connecting", "receivingSummaries",
|
||||
"sendingSummaries", "state", "receivingFiles", "sendingFiles", "sessionIndex" };
|
||||
private static final String[] ITEM_DESCS = new String[] { "Plan ID", "Session peer", "Connecting address",
|
||||
"Summaries of receiving data", "Summaries of sending data", "Current session state", "Receiving files",
|
||||
"Sending files", "Session index" };
|
||||
private static final OpenType<?>[] ITEM_TYPES;
|
||||
|
||||
public static final CompositeType COMPOSITE_TYPE;
|
||||
static {
|
||||
try
|
||||
{
|
||||
ITEM_TYPES = new OpenType[]{SimpleType.STRING,
|
||||
SimpleType.STRING,
|
||||
SimpleType.STRING,
|
||||
ArrayType.getArrayType(StreamSummaryCompositeData.COMPOSITE_TYPE),
|
||||
ArrayType.getArrayType(StreamSummaryCompositeData.COMPOSITE_TYPE),
|
||||
SimpleType.STRING,
|
||||
ArrayType.getArrayType(ProgressInfoCompositeData.COMPOSITE_TYPE),
|
||||
ArrayType.getArrayType(ProgressInfoCompositeData.COMPOSITE_TYPE),
|
||||
SimpleType.INTEGER};
|
||||
COMPOSITE_TYPE = new CompositeType(SessionInfo.class.getName(),
|
||||
"SessionInfo",
|
||||
ITEM_NAMES,
|
||||
ITEM_DESCS,
|
||||
ITEM_TYPES);
|
||||
}
|
||||
catch (OpenDataException e)
|
||||
{
|
||||
static {
|
||||
try {
|
||||
ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, SimpleType.STRING,
|
||||
ArrayType.getArrayType(StreamSummaryCompositeData.COMPOSITE_TYPE),
|
||||
ArrayType.getArrayType(StreamSummaryCompositeData.COMPOSITE_TYPE), SimpleType.STRING,
|
||||
ArrayType.getArrayType(ProgressInfoCompositeData.COMPOSITE_TYPE),
|
||||
ArrayType.getArrayType(ProgressInfoCompositeData.COMPOSITE_TYPE), SimpleType.INTEGER };
|
||||
COMPOSITE_TYPE = new CompositeType(SessionInfo.class.getName(), "SessionInfo", ITEM_NAMES, ITEM_DESCS,
|
||||
ITEM_TYPES);
|
||||
} catch (OpenDataException e) {
|
||||
throw Throwables.propagate(e);
|
||||
}
|
||||
}
|
||||
|
||||
public static CompositeData toCompositeData(final UUID planId, SessionInfo sessionInfo)
|
||||
{
|
||||
public static CompositeData toCompositeData(final UUID planId, SessionInfo sessionInfo) {
|
||||
Map<String, Object> valueMap = new HashMap<>();
|
||||
valueMap.put(ITEM_NAMES[0], planId.toString());
|
||||
valueMap.put(ITEM_NAMES[1], sessionInfo.peer.getHostAddress());
|
||||
valueMap.put(ITEM_NAMES[2], sessionInfo.connecting.getHostAddress());
|
||||
Function<StreamSummary, CompositeData> fromStreamSummary = new Function<StreamSummary, CompositeData>()
|
||||
{
|
||||
-            public CompositeData apply(StreamSummary input)
-            {
-                return StreamSummaryCompositeData.toCompositeData(input);
-            }
-        };
+        Function<StreamSummary, CompositeData> fromStreamSummary = input -> StreamSummaryCompositeData
+                .toCompositeData(input);
         valueMap.put(ITEM_NAMES[3], toArrayOfCompositeData(sessionInfo.receivingSummaries, fromStreamSummary));
         valueMap.put(ITEM_NAMES[4], toArrayOfCompositeData(sessionInfo.sendingSummaries, fromStreamSummary));
         valueMap.put(ITEM_NAMES[5], sessionInfo.state.name());
-        Function<ProgressInfo, CompositeData> fromProgressInfo = new Function<ProgressInfo, CompositeData>()
-        {
-            public CompositeData apply(ProgressInfo input)
-            {
-                return ProgressInfoCompositeData.toCompositeData(planId, input);
-            }
-        };
+        Function<ProgressInfo, CompositeData> fromProgressInfo = input -> ProgressInfoCompositeData
+                .toCompositeData(planId, input);
         valueMap.put(ITEM_NAMES[6], toArrayOfCompositeData(sessionInfo.getReceivingFiles(), fromProgressInfo));
         valueMap.put(ITEM_NAMES[7], toArrayOfCompositeData(sessionInfo.getSendingFiles(), fromProgressInfo));
         valueMap.put(ITEM_NAMES[8], sessionInfo.sessionIndex);
-        try
-        {
+        try {
             return new CompositeDataSupport(COMPOSITE_TYPE, valueMap);
-        }
-        catch (OpenDataException e)
-        {
+        } catch (OpenDataException e) {
             throw Throwables.propagate(e);
         }
     }

-    public static SessionInfo fromCompositeData(CompositeData cd)
-    {
+    public static SessionInfo fromCompositeData(CompositeData cd) {
         assert cd.getCompositeType().equals(COMPOSITE_TYPE);

         Object[] values = cd.getAll(ITEM_NAMES);
         InetAddress peer, connecting;
-        try
-        {
+        try {
             peer = InetAddress.getByName((String) values[1]);
             connecting = InetAddress.getByName((String) values[2]);
-        }
-        catch (UnknownHostException e)
-        {
+        } catch (UnknownHostException e) {
             throw Throwables.propagate(e);
         }
-        Function<CompositeData, StreamSummary> toStreamSummary = new Function<CompositeData, StreamSummary>()
-        {
-            public StreamSummary apply(CompositeData input)
-            {
-                return StreamSummaryCompositeData.fromCompositeData(input);
-            }
-        };
-        SessionInfo info = new SessionInfo(peer,
-                                           (int)values[8],
-                                           connecting,
-                                           fromArrayOfCompositeData((CompositeData[]) values[3], toStreamSummary),
-                                           fromArrayOfCompositeData((CompositeData[]) values[4], toStreamSummary),
-                                           StreamSession.State.valueOf((String) values[5]),
-                                           new HashMap<String, ProgressInfo>(), new HashMap<String, ProgressInfo>());
-        Function<CompositeData, ProgressInfo> toProgressInfo = new Function<CompositeData, ProgressInfo>()
-        {
-            public ProgressInfo apply(CompositeData input)
-            {
-                return ProgressInfoCompositeData.fromCompositeData(input);
-            }
-        };
-        for (ProgressInfo progress : fromArrayOfCompositeData((CompositeData[]) values[6], toProgressInfo))
-        {
+        Function<CompositeData, StreamSummary> toStreamSummary = input -> StreamSummaryCompositeData
+                .fromCompositeData(input);
+        SessionInfo info = new SessionInfo(peer, (int) values[8], connecting,
+                fromArrayOfCompositeData((CompositeData[]) values[3], toStreamSummary),
+                fromArrayOfCompositeData((CompositeData[]) values[4], toStreamSummary),
+                StreamSession.State.valueOf((String) values[5]), new HashMap<String, ProgressInfo>(),
+                new HashMap<String, ProgressInfo>());
+        Function<CompositeData, ProgressInfo> toProgressInfo = input -> ProgressInfoCompositeData
+                .fromCompositeData(input);
+        for (ProgressInfo progress : fromArrayOfCompositeData((CompositeData[]) values[6], toProgressInfo)) {
             info.updateProgress(progress);
         }
-        for (ProgressInfo progress : fromArrayOfCompositeData((CompositeData[]) values[7], toProgressInfo))
-        {
+        for (ProgressInfo progress : fromArrayOfCompositeData((CompositeData[]) values[7], toProgressInfo)) {
             info.updateProgress(progress);
         }
         return info;
     }

-    private static <T> Collection<T> fromArrayOfCompositeData(CompositeData[] cds, Function<CompositeData, T> func)
-    {
+    private static <T> Collection<T> fromArrayOfCompositeData(CompositeData[] cds, Function<CompositeData, T> func) {
         return Lists.newArrayList(Iterables.transform(Arrays.asList(cds), func));
     }

-    private static <T> CompositeData[] toArrayOfCompositeData(Collection<T> toConvert, Function<T, CompositeData> func)
-    {
+    private static <T> CompositeData[] toArrayOfCompositeData(Collection<T> toConvert,
+            Function<T, CompositeData> func) {
         if (toConvert == null) {
             toConvert = Sets.newHashSet();
         }
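Aside (editorial, not part of the diff): the dominant change in these hunks swaps Guava's anonymous Function subclasses for Java 8 lambdas. com.google.common.base.Function declares a single abstract method, apply(), so it is a valid lambda target and the two forms are interchangeable. A minimal sketch of the equivalence; the class and values are illustrative only:

    // Sketch only, not from this commit. Shows why the refactoring above is
    // behavior-preserving: an anonymous Function subclass and a lambda yield
    // interchangeable instances of the same single-method interface.
    import com.google.common.base.Function;

    public class LambdaRefactorSketch {
        // Pre-Java-8 form, as on the removed lines above.
        static final Function<Integer, String> ANON = new Function<Integer, String>() {
            @Override
            public String apply(Integer input) {
                return Integer.toHexString(input);
            }
        };

        // Java 8 form, as on the added lines above.
        static final Function<Integer, String> LAMBDA = input -> Integer.toHexString(input);

        public static void main(String[] args) {
            System.out.println(ANON.apply(255));   // prints "ff"
            System.out.println(LAMBDA.apply(255)); // prints "ff"
        }
    }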
@ -24,79 +24,68 @@

 package org.apache.cassandra.streaming.management;

-import java.util.*;
-import javax.management.openmbean.*;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+
+import javax.management.openmbean.ArrayType;
+import javax.management.openmbean.CompositeData;
+import javax.management.openmbean.CompositeDataSupport;
+import javax.management.openmbean.CompositeType;
+import javax.management.openmbean.OpenDataException;
+import javax.management.openmbean.OpenType;
+import javax.management.openmbean.SimpleType;
+
+import org.apache.cassandra.streaming.SessionInfo;
+import org.apache.cassandra.streaming.StreamState;

 import com.google.common.base.Function;
 import com.google.common.base.Throwables;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;

-import org.apache.cassandra.streaming.SessionInfo;
-import org.apache.cassandra.streaming.StreamState;
-
 /**
  */
-public class StreamStateCompositeData
-{
-    private static final String[] ITEM_NAMES = new String[]{"planId", "description", "sessions",
-                                                            "currentRxBytes", "totalRxBytes", "rxPercentage",
-                                                            "currentTxBytes", "totalTxBytes", "txPercentage"};
-    private static final String[] ITEM_DESCS = new String[]{"Plan ID of this stream",
-                                                            "Stream plan description",
-                                                            "Active stream sessions",
-                                                            "Number of bytes received across all streams",
-                                                            "Total bytes available to receive across all streams",
-                                                            "Percentage received across all streams",
-                                                            "Number of bytes sent across all streams",
-                                                            "Total bytes available to send across all streams",
-                                                            "Percentage sent across all streams"};
+public class StreamStateCompositeData {
+    private static final String[] ITEM_NAMES = new String[] { "planId", "description", "sessions", "currentRxBytes",
+            "totalRxBytes", "rxPercentage", "currentTxBytes", "totalTxBytes", "txPercentage" };
+    private static final String[] ITEM_DESCS = new String[] { "Plan ID of this stream", "Stream plan description",
+            "Active stream sessions", "Number of bytes received across all streams",
+            "Total bytes available to receive across all streams", "Percentage received across all streams",
+            "Number of bytes sent across all streams", "Total bytes available to send across all streams",
+            "Percentage sent across all streams" };
     private static final OpenType<?>[] ITEM_TYPES;

     public static final CompositeType COMPOSITE_TYPE;
-    static  {
-        try
-        {
-            ITEM_TYPES = new OpenType[]{SimpleType.STRING,
-                                        SimpleType.STRING,
-                                        ArrayType.getArrayType(SessionInfoCompositeData.COMPOSITE_TYPE),
-                                        SimpleType.LONG, SimpleType.LONG, SimpleType.DOUBLE,
-                                        SimpleType.LONG, SimpleType.LONG, SimpleType.DOUBLE};
-            COMPOSITE_TYPE = new CompositeType(StreamState.class.getName(),
-                                               "StreamState",
-                                               ITEM_NAMES,
-                                               ITEM_DESCS,
-                                               ITEM_TYPES);
-        }
-        catch (OpenDataException e)
-        {
+    static {
+        try {
+            ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING,
+                    ArrayType.getArrayType(SessionInfoCompositeData.COMPOSITE_TYPE), SimpleType.LONG, SimpleType.LONG,
+                    SimpleType.DOUBLE, SimpleType.LONG, SimpleType.LONG, SimpleType.DOUBLE };
+            COMPOSITE_TYPE = new CompositeType(StreamState.class.getName(), "StreamState", ITEM_NAMES, ITEM_DESCS,
+                    ITEM_TYPES);
+        } catch (OpenDataException e) {
             throw Throwables.propagate(e);
         }
     }

-    public static CompositeData toCompositeData(final StreamState streamState)
-    {
+    public static CompositeData toCompositeData(final StreamState streamState) {
         Map<String, Object> valueMap = new HashMap<>();
         valueMap.put(ITEM_NAMES[0], streamState.planId.toString());
         valueMap.put(ITEM_NAMES[1], streamState.description);

         CompositeData[] sessions = new CompositeData[streamState.sessions.size()];
-        Lists.newArrayList(Iterables.transform(streamState.sessions, new Function<SessionInfo, CompositeData>()
-        {
-            public CompositeData apply(SessionInfo input)
-            {
-                return SessionInfoCompositeData.toCompositeData(streamState.planId, input);
-            }
-        })).toArray(sessions);
+        Lists.newArrayList(Iterables.transform(streamState.sessions,
+                input -> SessionInfoCompositeData.toCompositeData(streamState.planId, input))).toArray(sessions);
         valueMap.put(ITEM_NAMES[2], sessions);

         long currentRxBytes = 0;
         long totalRxBytes = 0;
         long currentTxBytes = 0;
         long totalTxBytes = 0;
-        for (SessionInfo sessInfo : streamState.sessions)
-        {
+        for (SessionInfo sessInfo : streamState.sessions) {
             currentRxBytes += sessInfo.getTotalSizeReceived();
             totalRxBytes += sessInfo.getTotalSizeToReceive();
             currentTxBytes += sessInfo.getTotalSizeSent();
@ -112,30 +101,20 @@ public class StreamStateCompositeData
         valueMap.put(ITEM_NAMES[7], totalTxBytes);
         valueMap.put(ITEM_NAMES[8], txPercentage);

-        try
-        {
+        try {
             return new CompositeDataSupport(COMPOSITE_TYPE, valueMap);
-        }
-        catch (OpenDataException e)
-        {
+        } catch (OpenDataException e) {
             throw Throwables.propagate(e);
         }
     }

-    public static StreamState fromCompositeData(CompositeData cd)
-    {
+    public static StreamState fromCompositeData(CompositeData cd) {
         assert cd.getCompositeType().equals(COMPOSITE_TYPE);
         Object[] values = cd.getAll(ITEM_NAMES);
         UUID planId = UUID.fromString((String) values[0]);
         String description = (String) values[1];
         Set<SessionInfo> sessions = Sets.newHashSet(Iterables.transform(Arrays.asList((CompositeData[]) values[2]),
-                new Function<CompositeData, SessionInfo>()
-                {
-                    public SessionInfo apply(CompositeData input)
-                    {
-                        return SessionInfoCompositeData.fromCompositeData(input);
-                    }
-                }));
+                input -> SessionInfoCompositeData.fromCompositeData(input)));
         return new StreamState(planId, description, sessions);
     }
 }
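Aside (editorial, not part of the diff): the *CompositeData classes in this commit all follow the same OpenMBean pattern: parallel ITEM_NAMES/ITEM_DESCS/ITEM_TYPES arrays describe the attributes of a CompositeType, built once in a static initializer that turns OpenDataException into an unchecked failure. A minimal standalone sketch with hypothetical item names (it uses ExceptionInInitializerError where the classes above use Guava's Throwables.propagate):

    // Sketch only, not from this commit. The CompositeType constructor checks
    // that the three arrays have equal length and that names are unique, so an
    // inconsistency fails fast at class-load time.
    import java.util.HashMap;
    import java.util.Map;

    import javax.management.openmbean.CompositeData;
    import javax.management.openmbean.CompositeDataSupport;
    import javax.management.openmbean.CompositeType;
    import javax.management.openmbean.OpenDataException;
    import javax.management.openmbean.OpenType;
    import javax.management.openmbean.SimpleType;

    public class CompositeTypeSketch {
        private static final String[] ITEM_NAMES = { "name", "count" };
        private static final String[] ITEM_DESCS = { "Entry name", "Number of entries" };
        private static final OpenType<?>[] ITEM_TYPES = { SimpleType.STRING, SimpleType.INTEGER };

        public static final CompositeType COMPOSITE_TYPE;
        static {
            try {
                COMPOSITE_TYPE = new CompositeType("Entry", "A sample row type", ITEM_NAMES, ITEM_DESCS, ITEM_TYPES);
            } catch (OpenDataException e) {
                throw new ExceptionInInitializerError(e);
            }
        }

        public static CompositeData toCompositeData(String name, int count) throws OpenDataException {
            Map<String, Object> valueMap = new HashMap<>();
            valueMap.put(ITEM_NAMES[0], name);
            valueMap.put(ITEM_NAMES[1], count);
            // CompositeDataSupport validates the supplied values against the type.
            return new CompositeDataSupport(COMPOSITE_TYPE, valueMap);
        }
    }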
@ -27,63 +27,51 @@ package org.apache.cassandra.streaming.management;

 import java.util.HashMap;
 import java.util.Map;
 import java.util.UUID;
-import javax.management.openmbean.*;

-import com.google.common.base.Throwables;
+import javax.management.openmbean.CompositeData;
+import javax.management.openmbean.CompositeDataSupport;
+import javax.management.openmbean.CompositeType;
+import javax.management.openmbean.OpenDataException;
+import javax.management.openmbean.OpenType;
+import javax.management.openmbean.SimpleType;

 import org.apache.cassandra.streaming.StreamSummary;

+import com.google.common.base.Throwables;
+
 /**
  */
-public class StreamSummaryCompositeData
-{
-    private static final String[] ITEM_NAMES = new String[]{"cfId",
-                                                            "files",
-                                                            "totalSize"};
-    private static final String[] ITEM_DESCS = new String[]{"ColumnFamily ID",
-                                                            "Number of files",
-                                                            "Total bytes of the files"};
-    private static final OpenType<?>[] ITEM_TYPES = new OpenType[]{SimpleType.STRING,
-                                                                   SimpleType.INTEGER,
-                                                                   SimpleType.LONG};
+public class StreamSummaryCompositeData {
+    private static final String[] ITEM_NAMES = new String[] { "cfId", "files", "totalSize" };
+    private static final String[] ITEM_DESCS = new String[] { "ColumnFamily ID", "Number of files",
+            "Total bytes of the files" };
+    private static final OpenType<?>[] ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.INTEGER,
+            SimpleType.LONG };

     public static final CompositeType COMPOSITE_TYPE;
-    static  {
-        try
-        {
-            COMPOSITE_TYPE = new CompositeType(StreamSummary.class.getName(),
-                                               "StreamSummary",
-                                               ITEM_NAMES,
-                                               ITEM_DESCS,
-                                               ITEM_TYPES);
-        }
-        catch (OpenDataException e)
-        {
+    static {
+        try {
+            COMPOSITE_TYPE = new CompositeType(StreamSummary.class.getName(), "StreamSummary", ITEM_NAMES, ITEM_DESCS,
+                    ITEM_TYPES);
+        } catch (OpenDataException e) {
             throw Throwables.propagate(e);
         }
     }

-    public static CompositeData toCompositeData(StreamSummary streamSummary)
-    {
+    public static CompositeData toCompositeData(StreamSummary streamSummary) {
         Map<String, Object> valueMap = new HashMap<>();
         valueMap.put(ITEM_NAMES[0], streamSummary.cfId.toString());
         valueMap.put(ITEM_NAMES[1], streamSummary.files);
         valueMap.put(ITEM_NAMES[2], streamSummary.totalSize);
-        try
-        {
+        try {
             return new CompositeDataSupport(COMPOSITE_TYPE, valueMap);
-        }
-        catch (OpenDataException e)
-        {
+        } catch (OpenDataException e) {
             throw Throwables.propagate(e);
         }
     }

-    public static StreamSummary fromCompositeData(CompositeData cd)
-    {
+    public static StreamSummary fromCompositeData(CompositeData cd) {
         Object[] values = cd.getAll(ITEM_NAMES);
-        return new StreamSummary(UUID.fromString((String) values[0]),
-                                 (int) values[1],
-                                 (long) values[2]);
+        return new StreamSummary(UUID.fromString((String) values[0]), (int) values[1], (long) values[2]);
     }
 }
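Aside (editorial, not part of the diff): the two converters above are inverses, which makes them easy to test with a round trip. A sketch, assuming Cassandra's public StreamSummary(UUID, int, long) constructor and its value-based equals(), neither of which is shown in this diff:

    // Sketch only, not from this commit. Round-trips a StreamSummary through
    // its CompositeData form using the static converters defined above.
    import java.util.UUID;

    import javax.management.openmbean.CompositeData;

    import org.apache.cassandra.streaming.StreamSummary;
    import org.apache.cassandra.streaming.management.StreamSummaryCompositeData;

    public class StreamSummaryRoundTrip {
        public static void main(String[] args) {
            StreamSummary original = new StreamSummary(UUID.randomUUID(), 3, 1024L);
            CompositeData cd = StreamSummaryCompositeData.toCompositeData(original);
            StreamSummary restored = StreamSummaryCompositeData.fromCompositeData(cd);
            // cfId, files and totalSize survive the conversion unchanged.
            System.out.println(original.equals(restored)); // expected: true
        }
    }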