Compare commits: branch-0.1...master (398 commits)

Commit SHA1s, in page order (author and date columns not preserved):

69951d5a2d, 12cd93f7c1, c43992ba08, cd30267633, 88d9bdc5b2, 06f27357b4,
fe351e8491, 53f7f55e8c, 2c43d99aa5, 26a6919714, d6225c5231, 48d37f3402,
5c383b641b, 658818b2d0, 70b19e6270, 5311e9bae3, fbfbdaa298, a7c4c39dd0,
440313eb72, 9c687b562e, 15c1d4f43f, ffab41d714, bac7d0b31e, 59fd4d2b03,
9d7ee8af3c, c2fc96be71, 8073af6e06, bf8bb16b52, 8f62d71e11, 3618481e23,
949cefc251, 611d586981, 2c9565024f, 20469bf749, 6174a47924, 91e7adffb1,
c51906ed01, c55f3f292b, e3a381d5a1, 25bcd76017, 45e4f28766, 6795a22afe,
5fa422c4ea, f1612ef508, d3096f32e0, 99e491df40, 8d92e5450e, ba3f58c63c,
771fe3e360, 12ab6aaeb8, 1219faf9f1, d998ac2e1e, 6311525346, d5d1efd188,
23da40b559, be8f1ac511, c5ed83178a, 626fd75173, c0d9d0f051, 7578d359af,
aa94fe53e0, 4727910b5e, 15eb6adf92, 5820992a8e, 38eb871383, 28fe33e588,
b2195734cc, b2e4796901, 78c3b7627f, e0b21b9a19, 2883a8dc63, 354df10ea9,
3fb777a8f0, f044c8988e, ec2a830876, 9628cc0728, c7dcbd7f42, fc43c56369,
52bd496006, 18f8acc60e, 773a82d539, 46681753cd, 29601254fc, 4c8660d41a,
2f34a97c6e, 236ffa6c98, 771cf6ea50, 31e6bcf9be, d8c47603d9, f45ae1833e,
8e1beb11f4, 27fed6136a, fa00e84794, f915f8fc7a, dc7f37b901, 9ef12f4651,
e8355087ea, a1044e3bd1, 04ea3ab7e0, 133b2e4728, 71170f5713, ff0723abc6,
cb42205061, b2f3eeee05, d8efa60ab7, bbc817013e, 263735379e, f0d2df3d15,
c7bce65919, 4303f06426, 183eb6158a, 4296c7d3ae, 222990d821, 91ae4ec8ee,
9dae28e2f0, 2fac82434b, eda6f4e1a8, f73da49f62, 512638ed6e, 5f974bc2bb,
5e50090bfd, cb1ac4a58c, da21305989, 27313ee2c4, d4493295ff, 9eec9eabf6,
854e6072a2, 21e22d4e1a, 9ed8a01519, df2dee2402, e9bfbedfed, 5edfedf642,
92847e3381, ca3fa8de20, 74fa1a40ca, cd1c79f90f, a27d9601f5, c6aee9f63e,
9c3ac3e547, b4d983b45a, d6c408445e, 2af17c1f53, b8394f677b, 1ad2ba8507,
71f857b1de, e27312df10, 862aea4a33, 5cba016962, 455f5717ea, 48408dc6a3,
2c48bab91a, dd8d5c87ed, 55abaa1bc8, 4e4589ba6f, 8d499401f0, df65f40bcd,
3c3d7ba8a7, f6f03172f1, 6ae1559bcd, 0f38eb221e, f4ef4a5a3e, 01ba660fe7,
e80a5e3cb3, 3233e157cf, 2b1ed89ec3, 631605eff1, eb4dc47ba9, 566a4f2639,
7416bc79cf, 81b0d12dfe, 8c32e4f8a4, 9147faed36, 2b33ce558d, 956eac3972,
2a74a394a4, 2ca07ac215, 8d404d8e9f, e954db8444, fa72ea29d3, 8176d5729f,
0c541d73e7, 34c10fc91c, 9c11768b9d, 962a42dbd5, db711b4b53, 7f63fabeee,
2ae7ccf945, ac8743269c, 557b346c07, 79b3f989fc, ee0e460b26, cc8f5e275b,
7f936862d2, db635738ad, 0c7afef8f4, b9328960cc, 6f916b9d8e, 85c3293ef1,
3838921ca3, de20954fd6, b9986396bb, 1931b4b24c, 12ba9915a2, b61a5b8439,
c08442b158, 8e5d649048, 9b63a35da6, ae6a000807, 954f40e550, 434ce947b0,
9c2d6cec51, 824638594b, 1709ff2d02, f4f3c44dc1, 4ed049739a, 4ec7d58249,
fec8b44942, 3fe9cfc232, 21a343d003, 80762eb60a, e49b4ef322, 1470b37193,
e55863e375, 4b83a9388e, 781821ac9e, cd9deafc51, 319dadb79c, a44c18c621,
3e146845b4, b4e483b179, de28e68532, 3a4adcb676, 85e1b07544, f4759f05e7,
68ce437b03, b7a6554ee9, 3efcd5103b, 39e4cd8f3f, 85b39d7fbe, 9a44228c71,
45e2c982f7, 82ae18605a, c07f5c034f, 27e3d1745a, 4672cd360f, 36ae2fcfd7,
c6edab5990, 9e97cb530a, 12daaf546d, 4eb02743cd, 225d8ace17, e3c5acfcad,
f6710465ef, 1e4edeb858, 7756f4751a, 74d062851c, 78bb2ef25a, 8a73d6a840,
ad49d05780, 2c07ca2e09, 801af00ddb, d3b8ef1ae5, eca6451832, 4d1f8ed7c9,
adf466519f, a028e59699, 2ff16fa2a5, 853963f833, f8112b5d57, 50c8ff548f,
3e95c89310, 1daa5eb030, 4e02c52aee, 645d04083c, 5f17e6e0db, 0bfaba0a82,
5c33a8afa7, 9f2f92c379, 19a0e72752, 72aae6bf3b, d8d3023334, ef15d95416,
c63ec3e96b, 5903271c4d, a2ec7f2f60, 0c5c1debde, 524763cfed, 94f144e9b3,
a60c3156c6, 8f90d413a1, d7e3dae323, 2cd5a5f048, c4d8d7087e, 02e0598506,
a38bbfd603, 105a1b5a1b, f3610f1a02, 39a19b144d, 00a62ca126, 767517f6be,
afd49d7bd4, d589f3a3a3, 15ad444c40, ea0c593a75, af8ce2d2ea, 691f86983b,
6692f5a3c0, 3e1a8961a2, eec251805a, b6d55f0623, 358e7387ea, d090136085,
24cebcc9a1, d0757c4505, 10caab8590, 2ccb657fca, 686207b59a, cda7448314,
2840880e95, 36c4a7df27, cd910aafa9, ccb474e424, e0e7dcdb5c, 71c4e892f6,
2eb9f19236, 6c2bb34ca3, 69c6913668, f8b4dfed38, 4f275cc44b, 9b03fa1074,
c8b9198f3b, 75479531e0, 7543882d6c, 0f044e2f47, 4dfa0737ac, c452a9f7ba,
7c34fb8ce0, 4c2c4c85f1, 107664dbf1, 8e7c432374, c25a844410, 21696bec1f,
fb9f3c8961, bb7409fbc7, 67b244b8e4, bb209e8ce7, 65e288c703, 7f945be732,
7b9ea44354, ba0ed7cbc7, 3a69e3d9da, e194ca85a4, 18674d67b0, bdc7f3fb57,
bcc99b274f, c6fc4e8340, bfe07efa6d, 473702845f, abb1838845, 2310eb6844,
378ed90653, fe3018cd35, 793f53e403, c079c5922c, b007b3ae1b, dd2192ef97,
c7df07ac70, 695e23bd4e, e0dea0c27e, 5362b2045d, 85ab591d27, 9f9dc88643,
e01ece2fcd, d06e6fdde1, 1292bd9ba4, db7aad26f5, 896fd64de9, 1cb048effe,
ac049129de, b783a0d09a, e530c13f87, fadfb9443c, 81aa2a8279, 27c0eb8c99,
07b319d827, 01477809ac, 54d451de88, 43e07fda0e, 39b50f63c7, 72f6f5dab4,
9b14550d0a, 3cb168bed3, 20778b2df6, 1db077618d, 2e11cb5471, c596a9e462,
00694ce40a, b5d29a0796
.github/CODEOWNERS (vendored, new file, +1)
@@ -0,0 +1 @@
+* @penberg
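In CODEOWNERS syntax, each line maps a path pattern to one or more GitHub users or teams, and the last matching pattern takes precedence. A minimal sketch of the format (only the `*` entry is from this diff; the second entry is hypothetical, for illustration):

```
# Hypothetical CODEOWNERS entries, for illustration:
*          @penberg     # @penberg owns every file by default
docs/      @docs-team   # a later, more specific match like this would win for docs/
```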
.gitignore (vendored, new file, +9)
@@ -0,0 +1,9 @@
+/target/
+/bin/
+dependency-reduced-pom.xml
+scylla-apiclient/target/
+.classpath
+.project
+.settings
+build/
+/.idea/
README.md (35 lines changed)
@@ -1,22 +1,25 @@
-# Urchin JMX Interface
-This is the JMX interface for urchin.
-## Compile
-To compile do:
-```
-mvn install
-```
-
-## Run
-The maven will create an uber-jar with all dependency under the target directory. You should run it with the remote jmx enable so the nodetool will be able to connect to it.
-
-```
-java -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=7199 -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -jar target/urchin-mbean-1.0.jar
-```
-
-## Setting IP and Port
-By default the the JMX would connect to a node on the localhost
-on port 10000.
-
-The jmx API uses the system properties to set the IP address and Port.
-To change the ip address use the apiaddress property (e.g. -Dapiaddress=1.1.1.1)
-To change the port use the apiport (e.g. -Dapiport=10001)
+# Scylla JMX Server
+
+Scylla JMX server implements the Apache Cassandra JMX interface for compatibility with tooling such as `nodetool`. The JMX server uses Scylla's REST API to communicate with a Scylla server.
+
+## Compiling
+
+To compile JMX server, run:
+
+```console
+$ mvn --file scylla-jmx-parent/pom.xml package
+```
+
+## Running
+
+To start the JMX server, run:
+
+```console
+$ ./scripts/scylla-jmx
+```
+
+To get help on supported options:
+
+```console
+$ ./scripts/scylla-jmx --help
+```
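Since the new README positions the server as a drop-in Apache Cassandra JMX endpoint, standard Cassandra tooling can be pointed at it directly. A minimal sketch, assuming JMX listens on the conventional port 7199 on the local host (illustrative values, not mandated by the README):

```console
$ nodetool -h localhost -p 7199 status
```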
[file name not preserved in extraction]
@@ -1,19 +1,49 @@
 #!/bin/sh
 
-VERSION=1.0
+PRODUCT=scylla
+VERSION=666.development
 
 if test -f version
 then
     SCYLLA_VERSION=$(cat version | awk -F'-' '{print $1}')
     SCYLLA_RELEASE=$(cat version | awk -F'-' '{print $2}')
 else
-    DATE=$(date +%Y%m%d)
+    DATE=$(date --utc +%Y%m%d)
     GIT_COMMIT=$(git log --pretty=format:'%h' -n 1)
     SCYLLA_VERSION=$VERSION
     SCYLLA_RELEASE=$DATE.$GIT_COMMIT
 fi
 
+usage() {
+    echo "usage: $0"
+    echo " [--version product-version-release] # override p-v-r"
+    exit 1
+}
+
+OVERRIDE=
+while [[ $# > 0 ]]; do
+    case "$1" in
+        --version)
+            OVERRIDE="$2"
+            shift 2
+            ;;
+        *)
+            usage
+            ;;
+    esac
+done
+
+if [[ -n "$OVERRIDE" ]]; then
+    # regular expression for p-v-r: alphabetic+dashes for product, trailing non-dashes
+    # for release, everything else for version
+    RE='^([-a-z]+)-(.+)-([^-]+)$'
+    PRODUCT="$(sed -E "s/$RE/\\1/" <<<"$OVERRIDE")"
+    SCYLLA_VERSION="$(sed -E "s/$RE/\\2/" <<<"$OVERRIDE")"
+    SCYLLA_RELEASE="$(sed -E "s/$RE/\\3/" <<<"$OVERRIDE")"
+fi
+
 echo "$SCYLLA_VERSION-$SCYLLA_RELEASE"
 mkdir -p build
 echo "$SCYLLA_VERSION" > build/SCYLLA-VERSION-FILE
 echo "$SCYLLA_RELEASE" > build/SCYLLA-RELEASE-FILE
+echo "$PRODUCT" > build/SCYLLA-PRODUCT-FILE
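The `--version` override added above is split by the regular expression `RE` into product (letters and dashes), version, and release (the trailing dash-free component). A worked example with a hypothetical p-v-r string, invoking the script as `./version-gen` purely for illustration since its real name is not preserved in this extraction:

```console
$ ./version-gen --version scylla-enterprise-2020.1.0-0.20200506.abc123
2020.1.0-0.20200506.abc123
$ cat build/SCYLLA-PRODUCT-FILE
scylla-enterprise
```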
debian/control (vendored, deleted, -13)
@@ -1,13 +0,0 @@
-Source: scylla-jmx
-Maintainer: Takuya ASADA <syuu@scylladb.com>
-Homepage: http://scylladb.com
-Section: database
-Priority: optional
-Standards-Version: 3.9.2
-Build-Depends: debhelper (>= 9), maven, openjdk-7-jdk
-
-Package: scylla-jmx
-Architecture: all
-Depends: ${shlibs:Depends}, ${misc:Depends}, openjdk-7-jre-headless, scylla-server
-Description: Scylla JMX server binaries
- Scylla is a highly scalable, eventually consistent, distributed, partitioned row DB.
debian/copyright (vendored, deleted, -12)
@@ -1,12 +0,0 @@
-Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
-Upstream-Name: Scylla DB
-Upstream-Contact: http://www.scylladb.com/
-Source: https://github.com/scylladb/scylla-jmx
-
-Files: *
-Copyright: Copyright (C) 2015 ScyllaDB
-License: AGPL-3.0
-
-Files: debian/*
-Copyright: Copyright (C) 2015 ScyllaDB
-License: AGPL-3.0
debian/rules (vendored, deleted, -27)
@@ -1,27 +0,0 @@
-#!/usr/bin/make -f
-
-DOC = $(CURDIR)/debian/scylla-jmx/usr/share/doc/scylla-jmx
-DEST = $(CURDIR)/debian/scylla-jmx/usr/lib/scylla/jmx
-
-override_dh_auto_build:
-	mvn install
-
-override_dh_auto_clean:
-	rm -rf target
-
-override_dh_auto_install:
-	mkdir -p $(CURDIR)/debian/scylla-jmx/etc/default/ && \
-	cp $(CURDIR)/dist/common/sysconfig/scylla-jmx \
-		$(CURDIR)/debian/scylla-jmx/etc/default/
-
-	mkdir -p $(DOC) && \
-	cp $(CURDIR)/*.md $(DOC)
-	cp $(CURDIR)/NOTICE $(DOC)
-	cp $(CURDIR)/LICENSE.AGPL $(DOC)
-
-	mkdir -p $(DEST)
-	cp $(CURDIR)/dist/common/scripts/* $(DEST)
-	cp $(CURDIR)/target/urchin-mbean-1.0.jar $(DEST)
-
-%:
-	dh $@
debian/scylla-jmx.upstart (vendored, deleted, -22)
@@ -1,22 +0,0 @@
-# scylla-jmx - ScyllaDB
-#
-# ScyllaDB
-
-description "ScyllaDB jmx"
-
-start on starting scylla-server
-stop on runlevel [!2345]
-
-respawn
-respawn limit 10 5
-umask 022
-
-expect fork
-
-console log
-
-script
-    . /etc/default/scylla-jmx
-    export JMX_LOCAL_PORT
-    /usr/lib/scylla/jmx/jmx_run
-end script
dist/common/scripts/jmx_run (vendored, deleted, -5)
@@ -1,5 +0,0 @@
-#!/bin/sh -e
-
-args="-Djava.net.preferIPv4Stack=true -Dcom.sun.management.jmxremote.port=$JMX_LOCAL_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_LOCAL_PORT -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false"
-
-exec java $args -jar /usr/lib/scylla/jmx/urchin-mbean-1.0.jar
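For context, those `-Dcom.sun.management.jmxremote.*` properties expose an unauthenticated, non-SSL JMX endpoint on `$JMX_LOCAL_PORT`, so any JMX client can attach to it. A sketch assuming the default port 7199 (an illustrative value, not fixed by this script):

```console
$ jconsole localhost:7199
```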
dist/common/sysconfig/scylla-jmx (vendored, 33 lines changed)
@@ -1 +1,32 @@
-JMX_LOCAL_PORT=7199
+# scylla home dir
+SCYLLA_HOME=/var/lib/scylla
+
+# scylla config dir
+SCYLLA_CONF=/etc/scylla
+
+# The jmx port to open
+# SCYLLA_JMX_PORT="-jp 7199"
+
+# The API port to connect to
+#SCYLLA_API_PORT="-p 10000"
+
+# API address to connect to
+#SCYLLA_API_ADDR="-a localhost"
+
+# use alternate jmx address
+#SCYLLA_JMX_ADDR="-ja localhost"
+
+# A configuration file to use
+#SCYLLA_JMX_FILE="-cf /etc/scylla.d/scylla-user.cfg"
+
+# The location of the jmx proxy jar file
+SCYLLA_JMX_LOCAL="-l /opt/scylladb/jmx"
+
+# allow to run remotely
+#SCYLLA_JMX_REMOTE="-r"
+
+# allow debug
+#SCYLLA_JMX_DEBUG="-d"
+
+# specify JVM options
+JAVA_TOOL_OPTIONS=""
dist/common/systemd/scylla-jmx.service (vendored, new file, +18)
@@ -0,0 +1,18 @@
+[Unit]
+Description=Scylla JMX
+Requires=scylla-server.service
+After=scylla-server.service
+
+[Service]
+Type=simple
+EnvironmentFile=/etc/sysconfig/scylla-jmx
+User=scylla
+Group=scylla
+ExecStart=/opt/scylladb/jmx/scylla-jmx $SCYLLA_JMX_PORT $SCYLLA_API_PORT $SCYLLA_API_ADDR $SCYLLA_JMX_ADDR $SCYLLA_JMX_FILE $SCYLLA_JMX_LOCAL $SCYLLA_JMX_REMOTE $SCYLLA_JMX_DEBUG
+KillMode=process
+Restart=on-abnormal
+Slice=scylla-helper.slice
+WorkingDirectory=/var/lib/scylla
+
+[Install]
+WantedBy=multi-user.target
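The unit pulls in the sysconfig file shown above via `EnvironmentFile`, so any uncommented `SCYLLA_*` variables are expanded onto the `scylla-jmx` command line by `ExecStart`. A typical operator workflow, sketched with standard systemd commands (not part of this diff):

```console
$ sudo systemctl daemon-reload            # pick up the new unit file
$ sudo systemctl enable --now scylla-jmx  # start now and on every boot
$ systemctl status scylla-jmx             # confirm the service is running
```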
[file name not preserved in extraction]
@@ -1,4 +1,4 @@
-scylla-jmx (0.10-1) unstable; urgency=medium
+%{product}-jmx (%{version}-%{release}-%{revision}) %{codename}; urgency=medium
 
   * Initial release.
 
dist/debian/control.template (vendored, new file, +14)
@@ -0,0 +1,14 @@
+Source: %{product}-jmx
+Maintainer: Takuya ASADA <syuu@scylladb.com>
+Homepage: http://scylladb.com
+Section: database
+Priority: optional
+Standards-Version: 3.9.5
+Rules-Requires-Root: no
+
+Package: %{product}-jmx
+Architecture: all
+Depends: ${shlibs:Depends}, ${misc:Depends}, openjdk-8-jre-headless | openjdk-8-jre | oracle-java8-set-default | adoptopenjdk-8-hotspot-jre | openjdk-11-jre-headless | openjdk-11-jre | oracle-java11-set-default , %{product}-server
+Description: Scylla JMX server binaries
+ Scylla is a highly scalable, eventually consistent, distributed,
+ partitioned row DB.
dist/debian/debian/copyright (vendored, new file, +706)
@@ -0,0 +1,706 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: Scylla DB
+Upstream-Contact: http://www.scylladb.com/
+Source: https://github.com/scylladb/scylla-jmx
+
+Files: *
+Copyright: Copyright (C) 2015 ScyllaDB
+License: AGPL-3.0
+
+Files: debian/*
+Copyright: Copyright (C) 2015 ScyllaDB
+License: AGPL-3.0
+
+Files: scripts/git-archive-all
+Copyright: Copyright (c) 2010 Ilya Kulakov
+License: MIT
+
+License: AGPL-3.0
+ GNU AFFERO GENERAL PUBLIC LICENSE
+ Version 3, 19 November 2007
+ [... remainder of the stanza is the verbatim AGPL-3.0 license text ...]
+
+License: MIT
+ Copyright (c) 2010 Ilya Kulakov
+ [... stanza continues with the verbatim MIT license text; the source page is truncated here ...]
|
||||||
|
.
|
||||||
|
.
|
||||||
|
.
|
||||||
|
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||||
|
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||||
|
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||||
|
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||||
|
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||||
|
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||||
|
THE SOFTWARE.
|
23  dist/debian/debian/rules  (vendored, executable file)
@@ -0,0 +1,23 @@
#!/usr/bin/make -f

include /usr/share/dpkg/pkg-info.mk

override_dh_auto_build:

override_dh_auto_clean:

override_dh_auto_install:
	dh_auto_install
	cd scylla-jmx; ./install.sh --packaging --root "$(CURDIR)/debian/tmp" --sysconfdir /etc/default

override_dh_installinit:
ifeq ($(DEB_SOURCE),scylla-jmx)
	dh_installinit --no-start
else
	dh_installinit --no-start --name scylla-jmx
endif

override_dh_strip_nondeterminism:

%:
	dh $@
4  dist/debian/debian/scylla-jmx.install  (vendored, new file)
@@ -0,0 +1,4 @@
etc/default/scylla-jmx
etc/systemd/system/scylla-jmx.service.d/sysconfdir.conf
opt/scylladb/jmx/*
usr/lib/scylla/jmx/*
7  dist/debian/debian/scylla-jmx.postinst  (vendored, new file)
@@ -0,0 +1,7 @@
#!/bin/sh

if [ -d /run/systemd/system ]; then
    systemctl --system daemon-reload >/dev/null || true
fi

#DEBHELPER#
7  dist/debian/debian/scylla-jmx.postrm  (vendored, new file)
@@ -0,0 +1,7 @@
#!/bin/sh

if [ -d /run/systemd/system ]; then
    systemctl --system daemon-reload >/dev/null || true
fi

#DEBHELPER#
1  dist/debian/debian/scylla-jmx.service  (vendored, symbolic link)
@@ -0,0 +1 @@
../../common/systemd/scylla-jmx.service
1  dist/debian/debian/source/format  (vendored, new file)
@@ -0,0 +1 @@
3.0 (quilt)
1  dist/debian/debian/source/options  (vendored, new file)
@@ -0,0 +1 @@
extend-diff-ignore = ^build/
80  dist/debian/debian_files_gen.py  (vendored, executable file)
@@ -0,0 +1,80 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 ScyllaDB
#

#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
#

import string
import os
import shutil
import re
from pathlib import Path

class DebianFilesTemplate(string.Template):
    delimiter = '%'

scriptdir = os.path.dirname(__file__)

with open(os.path.join(scriptdir, 'changelog.template')) as f:
    changelog_template = f.read()

with open(os.path.join(scriptdir, 'control.template')) as f:
    control_template = f.read()

with open('build/SCYLLA-PRODUCT-FILE') as f:
    product = f.read().strip()

with open('build/SCYLLA-VERSION-FILE') as f:
    version = f.read().strip().replace('.rc', '~rc').replace('_', '-')

with open('build/SCYLLA-RELEASE-FILE') as f:
    release = f.read().strip()

if os.path.exists('build/debian/debian'):
    shutil.rmtree('build/debian/debian')
shutil.copytree('dist/debian/debian', 'build/debian/debian')

if product != 'scylla':
    for p in Path('build/debian/debian').glob('scylla-*'):
        # pat1: scylla-server.service
        #    -> scylla-enterprise-server.scylla-server.service
        # pat2: scylla-server.scylla-fstrim.service
        #    -> scylla-enterprise-server.scylla-fstrim.service
        # pat3: scylla-conf.install
        #    -> scylla-enterprise-conf.install
        if m := re.match(r'^scylla(-[^.]+)\.service$', p.name):
            p.rename(p.parent / f'{product}{m.group(1)}.{p.name}')
        elif m := re.match(r'^scylla(-[^.]+\.scylla-[^.]+\.[^.]+)$', p.name):
            p.rename(p.parent / f'{product}{m.group(1)}')
        else:
            p.rename(p.parent / p.name.replace('scylla', product, 1))

s = DebianFilesTemplate(changelog_template)
changelog_applied = s.substitute(product=product, version=version, release=release, revision='1', codename='stable')

s = DebianFilesTemplate(control_template)
control_applied = s.substitute(product=product)

with open('build/debian/debian/changelog', 'w') as f:
    f.write(changelog_applied)

with open('build/debian/debian/control', 'w') as f:
    f.write(control_applied)
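The generator is meant to run from the top of the source tree once the version files exist; reloc/build_reloc.sh below drives it exactly that way. A minimal sketch of a standalone run, assuming a changelog.template whose placeholders use the '%' delimiter declared above (the template line and rendered output shown here are hypothetical):

./SCYLLA-VERSION-GEN                 # writes build/SCYLLA-{PRODUCT,VERSION,RELEASE}-FILE
./dist/debian/debian_files_gen.py    # copies dist/debian/debian and fills in the templates
# With delimiter = '%', a hypothetical template line such as
#   %{product}-jmx (%{version}-%{revision}) %{codename}; urgency=medium
# would be rendered by string.Template.substitute() into, e.g.,
#   scylla-jmx (1.1-1) stable; urgency=medium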
34  dist/redhat/build_rpm.sh  (vendored, deleted)
@@ -1,34 +0,0 @@
#!/bin/sh -e

RPMBUILD=`pwd`/build/rpmbuild

if [ ! -e dist/redhat/build_rpm.sh ]; then
    echo "run build_rpm.sh in top of scylla-jmx dir"
    exit 1
fi

sudo yum install -y rpm-build git

OS=`awk '{print $1}' /etc/redhat-release`
if [ "$OS" = "Fedora" ] && [ ! -f /usr/bin/mock ]; then
    sudo yum -y install mock
elif [ "$OS" = "CentOS" ] && [ ! -f /usr/bin/yum-builddep ]; then
    sudo yum -y install yum-utils
fi

VERSION=$(./SCYLLA-VERSION-GEN)
SCYLLA_VERSION=$(cat build/SCYLLA-VERSION-FILE)
SCYLLA_RELEASE=$(cat build/SCYLLA-RELEASE-FILE)
mkdir -p $RPMBUILD/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}
git archive --format=tar --prefix=scylla-jmx-$SCYLLA_VERSION/ HEAD -o build/rpmbuild/SOURCES/scylla-jmx-$VERSION.tar
cp dist/redhat/scylla-jmx.spec.in $RPMBUILD/SPECS/scylla-jmx.spec
sed -i -e "s/@@VERSION@@/$SCYLLA_VERSION/g" $RPMBUILD/SPECS/scylla-jmx.spec
sed -i -e "s/@@RELEASE@@/$SCYLLA_RELEASE/g" $RPMBUILD/SPECS/scylla-jmx.spec

if [ "$OS" = "Fedora" ]; then
    rpmbuild -bs --define "_topdir $RPMBUILD" $RPMBUILD/SPECS/scylla-jmx.spec
    /usr/bin/mock rebuild --resultdir=`pwd`/build/rpms $RPMBUILD/SRPMS/scylla-jmx-$VERSION*.src.rpm
else
    sudo yum-builddep -y $RPMBUILD/SPECS/scylla-jmx.spec
    rpmbuild -ba --define "_topdir $RPMBUILD" $RPMBUILD/SPECS/scylla-jmx.spec
fi
75  dist/redhat/scylla-jmx.spec  (vendored, new file)
@@ -0,0 +1,75 @@
Name:           %{product}-jmx
Version:        %{version}
Release:        %{release}%{?dist}
Summary:        Scylla JMX
Group:          Applications/Databases

License:        AGPLv3
URL:            http://www.scylladb.com/
Source0:        %{reloc_pkg}

BuildArch:      noarch
BuildRequires:  systemd-units
Requires:       %{product}-server jre-1.8.0-headless
AutoReqProv:    no

%description

%prep
%setup -q -n scylla-jmx

%build

%install
./install.sh --packaging --root "$RPM_BUILD_ROOT"

%pre
/usr/sbin/groupadd scylla 2> /dev/null || :
/usr/sbin/useradd -g scylla -s /sbin/nologin -r -d ${_sharedstatedir}/scylla scylla 2> /dev/null || :
ping -c1 `hostname` > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo
echo "**************************************************************"
echo "* WARNING: You need to add hostname on /etc/hosts, otherwise *"
echo "* scylla-jmx will not be able to start up.                   *"
echo "**************************************************************"
echo
fi

%post
if [ $1 -eq 1 ] ; then
/usr/bin/systemctl preset scylla-jmx.service ||:
fi

/usr/bin/systemctl daemon-reload ||:

%preun
if [ $1 -eq 0 ] ; then
/usr/bin/systemctl --no-reload disable scylla-jmx.service ||:
/usr/bin/systemctl stop scylla-jmx.service ||:
fi

%postun
/usr/bin/systemctl daemon-reload ||:

%clean
rm -rf $RPM_BUILD_ROOT

%files
%defattr(-,root,root)

%config(noreplace) %{_sysconfdir}/sysconfig/scylla-jmx
%{_unitdir}/scylla-jmx.service
/opt/scylladb/jmx/scylla-jmx
/opt/scylladb/jmx/scylla-jmx-1.1.jar
/opt/scylladb/jmx/symlinks/scylla-jmx
%{_prefix}/lib/scylla/jmx/scylla-jmx
%{_prefix}/lib/scylla/jmx/scylla-jmx-1.1.jar
%{_prefix}/lib/scylla/jmx/symlinks/scylla-jmx

%changelog
* Fri Aug 7 2015 Takuya ASADA <syuu@cloudius-systems.com>
- initial version of scylla-tools.spec
74  dist/redhat/scylla-jmx.spec.in  (vendored, deleted)
@@ -1,74 +0,0 @@
Name:           scylla-jmx
Version:        @@VERSION@@
Release:        @@RELEASE@@%{?dist}
Summary:        Scylla JMX
Group:          Applications/Databases

License:        AGPLv3
URL:            http://www.scylladb.com/
Source0:        %{name}-@@VERSION@@-@@RELEASE@@.tar

BuildArch:      noarch
BuildRequires:  maven systemd-units java-devel
Requires:       scylla-server java-headless

%description

%prep
%setup -q

%build
mvn install

%install
rm -rf $RPM_BUILD_ROOT
mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig/
mkdir -p $RPM_BUILD_ROOT%{_unitdir}
mkdir -p $RPM_BUILD_ROOT%{_prefix}/lib/scylla/

install -m644 dist/common/sysconfig/scylla-jmx $RPM_BUILD_ROOT%{_sysconfdir}/sysconfig/
install -m644 dist/redhat/systemd/scylla-jmx.service $RPM_BUILD_ROOT%{_unitdir}/
install -d -m755 $RPM_BUILD_ROOT%{_prefix}/lib/scylla
install -d -m755 $RPM_BUILD_ROOT%{_prefix}/lib/scylla/jmx
install -m644 target/urchin-mbean-1.0.jar $RPM_BUILD_ROOT%{_prefix}/lib/scylla/jmx/
install -m755 dist/common/scripts/* $RPM_BUILD_ROOT%{_prefix}/lib/scylla/jmx

%pre
/usr/sbin/groupadd scylla 2> /dev/null || :
/usr/sbin/useradd -g scylla -s /sbin/nologin -r -d ${_sharedstatedir}/scylla scylla 2> /dev/null || :
ping -c1 `hostname` > /dev/null 2>&1
if [ $? -ne 0 ]; then
echo
echo "**************************************************************"
echo "* WARNING: You need to add hostname on /etc/hosts, otherwise *"
echo "* scylla-jmx will not be able to start up.                   *"
echo "**************************************************************"
echo
fi

%post
%systemd_post scylla-jmx.service

%preun
%systemd_preun scylla-jmx.service

%postun
%systemd_postun

%clean
rm -rf $RPM_BUILD_ROOT

%files
%defattr(-,root,root)

%{_sysconfdir}/sysconfig/scylla-jmx
%{_unitdir}/scylla-jmx.service
%{_prefix}/lib/scylla/jmx/jmx_run
%{_prefix}/lib/scylla/jmx/urchin-mbean-1.0.jar

%changelog
* Fri Aug 7 2015 Takuya ASADA <syuu@cloudius-systems.com>
- initial version of scylla-tools.spec
16  dist/redhat/systemd/scylla-jmx.service  (vendored, deleted)
@@ -1,16 +0,0 @@
[Unit]
Description=Scylla JMX
Requires=scylla-server.service
After=scylla-server.service

[Service]
Type=simple
EnvironmentFile=/etc/sysconfig/scylla-jmx
User=scylla
Group=scylla
ExecStart=/usr/lib/scylla/jmx/jmx_run
KillMode=process
Restart=always

[Install]
WantedBy=multi-user.target
10  dist/ubuntu/build_deb.sh  (vendored, deleted)
@@ -1,10 +0,0 @@
#!/bin/sh -e

if [ ! -e dist/ubuntu/build_deb.sh ]; then
    echo "run build_deb.sh in top of scylla dir"
    exit 1
fi

sudo apt-get -y install debhelper maven openjdk-7-jdk

debuild -r fakeroot --no-tgz-check -us -uc
3  git-archive-all-license.txt  (new file)
@@ -0,0 +1,3 @@
License: MIT

https://github.com/Kentzo/git-archive-all
26  install-dependencies.sh  (executable file)
@@ -0,0 +1,26 @@
#!/bin/bash
#
# This file is open source software, licensed to you under the terms
# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
# distributed with this work for additional information regarding copyright
# ownership. You may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

. /etc/os-release

if [ "$ID" = "ubuntu" ] || [ "$ID" = "debian" ]; then
    apt -y install maven openjdk-8-jdk-headless
elif [ "$ID" = "fedora" ] || [ "$ID" = "centos" ]; then
    dnf install -y maven java-1.8.0-openjdk-devel
fi
173  install.sh  (executable file)
@@ -0,0 +1,173 @@
#!/bin/bash
#
# Copyright (C) 2019 ScyllaDB
#

#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
#

set -e

print_usage() {
    cat <<EOF
Usage: install.sh [options]

Options:
  --root /path/to/root          alternative install root (default /)
  --prefix /prefix              directory prefix (default /usr)
  --nonroot                     shortcut of '--disttype nonroot'
  --sysconfdir /etc/sysconfig   specify sysconfig directory name
  --packaging                   use install.sh for packaging
  --without-systemd             skip installing systemd units
  --help                        this helpful message
EOF
    exit 1
}

root=/
sysconfdir=/etc/sysconfig
nonroot=false
packaging=false
without_systemd=false

while [ $# -gt 0 ]; do
    case "$1" in
        "--root")
            root="$2"
            shift 2
            ;;
        "--prefix")
            prefix="$2"
            shift 2
            ;;
        "--nonroot")
            nonroot=true
            shift 1
            ;;
        "--sysconfdir")
            sysconfdir="$2"
            shift 2
            ;;
        "--packaging")
            packaging=true
            shift 1
            ;;
        "--without-systemd")
            without_systemd=true
            shift 1
            ;;
        "--help")
            shift 1
            print_usage
            ;;
        *)
            print_usage
            ;;
    esac
done

check_usermode_support() {
    user=$(systemctl --help|grep -e '--user')
    [ -n "$user" ]
}

if ! $packaging; then
    has_java=false
    if [ -x /usr/bin/java ]; then
        javaver=$(/usr/bin/java -version 2>&1|head -n1|cut -f 3 -d " ")
        has_java=true
    fi
    if ! $has_java; then
        echo "Please install openjdk-8, openjdk-11, or openjdk-17 before running install.sh."
        exit 1
    fi
fi

if [ -z "$prefix" ]; then
    if $nonroot; then
        prefix=~/scylladb
    else
        prefix=/opt/scylladb
    fi
fi

rprefix=$(realpath -m "$root/$prefix")
if ! $nonroot; then
    retc="$root/etc"
    rsysconfdir="$root/$sysconfdir"
    rusr="$root/usr"
    rsystemd="$rusr/lib/systemd/system"
else
    retc="$rprefix/etc"
    rsysconfdir="$rprefix/$sysconfdir"
    rsystemd="$HOME/.config/systemd/user"
fi

install -d -m755 "$rsysconfdir"
if ! $without_systemd; then
    install -d -m755 "$rsystemd"
fi
install -d -m755 "$rprefix/scripts" "$rprefix/jmx" "$rprefix/jmx/symlinks"

install -m644 dist/common/sysconfig/scylla-jmx -Dt "$rsysconfdir"
if ! $without_systemd; then
    install -m644 dist/common/systemd/scylla-jmx.service -Dt "$rsystemd"
fi
if ! $nonroot && ! $without_systemd; then
    if [ "$sysconfdir" != "/etc/sysconfig" ]; then
        install -d -m755 "$retc"/systemd/system/scylla-jmx.service.d
        cat << EOS > "$retc"/systemd/system/scylla-jmx.service.d/sysconfdir.conf
[Service]
EnvironmentFile=
EnvironmentFile=$sysconfdir/scylla-jmx
EOS
    fi
elif ! $without_systemd; then
    install -d -m755 "$rsystemd"/scylla-jmx.service.d
    cat << EOS > "$rsystemd"/scylla-jmx.service.d/nonroot.conf
[Service]
EnvironmentFile=
EnvironmentFile=$retc/sysconfig/scylla-jmx
ExecStart=
ExecStart=$rprefix/jmx/scylla-jmx \$SCYLLA_JMX_PORT \$SCYLLA_API_PORT \$SCYLLA_API_ADDR \$SCYLLA_JMX_ADDR \$SCYLLA_JMX_FILE \$SCYLLA_JMX_LOCAL \$SCYLLA_JMX_REMOTE \$SCYLLA_JMX_DEBUG
User=
Group=
WorkingDirectory=$rprefix
EOS
fi

install -m644 scylla-jmx-1.1.jar "$rprefix/jmx"
install -m755 scylla-jmx "$rprefix/jmx"
ln -sf /usr/bin/java "$rprefix/jmx/symlinks/scylla-jmx"
if ! $nonroot; then
    install -m755 -d "$rusr"/lib/scylla/jmx/symlinks
    ln -srf "$rprefix"/jmx/scylla-jmx-1.1.jar "$rusr"/lib/scylla/jmx/
    ln -srf "$rprefix"/jmx/scylla-jmx "$rusr"/lib/scylla/jmx/
    ln -sf /usr/bin/java "$rusr"/lib/scylla/jmx/symlinks/scylla-jmx
fi

if $nonroot; then
    sed -i -e "s#/var/lib/scylla#$rprefix#g" "$rsysconfdir"/scylla-jmx
    sed -i -e "s#/etc/scylla#$rprefix/etc/scylla#g" "$rsysconfdir"/scylla-jmx
    sed -i -e "s#/opt/scylladb/jmx#$rprefix/jmx#g" "$rsysconfdir"/scylla-jmx
    if ! $without_systemd && check_usermode_support; then
        systemctl --user daemon-reload
    fi
    echo "Scylla-JMX non-root install completed."
elif ! $without_systemd && ! $packaging; then
    systemctl --system daemon-reload
fi
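For reference, here are a few ways this script is invoked; the first two are taken from the packaging files above, the third is a non-root developer install whose paths follow the defaults set in the script:

./install.sh --packaging --root "$PWD/debian/tmp" --sysconfdir /etc/default   # as debian/rules does (with make's $(CURDIR))
./install.sh --packaging --root "$RPM_BUILD_ROOT"                             # as the RPM %install section does
./install.sh --nonroot    # installs under ~/scylladb and uses user-mode systemd units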
108  pom.xml  (modified)
@@ -2,73 +2,81 @@
     xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
     <modelVersion>4.0.0</modelVersion>

-    <groupId>com.cloudius.urchin</groupId>
-    <artifactId>urchin-mbean</artifactId>
-    <version>1.0</version>
+    <artifactId>scylla-jmx</artifactId>
+    <version>1.1</version>
     <packaging>jar</packaging>

-    <name>Urchin MBean</name>
-    <properties>
-        <maven.compiler.target>1.7</maven.compiler.target>
-        <maven.compiler.source>1.7</maven.compiler.source>
-    </properties>
+    <parent>
+        <groupId>it.cavallium.scylladb.jmx</groupId>
+        <artifactId>scylla-jmx-parent</artifactId>
+        <version>1.1</version>
+        <relativePath>./scylla-jmx-parent/pom.xml</relativePath>
+    </parent>
+
+    <name>Scylla JMX</name>

     <dependencies>
         <dependency>
-            <groupId>org.glassfish.jersey.core</groupId>
-            <artifactId>jersey-common</artifactId>
-            <version>2.22.1</version>
+            <groupId>it.cavallium.scylladb.jmx</groupId>
+            <artifactId>scylla-apiclient</artifactId>
+            <version>1.1</version>
         </dependency>
-        <dependency>
-            <groupId>javax.ws.rs</groupId>
-            <artifactId>javax.ws.rs-api</artifactId>
-            <version>2.0.1</version>
-        </dependency>
-        <dependency>
-            <groupId>javax.ws.rs</groupId>
-            <artifactId>jsr311-api</artifactId>
-            <version>1.1.1</version>
-        </dependency>
-        <dependency>
-            <groupId>org.glassfish.jersey.core</groupId>
-            <artifactId>jersey-client</artifactId>
-            <version>2.22.1</version>
-        </dependency>

         <dependency>
             <groupId>junit</groupId>
             <artifactId>junit</artifactId>
-            <version>4.8.2</version>
+            <version>4.13.1</version>
             <scope>test</scope>
         </dependency>
-        <dependency>
-            <groupId>org.glassfish</groupId>
-            <artifactId>javax.json</artifactId>
-            <version>1.0.4</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.guava</groupId>
-            <artifactId>guava</artifactId>
-            <version>18.0</version>
-        </dependency>
-        <dependency>
-            <groupId>com.yammer.metrics</groupId>
-            <artifactId>metrics-core</artifactId>
-            <version>2.2.0</version>
-        </dependency>
-        <dependency>
-            <groupId>com.google.collections</groupId>
-            <artifactId>google-collections</artifactId>
-            <version>1.0</version>
-        </dependency>
     </dependencies>
     <build>
         <plugins>
+            <plugin>
+                <groupId>org.apache.maven.plugins</groupId>
+                <artifactId>maven-compiler-plugin</artifactId>
+                <version>3.10.1</version>
+                <configuration>
+                    <source>11</source>
+                    <target>11</target>
+                    <compilerArgs>
+                        <arg>--add-exports</arg>
+                        <arg>java.management/com.sun.jmx.mbeanserver=scylla.jmx</arg>
+                        <arg>--add-exports</arg>
+                        <arg>java.management/com.sun.jmx.interceptor=scylla.jmx</arg>
+                    </compilerArgs>
+                </configuration>
+            </plugin>
             <plugin>
                 <groupId>org.apache.maven.plugins</groupId>
                 <artifactId>maven-shade-plugin</artifactId>
-                <version>2.4.1</version>
+                <version>3.4.1</version>
+                <configuration>
+                    <artifactSet>
+                        <includes>
+                            <include>*:*</include>
+                        </includes>
+                        <excludes>
+                            <exclude>com.sun.activation:jakarta.activation</exclude>
+                        </excludes>
+                    </artifactSet>
+                    <filters>
+                        <filter>
+                            <artifact>*:*</artifact>
+                            <excludes>
+                                <exclude>module-info.class</exclude>
+                                <exclude>META-INF/versions/*/module-info.class</exclude>
+                                <exclude>META-INF/*.SF</exclude>
+                                <exclude>META-INF/*.DSA</exclude>
+                                <exclude>META-INF/*.RSA</exclude>
+                                <exclude>META-INF/MANIFEST.MF</exclude>
+                                <exclude>META-INF/*.MD</exclude>
+                                <exclude>META-INF/*.md</exclude>
+                                <exclude>META-INF/LICENSE</exclude>
+                                <exclude>META-INF/LICENSE.txt</exclude>
+                                <exclude>META-INF/NOTICE</exclude>
+                            </excludes>
+                        </filter>
+                    </filters>
+                </configuration>
                 <executions>
                     <execution>
                         <phase>package</phase>
@@ -79,7 +87,7 @@
                         <transformers>
                             <transformer implementation="org.apache.maven.plugins.shade.resource.ManifestResourceTransformer">
                                 <manifestEntries>
-                                    <Main-Class>com.cloudius.urchin.main.Main</Main-Class>
+                                    <Main-Class>com.scylladb.jmx.main.Main</Main-Class>
                                 </manifestEntries>
                             </transformer>
                         </transformers>
42  reloc/build_deb.sh  (executable file)
@@ -0,0 +1,42 @@
#!/bin/bash -e

print_usage() {
    echo "build_deb.sh --reloc-pkg build/scylla-jmx-package.tar.gz"
    echo "  --reloc-pkg specify relocatable package path"
    echo "  --builddir specify Debian package build path"
    exit 1
}

RELOC_PKG=build/scylla-jmx-package.tar.gz
BUILDDIR=build/debian
while [ $# -gt 0 ]; do
    case "$1" in
        "--reloc-pkg")
            RELOC_PKG=$2
            shift 2
            ;;
        "--builddir")
            BUILDDIR="$2"
            shift 2
            ;;
        *)
            print_usage
            ;;
    esac
done

RELOC_PKG=$(readlink -f $RELOC_PKG)
rm -rf "$BUILDDIR"/scylla-package "$BUILDDIR"/scylla-package.orig "$BUILDDIR"/debian
mkdir -p "$BUILDDIR"/scylla-package
tar -C "$BUILDDIR"/scylla-package -xpf $RELOC_PKG
cd "$BUILDDIR"/scylla-package

RELOC_PKG=$(readlink -f $RELOC_PKG)

mv scylla-jmx/debian debian
PKG_NAME=$(dpkg-parsechangelog --show-field Source)
# XXX: Drop the revision number from the version string.
#      Since it is always '1', this should be okay for now.
PKG_VERSION=$(dpkg-parsechangelog --show-field Version |sed -e 's/-1$//')
ln -fv $RELOC_PKG ../"$PKG_NAME"_"$PKG_VERSION".orig.tar.gz
debuild -rfakeroot -us -uc
70  reloc/build_reloc.sh  (executable file)
@@ -0,0 +1,70 @@
#!/bin/bash -e

. /etc/os-release

print_usage() {
    echo "build_reloc.sh --clean --nodeps"
    echo "  --clean clean build directory"
    echo "  --nodeps skip installing dependencies"
    echo "  --version V product-version-release string (overriding SCYLLA-VERSION-GEN)"
    exit 1
}

CLEAN=
NODEPS=
VERSION_OVERRIDE=
while [ $# -gt 0 ]; do
    case "$1" in
        "--clean")
            CLEAN=yes
            shift 1
            ;;
        "--nodeps")
            NODEPS=yes
            shift 1
            ;;
        "--version")
            VERSION_OVERRIDE="$2"
            shift 2
            ;;
        *)
            print_usage
            ;;
    esac
done

VERSION=$(./SCYLLA-VERSION-GEN ${VERSION_OVERRIDE:+ --version "$VERSION_OVERRIDE"})
# The command above generates build/SCYLLA-PRODUCT-FILE and the other
# version-related files.
PRODUCT=`cat build/SCYLLA-PRODUCT-FILE`
DEST="build/$PRODUCT-jmx-$VERSION.noarch.tar.gz"

is_redhat_variant() {
    [ -f /etc/redhat-release ]
}
is_debian_variant() {
    [ -f /etc/debian_version ]
}

if [ ! -e reloc/build_reloc.sh ]; then
    echo "run build_reloc.sh in top of scylla dir"
    exit 1
fi

if [ "$CLEAN" = "yes" ]; then
    rm -rf build target
fi

if [ -f "$DEST" ]; then
    rm "$DEST"
fi

if [ -z "$NODEPS" ]; then
    sudo ./install-dependencies.sh
fi

mvn -B --file scylla-jmx-parent/pom.xml install
./SCYLLA-VERSION-GEN ${VERSION_OVERRIDE:+ --version "$VERSION_OVERRIDE"}
./dist/debian/debian_files_gen.py
scripts/create-relocatable-package.py "$DEST"
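Taken together with build_rpm.sh and build_deb.sh, this suggests an end-to-end packaging run along the following lines. Note that --reloc-pkg has to point at the tarball this script actually wrote (its DEST name embeds the product and version strings), since the other two scripts default to a build/scylla-jmx-package.tar.gz path that this script does not produce:

reloc/build_reloc.sh --clean
reloc/build_rpm.sh --reloc-pkg build/scylla-jmx-*.noarch.tar.gz
reloc/build_deb.sh --reloc-pkg build/scylla-jmx-*.noarch.tar.gz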
52  reloc/build_rpm.sh  (executable file)
@@ -0,0 +1,52 @@
#!/bin/bash -e

print_usage() {
    echo "build_rpm.sh --reloc-pkg build/scylla-jmx-package.tar.gz"
    echo "  --reloc-pkg specify relocatable package path"
    echo "  --builddir specify rpmbuild directory"
    exit 1
}
RELOC_PKG=build/scylla-jmx-package.tar.gz
BUILDDIR=build/redhat
while [ $# -gt 0 ]; do
    case "$1" in
        "--reloc-pkg")
            RELOC_PKG=$2
            shift 2
            ;;
        "--builddir")
            BUILDDIR="$2"
            shift 2
            ;;
        *)
            print_usage
            ;;
    esac
done

RELOC_PKG=$(readlink -f $RELOC_PKG)
RPMBUILD=$(readlink -f $BUILDDIR)
mkdir -p "$BUILDDIR"
tar -C "$BUILDDIR" -xpf $RELOC_PKG scylla-jmx/SCYLLA-RELEASE-FILE scylla-jmx/SCYLLA-RELOCATABLE-FILE scylla-jmx/SCYLLA-VERSION-FILE scylla-jmx/SCYLLA-PRODUCT-FILE scylla-jmx/dist/redhat
cd "$BUILDDIR"/scylla-jmx

RELOC_PKG_BASENAME=$(basename "$RELOC_PKG")
SCYLLA_VERSION=$(cat SCYLLA-VERSION-FILE)
SCYLLA_RELEASE=$(cat SCYLLA-RELEASE-FILE)
VERSION=$SCYLLA_VERSION-$SCYLLA_RELEASE
PRODUCT=$(cat SCYLLA-PRODUCT-FILE)

mkdir -p $RPMBUILD/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS}

ln -fv $RELOC_PKG $RPMBUILD/SOURCES/

parameters=(
    -D"version $SCYLLA_VERSION"
    -D"release $SCYLLA_RELEASE"
    -D"product $PRODUCT"
    -D"reloc_pkg $RELOC_PKG_BASENAME"
)

cp dist/redhat/scylla-jmx.spec $RPMBUILD/SPECS
# This rpm can be installed on both Fedora and CentOS 7, so drop the
# distribution name from the file name.
rpmbuild -ba "${parameters[@]}" --define '_binary_payload w2.xzdio' --define "_topdir $RPMBUILD" --undefine "dist" $RPMBUILD/SPECS/scylla-jmx.spec
64  scripts/create-relocatable-package.py  (executable file)
@@ -0,0 +1,64 @@
#!/usr/bin/python3

#
# Copyright (C) 2018 ScyllaDB
#

#
# This file is part of Scylla.
#
# Scylla is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Scylla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Scylla. If not, see <http://www.gnu.org/licenses/>.
#

import argparse
import io
import os
import tarfile
import pathlib

RELOC_PREFIX='scylla-jmx'
def reloc_add(self, name, arcname=None, recursive=True, *, filter=None):
    if arcname:
        return self.add(name, arcname="{}/{}".format(RELOC_PREFIX, arcname))
    else:
        return self.add(name, arcname="{}/{}".format(RELOC_PREFIX, name))

tarfile.TarFile.reloc_add = reloc_add

ap = argparse.ArgumentParser(description='Create a relocatable scylla package.')
ap.add_argument('dest',
                help='Destination file (tar format)')

args = ap.parse_args()

output = args.dest

ar = tarfile.open(output, mode='w|gz')
# relocatable package format version = 2.2
with open('build/.relocatable_package_version', 'w') as f:
    f.write('2.2\n')
ar.add('build/.relocatable_package_version', arcname='.relocatable_package_version')

pathlib.Path('build/SCYLLA-RELOCATABLE-FILE').touch()
ar.reloc_add('build/SCYLLA-RELOCATABLE-FILE', arcname='SCYLLA-RELOCATABLE-FILE')
ar.reloc_add('build/SCYLLA-RELEASE-FILE', arcname='SCYLLA-RELEASE-FILE')
ar.reloc_add('build/SCYLLA-VERSION-FILE', arcname='SCYLLA-VERSION-FILE')
ar.reloc_add('build/SCYLLA-PRODUCT-FILE', arcname='SCYLLA-PRODUCT-FILE')
ar.reloc_add('dist')
ar.reloc_add('install.sh')
ar.reloc_add('target/scylla-jmx-1.1.jar', arcname='scylla-jmx-1.1.jar')
ar.reloc_add('scripts/scylla-jmx', arcname='scylla-jmx')
ar.reloc_add('README.md')
ar.reloc_add('NOTICE')
ar.reloc_add('build/debian/debian', arcname='debian')
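Listing an archive produced by this script shows the effect of the reloc_add wrapper: everything except the format marker lands under the scylla-jmx/ prefix. A sketch of such a listing (output abridged and illustrative; the tarball name depends on how the script was invoked):

$ tar -tzf build/scylla-jmx-package.tar.gz | head
.relocatable_package_version
scylla-jmx/SCYLLA-RELOCATABLE-FILE
scylla-jmx/SCYLLA-RELEASE-FILE
scylla-jmx/SCYLLA-VERSION-FILE
scylla-jmx/SCYLLA-PRODUCT-FILE
scylla-jmx/dist/
scylla-jmx/install.sh
scylla-jmx/scylla-jmx-1.1.jar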
494
scripts/git-archive-all
Executable file
494
scripts/git-archive-all
Executable file
@ -0,0 +1,494 @@
|
|||||||
|
#! /usr/bin/env python
|
||||||
|
# coding=utf-8
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
from __future__ import unicode_literals
|
||||||
|
|
||||||
|
__version__ = "1.9"
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from os import extsep, path, readlink, curdir
|
||||||
|
from subprocess import CalledProcessError, Popen, PIPE
|
||||||
|
import sys
|
||||||
|
import tarfile
|
||||||
|
from zipfile import ZipFile, ZipInfo, ZIP_DEFLATED
|
||||||
|
|
||||||
|
|
||||||
|
class GitArchiver(object):
|
||||||
|
"""
|
||||||
|
GitArchiver
|
||||||
|
|
||||||
|
Scan a git repository and export all tracked files, and submodules.
|
||||||
|
Checks for .gitattributes files in each directory and uses 'export-ignore'
|
||||||
|
pattern entries for ignore files in the archive.
|
||||||
|
|
||||||
|
>>> archiver = GitArchiver(main_repo_abspath='my/repo/path')
|
||||||
|
>>> archiver.create('output.zip')
|
||||||
|
"""
|
||||||
|
LOG = logging.getLogger('GitArchiver')
|
||||||
|
|
||||||
|
def __init__(self, prefix='', exclude=True, force_sub=False, extra=None, main_repo_abspath=None):
|
||||||
|
"""
|
||||||
|
@param prefix: Prefix used to prepend all paths in the resulting archive.
|
||||||
|
Extra file paths are only prefixed if they are not relative.
|
||||||
|
E.g. if prefix is 'foo' and extra is ['bar', '/baz'] the resulting archive will look like this:
|
||||||
|
/
|
||||||
|
baz
|
||||||
|
foo/
|
||||||
|
bar
|
||||||
|
@type prefix: string
|
||||||
|
|
||||||
|
@param exclude: Determines whether archiver should follow rules specified in .gitattributes files.
|
||||||
|
@type exclude: bool
|
||||||
|
|
||||||
|
@param force_sub: Determines whether submodules are initialized and updated before archiving.
|
||||||
|
@type force_sub: bool
|
||||||
|
|
||||||
|
@param extra: List of extra paths to include in the resulting archive.
|
||||||
|
@type extra: list
|
||||||
|
|
||||||
|
@param main_repo_abspath: Absolute path to the main repository (or one of subdirectories).
|
||||||
|
If given path is path to a subdirectory (but not a submodule directory!) it will be replaced
|
||||||
|
with abspath to top-level directory of the repository.
|
||||||
|
If None, current cwd is used.
|
||||||
|
@type main_repo_abspath: string
|
||||||
|
"""
|
||||||
|
if extra is None:
|
||||||
|
extra = []
|
||||||
|
|
||||||
|
if main_repo_abspath is None:
|
||||||
|
main_repo_abspath = path.abspath('')
|
||||||
|
elif not path.isabs(main_repo_abspath):
|
||||||
|
raise ValueError("You MUST pass absolute path to the main git repository.")
|
||||||
|
|
||||||
|
try:
|
||||||
|
self.run_shell("[ -d .git ] || git rev-parse --git-dir > /dev/null 2>&1", main_repo_abspath)
|
||||||
|
except Exception as e:
|
||||||
|
raise ValueError("Not a git repository (or any of the parent directories).")
|
||||||
|
|
||||||
|
main_repo_abspath = path.abspath(self.read_git_shell('git rev-parse --show-toplevel', main_repo_abspath).rstrip())
|
||||||
|
|
||||||
|
self.prefix = prefix
|
||||||
|
self.exclude = exclude
|
||||||
|
self.extra = extra
|
||||||
|
self.force_sub = force_sub
|
||||||
|
self.main_repo_abspath = main_repo_abspath
|
||||||
|
|
||||||
|
def create(self, output_path, dry_run=False, output_format=None):
|
||||||
|
"""
|
||||||
|
Create the archive at output_file_path.
|
||||||
|
|
||||||
|
Type of the archive is determined either by extension of output_file_path or by output_format.
|
||||||
|
Supported formats are: gz, zip, bz2, xz, tar, tgz, txz
|
||||||
|
|
||||||
|
@param output_path: Output file path.
|
||||||
|
@type output_path: string
|
||||||
|
|
||||||
|
@param dry_run: Determines whether create should do nothing but print what it would archive.
|
||||||
|
@type dry_run: bool
|
||||||
|
|
||||||
|
@param output_format: Determines format of the output archive. If None, format is determined from extension
|
||||||
|
of output_file_path.
|
||||||
|
@type output_format: string
|
||||||
|
"""
|
||||||
|
if output_format is None:
|
||||||
|
file_name, file_ext = path.splitext(output_path)
|
||||||
|
output_format = file_ext[len(extsep):].lower()
|
||||||
|
self.LOG.debug("Output format is not explicitly set, determined format is {}.".format(output_format))
|
||||||
|
|
||||||
|
if not dry_run:
|
||||||
|
if output_format == 'zip':
|
||||||
|
archive = ZipFile(path.abspath(output_path), 'w')
|
||||||
|
|
||||||
|
def add_file(file_path, arcname):
|
||||||
|
if not path.islink(file_path):
|
||||||
|
archive.write(file_path, arcname, ZIP_DEFLATED)
|
||||||
|
else:
|
||||||
|
i = ZipInfo(arcname)
|
||||||
|
i.create_system = 3
|
||||||
|
i.external_attr = 0xA1ED0000
|
||||||
|
archive.writestr(i, readlink(file_path))
|
||||||
|
elif output_format in ['tar', 'bz2', 'gz', 'xz', 'tgz', 'txz']:
|
||||||
|
if output_format == 'tar':
|
||||||
|
t_mode = 'w'
|
||||||
|
elif output_format == 'tgz':
|
||||||
|
t_mode = 'w:gz'
|
||||||
|
elif output_format == 'txz':
|
||||||
|
t_mode = 'w:xz'
|
||||||
|
else:
|
||||||
|
t_mode = 'w:{}'.format(output_format)
|
||||||
|
|
||||||
|
archive = tarfile.open(path.abspath(output_path), t_mode)
|
||||||
|
add_file = lambda file_path, arcname: archive.add(file_path, arcname)
|
||||||
|
else:
|
||||||
|
raise RuntimeError("Unknown format: {}".format(output_format))
|
||||||
|
|
||||||
|
def archiver(file_path, arcname):
|
||||||
|
self.LOG.debug("Compressing {} => {}...".format(file_path, arcname))
|
||||||
|
add_file(file_path, arcname)
|
||||||
|
else:
|
||||||
|
archive = None
|
||||||
|
archiver = lambda file_path, arcname: self.LOG.info("{} => {}".format(file_path, arcname))
|
||||||
|
|
||||||
|
self.archive_all_files(archiver)
|
||||||
|
|
||||||
|
if archive is not None:
|
||||||
|
archive.close()
|
||||||
|
|
||||||
|
def get_exclude_patterns(self, repo_abspath, repo_file_paths):
|
||||||
|
"""
|
||||||
|
Returns exclude patterns for a given repo. It looks for .gitattributes files in repo_file_paths.
|
||||||
|
|
||||||
|
Resulting dictionary will contain exclude patterns per path (relative to the repo_abspath).
|
||||||
|
E.g. {('.', 'Catalyst', 'Editions', 'Base'), ['Foo*', '*Bar']}
|
||||||
|
|
||||||
|
@type repo_abspath: string
|
||||||
|
@param repo_abspath: Absolute path to the git repository.
|
||||||
|
|
||||||
|
@type repo_file_paths: list
|
||||||
|
@param repo_file_paths: List of paths relative to the repo_abspath that are under git control.
|
||||||
|
|
||||||
|
@rtype: dict
|
||||||
|
@return: Dictionary representing exclude patterns.
|
||||||
|
Keys are tuples of strings. Values are lists of strings.
|
||||||
|
Returns None if self.exclude is not set.
|
||||||
|
"""
|
||||||
|
if not self.exclude:
|
||||||
|
return None
|
||||||
|
|
||||||
|
def read_attributes(attributes_abspath):
|
||||||
|
patterns = []
|
||||||
|
if path.isfile(attributes_abspath):
|
||||||
|
attributes = open(attributes_abspath, 'r').readlines()
|
||||||
|
patterns = []
|
||||||
|
for line in attributes:
|
||||||
|
tokens = line.strip().split()
|
||||||
|
if "export-ignore" in tokens[1:]:
|
||||||
|
patterns.append(tokens[0])
|
||||||
|
return patterns
|
||||||
|
|
||||||
|
exclude_patterns = {(): []}
|
||||||
|
|
||||||
|
# There may be no gitattributes.
|
||||||
|
try:
|
||||||
|
global_attributes_abspath = self.read_shell("git config --get core.attributesfile", repo_abspath).rstrip()
|
||||||
|
exclude_patterns[()] = read_attributes(global_attributes_abspath)
|
||||||
|
except:
|
||||||
|
# And it's valid to not have them.
|
||||||
|
pass
|
||||||
|
|
||||||
|
for attributes_abspath in [path.join(repo_abspath, f) for f in repo_file_paths if f.endswith(".gitattributes")]:
|
||||||
|
# Each .gitattributes affects only files within its directory.
|
||||||
|
key = tuple(self.get_path_components(repo_abspath, path.dirname(attributes_abspath)))
|
||||||
|
exclude_patterns[key] = read_attributes(attributes_abspath)
|
||||||
|
|
||||||
|
local_attributes_abspath = path.join(repo_abspath, ".git", "info", "attributes")
|
||||||
|
key = tuple(self.get_path_components(repo_abspath, repo_abspath))
|
||||||
|
|
||||||
|
if key in exclude_patterns:
|
||||||
|
exclude_patterns[key].extend(read_attributes(local_attributes_abspath))
|
||||||
|
else:
|
||||||
|
exclude_patterns[key] = read_attributes(local_attributes_abspath)
|
||||||
|
|
||||||
|
return exclude_patterns
|
||||||
|
|
||||||
|
def is_file_excluded(self, repo_abspath, repo_file_path, exclude_patterns):
|
||||||
|
"""
|
||||||
|
Checks whether file at a given path is excluded.
|
||||||
|
|
||||||
|
@type repo_abspath: string
|
||||||
|
@param repo_abspath: Absolute path to the git repository.
|
||||||
|
|
||||||
|
@type repo_file_path: string
|
||||||
|
@param repo_file_path: Path to a file within repo_abspath.
|
||||||
|
|
||||||
|
@type exclude_patterns: dict
|
||||||
|
@param exclude_patterns: Exclude patterns with format specified for get_exclude_patterns.
|
||||||
|
|
||||||
|
@rtype: bool
|
||||||
|
@return: True if file should be excluded. Otherwise False.
|
||||||
|
"""
|
||||||
|
if exclude_patterns is None or not len(exclude_patterns):
|
||||||
|
return False
|
||||||
|
|
||||||
|
from fnmatch import fnmatch
|
||||||
|
|
||||||
|
file_name = path.basename(repo_file_path)
|
||||||
|
components = self.get_path_components(repo_abspath, path.join(repo_abspath, path.dirname(repo_file_path)))
|
||||||
|
|
||||||
|
is_excluded = False
|
||||||
|
# We should check all patterns specified in intermediate directories to the given file.
|
||||||
|
# At the end we should also check for the global patterns (key '()' or empty tuple).
|
||||||
|
while not is_excluded:
|
||||||
|
key = tuple(components)
|
||||||
|
if key in exclude_patterns:
|
||||||
|
patterns = exclude_patterns[key]
|
||||||
|
for p in patterns:
|
||||||
|
if fnmatch(file_name, p) or fnmatch(repo_file_path, p):
|
||||||
|
self.LOG.debug("Exclude pattern matched {}: {}".format(p, repo_file_path))
|
||||||
|
is_excluded = True
|
||||||
|
|
||||||
|
if not len(components):
|
||||||
|
break
|
||||||
|
|
||||||
|
components.pop()
|
||||||
|
|
||||||
|
return is_excluded
|
||||||
|
|
||||||
|
def archive_all_files(self, archiver):
|
||||||
|
"""
|
||||||
|
Archive all files using archiver.
|
||||||
|
|
||||||
|
@param archiver: Function that accepts 2 arguments: abspath to file on the system and relative path within archive.
|
||||||
|
"""
|
||||||
|
for file_path in self.extra:
|
||||||
|
archiver(path.abspath(file_path), path.join(self.prefix, file_path))
|
||||||
|
|
||||||
|
for file_path in self.walk_git_files():
|
||||||
|
archiver(path.join(self.main_repo_abspath, file_path), path.join(self.prefix, file_path))
|
||||||
|
|
||||||
|
def walk_git_files(self, repo_path=''):
|
||||||
|
"""
|
||||||
|
An iterator method that yields a file path relative to main_repo_abspath
|
||||||
|
for each file that should be included in the archive.
|
||||||
|
Skips those that match the exclusion patterns found in
|
||||||
|
any discovered .gitattributes files along the way.
|
||||||
|
|
||||||
|
Recurs into submodules as well.
|
||||||
|
|
||||||
|
@type repo_path: string
|
||||||
|
@param repo_path: Path to the git submodule repository relative to main_repo_abspath.
|
||||||
|
|
||||||
|
@rtype: iterator
|
||||||
|
@return: Iterator to traverse files under git control relative to main_repo_abspath.
|
||||||
|
"""
|
||||||
|
repo_abspath = path.join(self.main_repo_abspath, repo_path)
|
||||||
|
repo_file_paths = self.read_git_shell("git ls-files --cached --full-name --no-empty-directory", repo_abspath).splitlines()
|
||||||
|
exclude_patterns = self.get_exclude_patterns(repo_abspath, repo_file_paths)
|
||||||
|
|
||||||
|
for repo_file_path in repo_file_paths:
|
||||||
|
# Git puts path in quotes if file path has unicode characters.
|
||||||
|
repo_file_path = repo_file_path.strip('"') # file path relative to current repo
|
||||||
|
file_name = path.basename(repo_file_path)
|
||||||
|
main_repo_file_path = path.join(repo_path, repo_file_path) # file path relative to the main repo
|
||||||
|
|
||||||
|
# Only list symlinks and files that don't start with git.
|
||||||
|
if file_name.startswith(".git") or (not path.islink(main_repo_file_path) and path.isdir(main_repo_file_path)):
|
||||||
|
continue
|
||||||
|
|
||||||
|
if self.is_file_excluded(repo_abspath, repo_file_path, exclude_patterns):
|
||||||
|
continue
|
||||||
|
|
||||||
|
yield main_repo_file_path
|
||||||
|
|
||||||
|
if self.force_sub:
|
||||||
|
self.run_shell("git submodule init", repo_abspath)
|
||||||
|
self.run_shell("git submodule update", repo_abspath)
|
||||||
|
|
||||||
|
for submodule_path in self.read_shell("git submodule --quiet foreach 'pwd -P'", repo_abspath).splitlines():
|
||||||
|
# Shell command returns absolute paths to submodules.
|
||||||
|
submodule_path = path.relpath(submodule_path, self.main_repo_abspath)
|
||||||
|
for file_path in self.walk_git_files(submodule_path):
|
||||||
|
yield file_path
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def get_path_components(repo_abspath, abspath):
|
||||||
|
"""
|
||||||
|
Split given abspath into components relative to repo_abspath.
|
||||||
|
These components are primarily used as unique keys of files and folders within a repository.
|
||||||
|
|
||||||
|
E.g. if repo_abspath is '/Documents/Hobby/ParaView/' and abspath is
|
||||||
|
'/Documents/Hobby/ParaView/Catalyst/Editions/Base/', function will return:
|
||||||
|
['.', 'Catalyst', 'Editions', 'Base']
|
||||||
|
|
||||||
|
First element is always '.' (concrete symbol depends on OS).
|
||||||
|
|
||||||
|
@param repo_abspath: Absolute path to the git repository. Normalized via os.path.normpath.
|
||||||
|
@type repo_abspath: string
|
||||||
|
|
||||||
|
@param abspath: Absolute path to a file within repo_abspath. Normalized via os.path.normpath.
|
||||||
|
@type abspath: string
|
||||||
|
|
||||||
|
@return: List of path components.
|
||||||
|
@rtype: list
|
||||||
|
"""
|
||||||
|
repo_abspath = path.normpath(repo_abspath)
|
||||||
|
abspath = path.normpath(abspath)
|
||||||
|
|
||||||
|
if not path.isabs(repo_abspath):
|
||||||
|
raise ValueError("repo_abspath MUST be absolute path.")
|
||||||
|
|
||||||
|
if not path.isabs(abspath):
|
||||||
|
raise ValueError("abspath MUST be absoulte path.")
|
||||||
|
|
||||||
|
if not path.commonprefix([repo_abspath, abspath]):
|
||||||
|
raise ValueError("abspath (\"{}\") MUST have common prefix with repo_abspath (\"{}\")".format(abspath, repo_abspath))
|
||||||
|
|
||||||
|
components = []
|
||||||
|
|
||||||
|
while not abspath == repo_abspath:
|
||||||
|
abspath, tail = path.split(abspath)
|
||||||
|
|
||||||
|
if tail:
|
||||||
|
components.insert(0, tail)
|
||||||
|
|
||||||
|
components.insert(0, curdir)
|
||||||
|
return components
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def run_shell(cmd, cwd=None):
|
||||||
|
"""
|
||||||
|
Runs shell command.
|
||||||
|
|
||||||
|
@type cmd: string
|
||||||
|
@param cmd: Command to be executed.
|
||||||
|
|
||||||
|
@type cwd: string
|
||||||
|
@param cwd: Working directory.
|
||||||
|
|
||||||
|
@rtype: int
|
||||||
|
@return: Return code of the command.
|
||||||
|
|
||||||
|
@raise CalledProcessError: Raises exception if return code of the command is non-zero.
|
||||||
|
"""
|
||||||
|
p = Popen(cmd, shell=True, cwd=cwd)
|
||||||
|
p.wait()
|
||||||
|
|
||||||
|
if p.returncode:
|
||||||
|
raise CalledProcessError(returncode=p.returncode, cmd=cmd)
|
||||||
|
|
||||||
|
return p.returncode
|
||||||
|
|
||||||
|
    @staticmethod
    def read_shell(cmd, cwd=None, encoding='utf-8'):
        """
        Runs shell command and reads output.

        @type cmd: string
        @param cmd: Command to be executed.

        @type cwd: string
        @param cwd: Working directory.

        @type encoding: string
        @param encoding: Encoding used to decode bytes returned by Popen into string.

        @rtype: string
        @return: Output of the command.

        @raise CalledProcessError: Raises exception if return code of the command is non-zero.
        """
        p = Popen(cmd, shell=True, stdout=PIPE, cwd=cwd)
        output, _ = p.communicate()
        output = output.decode(encoding)

        if p.returncode:
            if sys.version_info > (2, 6):
                raise CalledProcessError(returncode=p.returncode, cmd=cmd, output=output)
            else:
                raise CalledProcessError(returncode=p.returncode, cmd=cmd)

        return output
    @staticmethod
    def read_git_shell(cmd, cwd=None):
        """
        Runs git shell command, reads output and decodes it into unicode string.

        @type cmd: string
        @param cmd: Command to be executed.

        @type cwd: string
        @param cwd: Working directory.

        @rtype: string
        @return: Output of the command.

        @raise CalledProcessError: Raises exception if return code of the command is non-zero.
        """
        p = Popen(cmd, shell=True, stdout=PIPE, cwd=cwd)
        output, _ = p.communicate()
        # git C-quotes non-ASCII paths in its output; undo the escaping and
        # reinterpret the resulting raw bytes as UTF-8.
        output = output.decode('unicode_escape').encode('raw_unicode_escape').decode('utf-8')

        if p.returncode:
            if sys.version_info > (2, 6):
                raise CalledProcessError(returncode=p.returncode, cmd=cmd, output=output)
            else:
                raise CalledProcessError(returncode=p.returncode, cmd=cmd)

        return output
if __name__ == '__main__':
    from optparse import OptionParser

    parser = OptionParser(usage="usage: %prog [-v] [--prefix PREFIX] [--no-exclude] [--force-submodules] [--extra EXTRA1 [EXTRA2]] [--dry-run] OUTPUT_FILE",
                          version="%prog {}".format(__version__))

    parser.add_option('--prefix',
                      type='string',
                      dest='prefix',
                      default=None,
                      help="prepend PREFIX to each filename in the archive. OUTPUT_FILE name is used by default to avoid tarbomb. You can set it to '' in order to explicitly request tarbomb")

    parser.add_option('-v', '--verbose',
                      action='store_true',
                      dest='verbose',
                      help='enable verbose mode')

    parser.add_option('--no-exclude',
                      action='store_false',
                      dest='exclude',
                      default=True,
                      help="don't read .gitattributes files for patterns containing export-ignore attrib")

    parser.add_option('--force-submodules',
                      action='store_true',
                      dest='force_sub',
                      help="force a git submodule init && git submodule update at each level before iterating submodules")

    parser.add_option('--extra',
                      action='append',
                      dest='extra',
                      default=[],
                      help="any additional files to include in the archive")

    parser.add_option('--dry-run',
                      action='store_true',
                      dest='dry_run',
                      help="don't actually archive anything, just show what would be done")

    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error("You must specify exactly one output file")

    output_file_path = args[0]

    if path.isdir(output_file_path):
        parser.error("You cannot use directory as output")

    # avoid tarbomb
    if options.prefix is not None:
        options.prefix = path.join(options.prefix, '')
    else:
        import re

        output_name = path.basename(output_file_path)
        output_name = re.sub(r'(\.zip|\.tar|\.tgz|\.txz|\.gz|\.bz2|\.xz|\.tar\.gz|\.tar\.bz2|\.tar\.xz)$', '', output_name) or "Archive"
        options.prefix = path.join(output_name, '')

    try:
        handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter('%(message)s'))
        GitArchiver.LOG.addHandler(handler)
        GitArchiver.LOG.setLevel(logging.DEBUG if options.verbose else logging.INFO)
        archiver = GitArchiver(options.prefix,
                               options.exclude,
                               options.force_sub,
                               options.extra)
        archiver.create(output_file_path, options.dry_run)
    except Exception as e:
        parser.exit(2, "{}\n".format(e))

    sys.exit(0)
scylla-jmx (run script)
@@ -1,21 +1,38 @@
-#!/bin/sh
+#!/bin/bash
 #
 # Copyright (C) 2015 Cloudius Systems, Ltd.
 
-JMX_PORT=7199
-API_ADDR="127.0.0.1"
-API_PORT="10000"
+JMX_PORT="7199"
+JMX_ADDR=
+API_ADDR=
+API_PORT=
+
+CONF_FILE=""
+DEBUG=""
 PARAM_HELP="-h"
 PARAM_JMX_PORT="-jp"
+PARAM_JMX_ADDR="-ja"
 PARAM_API_PORT="-p"
 PARAM_ADDR="-a"
 PARAM_LOCATION="-l"
 LOCATION="target"
+LOCATION_SCRIPTS="scripts"
+PARAM_FILE="-cf"
+ALLOW_REMOTE="-r"
+ALLOW_DEBUG="-d"
+REMOTE=0
+HOSTNAME=`hostname`
+
+PROPERTIES=
+JMX_AUTH=-Dcom.sun.management.jmxremote.authenticate=false
+JMX_SSL=-Dcom.sun.management.jmxremote.ssl=false
+
 print_help() {
 cat <<HLPEND
 
-scylla-jmx [$PARAM_HELP] [$PARAM_PORT port] [$PARAM_ADDR address]
+scylla-jmx [$PARAM_HELP] [$PARAM_API_PORT port] [$PARAM_ADDR address] [$PARAM_JMX_PORT port] [$PARAM_FILE file]
 
 This script is used to run the jmx proxy
 
@@ -27,7 +44,11 @@ This script receives the following command line arguments:
 $PARAM_JMX_PORT <port> - The jmx port to open
 $PARAM_API_PORT <port> - The API port to connect to
 $PARAM_ADDR <address> - The API address to connect to
+$PARAM_JMX_ADDR <address> - JMX bind address
+$PARAM_FILE <file> - A configuration file to use
 $PARAM_LOCATION <location> - The location of the jmx proxy jar file
+$ALLOW_REMOTE - When set allow remote jmx connectivity
+$ALLOW_DEBUG - When set open debug ports for remote debugger
 HLPEND
 }
 
@@ -35,31 +56,88 @@ while test "$#" -ne 0
 do
 case "$1" in
     "$PARAM_API_PORT")
-        API_PORT=$2
+        API_PORT="-Dapiport="$2
         shift 2
         ;;
     "$PARAM_ADDR")
-        API_ADDR=$2
-        shift 2
-        ;;
-    "$PARAM_PORT")
-        API_ADDR=$2
+        API_ADDR="-Dapiaddress="$2
         shift 2
         ;;
     "$PARAM_JMX_PORT")
         JMX_PORT=$2
         shift 2
         ;;
+    "$PARAM_JMX_ADDR")
+        JMX_ADDR=-Dcom.sun.management.jmxremote.host=$2
+        shift 2
+        ;;
     "$PARAM_LOCATION")
         LOCATION=$2
+        LOCATION_SCRIPTS="$2"
         shift 2
        ;;
+    "$PARAM_FILE")
+        CONF_FILE="-Dapiconfig="$2
+        shift 2
+        ;;
+    "$ALLOW_REMOTE")
+        REMOTE=1
+        shift 1
+        ;;
     "$PARAM_HELP")
         print_help
         exit 0
         ;;
+    "$ALLOW_DEBUG")
+        DEBUG="-agentlib:jdwp=transport=dt_socket,address=127.0.0.1:7690,server=y,suspend=n"
+        shift 1
+        ;;
+    -Dcom.sun.management.jmxremote.host=*)
+        JMX_ADDR=$1
+        HOSTNAME=${1:36}
+        shift
+        ;;
+    -Dcom.sun.management.jmxremote.authenticate=*)
+        JMX_AUTH=$1
+        shift 1
+        ;;
+    -Dcom.sun.management.jmxremote.ssl=*)
+        JMX_SSL=$1
+        shift 1
+        ;;
+    -Dcom.sun.management.jmxremote.local.only=*)
+        JMX_LOCAL=$1
+        shift 1
+        ;;
+    -D*)
+        PROPERTIES="$PROPERTIES $1"
+        shift 1
+        ;;
     *)
+        echo "Unknown parameter: $1"
+        print_help
+        exit 1
 esac
 done
 
-java -Dapiaddress=$API_ADDR -Dapiport=$API_PORT -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMX_PORT -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT -Dcom.sun.management.jmxremote.local.only=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -jar $LOCATION/urchin-mbean-1.0.jar
+if [ $REMOTE -eq 0 ]; then
+    if [ -z $JMX_ADDR ]; then
+        JMX_ADDR=-Dcom.sun.management.jmxremote.host=localhost
+    fi
+    HOSTNAME=localhost
+else
+    if [ -z $JMX_LOCAL ]; then
+        JMX_LOCAL=-Dcom.sun.management.jmxremote.local.only=false
+    fi
+fi
+
+"$LOCATION_SCRIPTS"/symlinks/scylla-jmx $DEBUG \
+    $API_PORT $API_ADDR $CONF_FILE -Xmx256m -XX:+UseSerialGC \
+    -XX:+HeapDumpOnOutOfMemoryError \
+    $JMX_AUTH $JMX_SSL $JMX_ADDR $JMX_LOCAL \
+    --add-exports java.management/com.sun.jmx.mbeanserver=ALL-UNNAMED \
+    --add-exports java.management/com.sun.jmx.interceptor=ALL-UNNAMED \
+    -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$JMX_PORT \
+    -Djava.rmi.server.hostname=$HOSTNAME -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT \
+    -Djavax.management.builder.initial=com.scylladb.jmx.utils.APIBuilder \
+    $PROPERTIES -jar $LOCATION/scylla-jmx-1.1.jar
scripts/symlinks/scylla-jmx (new symbolic link, 1 line)
@@ -0,0 +1 @@
/usr/bin/java

scylla-apiclient/pom.xml (new file, 99 lines)
@@ -0,0 +1,99 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <artifactId>scylla-apiclient</artifactId>
    <packaging>jar</packaging>
    <version>1.1</version>

    <parent>
        <relativePath>../scylla-jmx-parent/pom.xml</relativePath>
        <groupId>it.cavallium.scylladb.jmx</groupId>
        <artifactId>scylla-jmx-parent</artifactId>
        <version>1.1</version>
    </parent>

    <name>Scylla REST API client</name>

    <properties>
        <jackson.version>2.14.0</jackson.version>
        <jackson.databind.version>2.14.0</jackson.databind.version>
    </properties>

    <dependencies>
        <dependency>
            <groupId>org.eclipse.parsson</groupId>
            <artifactId>parsson</artifactId>
            <version>1.1.1</version>
        </dependency>
        <dependency>
            <groupId>org.yaml</groupId>
            <artifactId>snakeyaml</artifactId>
            <version>1.33</version>
        </dependency>
        <dependency>
            <groupId>org.glassfish.jersey.core</groupId>
            <artifactId>jersey-common</artifactId>
            <version>3.1.0</version>
        </dependency>
        <dependency>
            <groupId>jakarta.ws.rs</groupId>
            <artifactId>jakarta.ws.rs-api</artifactId>
            <version>3.1.0</version>
        </dependency>
        <dependency>
            <groupId>org.glassfish.jersey.core</groupId>
            <artifactId>jersey-client</artifactId>
            <version>3.1.0</version>
        </dependency>
        <dependency>
            <groupId>org.glassfish.jersey.inject</groupId>
            <artifactId>jersey-hk2</artifactId>
            <version>3.1.0</version>
        </dependency>
        <dependency>
            <groupId>jakarta.json</groupId>
            <artifactId>jakarta.json-api</artifactId>
            <version>2.1.1</version>
        </dependency>
        <dependency>
            <groupId>com.google.guava</groupId>
            <artifactId>guava</artifactId>
            <version>31.1-jre</version>
        </dependency>
        <dependency>
            <groupId>jakarta.activation</groupId>
            <artifactId>jakarta.activation-api</artifactId>
            <version>2.1.1</version>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-annotations</artifactId>
            <version>${jackson.version}</version>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
            <version>${jackson.databind.version}</version>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.jakarta.rs</groupId>
            <artifactId>jackson-jakarta-rs-json-provider</artifactId>
            <version>2.14.1</version>
        </dependency>
    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
                <version>3.10.1</version>
                <configuration>
                    <release>11</release>
                </configuration>
            </plugin>
        </plugins>
    </build>

</project>
APIClient.java
@@ -1,44 +1,64 @@
 /*
  * Copyright 2015 Cloudius Systems
  */
-package com.cloudius.urchin.api;
+package com.scylladb.jmx.api;
 
+import com.fasterxml.jackson.jakarta.rs.json.JacksonJsonProvider;
+import jakarta.json.Json;
+import jakarta.json.JsonArray;
+import jakarta.json.JsonObject;
+import jakarta.json.JsonReader;
+import jakarta.json.JsonReaderFactory;
+import jakarta.json.JsonString;
+import jakarta.ws.rs.ProcessingException;
+import jakarta.ws.rs.client.Client;
+import jakarta.ws.rs.client.ClientBuilder;
+import jakarta.ws.rs.client.Entity;
+import jakarta.ws.rs.client.Invocation;
+import jakarta.ws.rs.client.WebTarget;
+import jakarta.ws.rs.core.MediaType;
+import jakarta.ws.rs.core.MultivaluedMap;
+import jakarta.ws.rs.core.Response;
 import java.io.StringReader;
+import java.lang.System.Logger.Level;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
+import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.Set;
+import java.util.function.BiFunction;
+import java.util.logging.Logger;
 
-import javax.json.Json;
-import javax.json.JsonArray;
-import javax.json.JsonObject;
-import javax.json.JsonReader;
-import javax.json.JsonReaderFactory;
-import javax.json.JsonString;
 import javax.management.openmbean.TabularData;
 import javax.management.openmbean.TabularDataSupport;
-import javax.ws.rs.client.Client;
-import javax.ws.rs.client.ClientBuilder;
-import javax.ws.rs.client.Entity;
-import javax.ws.rs.client.Invocation;
-import javax.ws.rs.client.WebTarget;
-import javax.ws.rs.core.MediaType;
-import javax.ws.rs.core.MultivaluedMap;
-import javax.ws.rs.core.Response;
 
 import org.glassfish.jersey.client.ClientConfig;
 
-import com.cloudius.urchin.utils.EstimatedHistogram;
-import com.cloudius.urchin.utils.SnapshotDetailsTabularData;
-import com.yammer.metrics.core.HistogramValues;
+import com.scylladb.jmx.api.utils.SnapshotDetailsTabularData;
 
 public class APIClient {
-    Map<String, CacheEntry> cache = new HashMap<String, CacheEntry>();
+    private Map<String, CacheEntry> cache = new HashMap<String, CacheEntry>();
+    private final APIConfig config;
+    private final ClientConfig clientConfig;
+    private final Client client;
+    private JsonReaderFactory factory = Json.createReaderFactory(null);
 
-    String getCacheKey(String key, MultivaluedMap<String, String> param, long duration) {
+    private static final Logger logger = Logger.getLogger(APIClient.class.getName());
+
+    public APIClient(APIConfig config) {
+        this.config = config;
+        this.clientConfig = new ClientConfig();
+        clientConfig.register(new JacksonJsonProvider());
+        this.client = ClientBuilder.newClient(clientConfig);
+    }
+
+    private String getCacheKey(String key, MultivaluedMap<String, String> param, long duration) {
         if (duration <= 0) {
             return null;
         }
@@ -53,36 +73,31 @@ public class APIClient {
         return key;
     }
 
-    String getStringFromCache(String key, long duration) {
+    private String getStringFromCache(String key, long duration) {
         if (key == null) {
             return null;
         }
         CacheEntry value = cache.get(key);
-        return (value!= null && value.valid(duration))? value.stringValue() : null;
+        return (value != null && value.valid(duration)) ? value.stringValue() : null;
     }
 
-    EstimatedHistogram getEstimatedHistogramFromCache(String key, long duration) {
+    private JsonObject getJsonObjectFromCache(String key, long duration) {
         if (key == null) {
             return null;
         }
         CacheEntry value = cache.get(key);
-        return (value!= null && value.valid(duration))? value.getEstimatedHistogram() : null;
+        return (value != null && value.valid(duration)) ? value.jsonObject() : null;
     }
 
-    JsonReaderFactory factory = Json.createReaderFactory(null);
-    private static final java.util.logging.Logger logger = java.util.logging.Logger
-            .getLogger(APIClient.class.getName());
-
-    public static String getBaseUrl() {
-        return "http://" + System.getProperty("apiaddress", "localhost") + ":"
-                + System.getProperty("apiport", "10000");
+    private String getBaseUrl() {
+        return config.getBaseUrl();
     }
 
     public Invocation.Builder get(String path, MultivaluedMap<String, String> queryParams) {
-        Client client = ClientBuilder.newClient( new ClientConfig());
         WebTarget webTarget = client.target(getBaseUrl()).path(path);
         if (queryParams != null) {
             for (Entry<String, List<String>> qp : queryParams.entrySet()) {
                 for (String e : qp.getValue()) {
                     webTarget = webTarget.queryParam(qp.getKey(), e);
                 }
@@ -96,22 +111,34 @@ public class APIClient {
     }
 
     public Response post(String path, MultivaluedMap<String, String> queryParams) {
-        Response response = get(path, queryParams).post(Entity.entity(null, MediaType.TEXT_PLAIN));
-        if (response.getStatus() != Response.Status.OK.getStatusCode() ) {
-            throw getException(response.readEntity(String.class));
-        }
-        return response;
+        return post(path, queryParams, null);
+    }
+
+    public Response post(String path, MultivaluedMap<String, String> queryParams, Object object, String type) {
+        try {
+            Response response = get(path, queryParams).post(Entity.entity(object, type));
+            if (response.getStatus() != Response.Status.OK.getStatusCode()) {
+                throw getException("Scylla API server HTTP POST to URL '" + path + "' failed",
+                        response.readEntity(String.class));
+            }
+            return response;
+        } catch (ProcessingException e) {
+            throw new IllegalStateException("Unable to connect to Scylla API server: " + e.getMessage());
+        }
+    }
+
+    public Response post(String path, MultivaluedMap<String, String> queryParams, Object object) {
+        return post(path, queryParams, object, MediaType.TEXT_PLAIN);
     }
 
     public void post(String path) {
         post(path, null);
     }
 
-    public RuntimeException getException(String txt) {
-        JsonReader reader = factory.createReader(new StringReader(txt));
+    public IllegalStateException getException(String msg, String json) {
+        JsonReader reader = factory.createReader(new StringReader(json));
         JsonObject res = reader.readObject();
-        return new RuntimeException(res.getString("message"));
+        return new IllegalStateException(msg + ": " + res.getString("message"));
     }
 
     public String postGetVal(String path, MultivaluedMap<String, String> queryParams) {
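
For orientation, a minimal sketch of what the reworked error handling means for a caller; the flush endpoint and the APIConfig wiring here are illustrative assumptions, not part of this diff:

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.api.APIConfig;

public class PostExample {
    public static void main(String[] args) {
        // post() now reports both transport failures and non-200 replies as
        // IllegalStateException carrying the URL and the server's JSON "message".
        APIClient client = new APIClient(new APIConfig());
        try {
            client.post("/storage_service/keyspace_flush/system"); // hypothetical endpoint
        } catch (IllegalStateException e) {
            System.err.println(e.getMessage());
        }
    }
}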
@@ -131,40 +158,47 @@ public class APIClient {
             get(path, queryParams).delete();
             return;
         }
-        get(path).delete();
+        Response response = get(path).delete();
+        if (response.getStatus() != Response.Status.OK.getStatusCode()) {
+            throw getException("Scylla API server HTTP delete to URL '" + path + "' failed",
+                    response.readEntity(String.class));
+        }
     }
 
     public void delete(String path) {
         delete(path, null);
     }
 
-    public String getRawValue(String string,
-            MultivaluedMap<String, String> queryParams, long duration) {
-        if (string.equals("")) {
-            return "";
-        }
-        String key = getCacheKey(string, queryParams, duration);
-        String res = getStringFromCache(key, duration);
-        if (res != null) {
-            return res;
-        }
-        Response response = get(string, queryParams).get(Response.class);
-
-        if (response.getStatus() != Response.Status.OK.getStatusCode() ) {
-            // TBD
-            // We are currently not caching errors,
-            // it should be reconsider.
-            throw getException(response.readEntity(String.class));
-        }
-        res = response.readEntity(String.class);
-        if (duration > 0) {
-            cache.put(key, new CacheEntry(res));
-        }
-        return res;
+    public String getRawValue(String string, MultivaluedMap<String, String> queryParams, long duration) {
+        try {
+            if (string.equals("")) {
+                return "";
+            }
+            String key = getCacheKey(string, queryParams, duration);
+            String res = getStringFromCache(key, duration);
+            if (res != null) {
+                return res;
+            }
+            Response response = get(string, queryParams).get(Response.class);
+
+            if (response.getStatus() != Response.Status.OK.getStatusCode()) {
+                // TBD
+                // We are currently not caching errors,
+                // it should be reconsider.
+                throw getException("Scylla API server HTTP GET to URL '" + string + "' failed",
+                        response.readEntity(String.class));
+            }
+            res = response.readEntity(String.class);
+            if (duration > 0) {
+                cache.put(key, new CacheEntry(res));
+            }
+            return res;
+        } catch (ProcessingException e) {
+            throw new IllegalStateException("Unable to connect to Scylla API server: " + e.getMessage());
+        }
     }
 
-    public String getRawValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public String getRawValue(String string, MultivaluedMap<String, String> queryParams) {
         return getRawValue(string, queryParams, 0);
     }
 
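
A sketch of how the duration-based cache behaves from the caller's side; the endpoint path and the 10-second window are assumptions for illustration:

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.api.APIConfig;

public class CacheExample {
    public static void main(String[] args) {
        APIClient client = new APIClient(new APIConfig());
        // The first call performs an HTTP GET and stores a CacheEntry under the
        // path+params key; the second call lands inside the 10 s window and is
        // answered from the cache without touching the REST server again.
        String a = client.getRawValue("/storage_service/hostid/local", null, 10000); // hypothetical endpoint
        String b = client.getRawValue("/storage_service/hostid/local", null, 10000);
        System.out.println(a.equals(b)); // true: same cached payload
    }
}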
@@ -177,23 +211,19 @@ public class APIClient {
     }
 
     public String getStringValue(String string, MultivaluedMap<String, String> queryParams) {
-        return getRawValue(string,
-                queryParams).replaceAll("^\"|\"$", "");
+        return getRawValue(string, queryParams).replaceAll("^\"|\"$", "");
     }
 
     public String getStringValue(String string, MultivaluedMap<String, String> queryParams, long duration) {
-        return getRawValue(string,
-                queryParams, duration).replaceAll("^\"|\"$", "");
+        return getRawValue(string, queryParams, duration).replaceAll("^\"|\"$", "");
     }
 
     public String getStringValue(String string) {
         return getStringValue(string, null);
     }
 
-    public JsonReader getReader(String string,
-            MultivaluedMap<String, String> queryParams) {
-        return factory.createReader(new StringReader(getRawValue(string,
-                queryParams)));
+    public JsonReader getReader(String string, MultivaluedMap<String, String> queryParams) {
+        return factory.createReader(new StringReader(getRawValue(string, queryParams)));
     }
 
     public JsonReader getReader(String string) {
@@ -205,8 +235,7 @@ public class APIClient {
         return val.toArray(new String[val.size()]);
     }
 
-    public int getIntValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public int getIntValue(String string, MultivaluedMap<String, String> queryParams) {
         return Integer.parseInt(getRawValue(string, queryParams));
     }
 
@@ -214,6 +243,19 @@ public class APIClient {
         return getIntValue(string, null);
     }
 
+    public static <T> BiFunction<APIClient, String, T> getReader(Class<T> type) {
+        if (type == String.class) {
+            return (c, s) -> type.cast(c.getRawValue(s));
+        } else if (type == Integer.class) {
+            return (c, s) -> type.cast(c.getIntValue(s));
+        } else if (type == Double.class) {
+            return (c, s) -> type.cast(c.getDoubleValue(s));
+        } else if (type == Long.class) {
+            return (c, s) -> type.cast(c.getLongValue(s));
+        }
+        throw new IllegalArgumentException(type.getName());
+    }
+
     public boolean getBooleanValue(String string) {
         return Boolean.parseBoolean(getRawValue(string));
     }
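
A short usage sketch for the new getReader(Class<T>) factory; the endpoint is a made-up placeholder:

import java.util.function.BiFunction;
import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.api.APIConfig;

public class ReaderExample {
    public static void main(String[] args) {
        APIClient client = new APIClient(new APIConfig());
        // getReader resolves a typed accessor once; the resulting BiFunction
        // can be stored and reused for many (client, path) lookups.
        BiFunction<APIClient, String, Integer> readInt = APIClient.getReader(Integer.class);
        int pending = readInt.apply(client, "/compaction_manager/pending_tasks"); // hypothetical endpoint
        System.out.println(pending);
    }
}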
@@ -222,8 +264,7 @@ public class APIClient {
         return Double.parseDouble(getRawValue(string));
     }
 
-    public List<String> getListStrValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public List<String> getListStrValue(String string, MultivaluedMap<String, String> queryParams) {
         JsonReader reader = getReader(string, queryParams);
         JsonArray arr = reader.readArray();
         List<String> res = new ArrayList<String>(arr.size());
@@ -278,8 +319,7 @@ public class APIClient {
         return join(arr, ",");
     }
 
-    public static String mapToString(Map<String, String> mp, String pairJoin,
-            String joiner) {
+    public static String mapToString(Map<String, String> mp, String pairJoin, String joiner) {
         String res = "";
         if (mp != null) {
             for (String name : mp.keySet()) {
@@ -296,19 +336,15 @@ public class APIClient {
         return mapToString(mp, "=", ",");
     }
 
-    public static boolean set_query_param(
-            MultivaluedMap<String, String> queryParams, String key, String value) {
-        if (queryParams != null && key != null && value != null
-                && !value.equals("")) {
+    public static boolean set_query_param(MultivaluedMap<String, String> queryParams, String key, String value) {
+        if (queryParams != null && key != null && value != null && !value.equals("")) {
             queryParams.add(key, value);
             return true;
         }
         return false;
     }
 
-    public static boolean set_bool_query_param(
-            MultivaluedMap<String, String> queryParams, String key,
-            boolean value) {
+    public static boolean set_bool_query_param(MultivaluedMap<String, String> queryParams, String key, boolean value) {
         if (queryParams != null && key != null && value) {
             queryParams.add(key, "true");
             return true;
@@ -327,8 +363,7 @@ public class APIClient {
         for (int i = 0; i < arr.size(); i++) {
             JsonObject obj = arr.getJsonObject(i);
             if (obj.containsKey("key") && obj.containsKey("value")) {
-                map.put(obj.getString("key"),
-                        listStrFromJArr(obj.getJsonArray("value")));
+                map.put(obj.getString("key"), listStrFromJArr(obj.getJsonArray("value")));
             }
         }
         reader.close();
@@ -350,8 +385,7 @@ public class APIClient {
         for (int i = 0; i < arr.size(); i++) {
             JsonObject obj = arr.getJsonObject(i);
             if (obj.containsKey("key") && obj.containsKey("value")) {
-                map.put(listStrFromJArr(obj.getJsonArray("key")),
-                        listStrFromJArr(obj.getJsonArray("value")));
+                map.put(listStrFromJArr(obj.getJsonArray("key")), listStrFromJArr(obj.getJsonArray("value")));
             }
         }
         reader.close();
@@ -362,8 +396,7 @@ public class APIClient {
         return getMapListStrValue(string, null);
     }
 
-    public Set<String> getSetStringValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public Set<String> getSetStringValue(String string, MultivaluedMap<String, String> queryParams) {
         JsonReader reader = getReader(string, queryParams);
         JsonArray arr = reader.readArray();
         Set<String> res = new HashSet<String>();
@@ -378,14 +411,13 @@ public class APIClient {
         return getSetStringValue(string, null);
     }
 
-    public Map<String, String> getMapStrValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public Map<String, String> getMapStrValue(String string, MultivaluedMap<String, String> queryParams) {
         if (string.equals("")) {
             return null;
         }
         JsonReader reader = getReader(string, queryParams);
         JsonArray arr = reader.readArray();
-        Map<String, String> map = new HashMap<String, String>();
+        Map<String, String> map = new LinkedHashMap<String, String>();
         for (int i = 0; i < arr.size(); i++) {
             JsonObject obj = arr.getJsonObject(i);
             if (obj.containsKey("key") && obj.containsKey("value")) {
@@ -400,8 +432,28 @@ public class APIClient {
         return getMapStrValue(string, null);
     }
 
-    public List<InetAddress> getListInetAddressValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public Map<String, String> getReverseMapStrValue(String string, MultivaluedMap<String, String> queryParams) {
+        if (string.equals("")) {
+            return null;
+        }
+        JsonReader reader = getReader(string, queryParams);
+        JsonArray arr = reader.readArray();
+        Map<String, String> map = new HashMap<String, String>();
+        for (int i = 0; i < arr.size(); i++) {
+            JsonObject obj = arr.getJsonObject(i);
+            if (obj.containsKey("key") && obj.containsKey("value")) {
+                map.put(obj.getString("value"), obj.getString("key"));
+            }
+        }
+        reader.close();
+        return map;
+    }
+
+    public Map<String, String> getReverseMapStrValue(String string) {
+        return getReverseMapStrValue(string, null);
+    }
+
+    public List<InetAddress> getListInetAddressValue(String string, MultivaluedMap<String, String> queryParams) {
         List<String> vals = getListStrValue(string, queryParams);
         List<InetAddress> res = new ArrayList<InetAddress>();
         for (String val : vals) {
@@ -424,23 +476,21 @@ public class APIClient {
         return null;
     }
 
-    private TabularDataSupport getSnapshotData(String ks, JsonArray arr) {
-        TabularDataSupport data = new TabularDataSupport(
-                SnapshotDetailsTabularData.TABULAR_TYPE);
+    private TabularDataSupport getSnapshotData(String key, JsonArray arr) {
+        TabularDataSupport data = new TabularDataSupport(SnapshotDetailsTabularData.TABULAR_TYPE);
 
         for (int i = 0; i < arr.size(); i++) {
             JsonObject obj = arr.getJsonObject(i);
-            if (obj.containsKey("key") && obj.containsKey("cf")) {
-                SnapshotDetailsTabularData.from(obj.getString("key"), ks,
-                        obj.getString("cf"), obj.getInt("total"),
-                        obj.getInt("live"), data);
+            if (obj.containsKey("ks") && obj.containsKey("cf")) {
+                SnapshotDetailsTabularData.from(key, obj.getString("ks"), obj.getString("cf"),
+                        obj.getJsonNumber("total").longValue(), obj.getJsonNumber("live").longValue(), data);
             }
         }
         return data;
     }
 
-    public Map<String, TabularData> getMapStringSnapshotTabularDataValue(
-            String string, MultivaluedMap<String, String> queryParams) {
+    public Map<String, TabularData> getMapStringSnapshotTabularDataValue(String string,
+            MultivaluedMap<String, String> queryParams) {
         if (string.equals("")) {
             return null;
         }
@@ -474,8 +524,7 @@ public class APIClient {
         for (int i = 0; i < arr.size(); i++) {
             try {
                 obj = arr.getJsonObject(i);
-                res.put(InetAddress.getByName(obj.getString("key")),
-                        Float.parseFloat(obj.getString("value")));
+                res.put(InetAddress.getByName(obj.getString("key")), Float.parseFloat(obj.getString("value")));
             } catch (UnknownHostException e) {
                 logger.warning("Bad formatted address " + obj.getString("key"));
             }
@@ -486,13 +535,26 @@ public class APIClient {
     public Map<InetAddress, Float> getMapInetAddressFloatValue(String string) {
         return getMapInetAddressFloatValue(string, null);
     }
-    public Map<String, Long> getMapStringLongValue(String string) {
-        // TODO Auto-generated method stub
-        return null;
+
+    public Map<String, Long> getMapStringLongValue(String string, MultivaluedMap<String, String> queryParams) {
+        Map<String, Long> res = new HashMap<String, Long>();
+
+        JsonReader reader = getReader(string, queryParams);
+
+        JsonArray arr = reader.readArray();
+        JsonObject obj = null;
+        for (int i = 0; i < arr.size(); i++) {
+            obj = arr.getJsonObject(i);
+            res.put(obj.getString("key"), obj.getJsonNumber("value").longValue());
+        }
+        return res;
     }
 
-    public long[] getLongArrValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public Map<String, Long> getMapStringLongValue(String string) {
+        return getMapStringLongValue(string, null);
+    }
+
+    public long[] getLongArrValue(String string, MultivaluedMap<String, String> queryParams) {
         JsonReader reader = getReader(string, queryParams);
         JsonArray arr = reader.readArray();
         long[] res = new long[arr.size()];
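
For context, the map-returning getters all decode the same key/value array shape that the REST API emits; a sketch (the path is illustrative):

import java.util.Map;
import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.api.APIConfig;

public class MapExample {
    public static void main(String[] args) {
        APIClient client = new APIClient(new APIConfig());
        // The server replies with e.g. [{"key":"ks1","value":17},{"key":"ks2","value":3}]
        // and getMapStringLongValue folds that into {ks1=17, ks2=3}.
        Map<String, Long> counts = client.getMapStringLongValue("/some/metric"); // hypothetical endpoint
        System.out.println(counts);
    }
}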
@@ -507,13 +569,25 @@ public class APIClient {
         return getLongArrValue(string, null);
     }
 
-    public Map<String, Integer> getMapStringIntegerValue(String string) {
-        // TODO Auto-generated method stub
-        return null;
+    public Map<String, Integer> getMapStringIntegerValue(String string, MultivaluedMap<String, String> queryParams) {
+        Map<String, Integer> res = new HashMap<String, Integer>();
+
+        JsonReader reader = getReader(string, queryParams);
+
+        JsonArray arr = reader.readArray();
+        JsonObject obj = null;
+        for (int i = 0; i < arr.size(); i++) {
+            obj = arr.getJsonObject(i);
+            res.put(obj.getString("key"), obj.getInt("value"));
+        }
+        return res;
+    }
+
+    public Map<String, Integer> getMapStringIntegerValue(String string) {
+        return getMapStringIntegerValue(string, null);
     }
 
-    public int[] getIntArrValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public int[] getIntArrValue(String string, MultivaluedMap<String, String> queryParams) {
         JsonReader reader = getReader(string, queryParams);
         JsonArray arr = reader.readArray();
         int[] res = new int[arr.size()];
@@ -528,8 +602,7 @@ public class APIClient {
         return getIntArrValue(string, null);
     }
 
-    public Map<String, Long> getListMapStringLongValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public Map<String, Long> getListMapStringLongValue(String string, MultivaluedMap<String, String> queryParams) {
         if (string.equals("")) {
             return null;
         }
@@ -546,7 +619,7 @@ public class APIClient {
                 if (obj.get(k) instanceof JsonString) {
                     key = obj.getString(k);
                 } else {
-                    val = obj.getInt(k);
+                    val = obj.getJsonNumber(k).longValue();
                 }
             }
             if (val > 0 && !key.equals("")) {
@@ -562,8 +635,7 @@ public class APIClient {
         return getListMapStringLongValue(string, null);
     }
 
-    public JsonArray getJsonArray(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public JsonArray getJsonArray(String string, MultivaluedMap<String, String> queryParams) {
         if (string.equals("")) {
             return null;
         }
@@ -577,8 +649,7 @@ public class APIClient {
         return getJsonArray(string, null);
     }
 
-    public List<Map<String, String>> getListMapStrValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public List<Map<String, String>> getListMapStrValue(String string, MultivaluedMap<String, String> queryParams) {
         JsonArray arr = getJsonArray(string, queryParams);
         List<Map<String, String>> res = new ArrayList<Map<String, String>>();
         for (int i = 0; i < arr.size(); i++) {
@@ -596,61 +667,36 @@ public class APIClient {
         return null;
     }
 
-    public JsonObject getJsonObj(String string,
-            MultivaluedMap<String, String> queryParams) {
+    public JsonObject getJsonObj(String string, MultivaluedMap<String, String> queryParams, long duration) {
         if (string.equals("")) {
             return null;
         }
-        JsonReader reader = getReader(string, queryParams);
-        JsonObject res = reader.readObject();
-        reader.close();
-        return res;
-    }
-
-    public HistogramValues getHistogramValue(String url,
-            MultivaluedMap<String, String> queryParams) {
-        HistogramValues res = new HistogramValues();
-        JsonObject obj = getJsonObj(url, queryParams);
-        res.count = obj.getJsonNumber("count").longValue();
-        res.max = obj.getJsonNumber("max").longValue();
-        res.min = obj.getJsonNumber("min").longValue();
-        res.sum = obj.getJsonNumber("sum").longValue();
-        res.variance = obj.getJsonNumber("variance").doubleValue();
-        res.mean = obj.getJsonNumber("mean").doubleValue();
-        JsonArray arr = obj.getJsonArray("sample");
-        if (arr != null) {
-            res.sample = new long[arr.size()];
-            for (int i = 0; i < arr.size(); i++) {
-                res.sample[i] = arr.getJsonNumber(i).longValue();
-            }
-        }
-        return res;
-    }
-
-    public HistogramValues getHistogramValue(String url) {
-        return getHistogramValue(url, null);
-    }
-
-    public EstimatedHistogram getEstimatedHistogram(String string,
-            MultivaluedMap<String, String> queryParams, long duration) {
         String key = getCacheKey(string, queryParams, duration);
-        EstimatedHistogram res = getEstimatedHistogramFromCache(key, duration);
+        JsonObject res = getJsonObjectFromCache(key, duration);
         if (res != null) {
             return res;
         }
-        res = new EstimatedHistogram(getEstimatedHistogramAsLongArrValue(string, queryParams));
+        JsonReader reader = getReader(string, queryParams);
+        res = reader.readObject();
+        reader.close();
         if (duration > 0) {
             cache.put(key, new CacheEntry(res));
         }
         return res;
-
     }
-    public long[] getEstimatedHistogramAsLongArrValue(String string,
-            MultivaluedMap<String, String> queryParams) {
+
+    public JsonObject getJsonObj(String string, MultivaluedMap<String, String> queryParams) {
+        return getJsonObj(string, queryParams, 0);
+    }
+
+    public long[] getEstimatedHistogramAsLongArrValue(String string, MultivaluedMap<String, String> queryParams) {
         JsonObject obj = getJsonObj(string, queryParams);
         JsonArray arr = obj.getJsonArray("buckets");
+        if (arr == null) {
+            return new long[0];
+        }
         long res[] = new long[arr.size()];
-        for (int i = 0; i< arr.size(); i++) {
+        for (int i = 0; i < arr.size(); i++) {
             res[i] = arr.getJsonNumber(i).longValue();
         }
         return res;
@@ -659,4 +705,37 @@ public class APIClient {
     public long[] getEstimatedHistogramAsLongArrValue(String string) {
         return getEstimatedHistogramAsLongArrValue(string, null);
     }
+
+    public Map<String, Double> getMapStringDouble(String string, MultivaluedMap<String, String> queryParams) {
+        if (string.equals("")) {
+            return null;
+        }
+        JsonReader reader = getReader(string, queryParams);
+        JsonArray arr = reader.readArray();
+        Map<String, Double> map = new HashMap<String, Double>();
+        for (int i = 0; i < arr.size(); i++) {
+            JsonObject obj = arr.getJsonObject(i);
+            Iterator<String> it = obj.keySet().iterator();
+            String key = "";
+            double val = -1;
+            while (it.hasNext()) {
+                String k = it.next();
+                if (obj.get(k) instanceof JsonString) {
+                    key = obj.getString(k);
+                } else {
+                    val = obj.getJsonNumber(k).doubleValue();
+                }
+            }
+            if (!key.equals("")) {
+                map.put(key, val);
+            }
+        }
+        reader.close();
+        return map;
+    }
+
+    public Map<String, Double> getMapStringDouble(String string) {
+        return getMapStringDouble(string, null);
+    }
 }
APIConfig.java (new file, 111 lines)
@@ -0,0 +1,111 @@
package com.scylladb.jmx.api;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.InputStream;
import java.util.Map;

import org.yaml.snakeyaml.Yaml;

/*
 * Copyright (C) 2015 ScyllaDB
 */

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

public class APIConfig {
    private String address = "localhost";
    private String port = "10000";

    public String getAddress() {
        return address;
    }

    public String getPort() {
        return port;
    }

    public String getBaseUrl() {
        return "http://" + address + ":" + port;
    }

    private void readFile(String name) {
        System.out.println("Using config file: " + name);
        InputStream input;
        try {
            input = new FileInputStream(new File(name));
            Yaml yaml = new Yaml();
            @SuppressWarnings("unchecked")
            Map<String, Object> map = (Map<String, Object>) yaml.load(input);
            if (map.containsKey("listen_address")) {
                address = (String) map.get("listen_address");
            }
            if (map.containsKey("api_address")) {
                address = (String) map.get("api_address");
            }
            if (map.containsKey("api_port")) {
                port = map.get("api_port").toString();
            }
        } catch (FileNotFoundException e) {
            System.err.println("fail reading from config file: " + name);
            System.exit(-1);
        }
    }

    public static boolean fileExists(String name) {
        File varTmpDir = new File(name);
        return varTmpDir.exists();
    }

    private boolean loadIfExists(String path, String name) {
        if (path == null) {
            return false;
        }
        if (!fileExists(path + name)) {
            return false;
        }
        readFile(path + name);
        return true;
    }

    /**
     * Load the JMX proxy configuration. The hierarchy is as follows: a command
     * line argument takes precedence over everything; then a configuration
     * file named on the command line (command line arguments can still replace
     * specific values in it); then SCYLLA_CONF/scylla.yaml; then
     * SCYLLA_HOME/conf/scylla.yaml; then conf/scylla.yaml; then the default
     * values. With file configuration, to make it clearer what is being used,
     * only one file is chosen: the one with the highest precedence.
     */
    public APIConfig() {
        if (!System.getProperty("apiconfig", "").equals("")) {
            readFile(System.getProperty("apiconfig"));
        } else if (!loadIfExists(System.getenv("SCYLLA_CONF"), "/scylla.yaml")
                && !loadIfExists(System.getenv("SCYLLA_HOME"), "/conf/scylla.yaml")) {
            loadIfExists("", "conf/scylla.yaml");
        }

        if (!System.getProperty("apiaddress", "").equals("")) {
            address = System.getProperty("apiaddress");
        }
        if (!System.getProperty("apiport", "").equals("")) {
            port = System.getProperty("apiport", "10000");
        }
    }
}
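
A usage sketch of the configuration precedence described in the constructor's comment; the property values are illustrative:

import com.scylladb.jmx.api.APIConfig;

public class ConfigExample {
    public static void main(String[] args) {
        // Launched as, e.g.:
        //   java -Dapiconfig=/etc/scylla/scylla.yaml -Dapiport=10001 ConfigExample
        // The explicit -Dapiport overrides the api_port read from the YAML file,
        // which in turn overrides the built-in default of 10000.
        APIConfig config = new APIConfig();
        System.out.println(config.getBaseUrl()); // e.g. http://127.0.0.1:10001
    }
}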
CacheEntry.java
@@ -19,15 +19,16 @@
  * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
  */
 
-package com.cloudius.urchin.api;
+package com.scylladb.jmx.api;
 
-import com.cloudius.urchin.utils.EstimatedHistogram;
+import jakarta.json.JsonObject;
 
-public class CacheEntry {
-    long time;
-    Object value;
-
-    CacheEntry(Object res) {
+class CacheEntry {
+    private long time;
+    private Object value;
+
+    public CacheEntry(Object res) {
         time = System.currentTimeMillis();
         this.value = res;
     }
@@ -40,7 +41,7 @@ public class CacheEntry {
         return (String) value;
     }
 
-    public EstimatedHistogram getEstimatedHistogram() {
-        return (EstimatedHistogram)value;
+    public JsonObject jsonObject() {
+        return (JsonObject) value;
     }
 }
@@ -22,71 +22,59 @@
  * Modified by Cloudius Systems
  */

-package com.cloudius.urchin.utils;
+package com.scylladb.jmx.api.utils;

-import java.io.*;
+import java.io.File;
 import java.text.DecimalFormat;

-public class FileUtils
-{
+public class FileUtils {
     private static final double KB = 1024d;
-    private static final double MB = 1024*1024d;
-    private static final double GB = 1024*1024*1024d;
-    private static final double TB = 1024*1024*1024*1024d;
+    private static final double MB = 1024 * 1024d;
+    private static final double GB = 1024 * 1024 * 1024d;
+    private static final double TB = 1024 * 1024 * 1024 * 1024d;

     private static final DecimalFormat df = new DecimalFormat("#.##");

-    public static String stringifyFileSize(double value)
-    {
+    public static String stringifyFileSize(double value) {
         double d;
-        if ( value >= TB )
-        {
+        if (value >= TB) {
             d = value / TB;
             String val = df.format(d);
             return val + " TB";
-        }
-        else if ( value >= GB )
-        {
+        } else if (value >= GB) {
             d = value / GB;
             String val = df.format(d);
             return val + " GB";
-        }
-        else if ( value >= MB )
-        {
+        } else if (value >= MB) {
             d = value / MB;
             String val = df.format(d);
             return val + " MB";
-        }
-        else if ( value >= KB )
-        {
+        } else if (value >= KB) {
             d = value / KB;
             String val = df.format(d);
             return val + " KB";
-        }
-        else
-        {
+        } else {
             String val = df.format(value);
             return val + " bytes";
         }
     }

     /**
      * Get the size of a directory in bytes
-     * @param directory The directory for which we need size.
+     *
+     * @param directory
+     *            The directory for which we need size.
      * @return The size of the directory
      */
-    public static long folderSize(File directory)
-    {
+    public static long folderSize(File directory) {
         long length = 0;
-        for (File file : directory.listFiles())
-        {
-            if (file.isFile())
+        for (File file : directory.listFiles()) {
+            if (file.isFile()) {
                 length += file.length();
-            else
+            } else {
                 length += folderSize(file);
+            }
         }
         return length;
     }
 }
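stringifyFileSize walks the thresholds from TB down to KB and formats the first match with at most two decimals ("#.##"). A short usage sketch, assuming the class is on the classpath under its new package:

import com.scylladb.jmx.api.utils.FileUtils;

public class FileSizeDemo {
    public static void main(String[] args) {
        System.out.println(FileUtils.stringifyFileSize(512));              // 512 bytes
        System.out.println(FileUtils.stringifyFileSize(1536));             // 1.5 KB
        System.out.println(FileUtils.stringifyFileSize(3 * 1024 * 1024d)); // 3 MB
    }
}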
@@ -22,47 +22,42 @@
  * Modified by Cloudius Systems
  */

-package com.cloudius.urchin.utils;
+package com.scylladb.jmx.api.utils;

 import com.google.common.base.Objects;

-public class Pair<T1, T2>
-{
+public class Pair<T1, T2> {
     public final T1 left;
     public final T2 right;

-    protected Pair(T1 left, T2 right)
-    {
+    protected Pair(T1 left, T2 right) {
         this.left = left;
         this.right = right;
     }

     @Override
-    public final int hashCode()
-    {
+    public final int hashCode() {
         int hashCode = 31 + (left == null ? 0 : left.hashCode());
-        return 31*hashCode + (right == null ? 0 : right.hashCode());
+        return 31 * hashCode + (right == null ? 0 : right.hashCode());
     }

     @Override
-    public final boolean equals(Object o)
-    {
-        if(!(o instanceof Pair))
+    public final boolean equals(Object o) {
+        if (!(o instanceof Pair)) {
             return false;
+        }
         @SuppressWarnings("rawtypes")
-        Pair that = (Pair)o;
+        Pair that = (Pair) o;
         // handles nulls properly
         return Objects.equal(left, that.left) && Objects.equal(right, that.right);
     }

     @Override
-    public String toString()
-    {
+    public String toString() {
         return "(" + left + "," + right + ")";
     }

-    public static <X, Y> Pair<X, Y> create(X x, Y y)
-    {
+    public static <X, Y> Pair<X, Y> create(X x, Y y) {
         return new Pair<X, Y>(x, y);
     }
 }
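Pair delegates equality and hashing to null-safe helpers, so either component may be null. A usage sketch under the same classpath assumption as above:

import com.scylladb.jmx.api.utils.Pair;

public class PairDemo {
    public static void main(String[] args) {
        Pair<String, Long> a = Pair.create("snapshot-1", 1024L);
        Pair<String, Long> b = Pair.create("snapshot-1", 1024L);
        System.out.println(a.equals(b));                      // true: field-wise equality
        System.out.println(Pair.create(null, 1L).hashCode()); // null components are safe
        System.out.println(a);                                // (snapshot-1,1024)
    }
}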
@@ -20,21 +20,27 @@
  *
  * Modified by Cloudius Systems
  */
-package com.cloudius.urchin.utils;
+package com.scylladb.jmx.api.utils;

 import java.util.Map;
-import javax.management.openmbean.*;
+
+import javax.management.openmbean.CompositeDataSupport;
+import javax.management.openmbean.CompositeType;
+import javax.management.openmbean.OpenDataException;
+import javax.management.openmbean.OpenType;
+import javax.management.openmbean.SimpleType;
+import javax.management.openmbean.TabularDataSupport;
+import javax.management.openmbean.TabularType;

 import com.google.common.base.Throwables;

 public class SnapshotDetailsTabularData {

-    private static final String[] ITEM_NAMES = new String[] { "Snapshot name",
-            "Keyspace name", "Column family name", "True size", "Size on disk" };
+    private static final String[] ITEM_NAMES = new String[] { "Snapshot name", "Keyspace name", "Column family name",
+            "True size", "Size on disk" };

-    private static final String[] ITEM_DESCS = new String[] { "snapshot_name",
-            "keyspace_name", "columnfamily_name", "TrueDiskSpaceUsed",
-            "TotalDiskSpaceUsed" };
+    private static final String[] ITEM_DESCS = new String[] { "snapshot_name", "keyspace_name", "columnfamily_name",
+            "TrueDiskSpaceUsed", "TotalDiskSpaceUsed" };

     private static final String TYPE_NAME = "SnapshotDetails";

@@ -48,28 +54,22 @@ public class SnapshotDetailsTabularData {

     static {
         try {
-            ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING,
-                    SimpleType.STRING, SimpleType.STRING, SimpleType.STRING };
+            ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.STRING,
+                    SimpleType.STRING };

-            COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES,
-                    ITEM_DESCS, ITEM_TYPES);
+            COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES, ITEM_DESCS, ITEM_TYPES);

-            TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE,
-                    ITEM_NAMES);
+            TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE, ITEM_NAMES);
         } catch (OpenDataException e) {
             throw Throwables.propagate(e);
         }
     }

-    public static void from(final String snapshot, final String ks,
-            final String cf,
-            Map.Entry<String, Pair<Long, Long>> snapshotDetail,
-            TabularDataSupport result) {
+    public static void from(final String snapshot, final String ks, final String cf,
+            Map.Entry<String, Pair<Long, Long>> snapshotDetail, TabularDataSupport result) {
         try {
-            final String totalSize = FileUtils.stringifyFileSize(snapshotDetail
-                    .getValue().left);
-            final String liveSize = FileUtils.stringifyFileSize(snapshotDetail
-                    .getValue().right);
+            final String totalSize = FileUtils.stringifyFileSize(snapshotDetail.getValue().left);
+            final String liveSize = FileUtils.stringifyFileSize(snapshotDetail.getValue().right);
             result.put(new CompositeDataSupport(COMPOSITE_TYPE, ITEM_NAMES,
                     new Object[] { snapshot, ks, cf, liveSize, totalSize }));
         } catch (OpenDataException e) {
@@ -77,8 +77,8 @@ public class SnapshotDetailsTabularData {
         }
     }

-    public static void from(final String snapshot, final String ks,
-            final String cf, long total, long live, TabularDataSupport result) {
+    public static void from(final String snapshot, final String ks, final String cf, long total, long live,
+            TabularDataSupport result) {
         try {
             final String totalSize = FileUtils.stringifyFileSize(total);
             final String liveSize = FileUtils.stringifyFileSize(live);
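The reformatted from() overloads append one row per snapshot to a TabularDataSupport, rendering both sizes through FileUtils.stringifyFileSize. A sketch of the call pattern, assuming the class exposes its TabularType as a public TABULAR_TYPE field (its declaration falls outside the hunks shown):

import javax.management.openmbean.TabularDataSupport;

import com.scylladb.jmx.api.utils.SnapshotDetailsTabularData;

public class SnapshotTableDemo {
    public static void main(String[] args) {
        // Assumption: TABULAR_TYPE is the publicly visible TabularType built in the static block.
        TabularDataSupport result = new TabularDataSupport(SnapshotDetailsTabularData.TABULAR_TYPE);
        SnapshotDetailsTabularData.from("snap-2016", "ks1", "cf1", 4096L, 1024L, result);
        System.out.println(result.size()); // 1 row; sizes stored as "4 KB" / "1 KB"
    }
}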
scylla-apiclient/src/main/java/module-info.java (new file, 15 lines)
@@ -0,0 +1,15 @@
+module scylla.apiclient {
+    exports com.scylladb.jmx.api;
+    exports com.scylladb.jmx.api.utils;
+    requires org.eclipse.parsson;
+    requires jakarta.ws.rs;
+    requires com.fasterxml.jackson.jakarta.rs.json;
+    requires jersey.client;
+    requires java.logging;
+    requires jakarta.json;
+    requires java.management;
+    requires org.yaml.snakeyaml;
+    requires com.google.common;
+    requires jersey.common;
+    requires jersey.hk2;
+}
scylla-jmx-parent/pom.xml (new file, 29 lines)
@@ -0,0 +1,29 @@
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+    <modelVersion>4.0.0</modelVersion>
+
+    <groupId>it.cavallium.scylladb.jmx</groupId>
+    <artifactId>scylla-jmx-parent</artifactId>
+    <version>1.1</version>
+    <packaging>pom</packaging>
+
+    <modules>
+        <module>../</module>
+        <module>../scylla-apiclient</module>
+    </modules>
+
+    <name>Scylla JMX Parent</name>
+
+    <distributionManagement>
+        <repository>
+            <id>mchv-release-distribution</id>
+            <name>MCHV Release Apache Maven Packages Distribution</name>
+            <url>https://mvn.mchv.eu/repository/mchv</url>
+        </repository>
+        <snapshotRepository>
+            <id>mchv-snapshot-distribution</id>
+            <name>MCHV Snapshot Apache Maven Packages Distribution</name>
+            <url>https://mvn.mchv.eu/repository/mchv-snapshot</url>
+        </snapshotRepository>
+    </distributionManagement>
+</project>
@@ -1,37 +0,0 @@
-/*
- * Copyright 2015 Cloudius Systems
- */
-package com.cloudius.urchin.main;
-
-import com.cloudius.urchin.api.APIClient;
-
-import org.apache.cassandra.db.ColumnFamilyStore;
-import org.apache.cassandra.db.commitlog.CommitLog;
-import org.apache.cassandra.db.compaction.CompactionManager;
-import org.apache.cassandra.gms.Gossiper;
-import org.apache.cassandra.gms.FailureDetector;
-import org.apache.cassandra.locator.EndpointSnitchInfo;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.service.CacheService;
-import org.apache.cassandra.service.StorageProxy;
-import org.apache.cassandra.service.StorageService;
-
-public class Main {
-
-    public static void main(String[] args) throws Exception {
-        System.out.println("Connecting to " + APIClient.getBaseUrl());
-        System.out.println("Starting the JMX server");
-        StorageService.getInstance();
-        StorageProxy.getInstance();
-        MessagingService.getInstance();
-        CommitLog.getInstance();
-        Gossiper.getInstance();
-        EndpointSnitchInfo.getInstance();
-        FailureDetector.getInstance();
-        ColumnFamilyStore.register_mbeans();
-        CacheService.getInstance();
-        CompactionManager.getInstance();
-        Thread.sleep(Long.MAX_VALUE);
-    }
-
-}
@@ -1,399 +0,0 @@
-package com.cloudius.urchin.metrics;
-
-/*
- * Copyright 2015 Cloudius Systems
- *
- * Modified by Cloudius Systems
- */
-
-import java.util.concurrent.TimeUnit;
-
-import com.yammer.metrics.core.APIMetricsRegistry;
-import com.yammer.metrics.core.Counter;
-import com.yammer.metrics.core.Gauge;
-import com.yammer.metrics.core.Histogram;
-import com.yammer.metrics.core.Meter;
-import com.yammer.metrics.core.MetricName;
-import com.yammer.metrics.core.APITimer;
-import com.yammer.metrics.core.Timer;
-import com.yammer.metrics.reporting.JmxReporter;
-
-public class APIMetrics {
-    private static final APIMetricsRegistry DEFAULT_REGISTRY = new APIMetricsRegistry();
-    private static final Thread SHUTDOWN_HOOK = new Thread() {
-        public void run() {
-            JmxReporter.shutdownDefault();
-        }
-    };
-
-    static {
-        JmxReporter.startDefault(DEFAULT_REGISTRY);
-        Runtime.getRuntime().addShutdownHook(SHUTDOWN_HOOK);
-    }
-
-    private APIMetrics() { /* unused */
-    }
-
-    /**
-     * Given a new {@link com.yammer.metrics.core.Gauge}, registers it under the
-     * given class and name.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param metric
-     *            the metric
-     * @param <T>
-     *            the type of the value returned by the metric
-     * @return {@code metric}
-     */
-    public static <T> Gauge<T> newGauge(Class<?> klass, String name,
-            Gauge<T> metric) {
-        return DEFAULT_REGISTRY.newGauge(klass, name, metric);
-    }
-
-    /**
-     * Given a new {@link com.yammer.metrics.core.Gauge}, registers it under the
-     * given class and name.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param scope
-     *            the scope of the metric
-     * @param metric
-     *            the metric
-     * @param <T>
-     *            the type of the value returned by the metric
-     * @return {@code metric}
-     */
-    public static <T> Gauge<T> newGauge(Class<?> klass, String name,
-            String scope, Gauge<T> metric) {
-        return DEFAULT_REGISTRY.newGauge(klass, name, scope, metric);
-    }
-
-    /**
-     * Given a new {@link com.yammer.metrics.core.Gauge}, registers it under the
-     * given metric name.
-     *
-     * @param metricName
-     *            the name of the metric
-     * @param metric
-     *            the metric
-     * @param <T>
-     *            the type of the value returned by the metric
-     * @return {@code metric}
-     */
-    public static <T> Gauge<T> newGauge(MetricName metricName, Gauge<T> metric) {
-        return DEFAULT_REGISTRY.newGauge(metricName, metric);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.Counter} and registers it
-     * under the given class and name.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @return a new {@link com.yammer.metrics.core.Counter}
-     */
-    public static Counter newCounter(String url, Class<?> klass, String name) {
-        return DEFAULT_REGISTRY.newCounter(url, klass, name);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.Counter} and registers it
-     * under the given class and name.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param scope
-     *            the scope of the metric
-     * @return a new {@link com.yammer.metrics.core.Counter}
-     */
-    public static Counter newCounter(String url, Class<?> klass, String name,
-            String scope) {
-        return DEFAULT_REGISTRY.newCounter(url, klass, name, scope);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.Counter} and registers it
-     * under the given metric name.
-     *
-     * @param metricName
-     *            the name of the metric
-     * @return a new {@link com.yammer.metrics.core.Counter}
-     */
-    public static Counter newCounter(String url, MetricName metricName) {
-        return DEFAULT_REGISTRY.newCounter(url, metricName);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.Histogram} and registers it
-     * under the given class and name.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param biased
-     *            whether or not the histogram should be biased
-     * @return a new {@link com.yammer.metrics.core.Histogram}
-     */
-    public static Histogram newHistogram(String url, Class<?> klass,
-            String name, boolean biased) {
-        return DEFAULT_REGISTRY.newHistogram(url, klass, name, biased);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.Histogram} and registers it
-     * under the given class, name, and scope.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param scope
-     *            the scope of the metric
-     * @param biased
-     *            whether or not the histogram should be biased
-     * @return a new {@link com.yammer.metrics.core.Histogram}
-     */
-    public static Histogram newHistogram(String url, Class<?> klass,
-            String name, String scope, boolean biased) {
-        return DEFAULT_REGISTRY.newHistogram(url, klass, name, scope, biased);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.Histogram} and registers it
-     * under the given metric name.
-     *
-     * @param metricName
-     *            the name of the metric
-     * @param biased
-     *            whether or not the histogram should be biased
-     * @return a new {@link com.yammer.metrics.core.Histogram}
-     */
-    public static Histogram newHistogram(String url, MetricName metricName,
-            boolean biased) {
-        return DEFAULT_REGISTRY.newHistogram(url, metricName, biased);
-    }
-
-    /**
-     * Creates a new non-biased {@link com.yammer.metrics.core.Histogram} and
-     * registers it under the given class and name.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @return a new {@link com.yammer.metrics.core.Histogram}
-     */
-    public static Histogram newHistogram(String url, Class<?> klass, String name) {
-        return DEFAULT_REGISTRY.newHistogram(url, klass, name);
-    }
-
-    /**
-     * Creates a new non-biased {@link com.yammer.metrics.core.Histogram} and
-     * registers it under the given class, name, and scope.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param scope
-     *            the scope of the metric
-     * @return a new {@link com.yammer.metrics.core.Histogram}
-     */
-    public static Histogram newHistogram(String url, Class<?> klass,
-            String name, String scope) {
-        return DEFAULT_REGISTRY.newHistogram(url, klass, name, scope);
-    }
-
-    /**
-     * Creates a new non-biased {@link com.yammer.metrics.core.Histogram} and
-     * registers it under the given metric name.
-     *
-     * @param metricName
-     *            the name of the metric
-     * @return a new {@link com.yammer.metrics.core.Histogram}
-     */
-    public static Histogram newHistogram(String url, MetricName metricName) {
-        return newHistogram(url, metricName, false);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.Meter} and registers it
-     * under the given class and name.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param eventType
-     *            the plural name of the type of events the meter is measuring
-     *            (e.g., {@code "requests"})
-     * @param unit
-     *            the rate unit of the new meter
-     * @return a new {@link com.yammer.metrics.core.Meter}
-     */
-    public static Meter newMeter(String url, Class<?> klass, String name,
-            String eventType, TimeUnit unit) {
-        return DEFAULT_REGISTRY.newMeter(url, klass, name, eventType, unit);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.Meter} and registers it
-     * under the given class, name, and scope.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param scope
-     *            the scope of the metric
-     * @param eventType
-     *            the plural name of the type of events the meter is measuring
-     *            (e.g., {@code "requests"})
-     * @param unit
-     *            the rate unit of the new meter
-     * @return a new {@link com.yammer.metrics.core.Meter}
-     */
-    public static Meter newMeter(String url, Class<?> klass, String name,
-            String scope, String eventType, TimeUnit unit) {
-        return DEFAULT_REGISTRY.newMeter(url, klass, name, scope, eventType,
-                unit);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.Meter} and registers it
-     * under the given metric name.
-     *
-     * @param metricName
-     *            the name of the metric
-     * @param eventType
-     *            the plural name of the type of events the meter is measuring
-     *            (e.g., {@code "requests"})
-     * @param unit
-     *            the rate unit of the new meter
-     * @return a new {@link com.yammer.metrics.core.Meter}
-     */
-    public static Meter newMeter(String url, MetricName metricName,
-            String eventType, TimeUnit unit) {
-        return DEFAULT_REGISTRY.newMeter(url, metricName, eventType, unit);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it
-     * under the given class and name.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param durationUnit
-     *            the duration scale unit of the new timer
-     * @param rateUnit
-     *            the rate scale unit of the new timer
-     * @return a new {@link com.yammer.metrics.core.APITimer}
-     */
-    public static Timer newTimer(String url, Class<?> klass, String name,
-            TimeUnit durationUnit, TimeUnit rateUnit) {
-        return DEFAULT_REGISTRY.newTimer(url, klass, name, durationUnit, rateUnit);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it
-     * under the given class and name, measuring elapsed time in milliseconds
-     * and invocations per second.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @return a new {@link com.yammer.metrics.core.APITimer}
-     */
-    public static Timer newTimer(String url, Class<?> klass, String name) {
-        return DEFAULT_REGISTRY.newTimer(url, klass, name);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it
-     * under the given class, name, and scope.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param scope
-     *            the scope of the metric
-     * @param durationUnit
-     *            the duration scale unit of the new timer
-     * @param rateUnit
-     *            the rate scale unit of the new timer
-     * @return a new {@link com.yammer.metrics.core.APITimer}
-     */
-    public static Timer newTimer(String url, Class<?> klass, String name, String scope,
-            TimeUnit durationUnit, TimeUnit rateUnit) {
-        return DEFAULT_REGISTRY.newTimer(url, klass, name, scope, durationUnit,
-                rateUnit);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it
-     * under the given class, name, and scope, measuring elapsed time in
-     * milliseconds and invocations per second.
-     *
-     * @param klass
-     *            the class which owns the metric
-     * @param name
-     *            the name of the metric
-     * @param scope
-     *            the scope of the metric
-     * @return a new {@link com.yammer.metrics.core.APITimer}
-     */
-    public static Timer newTimer(String url, Class<?> klass, String name, String scope) {
-        return DEFAULT_REGISTRY.newTimer(url, klass, name, scope);
-    }
-
-    /**
-     * Creates a new {@link com.yammer.metrics.core.APITimer} and registers it
-     * under the given metric name.
-     *
-     * @param metricName
-     *            the name of the metric
-     * @param durationUnit
-     *            the duration scale unit of the new timer
-     * @param rateUnit
-     *            the rate scale unit of the new timer
-     * @return a new {@link com.yammer.metrics.core.APITimer}
-     */
-    public static Timer newTimer(String url, MetricName metricName, TimeUnit durationUnit,
-            TimeUnit rateUnit) {
-        return DEFAULT_REGISTRY.newTimer(url, metricName, durationUnit, rateUnit);
-    }
-
-    /**
-     * Returns the (static) default registry.
-     *
-     * @return the metrics registry
-     */
-    public static APIMetricsRegistry defaultRegistry() {
-        return DEFAULT_REGISTRY;
-    }
-
-    /**
-     * Shuts down all thread pools for the default registry.
-     */
-    public static void shutdown() {
-        DEFAULT_REGISTRY.shutdown();
-        JmxReporter.shutdownDefault();
-        Runtime.getRuntime().removeShutdownHook(SHUTDOWN_HOOK);
-    }
-
-}
@@ -1,312 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * Copyright 2015 Cloudius Systems
- *
- * Modified by Cloudius Systems
- */
-
-package com.cloudius.urchin.utils;
-
-import java.util.Arrays;
-import java.util.concurrent.atomic.AtomicLongArray;
-
-import com.google.common.base.Objects;
-
-import org.slf4j.Logger;
-
-public class EstimatedHistogram {
-    /**
-     * The series of values to which the counts in `buckets` correspond: 1, 2,
-     * 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc. Thus, a `buckets` of [0, 0, 1,
-     * 10] would mean we had seen one value of 3 and 10 values of 4.
-     *
-     * The series starts at 1 and grows by 1.2 each time (rounding and removing
-     * duplicates). It goes from 1 to around 36M by default (creating 90+1
-     * buckets), which will give us timing resolution from microseconds to 36
-     * seconds, with less precision as the numbers get larger.
-     *
-     * Each bucket represents values from (previous bucket offset, current
-     * offset].
-     */
-    private final long[] bucketOffsets;
-
-    // buckets is one element longer than bucketOffsets -- the last element is
-    // values greater than the last offset
-    final AtomicLongArray buckets;
-
-    public EstimatedHistogram() {
-        this(90);
-    }
-
-    public EstimatedHistogram(int bucketCount) {
-        bucketOffsets = newOffsets(bucketCount);
-        buckets = new AtomicLongArray(bucketOffsets.length + 1);
-    }
-
-    public EstimatedHistogram(long[] offsets, long[] bucketData) {
-        assert bucketData.length == offsets.length + 1;
-        bucketOffsets = offsets;
-        buckets = new AtomicLongArray(bucketData);
-    }
-
-    public EstimatedHistogram(long[] bucketData) {
-        bucketOffsets = newOffsets(bucketData.length - 1);
-        buckets = new AtomicLongArray(bucketData);
-    }
-
-    private static long[] newOffsets(int size) {
-        long[] result = new long[size];
-        long last = 1;
-        result[0] = last;
-        for (int i = 1; i < size; i++) {
-            long next = Math.round(last * 1.2);
-            if (next == last)
-                next++;
-            result[i] = next;
-            last = next;
-        }
-
-        return result;
-    }
-
-    /**
-     * @return the histogram values corresponding to each bucket index
-     */
-    public long[] getBucketOffsets() {
-        return bucketOffsets;
-    }
-
-    /**
-     * Increments the count of the bucket closest to n, rounding UP.
-     *
-     * @param n
-     */
-    public void add(long n) {
-        int index = Arrays.binarySearch(bucketOffsets, n);
-        if (index < 0) {
-            // inexact match, take the first bucket higher than n
-            index = -index - 1;
-        }
-        // else exact match; we're good
-        buckets.incrementAndGet(index);
-    }
-
-    /**
-     * @return the count in the given bucket
-     */
-    long get(int bucket) {
-        return buckets.get(bucket);
-    }
-
-    /**
-     * @param reset
-     *            zero out buckets afterwards if true
-     * @return a long[] containing the current histogram buckets
-     */
-    public long[] getBuckets(boolean reset) {
-        final int len = buckets.length();
-        long[] rv = new long[len];
-
-        if (reset)
-            for (int i = 0; i < len; i++)
-                rv[i] = buckets.getAndSet(i, 0L);
-        else
-            for (int i = 0; i < len; i++)
-                rv[i] = buckets.get(i);
-
-        return rv;
-    }
-
-    /**
-     * @return the smallest value that could have been added to this histogram
-     */
-    public long min() {
-        for (int i = 0; i < buckets.length(); i++) {
-            if (buckets.get(i) > 0)
-                return i == 0 ? 0 : 1 + bucketOffsets[i - 1];
-        }
-        return 0;
-    }
-
-    /**
-     * @return the largest value that could have been added to this histogram.
-     *         If the histogram overflowed, returns Long.MAX_VALUE.
-     */
-    public long max() {
-        int lastBucket = buckets.length() - 1;
-        if (buckets.get(lastBucket) > 0)
-            return Long.MAX_VALUE;
-
-        for (int i = lastBucket - 1; i >= 0; i--) {
-            if (buckets.get(i) > 0)
-                return bucketOffsets[i];
-        }
-        return 0;
-    }
-
-    /**
-     * @param percentile
-     * @return estimated value at given percentile
-     */
-    public long percentile(double percentile) {
-        assert percentile >= 0 && percentile <= 1.0;
-        int lastBucket = buckets.length() - 1;
-        if (buckets.get(lastBucket) > 0)
-            throw new IllegalStateException(
-                    "Unable to compute when histogram overflowed");
-
-        long pcount = (long) Math.floor(count() * percentile);
-        if (pcount == 0)
-            return 0;
-
-        long elements = 0;
-        for (int i = 0; i < lastBucket; i++) {
-            elements += buckets.get(i);
-            if (elements >= pcount)
-                return bucketOffsets[i];
-        }
-        return 0;
-    }
-
-    /**
-     * @return the mean histogram value (average of bucket offsets, weighted by
-     *         count)
-     * @throws IllegalStateException
-     *             if any values were greater than the largest bucket threshold
-     */
-    public long mean() {
-        int lastBucket = buckets.length() - 1;
-        if (buckets.get(lastBucket) > 0)
-            throw new IllegalStateException(
-                    "Unable to compute ceiling for max when histogram overflowed");
-
-        long elements = 0;
-        long sum = 0;
-        for (int i = 0; i < lastBucket; i++) {
-            long bCount = buckets.get(i);
-            elements += bCount;
-            sum += bCount * bucketOffsets[i];
-        }
-
-        return (long) Math.ceil((double) sum / elements);
-    }
-
-    /**
-     * @return the total number of non-zero values
-     */
-    public long count() {
-        long sum = 0L;
-        for (int i = 0; i < buckets.length(); i++)
-            sum += buckets.get(i);
-        return sum;
-    }
-
-    /**
-     * @return true if this histogram has overflowed -- that is, a value larger
-     *         than our largest bucket could bound was added
-     */
-    public boolean isOverflowed() {
-        return buckets.get(buckets.length() - 1) > 0;
-    }
-
-    /**
-     * log.debug() every record in the histogram
-     *
-     * @param log
-     */
-    public void log(Logger log) {
-        // only print overflow if there is any
-        int nameCount;
-        if (buckets.get(buckets.length() - 1) == 0)
-            nameCount = buckets.length() - 1;
-        else
-            nameCount = buckets.length();
-        String[] names = new String[nameCount];
-
-        int maxNameLength = 0;
-        for (int i = 0; i < nameCount; i++) {
-            names[i] = nameOfRange(bucketOffsets, i);
-            maxNameLength = Math.max(maxNameLength, names[i].length());
-        }
-
-        // emit log records
-        String formatstr = "%" + maxNameLength + "s: %d";
-        for (int i = 0; i < nameCount; i++) {
-            long count = buckets.get(i);
-            // sort-of-hack to not print empty ranges at the start that are only
-            // used to demarcate the
-            // first populated range. for code clarity we don't omit this record
-            // from the maxNameLength
-            // calculation, and accept the unnecessary whitespace prefixes that
-            // will occasionally occur
-            if (i == 0 && count == 0)
-                continue;
-            log.debug(String.format(formatstr, names[i], count));
-        }
-    }
-
-    private static String nameOfRange(long[] bucketOffsets, int index) {
-        StringBuilder sb = new StringBuilder();
-        appendRange(sb, bucketOffsets, index);
-        return sb.toString();
-    }
-
-    private static void appendRange(StringBuilder sb, long[] bucketOffsets,
-            int index) {
-        sb.append("[");
-        if (index == 0)
-            if (bucketOffsets[0] > 0)
-                // by original definition, this histogram is for values greater
-                // than zero only;
-                // if values of 0 or less are required, an entry of lb-1 must be
-                // inserted at the start
-                sb.append("1");
-            else
-                sb.append("-Inf");
-        else
-            sb.append(bucketOffsets[index - 1] + 1);
-        sb.append("..");
-        if (index == bucketOffsets.length)
-            sb.append("Inf");
-        else
-            sb.append(bucketOffsets[index]);
-        sb.append("]");
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o)
-            return true;
-
-        if (!(o instanceof EstimatedHistogram))
-            return false;
-
-        EstimatedHistogram that = (EstimatedHistogram) o;
-        return Arrays.equals(getBucketOffsets(), that.getBucketOffsets())
-                && Arrays.equals(getBuckets(false), that.getBuckets(false));
-    }
-
-    @Override
-    public int hashCode() {
-        return Objects.hashCode(getBucketOffsets(), getBuckets(false));
-    }
-}
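The removed class's Javadoc claims the bucket series 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, ... A JDK-only sketch that replays the newOffsets loop shows how 20% geometric growth with rounding and duplicate-bumping produces exactly that series:

import java.util.Arrays;

public class OffsetsDemo {
    public static void main(String[] args) {
        long[] result = new long[15];
        long last = 1;
        result[0] = last;
        for (int i = 1; i < result.length; i++) {
            long next = Math.round(last * 1.2); // grow by 1.2
            if (next == last) {
                next++; // bump when rounding would duplicate the previous offset
            }
            result[i] = next;
            last = next;
        }
        System.out.println(Arrays.toString(result));
        // [1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, 24, 29]
    }
}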
@@ -1,62 +0,0 @@
-package com.cloudius.urchin.utils;
-/*
- * Copyright (C) 2015 ScyllaDB
- */
-
-/*
- * This file is part of Scylla.
- *
- * Scylla is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Scylla is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
- */
-
-/**
- *
- * RecentEstimatedHistogram In the (deprecated) 'recent' functionality, each
- * call to get the values cleans the value.
- *
- * The RecentEstimatedHistogram support recent call to EstimatedHistogram.
- * It holds the latest total values and a call to getBuckets return the delta.
- *
- */
-public class RecentEstimatedHistogram extends EstimatedHistogram {
-    public RecentEstimatedHistogram() {
-    }
-
-    public RecentEstimatedHistogram(int bucketCount) {
-        super(bucketCount);
-    }
-
-    public RecentEstimatedHistogram(long[] offsets, long[] bucketData) {
-        super(offsets, bucketData);
-    }
-
-    /**
-     * Set the current buckets to new value and return the delta from the last
-     * getBuckets call
-     *
-     * @param bucketData
-     *            - new bucket value
-     * @return a long[] containing the current histogram difference buckets
-     */
-    public long[] getBuckets(long[] bucketData) {
-        final int len = buckets.length();
-        long[] rv = new long[len];
-
-        for (int i = 0; i < len; i++) {
-            rv[i] = bucketData[i];
-            rv[i] -= buckets.getAndSet(i, bucketData[i]);
-        }
-        return rv;
-    }
-}
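The delta logic in the removed getBuckets(long[]) is worth spelling out: it swaps the stored cumulative totals for the new ones and returns new-minus-old, so each call reports only the activity since the previous call. A JDK-only sketch of the same arithmetic:

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLongArray;

public class RecentDeltaDemo {
    static long[] delta(AtomicLongArray last, long[] totals) {
        long[] rv = new long[totals.length];
        for (int i = 0; i < totals.length; i++) {
            // getAndSet stores the new total and hands back the previous one.
            rv[i] = totals[i] - last.getAndSet(i, totals[i]);
        }
        return rv;
    }

    public static void main(String[] args) {
        AtomicLongArray last = new AtomicLongArray(3); // starts at [0, 0, 0]
        System.out.println(Arrays.toString(delta(last, new long[] { 5, 7, 9 })));  // [5, 7, 9]
        System.out.println(Arrays.toString(delta(last, new long[] { 6, 10, 9 }))); // [1, 3, 0]
    }
}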
src/main/java/com/scylladb/jmx/main/Main.java (new file, 77 lines)
@@ -0,0 +1,77 @@
+/*
+ * Copyright 2015 Cloudius Systems
+ */
+package com.scylladb.jmx.main;
+
+import static java.lang.management.ManagementFactory.getPlatformMBeanServer;
+import static java.util.Arrays.asList;
+
+import java.lang.reflect.Constructor;
+
+import javax.management.MBeanServer;
+
+import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.db.compaction.CompactionManager;
+import org.apache.cassandra.gms.FailureDetector;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.locator.EndpointSnitchInfo;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.service.CacheService;
+import org.apache.cassandra.service.GCInspector;
+import org.apache.cassandra.service.StorageProxy;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.streaming.StreamManager;
+
+import com.scylladb.jmx.api.APIClient;
+import com.scylladb.jmx.api.APIConfig;
+import com.scylladb.jmx.metrics.APIMBean;
+
+public class Main {
+
+    private static APIConfig config;
+    private static APIClient client;
+
+    public static synchronized APIConfig getApiConfig() {
+        if (config == null) {
+            config = new APIConfig();
+        }
+        return config;
+    }
+
+    public static synchronized APIClient getApiClient() {
+        if (client == null) {
+            client = new APIClient(getApiConfig());
+        }
+        return client;
+    }
+
+    public static void main(String[] args) throws Exception {
+        System.out.printf("Java %s%n", System.getProperty("java.version"));
+        System.out.printf("Connecting to %s%n", getApiConfig().getBaseUrl());
+        System.out.println("Starting the JMX server");
+
+        MBeanServer server = getPlatformMBeanServer();
+        for (Class<? extends APIMBean> clazz : asList(StorageService.class, StorageProxy.class, MessagingService.class,
+                CommitLog.class, Gossiper.class, EndpointSnitchInfo.class, FailureDetector.class, CacheService.class,
+                CompactionManager.class, GCInspector.class, StreamManager.class)) {
+            Constructor<? extends APIMBean> c = clazz.getDeclaredConstructor(APIClient.class);
+            APIMBean m = c.newInstance(getApiClient());
+            server.registerMBean(m, null);
+        }
+
+        try {
+            // forces check for dynamically created mbeans
+            server.queryNames(null, null);
+        } catch (IllegalStateException e) {
+            // ignore this. Just means we started before scylla.
+        }
+
+        String jmxPort = System.getProperty("com.sun.management.jmxremote.port");
+        System.out.println("JMX is enabled to receive remote connections on port: " + jmxPort);
+
+        for (;;) {
+            Thread.sleep(Long.MAX_VALUE);
+        }
+    }
+
+}
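The new Main no longer calls getInstance() on each singleton; it instantiates every MBean reflectively through a common (APIClient) constructor. A JDK-only sketch of that bootstrap shape (Dep, BeanA and BeanB are stand-ins, not the real types):

import java.lang.reflect.Constructor;
import java.util.List;

public class ReflectiveBootstrapDemo {
    static class Dep {} // stands in for APIClient

    public static class BeanA { public BeanA(Dep d) {} }
    public static class BeanB { public BeanB(Dep d) {} }

    public static void main(String[] args) throws Exception {
        Dep dep = new Dep();
        for (Class<?> clazz : List.of(BeanA.class, BeanB.class)) {
            // Look up the one-argument constructor, as main() does with APIClient.
            Constructor<?> c = clazz.getDeclaredConstructor(Dep.class);
            Object bean = c.newInstance(dep);
            System.out.println("created " + bean.getClass().getSimpleName());
        }
    }
}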
src/main/java/com/scylladb/jmx/metrics/APIMBean.java (new file, 195 lines)
@@ -0,0 +1,195 @@
+package com.scylladb.jmx.metrics;
+
+import java.lang.reflect.Field;
+import java.util.EnumSet;
+import java.util.Set;
+import java.util.function.Function;
+import java.util.function.Predicate;
+
+import javax.management.BadAttributeValueExpException;
+import javax.management.BadBinaryOpValueExpException;
+import javax.management.BadStringOperationException;
+import javax.management.InstanceAlreadyExistsException;
+import javax.management.InstanceNotFoundException;
+import javax.management.InvalidApplicationException;
+import javax.management.MBeanRegistration;
+import javax.management.MBeanRegistrationException;
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.NotCompliantMBeanException;
+import javax.management.ObjectName;
+import javax.management.QueryExp;
+
+import com.scylladb.jmx.api.APIClient;
+import com.sun.jmx.mbeanserver.JmxMBeanServer;
+
+/**
+ * Base type for MBeans in scylla-jmx. Wraps auto naming and {@link APIClient}
+ * holding.
+ *
+ * @author calle
+ *
+ */
+public class APIMBean implements MBeanRegistration {
+    protected final APIClient client;
+    protected final String mbeanName;
+
+    public APIMBean(APIClient client) {
+        this(null, client);
+    }
+
+    public APIMBean(String mbeanName, APIClient client) {
+        this.mbeanName = mbeanName;
+        this.client = client;
+    }
+
+    /**
+     * Helper method to add/remove dynamically created MBeans from a server
+     * instance.
+     *
+     * @param server
+     *            The {@link MBeanServer} to check
+     * @param all
+     *            All {@link ObjectName}s that should be bound
+     * @param predicate
+     *            {@link QueryExp} predicate to filter relevant object names.
+     * @param generator
+     *            {@link Function} to create a new MBean instance for a given
+     *            {@link ObjectName}
+     * @return
+     * @throws MalformedObjectNameException
+     */
+    public static boolean checkRegistration(JmxMBeanServer server, Set<ObjectName> all,
+            EnumSet<RegistrationMode> mode, final Predicate<ObjectName> predicate,
+            Function<ObjectName, Object> generator) throws MalformedObjectNameException {
+        Set<ObjectName> registered = queryNames(server, predicate);
+        if (mode.contains(RegistrationMode.Remove)) {
+            for (ObjectName name : registered) {
+                if (!all.contains(name)) {
+                    try {
+                        server.getMBeanServerInterceptor().unregisterMBean(name);
+                    } catch (MBeanRegistrationException | InstanceNotFoundException e) {
+                    }
+                }
+            }
+        }
+
+        int added = 0;
+        if (mode.contains(RegistrationMode.Add)) {
+            for (ObjectName name : all) {
+                if (!registered.contains(name)) {
+                    try {
+                        server.getMBeanServerInterceptor().registerMBean(generator.apply(name), name);
+                        added++;
+                    } catch (InstanceAlreadyExistsException | MBeanRegistrationException
+                            | NotCompliantMBeanException e) {
+                    }
+                }
+            }
+        }
+        return added > 0;
+    }
+
+    /**
+     * Helper method to query {@link ObjectName}s from an {@link MBeanServer}
+     * based on {@link Predicate}
+     *
+     * @param server
+     * @param predicate
+     * @return
+     */
+    public static Set<ObjectName> queryNames(JmxMBeanServer server, final Predicate<ObjectName> predicate) {
+        @SuppressWarnings("serial")
+        Set<ObjectName> registered = server.queryNames(null, new QueryExp() {
+            @Override
+            public void setMBeanServer(MBeanServer s) {
+            }
+
+            @Override
+            public boolean apply(ObjectName name) throws BadStringOperationException, BadBinaryOpValueExpException,
+                    BadAttributeValueExpException, InvalidApplicationException {
+                return predicate.test(name);
+            }
+        });
+        return registered;
+    }
+
+    JmxMBeanServer server;
+    ObjectName name;
+
+    protected final ObjectName getBoundName() {
+        return name;
+    }
+
+    /**
+     * Figure out an {@link ObjectName} for this object based on either
+     * contructor parameter, static field, or just package/class name.
+     *
+     * @return
+     * @throws MalformedObjectNameException
+     */
+    protected ObjectName generateName() throws MalformedObjectNameException {
+        String mbeanName = this.mbeanName;
+        if (mbeanName == null) {
+            Field f;
+            try {
+                f = getClass().getDeclaredField("MBEAN_NAME");
+                f.setAccessible(true);
+                mbeanName = (String) f.get(null);
+            } catch (Throwable t) {
+            }
+        }
+        if (mbeanName == null) {
+            for (Class<?> c : getClass().getInterfaces()) {
+                Field f;
+                try {
+                    f = c.getDeclaredField("OBJECT_NAME");
+                    f.setAccessible(true);
+                    mbeanName = (String) f.get(null);
+                    break;
+                } catch (Throwable t) {
+                }
+            }
+        }
+        if (mbeanName == null) {
+            String name = getClass().getName();
+            int i = name.lastIndexOf('.');
+            mbeanName = name.substring(0, i) + ":type=" + name.substring(i + 1);
+        }
+        return new ObjectName(mbeanName);
+    }
+
+    /**
+     * Keeps track of bound server and optionally generates an
+     * {@link ObjectName} for this instance.
+     */
+    @Override
+    public ObjectName preRegister(MBeanServer server, ObjectName name) throws Exception {
+        if (this.server != null) {
+            throw new IllegalStateException("Can only exist in a single MBeanServer");
+        }
+        this.server = (JmxMBeanServer) server;
+        if (name == null) {
+            name = generateName();
+        }
+        this.name = name;
+
+        return name;
+    }
+
+    @Override
+    public void postRegister(Boolean registrationDone) {
+    }
+
+    @Override
+    public void preDeregister() throws Exception {
+    }
+
+    @Override
+    public void postDeregister() {
+        assert server != null;
+        assert name != null;
+        this.server = null;
+        this.name = null;
+    }
+}
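generateName() resolves an ObjectName in three steps: an explicit constructor argument, then a static MBEAN_NAME field (or an OBJECT_NAME constant on an implemented interface), and finally a name derived from the class itself. A JDK-only sketch of that last fallback:

public class NameFallbackDemo {
    public static void main(String[] args) {
        String name = "com.scylladb.jmx.metrics.APIMBean"; // an example fully qualified class name
        int i = name.lastIndexOf('.');
        String mbeanName = name.substring(0, i) + ":type=" + name.substring(i + 1);
        System.out.println(mbeanName); // com.scylladb.jmx.metrics:type=APIMBean
    }
}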
src/main/java/com/scylladb/jmx/metrics/MetricsMBean.java (new file, 137 lines)
@@ -0,0 +1,137 @@
+package com.scylladb.jmx.metrics;
+
+import static java.util.Arrays.asList;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.function.Predicate;
+import java.util.function.Supplier;
+
+import javax.management.InstanceNotFoundException;
+import javax.management.MBeanRegistrationException;
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+
+import org.apache.cassandra.metrics.Metrics;
+import org.apache.cassandra.metrics.MetricsRegistry;
+
+import com.scylladb.jmx.api.APIClient;
+import com.sun.jmx.mbeanserver.JmxMBeanServer;
+
+/**
+ * Base type for MBeans containing {@link Metrics}.
+ *
+ * @author calle
+ *
+ */
+public abstract class MetricsMBean extends APIMBean {
+    private static final Map<JmxMBeanServer, Map<String, Integer>> registered = new HashMap<>();
+    private static final Object registrationLock = new Object();
+
+    private final Collection<Metrics> metrics;
+
+    public MetricsMBean(APIClient client, Metrics... metrics) {
+        this(null, client, metrics);
+    }
+
+    public MetricsMBean(String mbeanName, APIClient client, Metrics... metrics) {
+        this(mbeanName, client, asList(metrics));
+    }
+
+    public MetricsMBean(String mbeanName, APIClient client, Collection<Metrics> metrics) {
+        super(mbeanName, client);
+        this.metrics = metrics;
+    }
+
+    protected Predicate<ObjectName> getTypePredicate() {
+        String domain = name.getDomain();
+        String type = name.getKeyProperty("type");
+        return n -> {
+            return domain.equals(n.getDomain()) && type.equals(n.getKeyProperty("type"));
+        };
+    }
+
+    // Has to be called with registrationLock hold
+    private static boolean shouldRegisterGlobals(JmxMBeanServer server, String domainAndType, boolean reversed) {
+        Map<String, Integer> serverMap = registered.get(server);
+        if (serverMap == null) {
+            assert !reversed;
+            serverMap = new HashMap<>();
+            serverMap.put(domainAndType, 1);
+            registered.put(server, serverMap);
+            return true;
+        }
+        Integer count = serverMap.get(domainAndType);
+        if (count == null) {
+            assert !reversed;
+            serverMap.put(domainAndType, 1);
+            return true;
+        }
+        if (reversed) {
+            --count;
+            if (count == 0) {
+                serverMap.remove(domainAndType);
+                if (serverMap.isEmpty()) {
+                    registered.remove(server);
+                }
+                return true;
+            }
+            serverMap.put(domainAndType, count);
+            return false;
+        } else {
+            serverMap.put(domainAndType, count + 1);
+        }
+        return false;
+    }
+
+    private void register(MetricsRegistry registry, JmxMBeanServer server, boolean reversed) throws MalformedObjectNameException {
+        // Check if we're the first/last of our type bound/removed.
+        synchronized (registrationLock) {
+            boolean registerGlobals = shouldRegisterGlobals(server, name.getDomain() + ":" + name.getKeyProperty("type"), reversed);
+            if (registerGlobals) {
+                for (Metrics m : metrics) {
+                    m.registerGlobals(registry);
+                }
+            }
+        }
+        for (Metrics m : metrics) {
+            m.register(registry);
+        }
+    }
+
+    @Override
+    public ObjectName preRegister(MBeanServer server, ObjectName name) throws Exception {
+        // Get name etc.
+        name = super.preRegister(server, name);
+        // Register all metrics in server
+        register(new MetricsRegistry(client, (JmxMBeanServer) server), (JmxMBeanServer) server, false);
+        return name;
+    }
+
+    @Override
+    public void postDeregister() {
+        // We're officially unbound. Remove all metrics we added.
+        try {
+            register(new MetricsRegistry(client, server) {
+                // Unbind instead of bind. Yes.
+                @Override
+                public void register(Supplier<MetricMBean> s, ObjectName... objectNames) {
+                    for (ObjectName name : objectNames) {
+                        try {
+                            server.getMBeanServerInterceptor().unregisterMBean(name);
+                        } catch (MBeanRegistrationException | InstanceNotFoundException e) {
+                        }
+                    }
+                }
+            }, server, true);
+        } catch (MalformedObjectNameException e) {
+            // TODO : log?
+        }
+        super.postDeregister();
+    }
+}
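shouldRegisterGlobals() is reference counting per MBeanServer and domain:type key: the first instance bound for a key registers the global metrics, and only the last one unbound removes them. A JDK-only sketch of the counting (single-threaded; the real code holds registrationLock):

import java.util.HashMap;
import java.util.Map;

public class RefCountDemo {
    static final Map<String, Integer> counts = new HashMap<>();

    static boolean firstBind(String key) {
        return counts.merge(key, 1, Integer::sum) == 1;
    }

    static boolean lastUnbind(String key) {
        // Returning null from the remapping function removes the entry.
        return counts.computeIfPresent(key, (k, v) -> v == 1 ? null : v - 1) == null;
    }

    public static void main(String[] args) {
        System.out.println(firstBind("d:ColumnFamilies"));  // true  -> register globals
        System.out.println(firstBind("d:ColumnFamilies"));  // false -> already registered
        System.out.println(lastUnbind("d:ColumnFamilies")); // false -> another holder remains
        System.out.println(lastUnbind("d:ColumnFamilies")); // true  -> last one out, remove globals
    }
}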
src/main/java/com/scylladb/jmx/metrics/RegistrationChecker.java (new file)
@@ -0,0 +1,69 @@
package com.scylladb.jmx.metrics;

import static com.scylladb.jmx.metrics.RegistrationMode.Remove;
import static com.scylladb.jmx.metrics.RegistrationMode.Wait;
import static java.util.EnumSet.allOf;
import static java.util.EnumSet.of;

import java.net.UnknownHostException;
import java.util.EnumSet;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

import javax.management.OperationsException;

import com.scylladb.jmx.api.APIClient;
import com.sun.jmx.mbeanserver.JmxMBeanServer;

/**
 * Helper type to do optional locking for registration. Allows for
 * per-bind-point locks and registration, instead of per-type or per-instance
 * locks, which may be misleading, since for example one instance can be bound
 * to many MBeanServers etc.
 *
 * Also allows for polled checks, i.e. try-lock and either wait or skip. Wait,
 * because we probably should not repeat things hidden by this type too often,
 * and skip because for example a periodic checking task can simply skip if a
 * user-initiated registration check is already being done.
 *
 * @author calle
 */
@SuppressWarnings("restriction")
public abstract class RegistrationChecker {
    private final Lock lock = new ReentrantLock();

    public static final EnumSet<RegistrationMode> REMOVE_NO_WAIT = of(Remove);
    public static final EnumSet<RegistrationMode> ADD_AND_REMOVE = allOf(RegistrationMode.class);

    public final void reap(APIClient client, JmxMBeanServer server) throws OperationsException, UnknownHostException {
        check(client, server, REMOVE_NO_WAIT);
    }

    public final void check(APIClient client, JmxMBeanServer server) throws OperationsException, UnknownHostException {
        check(client, server, ADD_AND_REMOVE);
    }

    public final void check(APIClient client, JmxMBeanServer server, EnumSet<RegistrationMode> mode)
            throws OperationsException, UnknownHostException {
        if (!lock.tryLock()) {
            if (mode.contains(Wait)) {
                // Someone else is doing an update. Since this is JMX, and
                // sloppy, we'll just assume that once they are done, things
                // are good enough.
                lock.lock();
                lock.unlock();
            }
            return;
        }
        try {
            doCheck(client, server, mode);
        } finally {
            lock.unlock();
        }
    }

    protected abstract void doCheck(APIClient client, JmxMBeanServer server, EnumSet<RegistrationMode> mode)
            throws OperationsException, UnknownHostException;
}
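A concrete checker only has to supply doCheck(); callers then pick the entry point that matches their urgency. A hypothetical subclass, purely for illustration (not part of this diff):

    // Hypothetical RegistrationChecker that re-syncs MBeans against the REST API.
    public class ExampleChecker extends RegistrationChecker {
        @Override
        protected void doCheck(APIClient client, JmxMBeanServer server, EnumSet<RegistrationMode> mode)
                throws OperationsException, UnknownHostException {
            // Query current state via `client`; register new MBeans when mode
            // contains Add, and unregister stale ones when it contains Remove.
        }
    }

A periodic reaper calls checker.reap(client, server) (Remove only, no Wait, so it skips instead of queueing behind a concurrent check), while request-path code calls checker.check(client, server), which blocks until any in-flight update finishes.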
src/main/java/com/scylladb/jmx/metrics/RegistrationMode.java (new file)
@@ -0,0 +1,5 @@
package com.scylladb.jmx.metrics;

public enum RegistrationMode {
    Wait, Add, Remove,
}
496 src/main/java/com/scylladb/jmx/utils/APIBuilder.java (new file)
@@ -0,0 +1,496 @@
package com.scylladb.jmx.utils;

/**
 * Copyright 2016 ScyllaDB
 */

import static com.scylladb.jmx.main.Main.getApiClient;
import static com.sun.jmx.mbeanserver.Util.wildmatch;
import static java.util.logging.Level.SEVERE;
import static javax.management.MBeanServerDelegate.DELEGATE_NAME;

import java.security.AccessController;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.logging.Logger;

/*
 * This file is part of Scylla.
 *
 * Scylla is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * Scylla is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
 */

import javax.management.DynamicMBean;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.MBeanServer;
import javax.management.MBeanServerBuilder;
import javax.management.MBeanServerDelegate;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.QueryExp;
import javax.management.RuntimeOperationsException;

import com.sun.jmx.interceptor.DefaultMBeanServerInterceptor;
import com.sun.jmx.mbeanserver.JmxMBeanServer;
import com.sun.jmx.mbeanserver.NamedObject;
import com.sun.jmx.mbeanserver.Repository;

/**
 * This class purposely knows way too much about the inner workings of the
 * Oracle JDK MBeanServer, and perverts them for performance's sake. It is not
 * portable to other MBean implementations.
 */
@SuppressWarnings("restriction")
public class APIBuilder extends MBeanServerBuilder {

    private static final Logger logger = Logger.getLogger(APIBuilder.class.getName());

    private static class TableRepository extends Repository {
        private static final Logger logger = Logger.getLogger(TableRepository.class.getName());

        private final Repository wrapped;

        private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

        private final Map<TableMetricParams, DynamicMBean> tableMBeans = new HashMap<>();

        private static boolean isTableMetricName(ObjectName name) {
            return isTableMetricDomain(name.getDomain());
        }

        private static boolean isTableMetricDomain(String domain) {
            return TableMetricParams.TABLE_METRICS_DOMAIN.equals(domain);
        }

        public TableRepository(String defaultDomain, final Repository repository) {
            super(defaultDomain);
            wrapped = repository;
        }

        @Override
        public String getDefaultDomain() {
            return wrapped.getDefaultDomain();
        }

        @Override
        public boolean contains(final ObjectName name) {
            if (!isTableMetricName(name)) {
                return wrapped.contains(name);
            } else {
                lock.readLock().lock();
                try {
                    return tableMBeans.containsKey(new TableMetricParams(name));
                } finally {
                    lock.readLock().unlock();
                }
            }
        }

        @Override
        public String[] getDomains() {
            final String[] domains = wrapped.getDomains();
            if (tableMBeans.isEmpty()) {
                return domains;
            }
            final String[] res = new String[domains.length + 1];
            System.arraycopy(domains, 0, res, 0, domains.length);
            res[domains.length] = TableMetricParams.TABLE_METRICS_DOMAIN;
            return res;
        }

        @Override
        public Integer getCount() {
            lock.readLock().lock();
            try {
                return wrapped.getCount() + tableMBeans.size();
            } finally {
                lock.readLock().unlock();
            }
        }

        @Override
        public void addMBean(final DynamicMBean bean, final ObjectName name, final RegistrationContext ctx)
                throws InstanceAlreadyExistsException {
            if (!isTableMetricName(name)) {
                wrapped.addMBean(bean, name, ctx);
            } else {
                final TableMetricParams key = new TableMetricParams(name);
                lock.writeLock().lock();
                try {
                    if (tableMBeans.containsKey(key)) {
                        throw new InstanceAlreadyExistsException(name.toString());
                    }
                    tableMBeans.put(key, bean);
                    if (ctx == null) {
                        return;
                    }
                    try {
                        ctx.registering();
                    } catch (RuntimeOperationsException x) {
                        throw x;
                    } catch (RuntimeException x) {
                        throw new RuntimeOperationsException(x);
                    }
                } finally {
                    lock.writeLock().unlock();
                }
            }
        }

        @Override
        public void remove(final ObjectName name, final RegistrationContext ctx) throws InstanceNotFoundException {
            if (!isTableMetricName(name)) {
                wrapped.remove(name, ctx);
            } else {
                final TableMetricParams key = new TableMetricParams(name);
                lock.writeLock().lock();
                try {
                    if (tableMBeans.remove(key) == null) {
                        throw new InstanceNotFoundException(name.toString());
                    }

                    if (ctx == null) {
                        return;
                    }
                    try {
                        ctx.unregistered();
                    } catch (Exception x) {
                        logger.log(SEVERE, "Unexpected error.", x);
                    }
                } finally {
                    lock.writeLock().unlock();
                }
            }
        }

        @Override
        public DynamicMBean retrieve(final ObjectName name) {
            if (!isTableMetricName(name)) {
                return wrapped.retrieve(name);
            } else {
                lock.readLock().lock();
                try {
                    return tableMBeans.get(new TableMetricParams(name));
                } finally {
                    lock.readLock().unlock();
                }
            }
        }

        private void addAll(final Set<NamedObject> res) {
            for (Map.Entry<TableMetricParams, DynamicMBean> e : tableMBeans.entrySet()) {
                try {
                    res.add(new NamedObject(e.getKey().toName(), e.getValue()));
                } catch (MalformedObjectNameException e1) {
                    // This should never happen
                    logger.log(SEVERE, "Unexpected error.", e1);
                }
            }
        }

        private void addAllMatching(final Set<NamedObject> res, final ObjectNamePattern pattern) {
            for (Map.Entry<TableMetricParams, DynamicMBean> e : tableMBeans.entrySet()) {
                try {
                    ObjectName name = e.getKey().toName();
                    if (pattern.matchKeys(name)) {
                        res.add(new NamedObject(name, e.getValue()));
                    }
                } catch (MalformedObjectNameException e1) {
                    // This should never happen
                    logger.log(SEVERE, "Unexpected error.", e1);
                }
            }
        }

        @Override
        public Set<NamedObject> query(final ObjectName pattern, final QueryExp query) {
            Set<NamedObject> res = wrapped.query(pattern, query);

            ObjectName name;
            if (pattern == null || pattern.getCanonicalName().length() == 0 || pattern.equals(ObjectName.WILDCARD)) {
                name = ObjectName.WILDCARD;
            } else {
                name = pattern;
            }

            lock.readLock().lock();
            try {
                // If pattern is not a pattern, retrieve this mbean!
                if (!name.isPattern() && isTableMetricName(name)) {
                    final DynamicMBean bean = tableMBeans.get(new TableMetricParams(name));
                    if (bean != null) {
                        res.add(new NamedObject(name, bean));
                        return res;
                    }
                }

                // All names in all domains
                if (name == ObjectName.WILDCARD) {
                    addAll(res);
                    return res;
                }

                final String canonicalKeyPropertyListString = name.getCanonicalKeyPropertyListString();
                final boolean allNames = canonicalKeyPropertyListString.length() == 0;
                final ObjectNamePattern namePattern = allNames ? null : new ObjectNamePattern(name);

                // All names in default domain
                if (name.getDomain().length() == 0) {
                    if (isTableMetricDomain(getDefaultDomain())) {
                        if (allNames) {
                            addAll(res);
                        } else {
                            addAllMatching(res, namePattern);
                        }
                    }
                    return res;
                }

                if (!name.isDomainPattern()) {
                    if (isTableMetricDomain(getDefaultDomain())) {
                        if (allNames) {
                            addAll(res);
                        } else {
                            addAllMatching(res, namePattern);
                        }
                    }
                    return res;
                }

                // Pattern matching in the domain name (*, ?)
                final String dom2Match = name.getDomain();
                if (wildmatch(TableMetricParams.TABLE_METRICS_DOMAIN, dom2Match)) {
                    if (allNames) {
                        addAll(res);
                    } else {
                        addAllMatching(res, namePattern);
                    }
                }
            } finally {
                lock.readLock().unlock();
            }
            return res;
        }
    }

    private final static class ObjectNamePattern {
        private final String[] keys;
        private final String[] values;
        private final String properties;
        private final boolean isPropertyListPattern;
        private final boolean isPropertyValuePattern;

        /**
         * The ObjectName pattern against which ObjectNames are matched.
         **/
        public final ObjectName pattern;

        /**
         * Builds a new ObjectNamePattern object from an ObjectName pattern.
         *
         * @param pattern The ObjectName pattern under examination.
         **/
        public ObjectNamePattern(ObjectName pattern) {
            this(pattern.isPropertyListPattern(), pattern.isPropertyValuePattern(),
                    pattern.getCanonicalKeyPropertyListString(), pattern.getKeyPropertyList(), pattern);
        }

        /**
         * Builds a new ObjectNamePattern object from an ObjectName pattern's
         * constituents.
         *
         * @param propertyListPattern pattern.isPropertyListPattern().
         * @param propertyValuePattern pattern.isPropertyValuePattern().
         * @param canonicalProps pattern.getCanonicalKeyPropertyListString().
         * @param keyPropertyList pattern.getKeyPropertyList().
         * @param pattern The ObjectName pattern under examination.
         **/
        ObjectNamePattern(boolean propertyListPattern, boolean propertyValuePattern, String canonicalProps,
                Map<String, String> keyPropertyList, ObjectName pattern) {
            this.isPropertyListPattern = propertyListPattern;
            this.isPropertyValuePattern = propertyValuePattern;
            this.properties = canonicalProps;
            final int len = keyPropertyList.size();
            this.keys = new String[len];
            this.values = new String[len];
            int i = 0;
            for (Map.Entry<String, String> entry : keyPropertyList.entrySet()) {
                keys[i] = entry.getKey();
                values[i] = entry.getValue();
                i++;
            }
            this.pattern = pattern;
        }

        /**
         * Returns true if the given ObjectName matches the ObjectName pattern
         * for which this object has been built.
         * WARNING: the domain name is not considered here, because it is
         * assumed not to be a wildcard when this is called. The PropertyList
         * is also assumed not to be zero-length.
         *
         * @param name The ObjectName we want to match against the pattern.
         * @return true if <code>name</code> matches the pattern.
         **/
        public boolean matchKeys(ObjectName name) {
            // If key property value pattern but not key property list
            // pattern, then the number of key properties must be equal.
            if (isPropertyValuePattern && !isPropertyListPattern
                    && (name.getKeyPropertyList().size() != keys.length)) {
                return false;
            }

            // If key property value pattern or key property list pattern,
            // then every property inside the pattern should exist in name.
            if (isPropertyValuePattern || isPropertyListPattern) {
                for (int i = keys.length - 1; i >= 0; i--) {
                    // Find the value in the given object name for the key at
                    // the current index in the receiver.
                    String v = name.getKeyProperty(keys[i]);
                    // Did we find a value for this key?
                    if (v == null) {
                        return false;
                    }
                    // If this property is ok (same key, same value), go to the next.
                    if (isPropertyValuePattern && pattern.isPropertyValuePattern(keys[i])) {
                        // wildmatch key property values:
                        // values[i] is the pattern, v is the string.
                        if (wildmatch(v, values[i])) {
                            continue;
                        }
                        return false;
                    }
                    if (v.equals(values[i])) {
                        continue;
                    }
                    return false;
                }
                return true;
            }

            // If not a pattern, then the canonical names must be equal.
            final String p1 = name.getCanonicalKeyPropertyListString();
            final String p2 = properties;
            return p1.equals(p2);
        }
    }

    public static class TableMetricParams {
        public static final String TABLE_METRICS_DOMAIN = "org.apache.cassandra.metrics";

        private final ObjectName name;

        public TableMetricParams(ObjectName name) {
            this.name = name;
        }

        public ObjectName toName() throws MalformedObjectNameException {
            return name;
        }

        private static boolean equal(Object a, Object b) {
            return (a == null) ? b == null : a.equals(b);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            if (!(o instanceof TableMetricParams)) {
                return false;
            }
            TableMetricParams oo = (TableMetricParams) o;
            return equal(name.getKeyProperty("keyspace"), oo.name.getKeyProperty("keyspace"))
                    && equal(name.getKeyProperty("scope"), oo.name.getKeyProperty("scope"))
                    && equal(name.getKeyProperty("name"), oo.name.getKeyProperty("name"))
                    && equal(name.getKeyProperty("type"), oo.name.getKeyProperty("type"));
        }

        private static int hash(Object o) {
            return o == null ? 0 : o.hashCode();
        }

        private static int safeAdd(int... nums) {
            long res = 0;
            for (int n : nums) {
                res = (res + n) % Integer.MAX_VALUE;
            }
            return (int) res;
        }

        @Override
        public int hashCode() {
            return safeAdd(hash(name.getKeyProperty("keyspace")),
                    hash(name.getKeyProperty("scope")),
                    hash(name.getKeyProperty("name")),
                    hash(name.getKeyProperty("type")));
        }
    }

    @Override
    public MBeanServer newMBeanServer(String defaultDomain, MBeanServer outer, MBeanServerDelegate delegate) {
        // It is important to set |interceptors| to true while creating the
        // JmxMBeanServer. It is required for calls to
        // JmxMBeanServer.setMBeanServerInterceptor() to be allowed.
        JmxMBeanServer nested = (JmxMBeanServer) JmxMBeanServer.newMBeanServer(defaultDomain, outer, delegate, true);
        // This is not very clean; we depend on knowledge of how the Sun/Oracle
        // MBean chain looks internally. But we need haxxor support, so
        // let's replace the interceptor.
        // Note: Removed reflection gunk to eliminate jdk9+ warnings on
        // execution. Also, if we can get by without reflection, it is
        // better.
        final DefaultMBeanServerInterceptor interceptor = new DefaultMBeanServerInterceptor(
                outer != null ? outer : nested, delegate, nested.getMBeanInstantiator(),
                new TableRepository(defaultDomain, new Repository(defaultDomain)));
        nested.setMBeanServerInterceptor(interceptor);
        final MBeanServerDelegate d = nested.getMBeanServerDelegate();

        try {
            // The interceptor needs the delegate present. This is normally
            // done by an inaccessible method in JmxMBeanServer.
            AccessController.doPrivileged(new PrivilegedExceptionAction<Object>() {
                public Object run() throws Exception {
                    interceptor.registerMBean(d, DELEGATE_NAME);
                    return null;
                }
            });
        } catch (PrivilegedActionException e) {
            logger.log(SEVERE, "Unexpected error.", e);
            throw new RuntimeException(e);
        }

        return new APIMBeanServer(getApiClient(), nested);
    }
}
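A builder like this is activated through the standard javax.management.builder.initial mechanism: MBeanServerFactory instantiates the named class instead of the JDK default when creating a server. This diff does not show how the repository's Main wires it up, so the following bootstrap is an illustrative sketch only (it also assumes the agent's APIClient has been initialized, since newMBeanServer() calls Main.getApiClient()):

    // Sketch: installing APIBuilder as the MBeanServerBuilder for this JVM.
    // The property must be set before the first MBeanServer is created.
    public final class BuilderBootstrap {
        public static void main(String[] args) {
            System.setProperty("javax.management.builder.initial", "com.scylladb.jmx.utils.APIBuilder");
            javax.management.MBeanServer server =
                    javax.management.MBeanServerFactory.createMBeanServer("com.scylladb.jmx");
            // Every server created from now on is an APIMBeanServer wrapping a
            // JmxMBeanServer whose interceptor uses TableRepository.
            System.out.println(server.getClass().getName());
        }
    }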
327 src/main/java/com/scylladb/jmx/utils/APIMBeanServer.java (new file)
@@ -0,0 +1,327 @@
package com.scylladb.jmx.utils;

import static java.util.Arrays.asList;
import static java.util.concurrent.Executors.newScheduledThreadPool;
import static java.util.concurrent.TimeUnit.MINUTES;

import java.io.ObjectInputStream;
import java.net.UnknownHostException;
import java.util.Set;
import java.util.concurrent.ScheduledExecutorService;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import java.util.stream.Collectors;

import javax.management.Attribute;
import javax.management.AttributeList;
import javax.management.AttributeNotFoundException;
import javax.management.InstanceAlreadyExistsException;
import javax.management.InstanceNotFoundException;
import javax.management.IntrospectionException;
import javax.management.InvalidAttributeValueException;
import javax.management.ListenerNotFoundException;
import javax.management.MBeanException;
import javax.management.MBeanInfo;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
import javax.management.MalformedObjectNameException;
import javax.management.NotCompliantMBeanException;
import javax.management.NotificationFilter;
import javax.management.NotificationListener;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.OperationsException;
import javax.management.QueryExp;
import javax.management.ReflectionException;
import javax.management.loading.ClassLoaderRepository;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.metrics.StreamingMetrics;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.RegistrationChecker;
import com.sun.jmx.mbeanserver.JmxMBeanServer;

@SuppressWarnings("restriction")
public class APIMBeanServer implements MBeanServer {
    @SuppressWarnings("unused")
    private static final Logger logger = Logger.getLogger(APIMBeanServer.class.getName());
    private static final ScheduledExecutorService executor = newScheduledThreadPool(1);

    private final RegistrationChecker columnFamilyStoreChecker = ColumnFamilyStore.createRegistrationChecker();
    private final RegistrationChecker streamingMetricsChecker = StreamingMetrics.createRegistrationChecker();

    private final APIClient client;
    private final JmxMBeanServer server;

    public APIMBeanServer(APIClient client, JmxMBeanServer server) {
        this.client = client;
        this.server = server;

        executor.scheduleWithFixedDelay(() -> {
            for (RegistrationChecker c : asList(columnFamilyStoreChecker, streamingMetricsChecker)) {
                try {
                    c.reap(client, server);
                } catch (OperationsException | UnknownHostException e) {
                    // TODO: log?
                }
            }
        }, 1, 5, MINUTES);
    }

    private static ObjectInstance prepareForRemote(final ObjectInstance i) {
        return new ObjectInstance(prepareForRemote(i.getObjectName()), i.getClassName());
    }

    private static ObjectName prepareForRemote(final ObjectName n) {
        /*
         * ObjectName.getInstance has changed in JDK (micro) updates so that it
         * no longer applies overridable methods -> the wrong name gets
         * published. Fix by doing explicit ObjectName instantiation.
         */
        try {
            return new ObjectName(n.getCanonicalName());
        } catch (MalformedObjectNameException e) {
            throw new IllegalArgumentException(n.toString());
        }
    }

    @Override
    public ObjectInstance createMBean(String className, ObjectName name) throws ReflectionException,
            InstanceAlreadyExistsException, MBeanRegistrationException, MBeanException, NotCompliantMBeanException {
        return prepareForRemote(server.createMBean(className, name));
    }

    @Override
    public ObjectInstance createMBean(String className, ObjectName name, ObjectName loaderName)
            throws ReflectionException, InstanceAlreadyExistsException, MBeanRegistrationException, MBeanException,
            NotCompliantMBeanException, InstanceNotFoundException {
        return prepareForRemote(server.createMBean(className, name, loaderName));
    }

    @Override
    public ObjectInstance createMBean(String className, ObjectName name, Object[] params, String[] signature)
            throws ReflectionException, InstanceAlreadyExistsException, MBeanRegistrationException, MBeanException,
            NotCompliantMBeanException {
        return prepareForRemote(server.createMBean(className, name, params, signature));
    }

    @Override
    public ObjectInstance createMBean(String className, ObjectName name, ObjectName loaderName, Object[] params,
            String[] signature) throws ReflectionException, InstanceAlreadyExistsException, MBeanRegistrationException,
            MBeanException, NotCompliantMBeanException, InstanceNotFoundException {
        return prepareForRemote(server.createMBean(className, name, loaderName, params, signature));
    }

    @Override
    public ObjectInstance registerMBean(Object object, ObjectName name)
            throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {
        return prepareForRemote(server.registerMBean(object, name));
    }

    @Override
    public void unregisterMBean(ObjectName name) throws InstanceNotFoundException, MBeanRegistrationException {
        server.unregisterMBean(name);
    }

    @Override
    public ObjectInstance getObjectInstance(ObjectName name) throws InstanceNotFoundException {
        checkRegistrations(name);
        return prepareForRemote(server.getObjectInstance(name));
    }

    @Override
    public Set<ObjectName> queryNames(ObjectName name, QueryExp query) {
        checkRegistrations(name);
        return server.queryNames(name, query).stream().map(n -> prepareForRemote(n)).collect(Collectors.toSet());
    }

    @Override
    public Set<ObjectInstance> queryMBeans(ObjectName name, QueryExp query) {
        checkRegistrations(name);
        return server.queryMBeans(name, query).stream().map(i -> prepareForRemote(i)).collect(Collectors.toSet());
    }

    @Override
    public boolean isRegistered(ObjectName name) {
        checkRegistrations(name);
        return server.isRegistered(name);
    }

    @Override
    public Integer getMBeanCount() {
        return server.getMBeanCount();
    }

    @Override
    public Object getAttribute(ObjectName name, String attribute)
            throws MBeanException, AttributeNotFoundException, InstanceNotFoundException, ReflectionException {
        checkRegistrations(name);
        return server.getAttribute(name, attribute);
    }

    @Override
    public AttributeList getAttributes(ObjectName name, String[] attributes)
            throws InstanceNotFoundException, ReflectionException {
        checkRegistrations(name);
        return server.getAttributes(name, attributes);
    }

    @Override
    public void setAttribute(ObjectName name, Attribute attribute) throws InstanceNotFoundException,
            AttributeNotFoundException, InvalidAttributeValueException, MBeanException, ReflectionException {
        checkRegistrations(name);
        server.setAttribute(name, attribute);
    }

    @Override
    public AttributeList setAttributes(ObjectName name, AttributeList attributes)
            throws InstanceNotFoundException, ReflectionException {
        checkRegistrations(name);
        return server.setAttributes(name, attributes);
    }

    @Override
    public Object invoke(ObjectName name, String operationName, Object[] params, String[] signature)
            throws InstanceNotFoundException, MBeanException, ReflectionException {
        checkRegistrations(name);
        return server.invoke(name, operationName, params, signature);
    }

    @Override
    public String getDefaultDomain() {
        return server.getDefaultDomain();
    }

    @Override
    public String[] getDomains() {
        return server.getDomains();
    }

    @Override
    public void addNotificationListener(ObjectName name, NotificationListener listener, NotificationFilter filter,
            Object handback) throws InstanceNotFoundException {
        server.addNotificationListener(name, listener, filter, handback);
    }

    @Override
    public void addNotificationListener(ObjectName name, ObjectName listener, NotificationFilter filter,
            Object handback) throws InstanceNotFoundException {
        server.addNotificationListener(name, listener, filter, handback);
    }

    @Override
    public void removeNotificationListener(ObjectName name, ObjectName listener)
            throws InstanceNotFoundException, ListenerNotFoundException {
        server.removeNotificationListener(name, listener);
    }

    @Override
    public void removeNotificationListener(ObjectName name, ObjectName listener, NotificationFilter filter,
            Object handback) throws InstanceNotFoundException, ListenerNotFoundException {
        server.removeNotificationListener(name, listener, filter, handback);
    }

    @Override
    public void removeNotificationListener(ObjectName name, NotificationListener listener)
            throws InstanceNotFoundException, ListenerNotFoundException {
        server.removeNotificationListener(name, listener);
    }

    @Override
    public void removeNotificationListener(ObjectName name, NotificationListener listener, NotificationFilter filter,
            Object handback) throws InstanceNotFoundException, ListenerNotFoundException {
        server.removeNotificationListener(name, listener, filter, handback);
    }

    @Override
    public MBeanInfo getMBeanInfo(ObjectName name)
            throws InstanceNotFoundException, IntrospectionException, ReflectionException {
        checkRegistrations(name);
        return server.getMBeanInfo(name);
    }

    @Override
    public boolean isInstanceOf(ObjectName name, String className) throws InstanceNotFoundException {
        return server.isInstanceOf(name, className);
    }

    @Override
    public Object instantiate(String className) throws ReflectionException, MBeanException {
        return server.instantiate(className);
    }

    @Override
    public Object instantiate(String className, ObjectName loaderName)
            throws ReflectionException, MBeanException, InstanceNotFoundException {
        return server.instantiate(className, loaderName);
    }

    @Override
    public Object instantiate(String className, Object[] params, String[] signature)
            throws ReflectionException, MBeanException {
        return server.instantiate(className, params, signature);
    }

    @Override
    public Object instantiate(String className, ObjectName loaderName, Object[] params, String[] signature)
            throws ReflectionException, MBeanException, InstanceNotFoundException {
        return server.instantiate(className, loaderName, params, signature);
    }

    @Override
    @Deprecated
    public ObjectInputStream deserialize(ObjectName name, byte[] data)
            throws InstanceNotFoundException, OperationsException {
        return server.deserialize(name, data);
    }

    @Override
    @Deprecated
    public ObjectInputStream deserialize(String className, byte[] data)
            throws OperationsException, ReflectionException {
        return server.deserialize(className, data);
    }

    @Override
    @Deprecated
    public ObjectInputStream deserialize(String className, ObjectName loaderName, byte[] data)
            throws InstanceNotFoundException, OperationsException, ReflectionException {
        return server.deserialize(className, loaderName, data);
    }

    @Override
    public ClassLoader getClassLoaderFor(ObjectName mbeanName) throws InstanceNotFoundException {
        return server.getClassLoaderFor(mbeanName);
    }

    @Override
    public ClassLoader getClassLoader(ObjectName loaderName) throws InstanceNotFoundException {
        return server.getClassLoader(loaderName);
    }

    @Override
    public ClassLoaderRepository getClassLoaderRepository() {
        return server.getClassLoaderRepository();
    }

    static final Pattern tables = Pattern.compile("^\\*?((Index)?ColumnFamil(ies|y)|(Index)?(Table(s)?)?)$");

    private void checkRegistrations(ObjectName name) {
        if (name != null && server.isRegistered(name)) {
            return;
        }

        try {
            String type = name != null ? name.getKeyProperty("type") : null;
            if (type == null || tables.matcher(type).matches()) {
                columnFamilyStoreChecker.check(client, server);
            }
            if (type == null || StreamingMetrics.TYPE_NAME.equals(type)) {
                streamingMetricsChecker.check(client, server);
            }
        } catch (OperationsException | UnknownHostException e) {
            // TODO: log
        }
    }
}
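From a client's point of view the lazy registration is invisible: every metadata or query call funnels through checkRegistrations() first, so table and streaming MBeans materialize on demand. An illustrative remote query (the URL, port, and name pattern are assumptions, not taken from this diff):

    // Sketch: querying table metrics over a standard JMX/RMI connector.
    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public final class QueryTables {
        public static void main(String[] args) throws Exception {
            JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
            try (JMXConnector c = JMXConnectorFactory.connect(url)) {
                MBeanServerConnection conn = c.getMBeanServerConnection();
                // queryNames() triggers checkRegistrations() server-side before answering.
                ObjectName pattern = new ObjectName("org.apache.cassandra.metrics:type=Table,*");
                conn.queryNames(pattern, null).forEach(System.out::println);
            }
        }
    }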
18 src/main/java/com/scylladb/jmx/utils/DateXmlAdapter.java (new file)
@@ -0,0 +1,18 @@
package com.scylladb.jmx.utils;

import jakarta.xml.bind.annotation.adapters.XmlAdapter;
import java.time.Instant;
import java.util.Date;

public class DateXmlAdapter extends XmlAdapter<String, Date> {
    @Override
    public String marshal(Date v) throws Exception {
        return Instant.ofEpochMilli(v.getTime()).toString();
    }

    @Override
    public Date unmarshal(String v) throws Exception {
        return new Date(Instant.parse(v).toEpochMilli());
    }
}
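Typical use is to hang the adapter on a JAXB-mapped date field so it round-trips as an ISO-8601 instant rather than the default xsd:dateTime rendering of java.util.Date. A small illustrative binding (the SnapshotInfo type is hypothetical):

    import jakarta.xml.bind.annotation.XmlRootElement;
    import jakarta.xml.bind.annotation.adapters.XmlJavaTypeAdapter;
    import java.util.Date;

    @XmlRootElement
    public class SnapshotInfo {
        // Marshalled as e.g. "2016-01-01T00:00:00Z" via DateXmlAdapter.
        @XmlJavaTypeAdapter(DateXmlAdapter.class)
        public Date createdAt;
    }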
src/main/java/com/yammer/metrics/core/APICounter.java (removed)
@@ -1,29 +0,0 @@
package com.yammer.metrics.core;

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */

import com.cloudius.urchin.api.APIClient;
import com.yammer.metrics.core.Counter;

public class APICounter extends Counter {
    String url;
    private APIClient c = new APIClient();

    public APICounter(String _url) {
        super();
        url = _url;
    }

    /**
     * Returns the counter's current value.
     *
     * @return the counter's current value
     */
    public long count() {
        return c.getLongValue(url);
    }
}
src/main/java/com/yammer/metrics/core/APIHistogram.java (removed)
@@ -1,201 +0,0 @@
package com.yammer.metrics.core;

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */

import java.lang.reflect.Field;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;

import com.cloudius.urchin.api.APIClient;
import com.yammer.metrics.stats.Sample;
import com.yammer.metrics.stats.Snapshot;

public class APIHistogram extends Histogram {
    Field countField;
    Field minField;
    Field maxField;
    Field sumField;
    Field varianceField;
    Field sampleField;

    long last_update = 0;
    static final long UPDATE_INTERVAL = 50;
    long updateInterval;
    String url;
    private APIClient c = new APIClient();

    private void setFields() {
        try {
            minField = Histogram.class.getDeclaredField("min");
            minField.setAccessible(true);
            maxField = Histogram.class.getDeclaredField("max");
            maxField.setAccessible(true);
            sumField = Histogram.class.getDeclaredField("sum");
            sumField.setAccessible(true);
            varianceField = Histogram.class.getDeclaredField("variance");
            varianceField.setAccessible(true);
            sampleField = Histogram.class.getDeclaredField("sample");
            sampleField.setAccessible(true);
            countField = Histogram.class.getDeclaredField("count");
            countField.setAccessible(true);
            try {
                getCount().set(0);
            } catch (IllegalArgumentException | IllegalAccessException e) {
                // There's no reason to get here,
                // and there's nothing we can do even if we would
            }
        } catch (NoSuchFieldException | SecurityException e) {
            e.printStackTrace();
        }
    }

    public AtomicLong getMin() throws IllegalArgumentException, IllegalAccessException {
        return (AtomicLong) minField.get(this);
    }

    public AtomicLong getMax() throws IllegalArgumentException, IllegalAccessException {
        return (AtomicLong) maxField.get(this);
    }

    public AtomicLong getSum() throws IllegalArgumentException, IllegalAccessException {
        return (AtomicLong) sumField.get(this);
    }

    public AtomicLong getCount() throws IllegalArgumentException, IllegalAccessException {
        return (AtomicLong) countField.get(this);
    }

    @SuppressWarnings("unchecked")
    public AtomicReference<double[]> getVariance() throws IllegalArgumentException, IllegalAccessException {
        return (AtomicReference<double[]>) varianceField.get(this);
    }

    public Sample getSample() throws IllegalArgumentException, IllegalAccessException {
        return (Sample) sampleField.get(this);
    }

    public APIHistogram(String url, Sample sample) {
        super(sample);
        setFields();
        this.url = url;
    }

    public APIHistogram(String url, SampleType type, long updateInterval) {
        super(type);
        setFields();
        this.url = url;
        this.updateInterval = updateInterval;
    }

    public APIHistogram(String url, SampleType type) {
        this(url, type, UPDATE_INTERVAL);
    }

    public void update() {
        long now = System.currentTimeMillis();
        if (now - last_update < UPDATE_INTERVAL) {
            return;
        }
        last_update = now;
        clear();
        HistogramValues vals = c.getHistogramValue(url);
        try {
            if (vals.sample != null) {
                for (long v : vals.sample) {
                    getSample().update(v);
                }
            }
            getCount().set(vals.count);
            getMax().set(vals.max);
            getMin().set(vals.min);
            getSum().set(vals.sum);
            double[] newValue = new double[2];
            newValue[0] = vals.mean;
            newValue[1] = vals.variance;
            getVariance().getAndSet(newValue);
        } catch (IllegalArgumentException | IllegalAccessException e) {
            e.printStackTrace();
        }
    }

    /**
     * Returns the number of values recorded.
     *
     * @return the number of values recorded
     */
    public long count() {
        update();
        return super.count();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.yammer.metrics.core.Summarizable#max()
     */
    @Override
    public double max() {
        update();
        return super.max();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.yammer.metrics.core.Summarizable#min()
     */
    @Override
    public double min() {
        update();
        return super.min();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.yammer.metrics.core.Summarizable#mean()
     */
    @Override
    public double mean() {
        update();
        return super.mean();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.yammer.metrics.core.Summarizable#stdDev()
     */
    @Override
    public double stdDev() {
        update();
        return super.stdDev();
    }

    /*
     * (non-Javadoc)
     *
     * @see com.yammer.metrics.core.Summarizable#sum()
     */
    @Override
    public double sum() {
        update();
        return super.sum();
    }

    @Override
    public Snapshot getSnapshot() {
        update();
        return super.getSnapshot();
    }
}
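The removed histogram worked by prying open Histogram's private fields via reflection. The same pattern in isolation, as a sketch (class and method names are illustrative; on JDK 9+ the owning package would additionally need to be opened to the caller, e.g. via --add-opens):

    import java.lang.reflect.Field;
    import java.util.concurrent.atomic.AtomicLong;

    final class FieldPoke {
        // Returns a live reference to a private AtomicLong field, not a copy,
        // so later set() calls are visible to the owning object.
        static AtomicLong grab(Class<?> declaring, Object target, String fieldName)
                throws ReflectiveOperationException {
            Field f = declaring.getDeclaredField(fieldName);
            f.setAccessible(true); // defeat private access checks
            return (AtomicLong) f.get(target);
        }
    }

APIHistogram.getCount() above is effectively this call with declaring = Histogram.class and fieldName = "count".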
src/main/java/com/yammer/metrics/core/APIMeter.java (removed)
@@ -1,45 +0,0 @@
package com.yammer.metrics.core;

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import com.cloudius.urchin.api.APIClient;

public class APIMeter extends Meter {
    String url;
    private APIClient c = new APIClient();

    public APIMeter(String _url, ScheduledExecutorService tickThread,
            String eventType, TimeUnit rateUnit, Clock clock) {
        super(tickThread, eventType, rateUnit, clock);
        url = _url;
    }

    public long get_value() {
        return c.getLongValue(url);
    }

    // Meter doesn't have a set-value method. To mimic one, we clear the old
    // value and mark the new one in. This is safe because this method is the
    // only place the value is updated.
    public long set(long new_value) {
        long res = super.count();
        mark(-res);
        mark(new_value);
        return res;
    }

    @Override
    void tick() {
        set(get_value());
        super.tick();
    }
}
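The set() trick deserves a second look: Meter only exposes mark(delta), so an absolute value is imposed by first cancelling the current count. A dependency-free model of the same idiom (values and class name are illustrative):

    // Sketch of APIMeter.set(): clear the old count, then mark in the new one.
    final class SettableCount {
        private long count; // stands in for Meter's internal counter

        void mark(long delta) {
            count += delta;
        }

        long set(long newValue) {
            long old = count;  // e.g. 10
            mark(-old);        // count drops to 0
            mark(newValue);    // count becomes e.g. 25
            return old;        // APIMeter.set() likewise returns the previous count
        }
    }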
@ -1,362 +0,0 @@
|
|||||||
package com.yammer.metrics.core;
|
|
||||||
|
|
||||||
import java.lang.reflect.Field;
|
|
||||||
import java.util.concurrent.ConcurrentMap;
|
|
||||||
import java.util.concurrent.ScheduledExecutorService;
|
|
||||||
import java.util.concurrent.TimeUnit;
|
|
||||||
|
|
||||||
import com.yammer.metrics.core.APICounter;
|
|
||||||
import com.yammer.metrics.core.APIMeter;
|
|
||||||
import com.yammer.metrics.core.Clock;
|
|
||||||
import com.yammer.metrics.core.Counter;
|
|
||||||
import com.yammer.metrics.core.Meter;
|
|
||||||
import com.yammer.metrics.core.Metric;
|
|
||||||
import com.yammer.metrics.core.MetricName;
|
|
||||||
import com.yammer.metrics.core.MetricsRegistry;
|
|
||||||
import com.yammer.metrics.core.ThreadPools;
|
|
||||||
import com.yammer.metrics.core.Histogram.SampleType;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Copyright 2015 Cloudius Systems
|
|
||||||
*
|
|
||||||
* Modified by Cloudius Systems
|
|
||||||
*/
|
|
||||||
|
|
||||||
public class APIMetricsRegistry extends MetricsRegistry {
|
|
||||||
Field fieldMetrics;
|
|
||||||
Field fieldClock;
|
|
||||||
Field fieldThreadPool;
|
|
||||||
|
|
||||||
public APIMetricsRegistry() {
|
|
||||||
try {
|
|
||||||
fieldMetrics = MetricsRegistry.class.getDeclaredField("metrics");
|
|
||||||
fieldMetrics.setAccessible(true);
|
|
||||||
fieldClock = MetricsRegistry.class.getDeclaredField("clock");
|
|
||||||
fieldClock.setAccessible(true);
|
|
||||||
fieldThreadPool = MetricsRegistry.class
|
|
||||||
.getDeclaredField("threadPools");
|
|
||||||
fieldThreadPool.setAccessible(true);
|
|
||||||
} catch (NoSuchFieldException | SecurityException e) {
|
|
||||||
// TODO Auto-generated catch block
|
|
||||||
e.printStackTrace();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
public ThreadPools getThreadPools() {
|
|
||||||
try {
|
|
||||||
return (ThreadPools) fieldThreadPool.get(this);
|
|
||||||
} catch (IllegalArgumentException | IllegalAccessException e) {
|
|
||||||
e.printStackTrace();
|
|
||||||
}
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
public Clock getClock() {
|
|
||||||
try {
|
|
||||||
return (Clock) fieldClock.get(this);
|
|
||||||
} catch (IllegalArgumentException | IllegalAccessException e) {
|
|
||||||
e.printStackTrace();
|
|
||||||
}
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
@SuppressWarnings("unchecked")
|
|
||||||
public ConcurrentMap<MetricName, Metric> getMetrics() {
|
|
||||||
try {
|
|
||||||
return (ConcurrentMap<MetricName, Metric>) fieldMetrics.get(this);
|
|
||||||
} catch (IllegalArgumentException | IllegalAccessException e) {
|
|
||||||
e.printStackTrace();
|
|
||||||
}
|
|
||||||
return null;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new {@link Counter} and registers it under the given class and
|
|
||||||
* name.
|
|
||||||
*
|
|
||||||
* @param klass
|
|
||||||
* the class which owns the metric
|
|
||||||
* @param name
|
|
||||||
* the name of the metric
|
|
||||||
* @return a new {@link Counter}
|
|
||||||
*/
|
|
||||||
public Counter newCounter(String url, Class<?> klass, String name) {
|
|
||||||
return newCounter(url, klass, name, null);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new {@link Counter} and registers it under the given class and
|
|
||||||
* name.
|
|
||||||
*
|
|
||||||
* @param klass
|
|
||||||
* the class which owns the metric
|
|
||||||
* @param name
|
|
||||||
* the name of the metric
|
|
||||||
* @param scope
|
|
||||||
* the scope of the metric
|
|
||||||
* @return a new {@link Counter}
|
|
||||||
*/
|
|
||||||
public Counter newCounter(String url, Class<?> klass, String name,
|
|
||||||
String scope) {
|
|
||||||
return newCounter(url, createName(klass, name, scope));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new {@link Counter} and registers it under the given metric
|
|
||||||
* name.
|
|
||||||
*
|
|
||||||
* @param metricName
|
|
||||||
* the name of the metric
|
|
||||||
* @return a new {@link Counter}
|
|
||||||
*/
|
|
||||||
public Counter newCounter(String url, MetricName metricName) {
|
|
||||||
return getOrAdd(metricName, new APICounter(url));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new {@link Meter} and registers it under the given class and
|
|
||||||
* name.
|
|
||||||
*
|
|
||||||
* @param klass
|
|
||||||
* the class which owns the metric
|
|
||||||
* @param name
|
|
||||||
* the name of the metric
|
|
||||||
* @param eventType
|
|
||||||
* the plural name of the type of events the meter is measuring
|
|
||||||
* (e.g., {@code "requests"})
|
|
||||||
* @param unit
|
|
||||||
* the rate unit of the new meter
|
|
||||||
* @return a new {@link Meter}
|
|
||||||
*/
|
|
||||||
public Meter newMeter(String url, Class<?> klass, String name,
|
|
||||||
String eventType, TimeUnit unit) {
|
|
||||||
return newMeter(url, klass, name, null, eventType, unit);
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new {@link Meter} and registers it under the given class, name,
|
|
||||||
* and scope.
|
|
||||||
*
|
|
||||||
* @param klass
|
|
||||||
* the class which owns the metric
|
|
||||||
* @param name
|
|
||||||
* the name of the metric
|
|
||||||
* @param scope
|
|
||||||
* the scope of the metric
|
|
||||||
* @param eventType
|
|
||||||
* the plural name of the type of events the meter is measuring
|
|
||||||
* (e.g., {@code "requests"})
|
|
||||||
* @param unit
|
|
||||||
* the rate unit of the new meter
|
|
||||||
* @return a new {@link Meter}
|
|
||||||
*/
|
|
||||||
public Meter newMeter(String url, Class<?> klass, String name,
|
|
||||||
String scope, String eventType, TimeUnit unit) {
|
|
||||||
return newMeter(url, createName(klass, name, scope), eventType, unit);
|
|
||||||
}
|
|
||||||
|
|
||||||
private ScheduledExecutorService newMeterTickThreadPool() {
|
|
||||||
return getThreadPools().newScheduledThreadPool(2, "meter-tick");
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new {@link Meter} and registers it under the given metric name.
|
|
||||||
*
|
|
||||||
* @param metricName
|
|
||||||
* the name of the metric
|
|
||||||
* @param eventType
|
|
||||||
* the plural name of the type of events the meter is measuring
|
|
||||||
* (e.g., {@code "requests"})
|
|
||||||
* @param unit
|
|
||||||
* the rate unit of the new meter
|
|
||||||
* @return a new {@link Meter}
|
|
||||||
*/
|
|
||||||
public Meter newMeter(String url, MetricName metricName, String eventType,
|
|
||||||
TimeUnit unit) {
|
|
||||||
final Metric existingMetric = getMetrics().get(metricName);
|
|
||||||
if (existingMetric != null) {
|
|
||||||
return (Meter) existingMetric;
|
|
||||||
}
|
|
||||||
return getOrAdd(metricName, new APIMeter(url, newMeterTickThreadPool(),
|
|
||||||
eventType, unit, getClock()));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* Creates a new {@link Histogram} and registers it under the given class
|
|
||||||
* and name.
|
|
||||||
*
|
|
||||||
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param biased
     *            whether or not the histogram should be biased
     * @return a new {@link Histogram}
     */
    public Histogram newHistogram(String url, Class<?> klass, String name,
            boolean biased) {
        return newHistogram(url, klass, name, null, biased);
    }

    /**
     * Creates a new {@link Histogram} and registers it under the given class,
     * name, and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @param biased
     *            whether or not the histogram should be biased
     * @return a new {@link Histogram}
     */
    public Histogram newHistogram(String url, Class<?> klass, String name,
            String scope, boolean biased) {
        return newHistogram(url, createName(klass, name, scope), biased);
    }

    /**
     * Creates a new non-biased {@link Histogram} and registers it under the
     * given class and name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @return a new {@link Histogram}
     */
    public Histogram newHistogram(String url, Class<?> klass, String name) {
        return newHistogram(url, klass, name, false);
    }

    /**
     * Creates a new non-biased {@link Histogram} and registers it under the
     * given class, name, and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @return a new {@link Histogram}
     */
    public Histogram newHistogram(String url, Class<?> klass, String name,
            String scope) {
        return newHistogram(url, klass, name, scope, false);
    }

    /**
     * Creates a new {@link Histogram} and registers it under the given metric
     * name.
     *
     * @param metricName
     *            the name of the metric
     * @param biased
     *            whether or not the histogram should be biased
     * @return a new {@link Histogram}
     */
    public Histogram newHistogram(String url, MetricName metricName,
            boolean biased) {
        return getOrAdd(metricName, new APIHistogram(url,
                biased ? SampleType.BIASED : SampleType.UNIFORM));
    }

    /**
     * Creates a new {@link Timer} and registers it under the given class and
     * name, measuring elapsed time in milliseconds and invocations per second.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @return a new {@link Timer}
     */
    public Timer newTimer(String url, Class<?> klass, String name) {
        return newTimer(url, klass, name, null, TimeUnit.MILLISECONDS,
                TimeUnit.SECONDS);
    }

    /**
     * Creates a new {@link Timer} and registers it under the given class and
     * name.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param durationUnit
     *            the duration scale unit of the new timer
     * @param rateUnit
     *            the rate scale unit of the new timer
     * @return a new {@link Timer}
     */
    public Timer newTimer(String url, Class<?> klass, String name,
            TimeUnit durationUnit, TimeUnit rateUnit) {
        return newTimer(url, klass, name, null, durationUnit, rateUnit);
    }

    /**
     * Creates a new {@link Timer} and registers it under the given class, name,
     * and scope, measuring elapsed time in milliseconds and invocations per
     * second.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @return a new {@link Timer}
     */
    public Timer newTimer(String url, Class<?> klass, String name, String scope) {
        return newTimer(url, klass, name, scope, TimeUnit.MILLISECONDS,
                TimeUnit.SECONDS);
    }

    /**
     * Creates a new {@link Timer} and registers it under the given class, name,
     * and scope.
     *
     * @param klass
     *            the class which owns the metric
     * @param name
     *            the name of the metric
     * @param scope
     *            the scope of the metric
     * @param durationUnit
     *            the duration scale unit of the new timer
     * @param rateUnit
     *            the rate scale unit of the new timer
     * @return a new {@link Timer}
     */
    public Timer newTimer(String url, Class<?> klass, String name,
            String scope, TimeUnit durationUnit, TimeUnit rateUnit) {
        return newTimer(url, createName(klass, name, scope), durationUnit,
                rateUnit);
    }

    /**
     * Creates a new {@link Timer} and registers it under the given metric name.
     *
     * @param metricName
     *            the name of the metric
     * @param durationUnit
     *            the duration scale unit of the new timer
     * @param rateUnit
     *            the rate scale unit of the new timer
     * @return a new {@link Timer}
     */
    public Timer newTimer(String url, MetricName metricName,
            TimeUnit durationUnit, TimeUnit rateUnit) {
        final Metric existingMetric = getMetrics().get(metricName);
        if (existingMetric != null) {
            return (Timer) existingMetric;
        }
        return getOrAdd(metricName, new APITimer(url, newMeterTickThreadPool(),
                durationUnit, rateUnit, getClock()));
    }

}
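The overload chain above is easiest to see in miniature. The sketch below is a toy reduction, not the project's API: every convenience overload fills in defaults (null scope, UNIFORM sampling, milliseconds/seconds) and delegates to one full variant, so caching via getOrAdd happens in exactly one place.

```java
import java.util.concurrent.TimeUnit;

// Toy reduction of the overload funnel above; names and the record type are
// illustrative, not part of the real registry.
public class OverloadFunnelSketch {
    record TimerSpec(String url, String name, String scope,
                     TimeUnit durationUnit, TimeUnit rateUnit) {
    }

    // Mirrors newTimer(url, klass, name): defaults applied, then delegate.
    static TimerSpec newTimer(String url, String name) {
        return newTimer(url, name, null, TimeUnit.MILLISECONDS, TimeUnit.SECONDS);
    }

    // The single "full" variant every overload funnels into.
    static TimerSpec newTimer(String url, String name, String scope,
                              TimeUnit durationUnit, TimeUnit rateUnit) {
        return new TimerSpec(url, name, scope, durationUnit, rateUnit);
    }

    public static void main(String[] args) {
        // Hypothetical REST path; the real code derives it from the metric.
        System.out.println(newTimer("/column_family/metrics/read_latency/ks:cf", "ReadLatency"));
    }
}
```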
Deleted file: APITimer.java (package com.yammer.metrics.core)
@@ -1,44 +0,0 @@
-/*
- * Copyright 2015 Cloudius Systems
- *
- */
-package com.yammer.metrics.core;
-
-import java.lang.reflect.Field;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
-import com.yammer.metrics.core.Histogram.SampleType;
-
-/**
- * A timer metric which aggregates timing durations and provides duration
- * statistics, plus throughput statistics via {@link Meter}.
- */
-public class APITimer extends Timer {
-
-    public APITimer(String url, ScheduledExecutorService tickThread,
-            TimeUnit durationUnit, TimeUnit rateUnit) {
-        super(tickThread, durationUnit, rateUnit);
-        setHistogram(url);
-    }
-
-    public APITimer(String url, ScheduledExecutorService tickThread,
-            TimeUnit durationUnit, TimeUnit rateUnit, Clock clock) {
-        super(tickThread, durationUnit, rateUnit, clock);
-        setHistogram(url);
-    }
-
-    private void setHistogram(String url) {
-        Field histogram;
-        try {
-            histogram = Timer.class.getDeclaredField("histogram");
-            histogram.setAccessible(true);
-            histogram.set(this, new APIHistogram(url, SampleType.BIASED));
-        } catch (NoSuchFieldException | SecurityException
-                | IllegalArgumentException | IllegalAccessException e) {
-            // TODO Auto-generated catch block
-            e.printStackTrace();
-        }
-    }
-
-}
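The deleted APITimer has no way to pass a custom Histogram to Yammer's Timer, so it overwrites the private field after construction. A self-contained sketch of that field-swap technique, using toy classes rather than the metrics library:

```java
import java.lang.reflect.Field;

public class FieldSwapSketch {
    static class Base {
        private String engine = "default";

        String engine() {
            return engine;
        }
    }

    public static void main(String[] args) throws ReflectiveOperationException {
        Base b = new Base();
        // Same moves as APITimer.setHistogram: find the private field on the
        // parent class, open it, and drop in the replacement.
        Field f = Base.class.getDeclaredField("engine");
        f.setAccessible(true);
        f.set(b, "replacement");
        System.out.println(b.engine()); // prints "replacement"
    }
}
```

On a modern JDK the setAccessible call only succeeds across module boundaries when the target package is opened, which is exactly what the module descriptor added later in this comparison provides.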
Deleted file: HistogramValues.java (package com.yammer.metrics.core)
@@ -1,11 +0,0 @@
-package com.yammer.metrics.core;
-
-public class HistogramValues {
-    public long count;
-    public long min;
-    public long max;
-    public long sum;
-    public double variance;
-    public double mean;
-    public long sample[];
-}
New file: src/main/java/module-info.java (16 lines)
@@ -0,0 +1,16 @@
+module scylla.jmx {
+    opens com.scylladb.jmx.utils;
+    exports com.scylladb.jmx.utils;
+    opens com.scylladb.jmx.main;
+    exports com.scylladb.jmx.main;
+    opens com.scylladb.jmx.metrics;
+    exports com.scylladb.jmx.metrics;
+    requires java.logging;
+    requires java.management;
+    requires scylla.apiclient;
+    requires jakarta.json;
+    requires jakarta.ws.rs;
+    requires com.google.common;
+    requires jakarta.xml.bind;
+    requires com.fasterxml.jackson.annotation;
+}
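The `opens` directives matter because this codebase leans on deep reflection (the APITimer field swap above, plus JAXB and Jackson binding via jakarta.xml.bind and com.fasterxml.jackson.annotation); `exports` alone only grants compile-time visibility. A hedged sketch of the run-time difference; the field loop is purely illustrative:

```java
import java.lang.reflect.Field;

public class OpensSketch {
    public static void main(String[] args) throws Exception {
        // RegistrationChecker lives in com.scylladb.jmx.metrics, one of the
        // packages the descriptor above both exports and opens.
        Class<?> cls = Class.forName("com.scylladb.jmx.metrics.RegistrationChecker");
        for (Field f : cls.getDeclaredFields()) {
            // From another module this succeeds only because of
            // `opens com.scylladb.jmx.metrics`; with `exports` alone it
            // would throw InaccessibleObjectException.
            f.setAccessible(true);
        }
        System.out.println("deep reflection on " + cls.getName() + " is permitted");
    }
}
```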
ColumnFamilyStore.java (package org.apache.cassandra.db)
@@ -23,61 +23,146 @@
  */
 package org.apache.cassandra.db;
 
-import java.lang.management.ManagementFactory;
-import java.net.ConnectException;
-import java.util.*;
-import java.util.concurrent.*;
+import static jakarta.json.Json.createObjectBuilder;
+import static java.lang.String.valueOf;
+import static java.util.Arrays.asList;
+import static java.util.stream.Collectors.toMap;
 
-import javax.json.JsonArray;
-import javax.json.JsonObject;
-import javax.management.*;
+import jakarta.json.Json;
+import jakarta.json.JsonArray;
+import jakarta.json.JsonObject;
+import jakarta.json.JsonObjectBuilder;
+import jakarta.json.JsonReader;
+import jakarta.ws.rs.core.MultivaluedHashMap;
+import jakarta.ws.rs.core.MultivaluedMap;
+import java.io.StringReader;
+import java.io.OutputStream;
+import java.util.Collections;
+import java.util.EnumSet;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
+import java.util.logging.Logger;
 
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import javax.management.OperationsException;
 import javax.management.openmbean.CompositeData;
+import javax.management.openmbean.CompositeDataSupport;
+import javax.management.openmbean.CompositeType;
 import javax.management.openmbean.OpenDataException;
-import javax.ws.rs.ProcessingException;
-import javax.ws.rs.core.MultivaluedHashMap;
-import javax.ws.rs.core.MultivaluedMap;
+import javax.management.openmbean.OpenType;
+import javax.management.openmbean.SimpleType;
+import javax.management.openmbean.TabularDataSupport;
+import javax.management.openmbean.TabularType;
 
-import org.apache.cassandra.metrics.ColumnFamilyMetrics;
+import org.apache.cassandra.metrics.TableMetrics;
 
-import com.cloudius.urchin.api.APIClient;
+import com.scylladb.jmx.api.APIClient;
+import com.scylladb.jmx.metrics.MetricsMBean;
+import com.scylladb.jmx.metrics.RegistrationChecker;
+import com.scylladb.jmx.metrics.RegistrationMode;
+import com.sun.jmx.mbeanserver.JmxMBeanServer;
 import com.google.common.base.Throwables;
 
-public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
-    private static final java.util.logging.Logger logger = java.util.logging.Logger
-            .getLogger(ColumnFamilyStore.class.getName());
-    private APIClient c = new APIClient();
-    private String type;
-    private String keyspace;
-    private String name;
-    private String mbeanName;
-    static final int INTERVAL = 1000; // update every 1second
-    public final ColumnFamilyMetrics metric;
-
-    private static Map<String, ColumnFamilyStore> cf = new HashMap<String, ColumnFamilyStore>();
-    private static Timer timer = new Timer("Column Family");
+public class ColumnFamilyStore extends MetricsMBean implements ColumnFamilyStoreMBean {
+    private static final Logger logger = Logger.getLogger(ColumnFamilyStore.class.getName());
+    @SuppressWarnings("unused")
+    private final String type;
+    private final String keyspace;
+    private final String name;
+    private static final String[] COUNTER_NAMES = new String[]{"raw", "count", "error", "string"};
+    private static final String[] COUNTER_DESCS = new String[]
+    { "partition key in raw hex bytes", // Table name and comments match Cassandra, we will use the partition key
+      "value of this partition for given sampler",
+      "value is within the error bounds plus or minus of this",
+      "the partition key turned into a human readable format" };
+    private static final CompositeType COUNTER_COMPOSITE_TYPE;
+    private static final TabularType COUNTER_TYPE;
+
+    private static final String[] SAMPLER_NAMES = new String[]{"cardinality", "partitions"};
+    private static final String[] SAMPLER_DESCS = new String[]
+    { "cardinality of partitions",
+      "list of counter results" };
+
+    private static final String SAMPLING_RESULTS_NAME = "SAMPLING_RESULTS";
+    private static final CompositeType SAMPLING_RESULT;
+
+    public static final String SNAPSHOT_TRUNCATE_PREFIX = "truncated";
+    public static final String SNAPSHOT_DROP_PREFIX = "dropped";
+    private JsonObject tableSamplerResult = null;
+
+    private Future<JsonObject> futureTableSamperResult = null;
+    private ExecutorService service = null;
+
+    static {
+        try {
+            OpenType<?>[] counterTypes = new OpenType[] { SimpleType.STRING, SimpleType.LONG, SimpleType.LONG, SimpleType.STRING };
+            COUNTER_COMPOSITE_TYPE = new CompositeType(SAMPLING_RESULTS_NAME, SAMPLING_RESULTS_NAME, COUNTER_NAMES, COUNTER_DESCS, counterTypes);
+            COUNTER_TYPE = new TabularType(SAMPLING_RESULTS_NAME, SAMPLING_RESULTS_NAME, COUNTER_COMPOSITE_TYPE, COUNTER_NAMES);
+
+            OpenType<?>[] samplerTypes = new OpenType[] { SimpleType.LONG, COUNTER_TYPE };
+            SAMPLING_RESULT = new CompositeType(SAMPLING_RESULTS_NAME, SAMPLING_RESULTS_NAME, SAMPLER_NAMES, SAMPLER_DESCS, samplerTypes);
+        } catch (OpenDataException e) {
+            throw Throwables.propagate(e);
+        }
+    }
+
+    protected synchronized void startTableSampling(MultivaluedMap<String, String> queryParams) {
+        if (futureTableSamperResult != null) {
+            return;
+        }
+        futureTableSamperResult = service.submit(() -> {
+            tableSamplerResult = client.getJsonObj("column_family/toppartitions/" + getCFName(), queryParams);
+            return null;
+        });
+    }
+
+    /*
+     * Wait until the action is completed
+     * It is safe to call this method multiple times
+     */
+    public synchronized void waitUntilSamplingCompleted() {
+        try {
+            if (futureTableSamperResult != null) {
+                futureTableSamperResult.get();
+                futureTableSamperResult = null;
+            }
+        } catch (InterruptedException | ExecutionException e) {
+            futureTableSamperResult = null;
+            throw new RuntimeException("Failed getting table statistics", e);
+        }
+    }
+
+    public static final Set<String> TYPE_NAMES = new HashSet<>(asList("ColumnFamilies", "IndexTables", "Tables"));
 
     public void log(String str) {
-        logger.info(str);
+        logger.finest(str);
     }
 
-    public static void register_mbeans() {
-        TimerTask taskToExecute = new CheckRegistration();
-        timer.schedule(taskToExecute, 100, INTERVAL);
-    }
-
-    public ColumnFamilyStore(String type, String keyspace, String name) {
+    public ColumnFamilyStore(APIClient client, String type, String keyspace, String name) {
+        super(client,
+                new TableMetrics(keyspace, name, false /* hardcoded for now */));
         this.type = type;
         this.keyspace = keyspace;
         this.name = name;
-        mbeanName = getName(type, keyspace, name);
-        try {
-            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-            ObjectName nameObj = new ObjectName(mbeanName);
-            mbs.registerMBean(this, nameObj);
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-        metric = new ColumnFamilyMetrics(this);
+        service = Executors.newSingleThreadExecutor();
+    }
+
+    public ColumnFamilyStore(APIClient client, ObjectName name) {
+        this(client, name.getKeyProperty("type"), name.getKeyProperty("keyspace"), name.getKeyProperty("columnfamily"));
     }
 
     /** true if this CFS contains secondary index data */
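startTableSampling and waitUntilSamplingCompleted in the hunk above form a submit-once/join-later idiom on a single-thread executor: the first call kicks off the REST request, repeat calls are no-ops, and the join clears the Future whether it succeeded or failed. A stripped-down sketch of the same shape, with the REST call stubbed out:

```java
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class SamplingPatternSketch {
    private final ExecutorService service = Executors.newSingleThreadExecutor();
    private Future<?> pending;
    private volatile String result;

    // Mirrors startTableSampling: idempotent submit, result stored as a side effect.
    synchronized void start() {
        if (pending != null) {
            return;
        }
        pending = service.submit(() -> {
            result = "sampled"; // stands in for the blocking REST call
            return null;
        });
    }

    // Mirrors waitUntilSamplingCompleted: safe to call repeatedly, always
    // clears the pending future before propagating a failure.
    synchronized void join() {
        try {
            if (pending != null) {
                pending.get();
                pending = null;
            }
        } catch (InterruptedException | ExecutionException e) {
            pending = null;
            throw new RuntimeException("sampling failed", e);
        }
    }

    public static void main(String[] args) {
        SamplingPatternSketch s = new SamplingPatternSketch();
        s.start();
        s.start(); // no-op: a sample is already pending
        s.join();
        System.out.println(s.result);
        s.service.shutdown();
    }
}
```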
@@ -97,422 +182,96 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
         return keyspace + ":" + name;
     }
 
-    private static String getName(String type, String keyspace, String name) {
-        return "org.apache.cassandra.db:type=" + type + ",keyspace=" + keyspace
-                + ",columnfamily=" + name;
+    private static ObjectName getName(String type, String keyspace, String name) throws MalformedObjectNameException {
+        return new ObjectName(
+                "org.apache.cassandra.db:type=" + type + ",keyspace=" + keyspace + ",columnfamily=" + name);
     }
 
-    private static final class CheckRegistration extends TimerTask {
-        private APIClient c = new APIClient();
-        private int missed_response = 0;
-        // After MAX_RETRY retry we assume the API is not available
-        // and the jmx will shutdown
-        private static final int MAX_RETRY = 30;
-
-        @Override
-        public void run() {
-            try {
-                JsonArray mbeans = c.getJsonArray("/column_family/");
-                Set<String> all_cf = new HashSet<String>();
-                for (int i = 0; i < mbeans.size(); i++) {
-                    JsonObject mbean = mbeans.getJsonObject(i);
-                    String name = getName(mbean.getString("type"),
-                            mbean.getString("ks"), mbean.getString("cf"));
-                    if (!cf.containsKey(name)) {
-                        ColumnFamilyStore cfs = new ColumnFamilyStore(
-                                mbean.getString("type"), mbean.getString("ks"),
-                                mbean.getString("cf"));
-                        cf.put(name, cfs);
-                    }
-                    all_cf.add(name);
-                }
-                // removing deleted column family
-                for (String n : cf.keySet()) {
-                    if (!all_cf.contains(n)) {
-                        cf.remove(n);
-                    }
-                }
-                missed_response = 0;
-            } catch (ProcessingException e) {
-                if (Throwables.getRootCause(e) instanceof ConnectException) {
-                    if (missed_response++ > MAX_RETRY) {
-                        System.err.println("API is not available, JMX is shuting down");
-                        System.exit(-1);
-                    }
-                } else {
-                    // ignoring exceptions, will retry on the next interval
-                }
-            } catch (Exception e) {
-                // ignoring exceptions, will retry on the next interval
-            }
-        }
-    }
+    public static RegistrationChecker createRegistrationChecker() {
+        return new RegistrationChecker() {
+            @Override
+            protected void doCheck(APIClient client, JmxMBeanServer server, EnumSet<RegistrationMode> mode)
+                    throws OperationsException {
+                JsonArray mbeans = client.getJsonArray("/column_family/");
+                Set<ObjectName> all = new HashSet<ObjectName>();
+                for (int i = 0; i < mbeans.size(); i++) {
+                    JsonObject mbean = mbeans.getJsonObject(i);
+                    all.add(getName(mbean.getString("type"), mbean.getString("ks"), mbean.getString("cf")));
+                }
+                checkRegistration(server, all, mode,
+                        n -> TYPE_NAMES.contains(n.getKeyProperty("type")), n -> new ColumnFamilyStore(client, n));
+            }
+        };
+    }
 
     /**
      * @return the name of the column family
      */
+    @Override
     public String getColumnFamilyName() {
         log(" getColumnFamilyName()");
         return name;
     }
 
-    /**
-     * Returns the total amount of data stored in the memtable, including column
-     * related overhead.
-     *
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableOnHeapSize
-     * @return The size in bytes.
-     * @deprecated
-     */
-    @Deprecated
-    public long getMemtableDataSize() {
-        log(" getMemtableDataSize()");
-        return c.getLongValue("/column_family/metrics/memtable_on_heap_size/" + getCFName());
-    }
-
-    /**
-     * Returns the total number of columns present in the memtable.
-     *
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableColumnsCount
-     * @return The number of columns.
-     */
-    @Deprecated
-    public long getMemtableColumnsCount() {
-        log(" getMemtableColumnsCount()");
-        return metric.memtableColumnsCount.value();
-    }
-
-    /**
-     * Returns the number of times that a flush has resulted in the memtable
-     * being switched out.
-     *
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableSwitchCount
-     * @return the number of memtable switches
-     */
-    @Deprecated
-    public int getMemtableSwitchCount() {
-        log(" getMemtableSwitchCount()");
-        return c.getIntValue("/column_family/metrics/memtable_switch_count/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentSSTablesPerRead
-     * @return a histogram of the number of sstable data files accessed per
-     *         read: reading this property resets it
-     */
-    @Deprecated
-    public long[] getRecentSSTablesPerReadHistogram() {
-        log(" getRecentSSTablesPerReadHistogram()");
-        return metric.getRecentSSTablesPerRead();
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#sstablesPerReadHistogram
-     * @return a histogram of the number of sstable data files accessed per read
-     */
-    @Deprecated
-    public long[] getSSTablesPerReadHistogram() {
-        log(" getSSTablesPerReadHistogram()");
-        return metric.sstablesPerRead.getBuckets(false);
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
-     * @return the number of read operations on this column family
-     */
-    @Deprecated
-    public long getReadCount() {
-        log(" getReadCount()");
-        return c.getIntValue("/column_family/metrics/read/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
-     * @return total read latency (divide by getReadCount() for average)
-     */
-    @Deprecated
-    public long getTotalReadLatencyMicros() {
-        log(" getTotalReadLatencyMicros()");
-        return c.getLongValue("/column_family/metrics/read_latency/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
-     * @return an array representing the latency histogram
-     */
-    @Deprecated
-    public long[] getLifetimeReadLatencyHistogramMicros() {
-        log(" getLifetimeReadLatencyHistogramMicros()");
-        return metric.readLatency.totalLatencyHistogram.getBuckets(false);
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
-     * @return an array representing the latency histogram
-     */
-    @Deprecated
-    public long[] getRecentReadLatencyHistogramMicros() {
-        log(" getRecentReadLatencyHistogramMicros()");
-        return metric.readLatency.getRecentLatencyHistogram();
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
-     * @return average latency per read operation since the last call
-     */
-    @Deprecated
-    public double getRecentReadLatencyMicros() {
-        log(" getRecentReadLatencyMicros()");
-        return metric.readLatency.getRecentLatency();
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
-     * @return the number of write operations on this column family
-     */
-    @Deprecated
-    public long getWriteCount() {
-        log(" getWriteCount()");
-        return c.getLongValue("/column_family/metrics/write/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
-     * @return total write latency (divide by getReadCount() for average)
-     */
-    @Deprecated
-    public long getTotalWriteLatencyMicros() {
-        log(" getTotalWriteLatencyMicros()");
-        return c.getLongValue("/column_family/metrics/write_latency/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
-     * @return an array representing the latency histogram
-     */
-    @Deprecated
-    public long[] getLifetimeWriteLatencyHistogramMicros() {
-        log(" getLifetimeWriteLatencyHistogramMicros()");
-        return metric.writeLatency.totalLatencyHistogram.getBuckets(false);
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
-     * @return an array representing the latency histogram
-     */
-    @Deprecated
-    public long[] getRecentWriteLatencyHistogramMicros() {
-        log(" getRecentWriteLatencyHistogramMicros()");
-        return metric.writeLatency.getRecentLatencyHistogram();
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
-     * @return average latency per write operation since the last call
-     */
-    @Deprecated
-    public double getRecentWriteLatencyMicros() {
-        log(" getRecentWriteLatencyMicros()");
-        return metric.writeLatency.getRecentLatency();
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#pendingFlushes
-     * @return the estimated number of tasks pending for this column family
-     */
-    @Deprecated
-    public int getPendingTasks() {
-        log(" getPendingTasks()");
-        return c.getIntValue("/column_family/metrics/pending_flushes/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveSSTableCount
-     * @return the number of SSTables on disk for this CF
-     */
-    @Deprecated
-    public int getLiveSSTableCount() {
-        log(" getLiveSSTableCount()");
-        return c.getIntValue("/column_family/metrics/live_ss_table_count/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveDiskSpaceUsed
-     * @return disk space used by SSTables belonging to this CF
-     */
-    @Deprecated
-    public long getLiveDiskSpaceUsed() {
-        log(" getLiveDiskSpaceUsed()");
-        return c.getLongValue("/column_family/metrics/live_disk_space_used/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#totalDiskSpaceUsed
-     * @return total disk space used by SSTables belonging to this CF, including
-     *         obsolete ones waiting to be GC'd
-     */
-    @Deprecated
-    public long getTotalDiskSpaceUsed() {
-        log(" getTotalDiskSpaceUsed()");
-        return c.getLongValue("/column_family/metrics/total_disk_space_used/" + getCFName());
-    }
-
     /**
      * force a major compaction of this column family
      */
-    public void forceMajorCompaction()
-            throws ExecutionException, InterruptedException {
+    public void forceMajorCompaction() throws ExecutionException, InterruptedException {
         log(" forceMajorCompaction() throws ExecutionException, InterruptedException");
-        c.post("column_family/major_compaction/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#minRowSize
-     * @return the size of the smallest compacted row
-     */
-    @Deprecated
-    public long getMinRowSize() {
-        log(" getMinRowSize()");
-        return c.getLongValue("/column_family/metrics/min_row_size/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#maxRowSize
-     * @return the size of the largest compacted row
-     */
-    @Deprecated
-    public long getMaxRowSize() {
-        log(" getMaxRowSize()");
-        return c.getLongValue("/column_family/metrics/max_row_size/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#meanRowSize
-     * @return the average row size across all the sstables
-     */
-    @Deprecated
-    public long getMeanRowSize() {
-        log(" getMeanRowSize()");
-        return c.getLongValue("/column_family/metrics/mean_row_size/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalsePositives
-     */
-    @Deprecated
-    public long getBloomFilterFalsePositives() {
-        log(" getBloomFilterFalsePositives()");
-        return c.getLongValue("/column_family/metrics/bloom_filter_false_positives/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalsePositives
-     */
-    @Deprecated
-    public long getRecentBloomFilterFalsePositives() {
-        log(" getRecentBloomFilterFalsePositives()");
-        return c.getLongValue("/column_family/metrics/recent_bloom_filter_false_positives/" +getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalseRatio
-     */
-    @Deprecated
-    public double getBloomFilterFalseRatio() {
-        log(" getBloomFilterFalseRatio()");
-        return c.getDoubleValue("/column_family/metrics/bloom_filter_false_ratio/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalseRatio
-     */
-    @Deprecated
-    public double getRecentBloomFilterFalseRatio() {
-        log(" getRecentBloomFilterFalseRatio()");
-        return c.getDoubleValue("/column_family/metrics/recent_bloom_filter_false_ratio/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterDiskSpaceUsed
-     */
-    @Deprecated
-    public long getBloomFilterDiskSpaceUsed() {
-        log(" getBloomFilterDiskSpaceUsed()");
-        return c.getLongValue("/column_family/metrics/bloom_filter_disk_space_used/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterOffHeapMemoryUsed
-     */
-    @Deprecated
-    public long getBloomFilterOffHeapMemoryUsed() {
-        log(" getBloomFilterOffHeapMemoryUsed()");
-        return c.getLongValue("/column_family/metrics/bloom_filter_off_heap_memory_used/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#indexSummaryOffHeapMemoryUsed
-     */
-    @Deprecated
-    public long getIndexSummaryOffHeapMemoryUsed() {
-        log(" getIndexSummaryOffHeapMemoryUsed()");
-        return c.getLongValue("/column_family/metrics/index_summary_off_heap_memory_used/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionMetadataOffHeapMemoryUsed
-     */
-    @Deprecated
-    public long getCompressionMetadataOffHeapMemoryUsed() {
-        log(" getCompressionMetadataOffHeapMemoryUsed()");
-        return c.getLongValue("/column_family/metrics/compression_metadata_off_heap_memory_used/" + getCFName());
+        client.post("column_family/major_compaction/" + getCFName());
     }
 
     /**
      * Gets the minimum number of sstables in queue before compaction kicks off
      */
+    @Override
     public int getMinimumCompactionThreshold() {
         log(" getMinimumCompactionThreshold()");
-        return c.getIntValue("column_family/minimum_compaction/" + getCFName());
+        return client.getIntValue("column_family/minimum_compaction/" + getCFName());
     }
 
     /**
      * Sets the minimum number of sstables in queue before compaction kicks off
      */
+    @Override
     public void setMinimumCompactionThreshold(int threshold) {
         log(" setMinimumCompactionThreshold(int threshold)");
         MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
         queryParams.add("value", Integer.toString(threshold));
-        c.post("column_family/minimum_compaction/" + getCFName(), queryParams);
+        client.post("column_family/minimum_compaction/" + getCFName(), queryParams);
     }
 
     /**
      * Gets the maximum number of sstables in queue before compaction kicks off
      */
+    @Override
     public int getMaximumCompactionThreshold() {
         log(" getMaximumCompactionThreshold()");
-        return c.getIntValue("column_family/maximum_compaction/" + getCFName());
+        return client.getIntValue("column_family/maximum_compaction/" + getCFName());
     }
 
     /**
      * Sets the maximum and maximum number of SSTables in queue before
      * compaction kicks off
      */
+    @Override
     public void setCompactionThresholds(int minThreshold, int maxThreshold) {
         log(" setCompactionThresholds(int minThreshold, int maxThreshold)");
         MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
         queryParams.add("minimum", Integer.toString(minThreshold));
         queryParams.add("maximum", Integer.toString(maxThreshold));
-        c.post("column_family/compaction" + getCFName(), queryParams);
+        client.post("column_family/compaction" + getCFName(), queryParams);
     }
 
     /**
      * Sets the maximum number of sstables in queue before compaction kicks off
      */
+    @Override
     public void setMaximumCompactionThreshold(int threshold) {
         log(" setMaximumCompactionThreshold(int threshold)");
         MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
         queryParams.add("value", Integer.toString(threshold));
-        c.post("column_family/maximum_compaction/" + getCFName(), queryParams);
+        client.post("column_family/maximum_compaction/" + getCFName(), queryParams);
     }
 
     /**
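The hunk above swaps the old CheckRegistration TimerTask, which kept its own cf map and called System.exit after 30 missed responses, for a RegistrationChecker whose doCheck hands the complete desired set of ObjectNames to checkRegistration. That helper lives in MetricsMBean and is not part of this diff, so the sketch below inlines the set reconciliation it presumably performs:

```java
import java.util.HashSet;
import java.util.Set;

public class ReconcileSketch {
    // Assumed shape of checkRegistration: register what the API reports but
    // JMX lacks, unregister what JMX has but the API no longer reports.
    static void reconcile(Set<String> desired, Set<String> registered) {
        Set<String> toAdd = new HashSet<>(desired);
        toAdd.removeAll(registered); // reported by the API, missing from JMX
        Set<String> toRemove = new HashSet<>(registered);
        toRemove.removeAll(desired); // registered in JMX, gone from the API
        registered.addAll(toAdd);
        registered.removeAll(toRemove);
    }

    public static void main(String[] args) {
        Set<String> desired = new HashSet<>(Set.of("ks:a", "ks:b"));
        Set<String> registered = new HashSet<>(Set.of("ks:b", "ks:c"));
        reconcile(desired, registered);
        System.out.println(registered); // holds exactly ks:a and ks:b
    }
}
```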
@@ -525,7 +284,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
         log(" setCompactionStrategyClass(String className)");
         MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
         queryParams.add("class_name", className);
-        c.post("column_family/compaction_strategy/" + getCFName(), queryParams);
+        client.post("column_family/compaction_strategy/" + getCFName(), queryParams);
     }
 
     /**
@@ -533,17 +292,16 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
      */
     public String getCompactionStrategyClass() {
         log(" getCompactionStrategyClass()");
-        return c.getStringValue(
-                "column_family/compaction_strategy/" + getCFName());
+        return client.getStringValue("column_family/compaction_strategy/" + getCFName());
     }
 
     /**
      * Get the compression parameters
      */
+    @Override
     public Map<String, String> getCompressionParameters() {
         log(" getCompressionParameters()");
-        return c.getMapStrValue(
-                "column_family/compression_parameters/" + getCFName());
+        return client.getMapStrValue("column_family/compression_parameters/" + getCFName());
     }
 
     /**
@@ -552,73 +310,49 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
      * @param opts
      *            map of string names to values
      */
+    @Override
     public void setCompressionParameters(Map<String, String> opts) {
         log(" setCompressionParameters(Map<String,String> opts)");
         MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
         queryParams.add("opts", APIClient.mapToString(opts));
-        c.post("column_family/compression_parameters/" + getCFName(),
-                queryParams);
+        client.post("column_family/compression_parameters/" + getCFName(), queryParams);
     }
 
     /**
      * Set new crc check chance
      */
+    @Override
     public void setCrcCheckChance(double crcCheckChance) {
         log(" setCrcCheckChance(double crcCheckChance)");
         MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
         queryParams.add("check_chance", Double.toString(crcCheckChance));
-        c.post("column_family/crc_check_chance/" + getCFName(), queryParams);
+        client.post("column_family/crc_check_chance/" + getCFName(), queryParams);
     }
 
+    @Override
     public boolean isAutoCompactionDisabled() {
         log(" isAutoCompactionDisabled()");
-        return c.getBooleanValue("column_family/autocompaction/" + getCFName());
+        return !client.getBooleanValue("column_family/autocompaction/" + getCFName());
     }
 
     /** Number of tombstoned cells retreived during the last slicequery */
     @Deprecated
     public double getTombstonesPerSlice() {
         log(" getTombstonesPerSlice()");
-        return c.getDoubleValue("");
+        return client.getDoubleValue("");
     }
 
     /** Number of live cells retreived during the last slicequery */
     @Deprecated
     public double getLiveCellsPerSlice() {
         log(" getLiveCellsPerSlice()");
-        return c.getDoubleValue("");
+        return client.getDoubleValue("");
     }
 
+    @Override
     public long estimateKeys() {
         log(" estimateKeys()");
-        return c.getLongValue("column_family/estimate_keys/" + getCFName());
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedRowSizeHistogram
-     */
-    @Deprecated
-    public long[] getEstimatedRowSizeHistogram() {
-        log(" getEstimatedRowSizeHistogram()");
-        return metric.estimatedRowSizeHistogram.value();
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedColumnCountHistogram
-     */
-    @Deprecated
-    public long[] getEstimatedColumnCountHistogram() {
-        log(" getEstimatedColumnCountHistogram()");
-        return metric.estimatedColumnCountHistogram.value();
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionRatio
-     */
-    @Deprecated
-    public double getCompressionRatio() {
-        log(" getCompressionRatio()");
-        return c.getDoubleValue("/column_family/metrics/compression_ratio/" + getCFName());
+        return client.getLongValue("column_family/estimate_keys/" + getCFName());
     }
 
     /**
@@ -626,9 +360,10 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
      *
      * @return list of the index names
      */
+    @Override
     public List<String> getBuiltIndexes() {
         log(" getBuiltIndexes()");
-        return c.getListStrValue("column_family/built_indexes/" + getCFName());
+        return client.getListStrValue("column_family/built_indexes/" + getCFName());
     }
 
     /**
@@ -637,30 +372,49 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
      * @param key
      * @return list of filenames containing the key
      */
+    @Override
     public List<String> getSSTablesForKey(String key) {
         log(" getSSTablesForKey(String key)");
         MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
         queryParams.add("key", key);
-        return c.getListStrValue("column_family/sstables/by_key/" + getCFName(),
-                queryParams);
+        return client.getListStrValue("column_family/sstables/by_key/" + getCFName(), queryParams);
     }
 
+    /**
+     * Returns a list of filenames that contain the given key on this node
+     * @param key
+     * @param hexFormat if key is in hex string format
+     * @return list of filenames containing the key
+     */
+    @Override
+    public List<String> getSSTablesForKey(String key, boolean hexFormat) {
+        log(" getSSTablesForKey(String key)");
+        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
+        queryParams.add("key", key);
+        if (hexFormat) {
+            queryParams.add("format", "hex");
+        }
+        return client.getListStrValue("column_family/sstables/by_key/" + getCFName(), queryParams);
+    }
+
     /**
      * Scan through Keyspace/ColumnFamily's data directory determine which
      * SSTables should be loaded and load them
      */
+    @Override
     public void loadNewSSTables() {
         log(" loadNewSSTables()");
-        c.post("column_family/sstable/" + getCFName());
+        client.post("column_family/sstable/" + getCFName());
     }
 
     /**
      * @return the number of SSTables in L0. Always return 0 if Leveled
      *         compaction is not enabled.
      */
+    @Override
     public int getUnleveledSSTables() {
         log(" getUnleveledSSTables()");
-        return c.getIntValue("column_family/sstables/unleveled/" + getCFName());
+        return client.getIntValue("column_family/sstables/unleveled/" + getCFName());
     }
 
     /**
@@ -668,10 +422,16 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
      *         used. array index corresponds to level(int[0] is for level 0,
      *         ...).
      */
+    @Override
     public int[] getSSTableCountPerLevel() {
         log(" getSSTableCountPerLevel()");
-        return c.getIntArrValue(
-                "column_family/sstables/per_level/" + getCFName());
+        int[] res = client.getIntArrValue("column_family/sstables/per_level/" + getCFName());
+        if (res.length == 0) {
+            // no sstable count
+            // should return null
+            return null;
+        }
+        return res;
     }
 
     /**
@@ -680,18 +440,20 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
      *
      * @return ratio
      */
+    @Override
     public double getDroppableTombstoneRatio() {
         log(" getDroppableTombstoneRatio()");
-        return c.getDoubleValue("column_family/droppable_ratio/" + getCFName());
+        return client.getDoubleValue("column_family/droppable_ratio/" + getCFName());
     }
 
     /**
      * @return the size of SSTables in "snapshots" subdirectory which aren't
      *         live anymore
      */
+    @Override
     public long trueSnapshotsSize() {
         log(" trueSnapshotsSize()");
-        return c.getLongValue("column_family/snapshots_size/" + getCFName());
+        return client.getLongValue("column_family/metrics/snapshots_size/" + getCFName());
     }
 
     public String getKeyspace() {
@@ -699,48 +461,104 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean {
     }
 
     @Override
-    public long getRangeCount() {
-        log("getRangeCount()");
-        return metric.rangeLatency.latency.count();
+    public String getTableName() {
+        log(" getTableName()");
+        return name;
     }
 
     @Override
-    public long getTotalRangeLatencyMicros() {
-        log("getTotalRangeLatencyMicros()");
-        return metric.rangeLatency.totalLatency.count();
+    public void forceMajorCompaction(boolean splitOutput) throws ExecutionException, InterruptedException {
+        log(" forceMajorCompaction(boolean) throws ExecutionException, InterruptedException");
+        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
+        queryParams.putSingle("value", valueOf(splitOutput));
+        client.post("column_family/major_compaction/" + getCFName(), queryParams);
     }
 
     @Override
-    public long[] getLifetimeRangeLatencyHistogramMicros() {
-        log("getLifetimeRangeLatencyHistogramMicros()");
-        return metric.rangeLatency.totalLatencyHistogram.getBuckets(false);
+    public void setCompactionParametersJson(String options) {
+        log(" setCompactionParametersJson");
+        JsonReader reader = Json.createReaderFactory(null).createReader(new StringReader(options));
+        setCompactionParameters(
+                reader.readObject().entrySet().stream().collect(toMap(Map.Entry::getKey, e -> e.toString())));
     }
 
     @Override
-    public long[] getRecentRangeLatencyHistogramMicros() {
-        log("getRecentRangeLatencyHistogramMicros()");
-        return metric.rangeLatency.getRecentLatencyHistogram();
+    public String getCompactionParametersJson() {
+        log(" getCompactionParametersJson");
+        JsonObjectBuilder b = createObjectBuilder();
+        getCompactionParameters().forEach(b::add);
+        return b.build().toString();
     }
 
     @Override
-    public double getRecentRangeLatencyMicros() {
-        log("getRecentRangeLatencyMicros()");
-        return metric.rangeLatency.getRecentLatency();
+    public void setCompactionParameters(Map<String, String> options) {
+        for (Map.Entry<String, String> e : options.entrySet()) {
+            // See below
+            if ("class".equals(e.getKey())) {
+                setCompactionStrategyClass(e.getValue());
+            } else {
+                throw new IllegalArgumentException(e.getKey());
+            }
+        }
     }
 
     @Override
-    public void beginLocalSampling(String sampler, int capacity) {
+    public Map<String, String> getCompactionParameters() {
+        // We only currently support class. Here could have been a call that can
+        // be expanded only on the server side, but that raises controversy.
+        // Lets add some technical debt instead.
+        return Collections.singletonMap("class", getCompactionStrategyClass());
+    }
+
+    @Override
+    public boolean isCompactionDiskSpaceCheckEnabled() {
         // TODO Auto-generated method stub
-        log("beginLocalSampling()");
+        log(" isCompactionDiskSpaceCheckEnabled()");
+        return false;
     }
 
     @Override
-    public CompositeData finishLocalSampling(String sampler, int count)
-            throws OpenDataException {
+    public void compactionDiskSpaceCheck(boolean enable) {
         // TODO Auto-generated method stub
-        log("finishLocalSampling()");
-        return null;
+        log(" compactionDiskSpaceCheck()");
     }
 
+    @Override
+    public void beginLocalSampling(String sampler_base, int capacity) {
+        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
+        queryParams.add("capacity", Integer.toString(capacity));
+        if (sampler_base.contains(":")) {
+            String[] parts = sampler_base.split(":");
+            queryParams.add("duration", parts[1]);
+        } else {
+            queryParams.add("duration", "10000");
+        }
+        startTableSampling(queryParams);
+        log(" beginLocalSampling()");
+    }
+
+    @Override
+    public CompositeData finishLocalSampling(String samplerType, int count) throws OpenDataException {
+        log(" finishLocalSampling()");
+
+        waitUntilSamplingCompleted();
+
+        TabularDataSupport result = new TabularDataSupport(COUNTER_TYPE);
+
+        JsonArray counters = tableSamplerResult.getJsonArray((samplerType.equalsIgnoreCase("reads")) ? "read" : "write");
+        long cardinality = tableSamplerResult.getJsonNumber((samplerType.equalsIgnoreCase("reads")) ? "read_cardinality" : "write_cardinality").longValue();
+        long size = 0;
+        if (counters != null) {
+            size = (count > counters.size()) ? counters.size() : count;
+            for (int i = 0; i < size; i++) {
+                JsonObject counter = counters.getJsonObject(i);
+                result.put(new CompositeDataSupport(COUNTER_COMPOSITE_TYPE, COUNTER_NAMES,
+                        new Object[] { counter.getString("partition"), // raw
+                                counter.getJsonNumber("count").longValue(), // count
+                                counter.getJsonNumber("error").longValue(), // error
+                                counter.getString("partition") })); // string
+            }
+        }
+        return new CompositeDataSupport(SAMPLING_RESULT, SAMPLER_NAMES, new Object[] { cardinality, result });
+    }
 }
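From a JMX client the new sampling methods compose as begin, wait, finish. The sketch below assumes it runs in the same VM as the agent (a remote client would obtain an MBeanServerConnection through a JMXConnector instead), and the keyspace and table names are made up:

```java
import java.lang.management.ManagementFactory;

import javax.management.JMX;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeData;
import javax.management.openmbean.TabularData;

import org.apache.cassandra.db.ColumnFamilyStoreMBean;

public class TopPartitionsSketch {
    public static void main(String[] args) throws Exception {
        MBeanServer server = ManagementFactory.getPlatformMBeanServer();
        ObjectName name = new ObjectName(
                "org.apache.cassandra.db:type=Tables,keyspace=ks,columnfamily=cf");
        ColumnFamilyStoreMBean cfs = JMX.newMBeanProxy(server, name, ColumnFamilyStoreMBean.class);

        // "reads:5000" piggybacks a 5000 ms duration on the sampler name, as
        // parsed by beginLocalSampling above; a plain "reads" defaults to 10000.
        cfs.beginLocalSampling("reads:5000", 10);

        // Blocks in waitUntilSamplingCompleted until the REST call returns.
        CompositeData top = cfs.finishLocalSampling("reads", 10);
        System.out.println("cardinality = " + top.get("cardinality"));
        TabularData partitions = (TabularData) top.get("partitions");
        System.out.println(partitions.size() + " sampled partitions");
    }
}
```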
ColumnFamilyStoreMBean.java (package org.apache.cassandra.db)
@@ -17,6 +17,7 @@
  */
 package org.apache.cassandra.db;
 
+import java.util.Collection;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.ExecutionException;
@ -27,263 +28,28 @@ import javax.management.openmbean.OpenDataException;
|
|||||||
/**
 * The MBean interface for ColumnFamilyStore
 */
-public interface ColumnFamilyStoreMBean
-{
+public interface ColumnFamilyStoreMBean {
    /**
     * @return the name of the column family
     */
+    @Deprecated
    public String getColumnFamilyName();

+    public String getTableName();
+
-    /**
-     * Returns the total amount of data stored in the memtable, including
-     * column related overhead.
-     *
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableOnHeapSize
-     * @return The size in bytes.
-     * @deprecated
-     */
-    @Deprecated
-    public long getMemtableDataSize();
-
-    /**
-     * Returns the total number of columns present in the memtable.
-     *
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableColumnsCount
-     * @return The number of columns.
-     */
-    @Deprecated
-    public long getMemtableColumnsCount();
-
-    /**
-     * Returns the number of times that a flush has resulted in the
-     * memtable being switched out.
-     *
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#memtableSwitchCount
-     * @return the number of memtable switches
-     */
-    @Deprecated
-    public int getMemtableSwitchCount();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentSSTablesPerRead
-     * @return a histogram of the number of sstable data files accessed per read: reading this property resets it
-     */
-    @Deprecated
-    public long[] getRecentSSTablesPerReadHistogram();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#sstablesPerReadHistogram
-     * @return a histogram of the number of sstable data files accessed per read
-     */
-    @Deprecated
-    public long[] getSSTablesPerReadHistogram();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
-     * @return the number of read operations on this column family
-     */
-    @Deprecated
-    public long getReadCount();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
-     * @return total read latency (divide by getReadCount() for average)
-     */
-    @Deprecated
-    public long getTotalReadLatencyMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
-     * @return an array representing the latency histogram
-     */
-    @Deprecated
-    public long[] getLifetimeReadLatencyHistogramMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
-     * @return an array representing the latency histogram
-     */
-    @Deprecated
-    public long[] getRecentReadLatencyHistogramMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#readLatency
-     * @return average latency per read operation since the last call
-     */
-    @Deprecated
-    public double getRecentReadLatencyMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
-     * @return the number of write operations on this column family
-     */
-    @Deprecated
-    public long getWriteCount();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
-     * @return total write latency (divide by getReadCount() for average)
-     */
-    @Deprecated
-    public long getTotalWriteLatencyMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
-     * @return an array representing the latency histogram
-     */
-    @Deprecated
-    public long[] getLifetimeWriteLatencyHistogramMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
-     * @return an array representing the latency histogram
-     */
-    @Deprecated
-    public long[] getRecentWriteLatencyHistogramMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#writeLatency
-     * @return average latency per write operation since the last call
-     */
-    @Deprecated
-    public double getRecentWriteLatencyMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency
-     * @return the number of range slice operations on this column family
-     */
-    @Deprecated
-    public long getRangeCount();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency
-     * @return total range slice latency (divide by getRangeCount() for average)
-     */
-    @Deprecated
-    public long getTotalRangeLatencyMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency
-     * @return an array representing the latency histogram
-     */
-    @Deprecated
-    public long[] getLifetimeRangeLatencyHistogramMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency
-     * @return an array representing the latency histogram
-     */
-    @Deprecated
-    public long[] getRecentRangeLatencyHistogramMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#rangeLatency
-     * @return average latency per range slice operation since the last call
-     */
-    @Deprecated
-    public double getRecentRangeLatencyMicros();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#pendingFlushes
-     * @return the estimated number of tasks pending for this column family
-     */
-    @Deprecated
-    public int getPendingTasks();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveSSTableCount
-     * @return the number of SSTables on disk for this CF
-     */
-    @Deprecated
-    public int getLiveSSTableCount();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#liveDiskSpaceUsed
-     * @return disk space used by SSTables belonging to this CF
-     */
-    @Deprecated
-    public long getLiveDiskSpaceUsed();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#totalDiskSpaceUsed
-     * @return total disk space used by SSTables belonging to this CF, including obsolete ones waiting to be GC'd
-     */
-    @Deprecated
-    public long getTotalDiskSpaceUsed();

    /**
     * force a major compaction of this column family
+     *
+     * @param splitOutput
+     *            true if the output of the major compaction should be split in
+     *            several sstables
     */
-    public void forceMajorCompaction() throws ExecutionException, InterruptedException;
+    public void forceMajorCompaction(boolean splitOutput) throws ExecutionException, InterruptedException;
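The `forceMajorCompaction` signature change above breaks source compatibility for JMX clients, which now have to pass the split flag explicitly. A minimal client-side sketch, assuming the usual Cassandra object-name convention (`type=ColumnFamilies,keyspace=...,columnfamily=...`) and the default JMX port 7199; the host, keyspace, and table names are placeholders:

// Editorial sketch, not part of the diff: calling the new signature remotely.
import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

import org.apache.cassandra.db.ColumnFamilyStoreMBean;

public class ForceCompactionExample {
    public static void main(String[] args) throws Exception {
        // Default Cassandra/Scylla JMX port; adjust host/port as needed.
        JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
        JMXConnector connector = JMXConnectorFactory.connect(url);
        try {
            MBeanServerConnection mbs = connector.getMBeanServerConnection();
            ObjectName name = new ObjectName(
                    "org.apache.cassandra.db:type=ColumnFamilies,keyspace=ks1,columnfamily=cf1");
            ColumnFamilyStoreMBean cfs = JMX.newMBeanProxy(mbs, name, ColumnFamilyStoreMBean.class);
            cfs.forceMajorCompaction(true); // true = split the output into several sstables
        } finally {
            connector.close();
        }
    }
}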

-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#minRowSize
-     * @return the size of the smallest compacted row
-     */
-    @Deprecated
-    public long getMinRowSize();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#maxRowSize
-     * @return the size of the largest compacted row
-     */
-    @Deprecated
-    public long getMaxRowSize();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#meanRowSize
-     * @return the average row size across all the sstables
-     */
-    @Deprecated
-    public long getMeanRowSize();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalsePositives
-     */
-    @Deprecated
-    public long getBloomFilterFalsePositives();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalsePositives
-     */
-    @Deprecated
-    public long getRecentBloomFilterFalsePositives();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterFalseRatio
-     */
-    @Deprecated
-    public double getBloomFilterFalseRatio();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#recentBloomFilterFalseRatio
-     */
-    @Deprecated
-    public double getRecentBloomFilterFalseRatio();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterDiskSpaceUsed
-     */
-    @Deprecated
-    public long getBloomFilterDiskSpaceUsed();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#bloomFilterOffHeapMemoryUsed
-     */
-    @Deprecated
-    public long getBloomFilterOffHeapMemoryUsed();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#indexSummaryOffHeapMemoryUsed
-     */
-    @Deprecated
-    public long getIndexSummaryOffHeapMemoryUsed();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionMetadataOffHeapMemoryUsed
-     */
-    @Deprecated
-    public long getCompressionMetadataOffHeapMemoryUsed();
+    // NOT even default-throw implementing
+    // forceCompactionForTokenRange
+    // as this is clearly a misplaced method that should not be in the mbean interface
+    // (uses internal cassandra types)

    /**
     * Gets the minimum number of sstables in queue before compaction kicks off
@@ -301,7 +67,8 @@ public interface ColumnFamilyStoreMBean
    public int getMaximumCompactionThreshold();

    /**
-     * Sets the maximum and maximum number of SSTables in queue before compaction kicks off
+     * Sets the minimum and maximum number of SSTables in queue before
+     * compaction kicks off
     */
    public void setCompactionThresholds(int minThreshold, int maxThreshold);

@@ -311,26 +78,44 @@ public interface ColumnFamilyStoreMBean
    public void setMaximumCompactionThreshold(int threshold);

    /**
-     * Sets the compaction strategy by class name
-     * @param className the name of the compaction strategy class
+     * Sets the compaction parameters locally for this node
+     *
+     * Note that this will be set until an ALTER with compaction = {..} is
+     * executed or the node is restarted
+     *
+     * @param options
+     *            compaction options with the same syntax as when doing ALTER
+     *            ... WITH compaction = {..}
     */
-    public void setCompactionStrategyClass(String className);
+    public void setCompactionParametersJson(String options);
+
+    public String getCompactionParametersJson();

    /**
-     * Gets the compaction strategy class name
+     * Sets the compaction parameters locally for this node
+     *
+     * Note that this will be set until an ALTER with compaction = {..} is
+     * executed or the node is restarted
+     *
+     * @param options
+     *            compaction options map
     */
-    public String getCompactionStrategyClass();
+    public void setCompactionParameters(Map<String, String> options);
+
+    public Map<String, String> getCompactionParameters();

    /**
     * Get the compression parameters
     */
-    public Map<String,String> getCompressionParameters();
+    public Map<String, String> getCompressionParameters();

    /**
     * Set the compression parameters
-     * @param opts map of string names to values
+     *
+     * @param opts
+     *            map of string names to values
     */
-    public void setCompressionParameters(Map<String,String> opts);
+    public void setCompressionParameters(Map<String, String> opts);

    /**
     * Set new crc check chance
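The replacement of the strategy-class accessors with parameter-based ones mirrors the CQL `ALTER TABLE ... WITH compaction = {..}` syntax. A sketch of both variants through an already-obtained `ColumnFamilyStoreMBean` proxy; the strategy name and thresholds below are illustrative values, not anything mandated by the diff:

import java.util.Map;

import org.apache.cassandra.db.ColumnFamilyStoreMBean;

public class CompactionParametersExample {
    static void retune(ColumnFamilyStoreMBean cfs) {
        // JSON variant: same syntax as ALTER TABLE ... WITH compaction = {..}.
        // The setting is node-local and lasts until an ALTER runs or the node restarts.
        cfs.setCompactionParametersJson(
                "{\"class\": \"SizeTieredCompactionStrategy\", \"min_threshold\": \"4\"}");

        // Map variant: read back the current options, tweak one, and re-apply.
        Map<String, String> options = cfs.getCompactionParameters();
        options.put("max_threshold", "32");
        cfs.setCompactionParameters(options);
    }
}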
@@ -339,81 +124,92 @@ public interface ColumnFamilyStoreMBean
    public boolean isAutoCompactionDisabled();

-    /** Number of tombstoned cells retreived during the last slicequery */
-    @Deprecated
-    public double getTombstonesPerSlice();
-
-    /** Number of live cells retreived during the last slicequery */
-    @Deprecated
-    public double getLiveCellsPerSlice();
-
    public long estimateKeys();

-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedRowSizeHistogram
-     */
-    @Deprecated
-    public long[] getEstimatedRowSizeHistogram();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#estimatedColumnCountHistogram
-     */
-    @Deprecated
-    public long[] getEstimatedColumnCountHistogram();
-
-    /**
-     * @see org.apache.cassandra.metrics.ColumnFamilyMetrics#compressionRatio
-     */
-    @Deprecated
-    public double getCompressionRatio();
-
    /**
     * Returns a list of the names of the built column indexes for current store
+     *
     * @return list of the index names
     */
    public List<String> getBuiltIndexes();

    /**
     * Returns a list of filenames that contain the given key on this node
+     *
     * @param key
     * @return list of filenames containing the key
     */
    public List<String> getSSTablesForKey(String key);

    /**
-     * Scan through Keyspace/ColumnFamily's data directory
-     * determine which SSTables should be loaded and load them
+     * Returns a list of filenames that contain the given key on this node
+     * @param key
+     * @param hexFormat if key is in hex string format
+     * @return list of filenames containing the key
+     */
+    public List<String> getSSTablesForKey(String key, boolean hexFormat);
+
+    /**
+     * Scan through Keyspace/ColumnFamily's data directory determine which
+     * SSTables should be loaded and load them
     */
    public void loadNewSSTables();

    /**
-     * @return the number of SSTables in L0. Always return 0 if Leveled compaction is not enabled.
+     * @return the number of SSTables in L0. Always return 0 if Leveled
+     *         compaction is not enabled.
     */
    public int getUnleveledSSTables();

    /**
-     * @return sstable count for each level. null unless leveled compaction is used.
-     *         array index corresponds to level(int[0] is for level 0, ...).
+     * @return sstable count for each level. null unless leveled compaction is
+     *         used. array index corresponds to level(int[0] is for level 0,
+     *         ...).
     */
    public int[] getSSTableCountPerLevel();

    /**
+     * @return sstable fanout size for level compaction strategy.
+     */
+    default public int getLevelFanoutSize() {
+        // TODO: implement for real. This is sort of default.
+        return 10;
+    }
+
+    /**
-     * Get the ratio of droppable tombstones to real columns (and non-droppable tombstones)
+     * Get the ratio of droppable tombstones to real columns (and non-droppable
+     * tombstones)
+     *
     * @return ratio
     */
    public double getDroppableTombstoneRatio();

    /**
-     * @return the size of SSTables in "snapshots" subdirectory which aren't live anymore
+     * @return the size of SSTables in "snapshots" subdirectory which aren't
+     *         live anymore
     */
    public long trueSnapshotsSize();

    /**
-     * begin sampling for a specific sampler with a given capacity. The cardinality may
-     * be larger than the capacity, but depending on the use case it may affect its accuracy
+     * begin sampling for a specific sampler with a given capacity. The
+     * cardinality may be larger than the capacity, but depending on the use
+     * case it may affect its accuracy
     */
    public void beginLocalSampling(String sampler, int capacity);

    /**
-     * @return top <i>count</i> items for the sampler since beginLocalSampling was called
+     * @return top <i>count</i> items for the sampler since beginLocalSampling
+     *         was called
     */
    public CompositeData finishLocalSampling(String sampler, int count) throws OpenDataException;

+    /*
+     * Is Compaction space check enabled
+     */
+    public boolean isCompactionDiskSpaceCheckEnabled();
+
+    /*
+     * Enable/Disable compaction space check
+     */
+    public void compactionDiskSpaceCheck(boolean enable);
}
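The new `hexFormat` overload lets callers name partition keys that are not printable text. A short sketch; the exact hex encoding the server expects is not shown in this diff, so the second key below is an assumption:

import java.util.List;

import org.apache.cassandra.db.ColumnFamilyStoreMBean;

public class SSTablesForKeyExample {
    static void locate(ColumnFamilyStoreMBean cfs) {
        // Text key, as before.
        List<String> textual = cfs.getSSTablesForKey("alice");
        // Binary key spelled out as hex (assumed encoding).
        List<String> binary = cfs.getSSTablesForKey("cafebabe", true);
        System.out.println(textual);
        System.out.println(binary);
    }
}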
src/main/java/org/apache/cassandra/db/commitlog/CommitLog.java

@@ -22,85 +22,39 @@
 */
package org.apache.cassandra.db.commitlog;

-import java.io.*;
-import java.lang.management.ManagementFactory;
-import java.util.*;
-
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;

import org.apache.cassandra.metrics.CommitLogMetrics;

-import com.cloudius.urchin.api.APIClient;
+import com.scylladb.jmx.api.APIClient;
+import com.scylladb.jmx.metrics.MetricsMBean;

/*
 * Commit Log tracks every write operation into the system. The aim of the commit log is to be able to
 * successfully recover data that was not stored to disk via the Memtable.
 */
-public class CommitLog implements CommitLogMBean {
-
-    CommitLogMetrics metrics = new CommitLogMetrics();
+public class CommitLog extends MetricsMBean implements CommitLogMBean {
    private static final java.util.logging.Logger logger = java.util.logging.Logger
            .getLogger(CommitLog.class.getName());

-    private APIClient c = new APIClient();
-
    public void log(String str) {
-        logger.info(str);
+        logger.finest(str);
    }

-    private static final CommitLog instance = new CommitLog();
-
-    public static CommitLog getInstance() {
-        return instance;
-    }
-
-    private CommitLog() {
-        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-        try {
-            mbs.registerMBean(this,
-                    new ObjectName("org.apache.cassandra.db:type=Commitlog"));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    /**
-     * Get the number of completed tasks
-     *
-     * @see org.apache.cassandra.metrics.CommitLogMetrics#completedTasks
-     */
-    @Deprecated
-    public long getCompletedTasks() {
-        log(" getCompletedTasks()");
-        return c.getLongValue("");
-    }
-
-    /**
-     * Get the number of tasks waiting to be executed
-     *
-     * @see org.apache.cassandra.metrics.CommitLogMetrics#pendingTasks
-     */
-    @Deprecated
-    public long getPendingTasks() {
-        log(" getPendingTasks()");
-        return c.getLongValue("");
-    }
-
-    /**
-     * Get the current size used by all the commitlog segments.
-     *
-     * @see org.apache.cassandra.metrics.CommitLogMetrics#totalCommitLogSize
-     */
-    @Deprecated
-    public long getTotalCommitlogSize() {
-        log(" getTotalCommitlogSize()");
-        return c.getLongValue("");
-    }
+    public CommitLog(APIClient client) {
+        super("org.apache.cassandra.db:type=Commitlog", client, new CommitLogMetrics());
+    }

    /**
     * Recover a single file.
     */
+    @Override
    public void recover(String path) throws IOException {
        log(" recover(String path) throws IOException");
    }
@@ -109,9 +63,10 @@ public class CommitLog implements CommitLogMBean {
     * @return file names (not full paths) of active commit log segments
     *         (segments containing unflushed data)
     */
+    @Override
    public List<String> getActiveSegmentNames() {
        log(" getActiveSegmentNames()");
-        List<String> lst = c.getListStrValue("/commitlog/segments/active");
+        List<String> lst = client.getListStrValue("/commitlog/segments/active");
        Set<String> set = new HashSet<String>();
        for (String l : lst) {
            String name = l.substring(l.lastIndexOf("/") + 1, l.length());
@@ -124,9 +79,10 @@ public class CommitLog implements CommitLogMBean {
     * @return Files which are pending for archival attempt. Does NOT include
     *         failed archive attempts.
     */
+    @Override
    public List<String> getArchivingSegmentNames() {
        log(" getArchivingSegmentNames()");
-        List<String> lst = c.getListStrValue("/commitlog/segments/archiving");
+        List<String> lst = client.getListStrValue("/commitlog/segments/archiving");
        Set<String> set = new HashSet<String>();
        for (String l : lst) {
            String name = l.substring(l.lastIndexOf("/") + 1, l.length());
@@ -139,35 +95,54 @@ public class CommitLog implements CommitLogMBean {
    public String getArchiveCommand() {
        // TODO Auto-generated method stub
        log(" getArchiveCommand()");
-        return c.getStringValue("");
+        return client.getStringValue("");
    }

    @Override
    public String getRestoreCommand() {
        // TODO Auto-generated method stub
        log(" getRestoreCommand()");
-        return c.getStringValue("");
+        return client.getStringValue("");
    }

    @Override
    public String getRestoreDirectories() {
        // TODO Auto-generated method stub
        log(" getRestoreDirectories()");
-        return c.getStringValue("");
+        return client.getStringValue("");
    }

    @Override
    public long getRestorePointInTime() {
        // TODO Auto-generated method stub
        log(" getRestorePointInTime()");
-        return c.getLongValue("");
+        return client.getLongValue("");
    }

    @Override
    public String getRestorePrecision() {
        // TODO Auto-generated method stub
        log(" getRestorePrecision()");
-        return c.getStringValue("");
+        return client.getStringValue("");
    }

+    @Override
+    public long getActiveContentSize() {
+        // scylla does not compress commit log, so this is equivalent
+        return getActiveOnDiskSize();
+    }
+
+    @Override
+    public long getActiveOnDiskSize() {
+        return client.getLongValue("/commitlog/metrics/total_commit_log_size");
+    }
+
+    @Override
+    public Map<String, Double> getActiveSegmentCompressionRatios() {
+        HashMap<String, Double> res = new HashMap<>();
+        for (String name : getActiveSegmentNames()) {
+            res.put(name, 1.0);
+        }
+        return res;
+    }
}
src/main/java/org/apache/cassandra/db/commitlog/CommitLogMBean.java

@@ -19,32 +19,9 @@ package org.apache.cassandra.db.commitlog;

import java.io.IOException;
import java.util.List;
+import java.util.Map;

public interface CommitLogMBean {
-    /**
-     * Get the number of completed tasks
-     *
-     * @see org.apache.cassandra.metrics.CommitLogMetrics#completedTasks
-     */
-    @Deprecated
-    public long getCompletedTasks();
-
-    /**
-     * Get the number of tasks waiting to be executed
-     *
-     * @see org.apache.cassandra.metrics.CommitLogMetrics#pendingTasks
-     */
-    @Deprecated
-    public long getPendingTasks();
-
-    /**
-     * Get the current size used by all the commitlog segments.
-     *
-     * @see org.apache.cassandra.metrics.CommitLogMetrics#totalCommitLogSize
-     */
-    @Deprecated
-    public long getTotalCommitlogSize();
-
    /**
     * Command to execute to archive a commitlog segment. Blank to disabled.
     */
@@ -92,4 +69,21 @@ public interface CommitLogMBean {
     *         failed archive attempts.
     */
    public List<String> getArchivingSegmentNames();
+
+    /**
+     * @return The size of the mutations in all active commit log segments
+     *         (uncompressed).
+     */
+    public long getActiveContentSize();
+
+    /**
+     * @return The space taken on disk by the commit log (compressed).
+     */
+    public long getActiveOnDiskSize();
+
+    /**
+     * @return A map between active log segments and the compression ratio
+     *         achieved for each.
+     */
+    public Map<String, Double> getActiveSegmentCompressionRatios();
}
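Taken together, the interface and implementation changes above expose three new commit-log attributes. A small sketch of a consumer; per the Scylla implementation, the content size equals the on-disk size and every segment reports a ratio of 1.0, because the commit log is not compressed:

import java.util.Map;

import org.apache.cassandra.db.commitlog.CommitLogMBean;

public class CommitLogSizeExample {
    static void report(CommitLogMBean commitLog) {
        System.out.printf("content=%d bytes, on disk=%d bytes%n",
                commitLog.getActiveContentSize(), commitLog.getActiveOnDiskSize());
        for (Map.Entry<String, Double> e : commitLog.getActiveSegmentCompressionRatios().entrySet()) {
            System.out.println(e.getKey() + " -> compression ratio " + e.getValue());
        }
    }
}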
src/main/java/org/apache/cassandra/db/compaction/CompactionHistoryTabularData.java (new file, 98 lines)

@@ -0,0 +1,98 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Copyright 2015 ScyllaDB
+ *
+ * Modified by ScyllaDB
+ */
+package org.apache.cassandra.db.compaction;
+
+import jakarta.json.JsonArray;
+import jakarta.json.JsonObject;
+import javax.management.openmbean.CompositeDataSupport;
+import javax.management.openmbean.CompositeType;
+import javax.management.openmbean.OpenDataException;
+import javax.management.openmbean.OpenType;
+import javax.management.openmbean.SimpleType;
+import javax.management.openmbean.TabularData;
+import javax.management.openmbean.TabularDataSupport;
+import javax.management.openmbean.TabularType;
+
+import com.google.common.base.Throwables;
+
+public class CompactionHistoryTabularData {
+    private static final String[] ITEM_NAMES = new String[] { "id", "keyspace_name", "columnfamily_name",
+            "compacted_at", "bytes_in", "bytes_out", "rows_merged" };
+
+    private static final String[] ITEM_DESCS = new String[] { "time uuid", "keyspace name", "column family name",
+            "compaction finished at", "total bytes in", "total bytes out", "total rows merged" };
+
+    private static final String TYPE_NAME = "CompactionHistory";
+
+    private static final String ROW_DESC = "CompactionHistory";
+
+    private static final OpenType<?>[] ITEM_TYPES;
+
+    private static final CompositeType COMPOSITE_TYPE;
+
+    private static final TabularType TABULAR_TYPE;
+
+    static {
+        try {
+            ITEM_TYPES = new OpenType[] { SimpleType.STRING, SimpleType.STRING, SimpleType.STRING, SimpleType.LONG,
+                    SimpleType.LONG, SimpleType.LONG, SimpleType.STRING };
+
+            COMPOSITE_TYPE = new CompositeType(TYPE_NAME, ROW_DESC, ITEM_NAMES, ITEM_DESCS, ITEM_TYPES);
+
+            TABULAR_TYPE = new TabularType(TYPE_NAME, ROW_DESC, COMPOSITE_TYPE, ITEM_NAMES);
+        } catch (OpenDataException e) {
+            throw Throwables.propagate(e);
+        }
+    }
+
+    public static TabularData from(JsonArray resultSet) throws OpenDataException {
+        TabularDataSupport result = new TabularDataSupport(TABULAR_TYPE);
+        for (int i = 0; i < resultSet.size(); i++) {
+            JsonObject row = resultSet.getJsonObject(i);
+            String id = row.getString("id");
+            String ksName = row.getString("ks");
+            String cfName = row.getString("cf");
+            long compactedAt = row.getJsonNumber("compacted_at").longValue();
+            long bytesIn = row.getJsonNumber("bytes_in").longValue();
+            long bytesOut = row.getJsonNumber("bytes_out").longValue();
+
+            JsonArray merged = row.getJsonArray("rows_merged");
+            StringBuilder sb = new StringBuilder();
+            if (merged != null) {
+                sb.append('{');
+                for (int m = 0; m < merged.size(); m++) {
+                    JsonObject entry = merged.getJsonObject(m);
+                    if (m > 0) {
+                        sb.append(',');
+                    }
+                    sb.append(entry.getString("key")).append(':').append(entry.getString("value"));
+                }
+                sb.append('}');
+            }
+            result.put(new CompositeDataSupport(COMPOSITE_TYPE, ITEM_NAMES,
+                    new Object[] { id, ksName, cfName, compactedAt, bytesIn, bytesOut, sb.toString() }));
+        }
+        return result;
+    }
+}
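The converter expects the REST response shape that `CompactionManager` reads below. A self-contained sketch that feeds it one hand-built row; the field names come from the parsing code above, while the values are made up:

import jakarta.json.Json;
import jakarta.json.JsonArray;

import javax.management.openmbean.TabularData;

import org.apache.cassandra.db.compaction.CompactionHistoryTabularData;

public class CompactionHistoryExample {
    public static void main(String[] args) throws Exception {
        JsonArray history = Json.createArrayBuilder()
                .add(Json.createObjectBuilder()
                        .add("id", "8aa15a60-0000-1000-0000-000000000000") // placeholder time uuid
                        .add("ks", "ks1")
                        .add("cf", "cf1")
                        .add("compacted_at", 1445000000000L)
                        .add("bytes_in", 2048L)
                        .add("bytes_out", 1024L)
                        .add("rows_merged", Json.createArrayBuilder()
                                .add(Json.createObjectBuilder()
                                        .add("key", "1")
                                        .add("value", "5"))))
                .build();
        TabularData table = CompactionHistoryTabularData.from(history);
        System.out.println(table.size()); // prints 1
    }
}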
src/main/java/org/apache/cassandra/db/compaction/CompactionManager.java

@@ -17,18 +17,23 @@
 */
package org.apache.cassandra.db.compaction;

-import java.lang.management.ManagementFactory;
-import java.util.*;
+import jakarta.json.JsonArray;
+import jakarta.json.JsonObject;
+import jakarta.ws.rs.core.MultivaluedHashMap;
+import jakarta.ws.rs.core.MultivaluedMap;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.logging.Logger;

-import javax.management.MBeanServer;
-import javax.management.ObjectName;
+import javax.management.openmbean.OpenDataException;
import javax.management.openmbean.TabularData;
-import javax.ws.rs.core.MultivaluedHashMap;
-import javax.ws.rs.core.MultivaluedMap;

import org.apache.cassandra.metrics.CompactionMetrics;

-import com.cloudius.urchin.api.APIClient;
+import com.scylladb.jmx.api.APIClient;
+import com.scylladb.jmx.metrics.MetricsMBean;

/**
 * A singleton which manages a private executor of ongoing compactions.
@@ -40,91 +45,58 @@ import com.cloudius.urchin.api.APIClient;

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */
-public class CompactionManager implements CompactionManagerMBean {
+public class CompactionManager extends MetricsMBean implements CompactionManagerMBean {
    public static final String MBEAN_OBJECT_NAME = "org.apache.cassandra.db:type=CompactionManager";
-    private static final java.util.logging.Logger logger = java.util.logging.Logger
-            .getLogger(CompactionManager.class.getName());
-    public static final CompactionManager instance;
-    private APIClient c = new APIClient();
-    CompactionMetrics metrics = new CompactionMetrics();
+    private static final Logger logger = Logger.getLogger(CompactionManager.class.getName());

    public void log(String str) {
-        logger.info(str);
+        logger.finest(str);
    }

-    static {
-        instance = new CompactionManager();
-        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-        try {
-            mbs.registerMBean(instance, new ObjectName(MBEAN_OBJECT_NAME));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    public static CompactionManager getInstance() {
-        return instance;
+    public CompactionManager(APIClient client) {
+        super(MBEAN_OBJECT_NAME, client, new CompactionMetrics());
    }

    /** List of running compaction objects. */
+    @Override
    public List<Map<String, String>> getCompactions() {
        log(" getCompactions()");
-        return c.getListMapStrValue("compaction_manager/compactions");
+        List<Map<String, String>> results = new ArrayList<Map<String, String>>();
+        JsonArray compactions = client.getJsonArray("compaction_manager/compactions");
+        for (int i = 0; i < compactions.size(); i++) {
+            JsonObject compaction = compactions.getJsonObject(i);
+            Map<String, String> result = new HashMap<String, String>();
+            result.put("total", Long.toString(compaction.getJsonNumber("total").longValue()));
+            result.put("completed", Long.toString(compaction.getJsonNumber("completed").longValue()));
+            result.put("taskType", compaction.getString("task_type"));
+            result.put("keyspace", compaction.getString("ks"));
+            result.put("columnfamily", compaction.getString("cf"));
+            result.put("unit", compaction.getString("unit"));
+            result.put("compactionId", (compaction.containsKey("id")) ? compaction.getString("id") : "<none>");
+            results.add(result);
+        }
+        return results;
    }

    /** List of running compaction summary strings. */
+    @Override
    public List<String> getCompactionSummary() {
        log(" getCompactionSummary()");
-        return c.getListStrValue("compaction_manager/compaction_summary");
+        return client.getListStrValue("compaction_manager/compaction_summary");
    }

    /** compaction history **/
+    @Override
    public TabularData getCompactionHistory() {
        log(" getCompactionHistory()");
-        return c.getCQLResult("SELECT * from system.compaction_history");
+        try {
+            return CompactionHistoryTabularData.from(client.getJsonArray("/compaction_manager/compaction_history"));
+        } catch (OpenDataException e) {
+            return null;
+        }
    }

-    /**
-     * @see org.apache.cassandra.metrics.CompactionMetrics#pendingTasks
-     * @return estimated number of compactions remaining to perform
-     */
-    @Deprecated
-    public int getPendingTasks() {
-        log(" getPendingTasks()");
-        return c.getIntValue("");
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.CompactionMetrics#completedTasks
-     * @return number of completed compactions since server [re]start
-     */
-    @Deprecated
-    public long getCompletedTasks() {
-        log(" getCompletedTasks()");
-        return c.getLongValue("");
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.CompactionMetrics#bytesCompacted
-     * @return total number of bytes compacted since server [re]start
-     */
-    @Deprecated
-    public long getTotalBytesCompacted() {
-        log(" getTotalBytesCompacted()");
-        return c.getLongValue("");
-    }
-
-    /**
-     * @see org.apache.cassandra.metrics.CompactionMetrics#totalCompactionsCompleted
-     * @return total number of compactions since server [re]start
-     */
-    @Deprecated
-    public long getTotalCompactionsCompleted() {
-        log(" getTotalCompactionsCompleted()");
-        return c.getLongValue("");
-    }
-
    /**
@@ -138,12 +110,12 @@ public class CompactionManager implements CompactionManagerMBean {
     *            contain keyspace and columnfamily name in path(for 2.1+) or
     *            file name itself.
     */
+    @Override
    public void forceUserDefinedCompaction(String dataFiles) {
        log(" forceUserDefinedCompaction(String dataFiles)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("dataFiles", dataFiles);
-        c.post("compaction_manager/compaction_manager/force_user_defined_compaction",
-                queryParams);
+        client.post("compaction_manager/force_user_defined_compaction", queryParams);
    }

    /**
@@ -153,28 +125,30 @@ public class CompactionManager implements CompactionManagerMBean {
     *            the type of compaction to stop. Can be one of: - COMPACTION -
     *            VALIDATION - CLEANUP - SCRUB - INDEX_BUILD
     */
+    @Override
    public void stopCompaction(String type) {
        log(" stopCompaction(String type)");
        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
        queryParams.add("type", type);
-        c.post("compaction_manager/compaction_manager/stop_compaction",
-                queryParams);
+        client.post("compaction_manager/stop_compaction", queryParams);
    }

    /**
     * Returns core size of compaction thread pool
     */
+    @Override
    public int getCoreCompactorThreads() {
        log(" getCoreCompactorThreads()");
-        return c.getIntValue("");
+        return client.getIntValue("");
    }

    /**
     * Allows user to resize maximum size of the compaction thread pool.
     *
     * @param number
     *            New maximum of compaction threads
     */
+    @Override
    public void setCoreCompactorThreads(int number) {
        log(" setCoreCompactorThreads(int number)");
    }
@@ -182,17 +156,19 @@ public class CompactionManager implements CompactionManagerMBean {
    /**
     * Returns maximum size of compaction thread pool
     */
+    @Override
    public int getMaximumCompactorThreads() {
        log(" getMaximumCompactorThreads()");
-        return c.getIntValue("");
+        return client.getIntValue("");
    }

    /**
     * Allows user to resize maximum size of the compaction thread pool.
     *
     * @param number
     *            New maximum of compaction threads
     */
+    @Override
    public void setMaximumCompactorThreads(int number) {
        log(" setMaximumCompactorThreads(int number)");
    }
@@ -200,17 +176,19 @@ public class CompactionManager implements CompactionManagerMBean {
    /**
     * Returns core size of validation thread pool
     */
+    @Override
    public int getCoreValidationThreads() {
        log(" getCoreValidationThreads()");
-        return c.getIntValue("");
+        return client.getIntValue("");
    }

    /**
     * Allows user to resize maximum size of the compaction thread pool.
     *
     * @param number
     *            New maximum of compaction threads
     */
+    @Override
    public void setCoreValidationThreads(int number) {
        log(" setCoreValidationThreads(int number)");
    }
@@ -218,19 +196,31 @@ public class CompactionManager implements CompactionManagerMBean {
    /**
     * Returns size of validator thread pool
     */
+    @Override
    public int getMaximumValidatorThreads() {
        log(" getMaximumValidatorThreads()");
-        return c.getIntValue("");
+        return client.getIntValue("");
    }

    /**
     * Allows user to resize maximum size of the validator thread pool.
     *
     * @param number
     *            New maximum of validator threads
     */
+    @Override
    public void setMaximumValidatorThreads(int number) {
        log(" setMaximumValidatorThreads(int number)");
    }
+
+    @Override
+    public void stopCompactionById(String compactionId) {
+        // scylla has neither compaction ids nor the file described in:
+        // "Ids can be found in the transaction log files whose name starts with
+        // compaction_, located in the table transactions folder"
+        // (nodetool)
+        // TODO: throw?
+        log(" stopCompactionById");
+    }
}
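With `getCompactions()` now assembled from the REST JSON above, a JMX client sees plain string maps. A sketch that prints a `nodetool compactionstats`-style line per task, using the map keys the method populates; the object name is the `MBEAN_OBJECT_NAME` constant from the class:

import java.util.Map;

import javax.management.JMX;
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;

import org.apache.cassandra.db.compaction.CompactionManagerMBean;

public class ActiveCompactionsExample {
    static void show(MBeanServerConnection mbs) throws Exception {
        ObjectName name = new ObjectName("org.apache.cassandra.db:type=CompactionManager");
        CompactionManagerMBean cm = JMX.newMBeanProxy(mbs, name, CompactionManagerMBean.class);
        for (Map<String, String> c : cm.getCompactions()) {
            long total = Long.parseLong(c.get("total"));
            long completed = Long.parseLong(c.get("completed"));
            double pct = total == 0 ? 100.0 : 100.0 * completed / total;
            System.out.printf("%s %s.%s %.1f%% (%s) id=%s%n", c.get("taskType"),
                    c.get("keyspace"), c.get("columnfamily"), pct, c.get("unit"),
                    c.get("compactionId"));
        }
    }
}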
src/main/java/org/apache/cassandra/db/compaction/CompactionManagerMBean.java

@@ -19,6 +19,7 @@ package org.apache.cassandra.db.compaction;

import java.util.List;
import java.util.Map;

import javax.management.openmbean.TabularData;

public interface CompactionManagerMBean {
@@ -31,34 +32,6 @@ public interface CompactionManagerMBean {
    /** compaction history **/
    public TabularData getCompactionHistory();

-    /**
-     * @see org.apache.cassandra.metrics.CompactionMetrics#pendingTasks
-     * @return estimated number of compactions remaining to perform
-     */
-    @Deprecated
-    public int getPendingTasks();
-
-    /**
-     * @see org.apache.cassandra.metrics.CompactionMetrics#completedTasks
-     * @return number of completed compactions since server [re]start
-     */
-    @Deprecated
-    public long getCompletedTasks();
-
-    /**
-     * @see org.apache.cassandra.metrics.CompactionMetrics#bytesCompacted
-     * @return total number of bytes compacted since server [re]start
-     */
-    @Deprecated
-    public long getTotalBytesCompacted();
-
-    /**
-     * @see org.apache.cassandra.metrics.CompactionMetrics#totalCompactionsCompleted
-     * @return total number of compactions since server [re]start
-     */
-    @Deprecated
-    public long getTotalCompactionsCompleted();
-
    /**
     * Triggers the compaction of user specified sstables. You can specify files
     * from various keyspaces and columnfamilies. If you do so, user defined
@@ -70,15 +43,37 @@ public interface CompactionManagerMBean {
     */
    public void forceUserDefinedCompaction(String dataFiles);

+    /**
+     * Triggers the cleanup of user specified sstables.
+     * You can specify files from various keyspaces and columnfamilies.
+     * If you do so, cleanup is performed on each file individually
+     *
+     * @param dataFiles a comma separated list of sstable files to clean up.
+     *        must contain keyspace and columnfamily name in path(for 2.1+) or file name itself.
+     */
+    default public void forceUserDefinedCleanup(String dataFiles) {
+        throw new UnsupportedOperationException();
+    }
+
    /**
     * Stop all running compaction-like tasks having the provided {@code type}.
     *
     * @param type
     *            the type of compaction to stop. Can be one of: - COMPACTION -
     *            VALIDATION - CLEANUP - SCRUB - INDEX_BUILD
     */
    public void stopCompaction(String type);

+    /**
+     * Stop an individual running compaction using the compactionId.
+     *
+     * @param compactionId
+     *            Compaction ID of compaction to stop. Such IDs can be found in
+     *            the transaction log files whose name starts with compaction_,
+     *            located in the table transactions folder.
+     */
+    public void stopCompactionById(String compactionId);
+
    /**
     * Returns core size of compaction thread pool
     */
@@ -86,7 +81,7 @@ public interface CompactionManagerMBean {

    /**
     * Allows user to resize maximum size of the compaction thread pool.
     *
     * @param number
     *            New maximum of compaction threads
     */
@@ -99,7 +94,7 @@ public interface CompactionManagerMBean {

    /**
     * Allows user to resize maximum size of the compaction thread pool.
     *
     * @param number
     *            New maximum of compaction threads
     */
@@ -112,7 +107,7 @@ public interface CompactionManagerMBean {

    /**
     * Allows user to resize maximum size of the compaction thread pool.
     *
     * @param number
     *            New maximum of compaction threads
     */
@@ -125,7 +120,7 @@ public interface CompactionManagerMBean {

    /**
     * Allows user to resize maximum size of the validator thread pool.
     *
     * @param number
     *            New maximum of validator threads
     */
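`forceUserDefinedCleanup` is added as a `default` method that throws, so existing implementations (including the Scylla one above, which does not override it) keep compiling while servers that lack the operation fail loudly. A sketch of a defensive caller; note that over a remote JMX connection the server-side `UnsupportedOperationException` may arrive wrapped in a `RuntimeMBeanException`, which this simple catch does not unwrap:

import org.apache.cassandra.db.compaction.CompactionManagerMBean;

public class CleanupProbeExample {
    // Returns false when the server does not implement the operation.
    static boolean tryCleanup(CompactionManagerMBean cm, String dataFiles) {
        try {
            cm.forceUserDefinedCleanup(dataFiles);
            return true;
        } catch (UnsupportedOperationException e) {
            return false;
        }
    }
}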
src/main/java/org/apache/cassandra/gms/ApplicationState.java (new file, 35 lines)

@@ -0,0 +1,35 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Copyright (C) 2015 ScyllaDB
+ */
+/*
+ * Modified by ScyllaDB
+ */
+
+package org.apache.cassandra.gms;
+
+public enum ApplicationState {
+    STATUS, LOAD, SCHEMA, DC, RACK, RELEASE_VERSION, REMOVAL_COORDINATOR, INTERNAL_IP, RPC_ADDRESS,
+    X_11_PADDING, // padding specifically for 1.1
+    SEVERITY, NET_VERSION, HOST_ID, TOKENS,
+    // pad to allow adding new states to existing cluster
+    X1, X2, X3, X4, X5, X6, X7, X8, X9, X10,
+}
109
src/main/java/org/apache/cassandra/gms/EndpointState.java
Normal file
109
src/main/java/org/apache/cassandra/gms/EndpointState.java
Normal file
@ -0,0 +1,109 @@
|
|||||||
|
/*
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright (C) 2015 ScyllaDB
 */

/*
 * Modified by ScyllaDB
 */

package org.apache.cassandra.gms;

import java.util.HashMap;
import java.util.Map;

/**
 * This abstraction represents both the HeartBeatState and the ApplicationState
 * in an EndpointState instance. Any state for a given endpoint can be retrieved
 * from this instance.
 */

public class EndpointState {
    private volatile HeartBeatState hbState;

    final Map<ApplicationState, String> applicationState = new HashMap<ApplicationState, String>();

    private volatile long updateTimestamp;
    private volatile boolean isAlive;
    ApplicationState[] applicationValues;
    private static final java.util.logging.Logger logger = java.util.logging.Logger
            .getLogger(EndpointState.class.getName());

    EndpointState(HeartBeatState initialHbState) {
        applicationValues = ApplicationState.values();
        hbState = initialHbState;
        updateTimestamp = System.nanoTime();
        isAlive = true;
    }

    HeartBeatState getHeartBeatState() {
        return hbState;
    }

    void setHeartBeatState(HeartBeatState newHbState) {
        hbState = newHbState;
    }

    public String getApplicationState(ApplicationState key) {
        return applicationState.get(key);
    }

    /**
     * TODO replace this with operations that don't expose private state
     */
    @Deprecated
    public Map<ApplicationState, String> getApplicationStateMap() {
        return applicationState;
    }

    void addApplicationState(ApplicationState key, String value) {
        applicationState.put(key, value);
    }

    void addApplicationState(int key, String value) {
        if (key < 0 || key >= applicationValues.length) {
            logger.warning("Unknown application state with id:" + key);
            return;
        }
        addApplicationState(applicationValues[key], value);
    }

    /* getters and setters */
    /**
     * @return System.nanoTime() when state was updated last time.
     */
    public long getUpdateTimestamp() {
        return updateTimestamp;
    }

    public void setUpdateTimestamp(long ts) {
        updateTimestamp = ts;
    }

    public boolean isAlive() {
        return isAlive;
    }

    public void setAlive(boolean alive) {
        isAlive = alive;
    }

    @Override
    public String toString() {
        return "EndpointState: HeartBeatState = " + hbState + ", AppStateMap = " + applicationState;
    }
}
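A minimal usage sketch (illustrative, not part of the diff): EndpointState and its mutators are package-private, so the snippet below assumes it runs inside org.apache.cassandra.gms, and it assumes ApplicationState exposes a STATUS constant as in Cassandra's gms package.

    // Build an endpoint state around a heartbeat and attach one application state.
    HeartBeatState hb = new HeartBeatState(1, 0);   // generation 1, version 0
    EndpointState ep = new EndpointState(hb);
    // the int overload maps the index through ApplicationState.values()
    ep.addApplicationState(ApplicationState.STATUS.ordinal(), "NORMAL");
    System.out.println(ep); // EndpointState: HeartBeatState = HeartBeat: generation = 1, version = 0, AppStateMap = {STATUS=NORMAL}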
@@ -24,77 +24,155 @@
 package org.apache.cassandra.gms;
 
-import java.lang.management.ManagementFactory;
+import jakarta.json.JsonArray;
+import jakarta.json.JsonObject;
+import jakarta.json.JsonValue;
 import java.net.UnknownHostException;
-import java.util.*;
+import java.util.HashMap;
+import java.util.Map;
 
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
+import javax.management.openmbean.CompositeData;
+import javax.management.openmbean.CompositeDataSupport;
+import javax.management.openmbean.CompositeType;
+import javax.management.openmbean.OpenDataException;
+import javax.management.openmbean.OpenType;
+import javax.management.openmbean.SimpleType;
+import javax.management.openmbean.TabularData;
+import javax.management.openmbean.TabularDataSupport;
+import javax.management.openmbean.TabularType;
 
-import com.cloudius.urchin.api.APIClient;
+import com.scylladb.jmx.api.APIClient;
+import com.scylladb.jmx.metrics.APIMBean;
 
-public class FailureDetector implements FailureDetectorMBean {
+public class FailureDetector extends APIMBean implements FailureDetectorMBean {
     public static final String MBEAN_NAME = "org.apache.cassandra.net:type=FailureDetector";
     private static final java.util.logging.Logger logger = java.util.logging.Logger
             .getLogger(FailureDetector.class.getName());
 
-    private APIClient c = new APIClient();
+    public FailureDetector(APIClient c) {
+        super(c);
+    }
 
     public void log(String str) {
-        logger.info(str);
-    }
-
-    private static final FailureDetector instance = new FailureDetector();
-
-    public static FailureDetector getInstance() {
-        return instance;
-    }
-
-    private FailureDetector() {
-        // Register this instance with JMX
-        try {
-            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-            mbs.registerMBean(this, new ObjectName(MBEAN_NAME));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
+        logger.finest(str);
     }
 
+    @Override
     public void dumpInterArrivalTimes() {
         log(" dumpInterArrivalTimes()");
     }
 
+    @Override
     public void setPhiConvictThreshold(double phi) {
         log(" setPhiConvictThreshold(double phi)");
     }
 
+    @Override
     public double getPhiConvictThreshold() {
         log(" getPhiConvictThreshold()");
-        return c.getDoubleValue("/failure_detector/phi");
+        return client.getDoubleValue("/failure_detector/phi");
     }
 
+    @Override
     public String getAllEndpointStates() {
         log(" getAllEndpointStates()");
-        return c.getStringValue("/failure_detector/endpoints");
+
+        StringBuilder sb = new StringBuilder();
+        for (Map.Entry<String, EndpointState> entry : getEndpointStateMap().entrySet()) {
+            sb.append('/').append(entry.getKey()).append("\n");
+            appendEndpointState(sb, entry.getValue());
+        }
+        return sb.toString();
     }
 
+    private void appendEndpointState(StringBuilder sb, EndpointState endpointState) {
+        sb.append(" generation:").append(endpointState.getHeartBeatState().getGeneration()).append("\n");
+        sb.append(" heartbeat:").append(endpointState.getHeartBeatState().getHeartBeatVersion()).append("\n");
+        for (Map.Entry<ApplicationState, String> state : endpointState.applicationState.entrySet()) {
+            if (state.getKey() == ApplicationState.TOKENS) {
+                continue;
+            }
+            sb.append(" ").append(state.getKey()).append(":").append(state.getValue()).append("\n");
+        }
+    }
+
+    public Map<String, EndpointState> getEndpointStateMap() {
+        Map<String, EndpointState> res = new HashMap<String, EndpointState>();
+        JsonArray arr = client.getJsonArray("/failure_detector/endpoints");
+        for (int i = 0; i < arr.size(); i++) {
+            JsonObject obj = arr.getJsonObject(i);
+            EndpointState ep = new EndpointState(new HeartBeatState(obj.getInt("generation"), obj.getInt("version")));
+            ep.setAlive(obj.getBoolean("is_alive"));
+            ep.setUpdateTimestamp(obj.getJsonNumber("update_time").longValue());
+            JsonArray states = obj.getJsonArray("application_state");
+            if (states != null) {
+                for (int j = 0; j < states.size(); j++) {
+                    JsonObject state = states.getJsonObject(j);
+                    ep.addApplicationState(state.getInt("application_state"), state.getString("value"));
+                }
+            }
+            res.put(obj.getString("addrs"), ep);
+        }
+        return res;
+    }
+
+    @Override
     public String getEndpointState(String address) throws UnknownHostException {
         log(" getEndpointState(String address) throws UnknownHostException");
-        return c.getStringValue("/failure_detector/endpoints/states/" + address);
+        return client.getStringValue("/failure_detector/endpoints/states/" + address);
     }
 
+    @Override
     public Map<String, String> getSimpleStates() {
         log(" getSimpleStates()");
-        return c.getMapStrValue("/failure_detector/simple_states");
+        return client.getMapStrValue("/failure_detector/simple_states");
     }
 
+    @Override
     public int getDownEndpointCount() {
         log(" getDownEndpointCount()");
-        return c.getIntValue("/failure_detector/count/endpoint/down");
+        return client.getIntValue("/failure_detector/count/endpoint/down");
     }
 
+    @Override
     public int getUpEndpointCount() {
         log(" getUpEndpointCount()");
-        return c.getIntValue("/failure_detector/count/endpoint/up");
+        return client.getIntValue("/failure_detector/count/endpoint/up");
     }
 
+    // From origin:
+    // this is useless except to provide backwards compatibility in
+    // phi_convict_threshold, because everyone seems pretty accustomed to the
+    // default of 8, and users who have already tuned their
+    // phi_convict_threshold for their own environments won't need to change.
+    private final double PHI_FACTOR = 1.0 / Math.log(10.0); // 0.434...
+
+    @Override
+    public TabularData getPhiValues() throws OpenDataException {
+        final CompositeType ct = new CompositeType("Node", "Node", new String[] { "Endpoint", "PHI" },
+                new String[] { "IP of the endpoint", "PHI value" },
+                new OpenType[] { SimpleType.STRING, SimpleType.DOUBLE });
+        final TabularDataSupport results = new TabularDataSupport(
+                new TabularType("PhiList", "PhiList", ct, new String[] { "Endpoint" }));
+        final JsonArray arr = client.getJsonArray("/failure_detector/endpoint_phi_values");
+
+        for (JsonValue v : arr) {
+            JsonObject o = (JsonObject) v;
+            String endpoint = o.getString("endpoint");
+            double phi = Double.parseDouble(o.getString("phi"));
+
+            if (phi != Double.MIN_VALUE) {
+                // returned values are scaled by PHI_FACTOR so that they are on
+                // the same scale as PhiConvictThreshold
+                final CompositeData data = new CompositeDataSupport(ct, new String[] { "Endpoint", "PHI" },
+                        new Object[] { endpoint, phi * PHI_FACTOR });
+                results.put(data);
+            }
+        }
+
+        return results;
+    }
 }
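To make the shape of getPhiValues() concrete, here is a hedged consumer-side sketch. It assumes an APIClient can be constructed with no arguments, as the removed field in the old code did; error handling is elided.

    // Iterate the TabularData returned by the new getPhiValues() operation.
    FailureDetector fd = new FailureDetector(new APIClient());
    TabularData phis = fd.getPhiValues();
    for (Object row : phis.values()) {
        CompositeData cd = (CompositeData) row;
        System.out.printf("%s -> %.3f%n", cd.get("Endpoint"), (Double) cd.get("PHI"));
    }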
@@ -20,8 +20,10 @@ package org.apache.cassandra.gms;
 import java.net.UnknownHostException;
 import java.util.Map;
 
-public interface FailureDetectorMBean
-{
+import javax.management.openmbean.OpenDataException;
+import javax.management.openmbean.TabularData;
+
+public interface FailureDetectorMBean {
     public void dumpInterArrivalTimes();
 
     public void setPhiConvictThreshold(double phi);
@@ -37,4 +39,6 @@ public interface FailureDetectorMBean
     public int getDownEndpointCount();
 
     public int getUpEndpointCount();
+
+    public TabularData getPhiValues() throws OpenDataException;
 }
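Since the interface now returns open MBean types, a standard JMX proxy is the simplest client. A sketch using only JDK API; the MBeanServerConnection named connection is assumed to exist:

    // javax.management.JMX is JDK API; the object name matches MBEAN_NAME above.
    FailureDetectorMBean proxy = JMX.newMBeanProxy(connection,
            new ObjectName("org.apache.cassandra.net:type=FailureDetector"),
            FailureDetectorMBean.class);
    double threshold = proxy.getPhiConvictThreshold();
    TabularData phis = proxy.getPhiValues();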
@@ -23,15 +23,13 @@
  */
 package org.apache.cassandra.gms;
 
-import java.lang.management.ManagementFactory;
+import jakarta.ws.rs.core.MultivaluedHashMap;
+import jakarta.ws.rs.core.MultivaluedMap;
 import java.net.UnknownHostException;
+import java.util.logging.Logger;
 
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-import javax.ws.rs.core.MultivaluedHashMap;
-import javax.ws.rs.core.MultivaluedMap;
-
-import com.cloudius.urchin.api.APIClient;
+import com.scylladb.jmx.api.APIClient;
+import com.scylladb.jmx.metrics.APIMBean;
 
 /**
  * This module is responsible for Gossiping information for the local endpoint.
@@ -48,57 +46,43 @@ import com.cloudius.urchin.api.APIClient;
  * node as down in the Failure Detector.
  */
 
-public class Gossiper implements GossiperMBean {
+public class Gossiper extends APIMBean implements GossiperMBean {
     public static final String MBEAN_NAME = "org.apache.cassandra.net:type=Gossiper";
 
-    private static final java.util.logging.Logger logger = java.util.logging.Logger
-            .getLogger(Gossiper.class.getName());
+    private static final Logger logger = Logger.getLogger(Gossiper.class.getName());
 
-    private APIClient c = new APIClient();
+    public Gossiper(APIClient c) {
+        super(c);
+    }
 
     public void log(String str) {
-        logger.info(str);
-    }
-
-    private static final Gossiper instance = new Gossiper();
-
-    public static Gossiper getInstance() {
-        return instance;
-    }
-
-    private Gossiper() {
-        // Register this instance with JMX
-        try {
-            MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-            mbs.registerMBean(this, new ObjectName(MBEAN_NAME));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
+        logger.finest(str);
     }
 
+    @Override
     public long getEndpointDowntime(String address) throws UnknownHostException {
         log(" getEndpointDowntime(String address) throws UnknownHostException");
-        return c.getLongValue("gossiper/downtime/" + address);
+        return client.getLongValue("gossiper/downtime/" + address);
     }
 
-    public int getCurrentGenerationNumber(String address)
-            throws UnknownHostException {
+    @Override
+    public int getCurrentGenerationNumber(String address) throws UnknownHostException {
         log(" getCurrentGenerationNumber(String address) throws UnknownHostException");
-        return c.getIntValue("gossiper/generation_number/" + address);
+        return client.getIntValue("gossiper/generation_number/" + address);
     }
 
-    public void unsafeAssassinateEndpoint(String address)
-            throws UnknownHostException {
+    @Override
+    public void unsafeAssassinateEndpoint(String address) throws UnknownHostException {
         log(" unsafeAssassinateEndpoint(String address) throws UnknownHostException");
         MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
         queryParams.add("unsafe", "True");
-        c.post("gossiper/assassinate/" + address, queryParams);
+        client.post("gossiper/assassinate/" + address, queryParams);
     }
 
+    @Override
     public void assassinateEndpoint(String address) throws UnknownHostException {
         log(" assassinateEndpoint(String address) throws UnknownHostException");
-        c.post("gossiper/assassinate/" + address, null);
+        client.post("gossiper/assassinate/" + address, null);
     }
 
 }
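For comparison, the same operations can be invoked without a typed proxy, using the standard MBeanServerConnection.invoke call; the address argument below is illustrative:

    ObjectName name = new ObjectName("org.apache.cassandra.net:type=Gossiper");
    // invoke(name, operation, params, signature) is standard JMX API
    Long downtime = (Long) connection.invoke(name, "getEndpointDowntime",
            new Object[] { "127.0.0.1" }, new String[] { String.class.getName() });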
@@ -19,12 +19,13 @@ package org.apache.cassandra.gms;
 
 import java.net.UnknownHostException;
 
-public interface GossiperMBean
-{
+public interface GossiperMBean {
     public long getEndpointDowntime(String address) throws UnknownHostException;
 
     public int getCurrentGenerationNumber(String address) throws UnknownHostException;
 
     public void unsafeAssassinateEndpoint(String address) throws UnknownHostException;
 
+    public void assassinateEndpoint(String address) throws UnknownHostException;
+
 }
src/main/java/org/apache/cassandra/gms/HeartBeatState.java (new file, 65 lines)
@@ -0,0 +1,65 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Copyright (C) 2015 ScyllaDB
 */
/*
 * Modified by ScyllaDB
 */
package org.apache.cassandra.gms;

/**
 * HeartBeat State associated with any given endpoint.
 */
class HeartBeatState {
    private int generation;
    private int version;

    HeartBeatState(int gen) {
        this(gen, 0);
    }

    HeartBeatState(int gen, int ver) {
        generation = gen;
        version = ver;
    }

    int getGeneration() {
        return generation;
    }

    void updateHeartBeat() {
    }

    int getHeartBeatVersion() {
        return version;
    }

    void forceNewerGenerationUnsafe() {
        generation += 1;
    }

    void forceHighestPossibleVersionUnsafe() {
        version = Integer.MAX_VALUE;
    }

    @Override
    public String toString() {
        return String.format("HeartBeat: generation = %d, version = %d", generation, version);
    }
}
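The class is deliberately tiny; a sketch of its state transitions (same package, since the class is package-private):

    HeartBeatState hb = new HeartBeatState(3);   // generation 3, version 0
    hb.forceNewerGenerationUnsafe();             // generation becomes 4
    hb.forceHighestPossibleVersionUnsafe();      // version pinned to Integer.MAX_VALUE
    System.out.println(hb); // HeartBeat: generation = 4, version = 2147483647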
@@ -17,41 +17,27 @@
  */
 package org.apache.cassandra.locator;
 
-import java.lang.management.ManagementFactory;
+import static java.util.Collections.singletonMap;
+
+import jakarta.ws.rs.core.MultivaluedHashMap;
+import jakarta.ws.rs.core.MultivaluedMap;
 import java.net.InetAddress;
 import java.net.UnknownHostException;
+import java.util.logging.Logger;
 
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
-import javax.ws.rs.core.MultivaluedHashMap;
-import javax.ws.rs.core.MultivaluedMap;
-
-import com.cloudius.urchin.api.APIClient;
+import com.scylladb.jmx.api.APIClient;
+import com.scylladb.jmx.metrics.APIMBean;
 
-public class EndpointSnitchInfo implements EndpointSnitchInfoMBean {
-    private static final java.util.logging.Logger logger = java.util.logging.Logger
-            .getLogger(EndpointSnitchInfo.class.getName());
+public class EndpointSnitchInfo extends APIMBean implements EndpointSnitchInfoMBean {
+    public static final String MBEAN_NAME = "org.apache.cassandra.db:type=EndpointSnitchInfo";
+    private static final Logger logger = Logger.getLogger(EndpointSnitchInfo.class.getName());
 
-    private APIClient c = new APIClient();
+    public EndpointSnitchInfo(APIClient c) {
+        super(c);
+    }
 
     public void log(String str) {
-        logger.info(str);
-    }
-
-    private static final EndpointSnitchInfo instance = new EndpointSnitchInfo();
-
-    public static EndpointSnitchInfo getInstance() {
-        return instance;
-    }
-
-    private EndpointSnitchInfo() {
-        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-        try {
-            mbs.registerMBean(this, new ObjectName(
-                    "org.apache.cassandra.db:type=EndpointSnitchInfo"));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
+        logger.finest(str);
     }
 
     /**
@@ -64,12 +50,9 @@ public class EndpointSnitchInfo implements EndpointSnitchInfoMBean {
     @Override
     public String getRack(String host) throws UnknownHostException {
         log("getRack(String host) throws UnknownHostException");
-        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
-        if (host == null) {
-            host = InetAddress.getLoopbackAddress().getHostAddress();
-        }
-        queryParams.add("host", host);
-        return c.getStringValue("/snitch/rack", queryParams, 10000);
+        MultivaluedMap<String, String> queryParams = host != null ? new MultivaluedHashMap<String, String>(
+                singletonMap("host", InetAddress.getByName(host).getHostAddress())) : null;
+        return client.getStringValue("/snitch/rack", queryParams, 10000);
     }
 
     /**
@@ -82,12 +65,9 @@ public class EndpointSnitchInfo implements EndpointSnitchInfoMBean {
     @Override
     public String getDatacenter(String host) throws UnknownHostException {
         log(" getDatacenter(String host) throws UnknownHostException");
-        MultivaluedMap<String, String> queryParams = new MultivaluedHashMap<String, String>();
-        if (host == null) {
-            host = InetAddress.getLoopbackAddress().getHostAddress();
-        }
-        queryParams.add("host", host);
-        return c.getStringValue("/snitch/datacenter", queryParams, 10000);
+        MultivaluedMap<String, String> queryParams = host != null ? new MultivaluedHashMap<String, String>(
+                singletonMap("host", InetAddress.getByName(host).getHostAddress())) : null;
+        return client.getStringValue("/snitch/datacenter", queryParams, 10000);
     }
 
     /**
@@ -98,7 +78,16 @@ public class EndpointSnitchInfo implements EndpointSnitchInfoMBean {
     @Override
     public String getSnitchName() {
         log(" getSnitchName()");
-        return c.getStringValue("/snitch/name");
+        return client.getStringValue("/snitch/name");
     }
+
+    @Override
+    public String getRack() {
+        return client.getStringValue("/snitch/rack", null, 10000);
+    }
+
+    @Override
+    public String getDatacenter() {
+        return client.getStringValue("/snitch/datacenter", null, 10000);
+    }
 }
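The rewritten getRack/getDatacenter collapse the old null-check into a single conditional expression. The same pattern in isolation, using the jakarta.ws.rs types imported above (the host value is illustrative):

    String host = "10.0.0.1"; // illustrative
    // MultivaluedHashMap(Map) copies each entry as a single-valued list;
    // passing null instead means "no query parameters" on the REST call.
    MultivaluedMap<String, String> queryParams = host != null
            ? new MultivaluedHashMap<String, String>(singletonMap("host", host))
            : null;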
@@ -22,25 +22,40 @@ import java.net.UnknownHostException;
 /**
  * MBean exposing standard Snitch info
  */
-public interface EndpointSnitchInfoMBean
-{
+public interface EndpointSnitchInfoMBean {
     /**
-     * Provides the Rack name depending on the respective snitch used, given the host name/ip
+     * Provides the Rack name depending on the respective snitch used, given the
+     * host name/ip
+     *
      * @param host
      * @throws UnknownHostException
      */
     public String getRack(String host) throws UnknownHostException;
 
     /**
-     * Provides the Datacenter name depending on the respective snitch used, given the hostname/ip
+     * Provides the Datacenter name depending on the respective snitch used,
+     * given the hostname/ip
+     *
      * @param host
      * @throws UnknownHostException
      */
     public String getDatacenter(String host) throws UnknownHostException;
 
+    /**
+     * Provides the Rack name depending on the respective snitch used for this
+     * node
+     */
+    public String getRack();
+
+    /**
+     * Provides the Datacenter name depending on the respective snitch used for
+     * this node
+     */
+    public String getDatacenter();
+
     /**
      * Provides the snitch name of the cluster
+     *
      * @return Snitch name
      */
     public String getSnitchName();
@@ -25,34 +25,20 @@
 
 package org.apache.cassandra.metrics;
 
-import com.cloudius.urchin.metrics.APIMetrics;
-import com.yammer.metrics.core.*;
+import javax.management.MalformedObjectNameException;
 
+// TODO: In StorageProxy
 public class CASClientRequestMetrics extends ClientRequestMetrics {
-
-    public final Histogram contention;
-    /* Used only for write */
-    public final Counter conditionNotMet;
-    public final Counter unfinishedCommit;
-
-    public CASClientRequestMetrics(String url, String scope) {
-        super(url, scope);
-        contention = APIMetrics.newHistogram(url + "contention",
-                factory.createMetricName("ContentionHistogram"), true);
-        conditionNotMet = APIMetrics.newCounter(url + "condition_not_met",
-                factory.createMetricName("ConditionNotMet"));
-        unfinishedCommit = APIMetrics.newCounter(url + "unfinished_commit",
-                factory.createMetricName("UnfinishedCommit"));
+    public CASClientRequestMetrics(String scope, String url) {
+        super(scope, url);
     }
 
-    public void release() {
-        super.release();
-        APIMetrics.defaultRegistry().removeMetric(
-                factory.createMetricName("ContentionHistogram"));
-        APIMetrics.defaultRegistry().removeMetric(
-                factory.createMetricName("ConditionNotMet"));
-        APIMetrics.defaultRegistry().removeMetric(
-                factory.createMetricName("UnfinishedCommit"));
+    @Override
+    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
+        super.register(registry);
+        registry.register(() -> registry.histogram(uri + "/contention", true), names("ContentionHistogram"));
+        registry.register(() -> registry.counter(uri + "/condition_not_met"), names("ConditionNotMet"));
+        registry.register(() -> registry.counter(uri + "/unfinished_commit"), names("UnfinishedCommit"));
     }
 }
@@ -23,32 +23,19 @@
  */
 package org.apache.cassandra.metrics;
 
-import java.util.concurrent.TimeUnit;
-
-import com.cloudius.urchin.api.APIClient;
-import com.cloudius.urchin.metrics.APIMetrics;
-import com.cloudius.urchin.metrics.DefaultNameFactory;
-import com.cloudius.urchin.metrics.MetricNameFactory;
-import com.yammer.metrics.core.Gauge;
-import com.yammer.metrics.core.Meter;
+import javax.management.MalformedObjectNameException;
 
 /**
  * Metrics for {@code ICache}.
  */
-public class CacheMetrics {
-    /** Cache capacity in bytes */
-    public final Gauge<Long> capacity;
-    /** Total number of cache hits */
-    public final Meter hits;
-    /** Total number of cache requests */
-    public final Meter requests;
-    /** cache hit rate */
-    public final Gauge<Double> hitRate;
-    /** Total size of cache, in bytes */
-    public final Gauge<Long> size;
-    /** Total number of cache entries */
-    public final Gauge<Integer> entries;
-
-    private APIClient c = new APIClient();
+public class CacheMetrics implements Metrics {
+    private final String type;
+    private final String url;
+
+    private String compose(String value) {
+        return "/cache_service/metrics/" + url + "/" + value;
+    }
 
     /**
      * Create metrics for given cache.
@@ -59,48 +46,21 @@ public class CacheMetrics {
      *            Cache to measure metrics
      */
     public CacheMetrics(String type, final String url) {
+        this.type = type;
+        this.url = url;
+    }
+
+    @Override
+    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
         MetricNameFactory factory = new DefaultNameFactory("Cache", type);
 
-        capacity = APIMetrics.newGauge(factory.createMetricName("Capacity"),
-                new Gauge<Long>() {
-                    public Long value() {
-                        return c.getLongValue("/cache_service/metrics/" + url
-                                + "/capacity");
-                    }
-                });
-        hits = APIMetrics.newMeter("/cache_service/metrics/" + url
-                + "/hits", factory.createMetricName("Hits"), "hits",
-                TimeUnit.SECONDS);
-        requests = APIMetrics.newMeter("/cache_service/metrics/" + url
-                + "/requests", factory.createMetricName("Requests"),
-                "requests", TimeUnit.SECONDS);
-        hitRate = APIMetrics.newGauge(factory.createMetricName("HitRate"),
-                new Gauge<Double>() {
-                    @Override
-                    public Double value() {
-                        return c.getDoubleValue("/cache_service/metrics/" + url
-                                + "/hit_rate");
-                    }
-                });
-        size = APIMetrics.newGauge(factory.createMetricName("Size"),
-                new Gauge<Long>() {
-                    public Long value() {
-                        return c.getLongValue("/cache_service/metrics/" + url
-                                + "/size");
-                    }
-                });
-        entries = APIMetrics.newGauge(factory.createMetricName("Entries"),
-                new Gauge<Integer>() {
-                    public Integer value() {
-                        return c.getIntValue("/cache_service/metrics/" + url
-                                + "/entries");
-                    }
-                });
-    }
-
-    // for backward compatibility
-    @Deprecated
-    public double getRecentHitRate() {
-        return 0;
+        registry.register(() -> registry.gauge(compose("capacity")), factory.createMetricName("Capacity"));
+        registry.register(() -> registry.meter(compose("hits_moving_avrage")), factory.createMetricName("Hits"));
+        registry.register(() -> registry.meter(compose("requests_moving_avrage")),
+                factory.createMetricName("Requests"));
+        registry.register(() -> registry.gauge(Double.class, compose("hit_rate")), factory.createMetricName("HitRate"));
+        registry.register(() -> registry.gauge(compose("size")), factory.createMetricName("Size"));
+        registry.register(() -> registry.gauge(Integer.class, compose("entries")), factory.createMetricName("Entries"));
     }
 }
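Registration is now lazy: the constructor only records type and url, and register() binds the gauges and meters. A hedged wiring sketch; MetricsRegistry is this repo's own class, only the calls visible in the diff are assumed, and the constructor arguments are illustrative:

    CacheMetrics keyCache = new CacheMetrics("KeyCache", "key_cache"); // illustrative args
    keyCache.register(registry); // binds Capacity, Hits, Requests, HitRate, Size, Entries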
@@ -27,51 +27,17 @@
 
 package org.apache.cassandra.metrics;
 
-import java.util.concurrent.TimeUnit;
-
-import com.cloudius.urchin.metrics.APIMetrics;
-import com.cloudius.urchin.metrics.DefaultNameFactory;
-import com.yammer.metrics.Metrics;
-import com.yammer.metrics.core.Counter;
-import com.yammer.metrics.core.Meter;
+import javax.management.MalformedObjectNameException;
 
 public class ClientRequestMetrics extends LatencyMetrics {
-    @Deprecated
-    public static final Counter readTimeouts = Metrics
-            .newCounter(DefaultNameFactory.createMetricName(
-                    "ClientRequestMetrics", "ReadTimeouts", null));
-    @Deprecated
-    public static final Counter writeTimeouts = Metrics
-            .newCounter(DefaultNameFactory.createMetricName(
-                    "ClientRequestMetrics", "WriteTimeouts", null));
-    @Deprecated
-    public static final Counter readUnavailables = Metrics
-            .newCounter(DefaultNameFactory.createMetricName(
-                    "ClientRequestMetrics", "ReadUnavailables", null));
-    @Deprecated
-    public static final Counter writeUnavailables = Metrics
-            .newCounter(DefaultNameFactory.createMetricName(
-                    "ClientRequestMetrics", "WriteUnavailables", null));
-
-    public final Meter timeouts;
-    public final Meter unavailables;
-
-    public ClientRequestMetrics(String url, String scope) {
-        super(url, "ClientRequest", scope);
-
-        timeouts = APIMetrics.newMeter(url + "/timeouts",
-                factory.createMetricName("Timeouts"), "timeouts",
-                TimeUnit.SECONDS);
-        unavailables = APIMetrics.newMeter(url + "/unavailables",
-                factory.createMetricName("Unavailables"), "unavailables",
-                TimeUnit.SECONDS);
+    public ClientRequestMetrics(String scope, String url) {
+        super("ClientRequest", scope, url);
     }
 
-    public void release() {
-        super.release();
-        APIMetrics.defaultRegistry().removeMetric(
-                factory.createMetricName("Timeouts"));
-        APIMetrics.defaultRegistry().removeMetric(
-                factory.createMetricName("Unavailables"));
+    @Override
+    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
+        super.register(registry);
+        registry.register(() -> registry.meter(uri + "/timeouts_rates"), names("Timeouts"));
+        registry.register(() -> registry.meter(uri + "/unavailables_rates"), names("Unavailables"));
     }
 }
@@ -1,577 +0,0 @@ (file deleted; the removed content follows)
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2015 Cloudius Systems
 *
 * Modified by Cloudius Systems
 */
package org.apache.cassandra.metrics;

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;

import org.apache.cassandra.db.ColumnFamilyStore;

import com.cloudius.urchin.api.APIClient;
import com.cloudius.urchin.metrics.APIMetrics;
import com.cloudius.urchin.metrics.MetricNameFactory;
import com.cloudius.urchin.utils.EstimatedHistogram;
import com.cloudius.urchin.utils.RecentEstimatedHistogram;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.yammer.metrics.Metrics;
import com.yammer.metrics.core.*;

/**
 * Metrics for {@link ColumnFamilyStore}.
 */
public class ColumnFamilyMetrics {
    private APIClient c = new APIClient();
    /**
     * Total amount of data stored in the memtable that resides on-heap,
     * including column related overhead and overwritten rows.
     */
    public final Gauge<Long> memtableOnHeapSize;
    /**
     * Total amount of data stored in the memtable that resides off-heap,
     * including column related overhead and overwritten rows.
     */
    public final Gauge<Long> memtableOffHeapSize;
    /**
     * Total amount of live data stored in the memtable, excluding any data
     * structure overhead
     */
    public final Gauge<Long> memtableLiveDataSize;
    /**
     * Total amount of data stored in the memtables (2i and pending flush
     * memtables included) that resides on-heap.
     */
    public final Gauge<Long> allMemtablesOnHeapSize;
    /**
     * Total amount of data stored in the memtables (2i and pending flush
     * memtables included) that resides off-heap.
     */
    public final Gauge<Long> allMemtablesOffHeapSize;
    /**
     * Total amount of live data stored in the memtables (2i and pending flush
     * memtables included) that resides off-heap, excluding any data structure
     * overhead
     */
    public final Gauge<Long> allMemtablesLiveDataSize;
    /** Total number of columns present in the memtable. */
    public final Gauge<Long> memtableColumnsCount;
    /** Number of times flush has resulted in the memtable being switched out. */
    public final Counter memtableSwitchCount;
    /** Current compression ratio for all SSTables */
    public final Gauge<Double> compressionRatio;
    /** Histogram of estimated row size (in bytes). */
    public final Gauge<long[]> estimatedRowSizeHistogram;
    /** Approximate number of keys in table. */
    public final Gauge<Long> estimatedRowCount;
    /** Histogram of estimated number of columns. */
    public final Gauge<long[]> estimatedColumnCountHistogram;
    /** Histogram of the number of sstable data files accessed per read */
    public final ColumnFamilyHistogram sstablesPerReadHistogram;
    /** (Local) read metrics */
    public final LatencyMetrics readLatency;
    /** (Local) range slice metrics */
    public final LatencyMetrics rangeLatency;
    /** (Local) write metrics */
    public final LatencyMetrics writeLatency;
    /** Estimated number of tasks pending for this column family */
    public final Counter pendingFlushes;
    /** Estimate of number of pending compactions for this CF */
    public final Gauge<Integer> pendingCompactions;
    /** Number of SSTables on disk for this CF */
    public final Gauge<Integer> liveSSTableCount;
    /** Disk space used by SSTables belonging to this CF */
    public final Counter liveDiskSpaceUsed;
    /**
     * Total disk space used by SSTables belonging to this CF, including
     * obsolete ones waiting to be GC'd
     */
    public final Counter totalDiskSpaceUsed;
    /** Size of the smallest compacted row */
    public final Gauge<Long> minRowSize;
    /** Size of the largest compacted row */
    public final Gauge<Long> maxRowSize;
    /** Size of the average compacted row */
    public final Gauge<Long> meanRowSize;
    /** Number of false positives in bloom filter */
    public final Gauge<Long> bloomFilterFalsePositives;
    /** Number of false positives in bloom filter from last read */
    public final Gauge<Long> recentBloomFilterFalsePositives;
    /** False positive ratio of bloom filter */
    public final Gauge<Double> bloomFilterFalseRatio;
    /** False positive ratio of bloom filter from last read */
    public final Gauge<Double> recentBloomFilterFalseRatio;
    /** Disk space used by bloom filter */
    public final Gauge<Long> bloomFilterDiskSpaceUsed;
    /** Off heap memory used by bloom filter */
    public final Gauge<Long> bloomFilterOffHeapMemoryUsed;
    /** Off heap memory used by index summary */
    public final Gauge<Long> indexSummaryOffHeapMemoryUsed;
    /** Off heap memory used by compression meta data */
    public final Gauge<Long> compressionMetadataOffHeapMemoryUsed;
    /** Key cache hit rate for this CF */
    public final Gauge<Double> keyCacheHitRate;
    /** Tombstones scanned in queries on this CF */
    public final ColumnFamilyHistogram tombstoneScannedHistogram;
    /** Live cells scanned in queries on this CF */
    public final ColumnFamilyHistogram liveScannedHistogram;
    /** Column update time delta on this CF */
    public final ColumnFamilyHistogram colUpdateTimeDeltaHistogram;
    /** Disk space used by snapshot files which */
    public final Gauge<Long> trueSnapshotsSize;
    /** Row cache hits, but result out of range */
    public final Counter rowCacheHitOutOfRange;
    /** Number of row cache hits */
    public final Counter rowCacheHit;
    /** Number of row cache misses */
    public final Counter rowCacheMiss;
    /** CAS Prepare metrics */
    public final LatencyMetrics casPrepare;
    /** CAS Propose metrics */
    public final LatencyMetrics casPropose;
    /** CAS Commit metrics */
    public final LatencyMetrics casCommit;

    public final Timer coordinatorReadLatency;
    public final Timer coordinatorScanLatency;

    /** Time spent waiting for free memtable space, either on- or off-heap */
    public final Timer waitingOnFreeMemtableSpace;

    private final MetricNameFactory factory;
    private static final MetricNameFactory globalNameFactory = new AllColumnFamilyMetricNameFactory();

    public final Counter speculativeRetries;

    // for backward compatibility
    @Deprecated
    public final EstimatedHistogramWrapper sstablesPerRead;
    // it should not be called directly
    @Deprecated
    protected final RecentEstimatedHistogram recentSSTablesPerRead = new RecentEstimatedHistogram(35);
    private String cfName;

    public final static LatencyMetrics globalReadLatency = new LatencyMetrics(
            "/column_family/metrics/read_latency", globalNameFactory, "Read");
    public final static LatencyMetrics globalWriteLatency = new LatencyMetrics(
            "/column_family/metrics/write_latency", globalNameFactory, "Write");
    public final static LatencyMetrics globalRangeLatency = new LatencyMetrics(
            "/column_family/metrics/range_latency", globalNameFactory, "Range");

    /**
     * stores metrics that will be rolled into a single global metric
     */
    public final static ConcurrentMap<String, Set<Metric>> allColumnFamilyMetrics = Maps
            .newConcurrentMap();

    /**
     * Stores all metric names created that can be used when unregistering
     */
    public final static Set<String> all = Sets.newHashSet();

    /**
     * Creates metrics for given {@link ColumnFamilyStore}.
     *
     * @param cfs
     *            ColumnFamilyStore to measure metrics
     */
    public ColumnFamilyMetrics(final ColumnFamilyStore cfs) {
        factory = new ColumnFamilyMetricNameFactory(cfs);
        cfName = cfs.getCFName();
        memtableColumnsCount = createColumnFamilyGauge(
                "/column_family/metrics/memtable_columns_count",
                "MemtableColumnsCount");
        memtableOnHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/memtable_on_heap_size",
                "MemtableOnHeapSize");
        memtableOffHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/memtable_off_heap_size",
                "MemtableOffHeapSize");
        memtableLiveDataSize = createColumnFamilyGauge(
                "/column_family/metrics/memtable_live_data_size",
                "MemtableLiveDataSize");
        allMemtablesOnHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/all_memtables_on_heap_size",
                "AllMemtablesHeapSize");
        allMemtablesOffHeapSize = createColumnFamilyGauge(
                "/column_family/metrics/all_memtables_off_heap_size",
                "AllMemtablesOffHeapSize");
        allMemtablesLiveDataSize = createColumnFamilyGauge(
                "/column_family/metrics/all_memtables_live_data_size",
                "AllMemtablesLiveDataSize");
        memtableSwitchCount = createColumnFamilyCounter(
                "/column_family/metrics/memtable_switch_count",
                "MemtableSwitchCount");
        estimatedRowSizeHistogram = Metrics.newGauge(
                factory.createMetricName("EstimatedRowSizeHistogram"),
                new Gauge<long[]>() {
                    public long[] value() {
                        return c.getEstimatedHistogramAsLongArrValue("/column_family/metrics/estimated_row_size_histogram/"
                                + cfName);
                    }
                });
        estimatedRowCount = Metrics.newGauge(
                factory.createMetricName("EstimatedRowCount"),
                new Gauge<Long>() {
                    public Long value() {
                        return c.getLongValue("/column_family/metrics/estimated_row_count/"
                                + cfName);
                    }
                });

        estimatedColumnCountHistogram = Metrics.newGauge(
                factory.createMetricName("EstimatedColumnCountHistogram"),
                new Gauge<long[]>() {
                    public long[] value() {
                        return c.getEstimatedHistogramAsLongArrValue("/column_family/metrics/estimated_column_count_histogram/"
                                + cfName);
                    }
                });
        sstablesPerReadHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/sstables_per_read_histogram",
                "SSTablesPerReadHistogram");
        compressionRatio = createColumnFamilyGauge("CompressionRatio",
                new Gauge<Double>() {
                    public Double value() {
                        return c.getDoubleValue("/column_family/metrics/compression_ratio/"
                                + cfName);
                    }
                }, new Gauge<Double>() { // global gauge
                    public Double value() {
                        return c.getDoubleValue("/column_family/metrics/compression_ratio/");
                    }
                });
        readLatency = new LatencyMetrics("/column_family/metrics/read_latency",
                cfName, factory, "Read");
        writeLatency = new LatencyMetrics(
                "/column_family/metrics/write_latency", cfName, factory,
                "Write");
        rangeLatency = new LatencyMetrics(
                "/column_family/metrics/range_latency", cfName, factory,
                "Range");
        pendingFlushes = createColumnFamilyCounter(
                "/column_family/metrics/pending_flushes", "PendingFlushes");
        pendingCompactions = createColumnFamilyGaugeInt(
                "/column_family/metrics/pending_compactions",
                "PendingCompactions");
        liveSSTableCount = createColumnFamilyGaugeInt(
                "/column_family/metrics/live_ss_table_count",
                "LiveSSTableCount");
        liveDiskSpaceUsed = createColumnFamilyCounter(
                "/column_family/metrics/live_disk_space_used",
                "LiveDiskSpaceUsed");
        totalDiskSpaceUsed = createColumnFamilyCounter(
                "/column_family/metrics/total_disk_space_used",
                "TotalDiskSpaceUsed");
        minRowSize = createColumnFamilyGauge(
                "/column_family/metrics/min_row_size", "MinRowSize");
        maxRowSize = createColumnFamilyGauge(
                "/column_family/metrics/max_row_size", "MaxRowSize");
        meanRowSize = createColumnFamilyGauge(
                "/column_family/metrics/mean_row_size", "MeanRowSize");
        bloomFilterFalsePositives = createColumnFamilyGauge(
                "/column_family/metrics/bloom_filter_false_positives",
                "BloomFilterFalsePositives");
        recentBloomFilterFalsePositives = createColumnFamilyGauge(
                "/column_family/metrics/recent_bloom_filter_false_positives",
                "RecentBloomFilterFalsePositives");
        bloomFilterFalseRatio = createColumnFamilyGaugeDouble(
                "/column_family/metrics/bloom_filter_false_ratio",
                "BloomFilterFalseRatio");
        recentBloomFilterFalseRatio = createColumnFamilyGaugeDouble(
                "/column_family/metrics/recent_bloom_filter_false_ratio",
                "RecentBloomFilterFalseRatio");
        bloomFilterDiskSpaceUsed = createColumnFamilyGauge(
                "/column_family/metrics/bloom_filter_disk_space_used",
                "BloomFilterDiskSpaceUsed");
        bloomFilterOffHeapMemoryUsed = createColumnFamilyGauge(
                "/column_family/metrics/bloom_filter_off_heap_memory_used",
                "BloomFilterOffHeapMemoryUsed");
        indexSummaryOffHeapMemoryUsed = createColumnFamilyGauge(
                "/column_family/metrics/index_summary_off_heap_memory_used",
                "IndexSummaryOffHeapMemoryUsed");
        compressionMetadataOffHeapMemoryUsed = createColumnFamilyGauge(
                "/column_family/metrics/compression_metadata_off_heap_memory_used",
                "CompressionMetadataOffHeapMemoryUsed");
        speculativeRetries = createColumnFamilyCounter(
                "/column_family/metrics/speculative_retries",
                "SpeculativeRetries");
        keyCacheHitRate = Metrics.newGauge(
                factory.createMetricName("KeyCacheHitRate"),
                new Gauge<Double>() {
                    @Override
                    public Double value() {
                        return c.getDoubleValue("/column_family/metrics/key_cache_hit_rate/"
                                + cfName);
                    }
                });
        tombstoneScannedHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/tombstone_scanned_histogram",
                "TombstoneScannedHistogram");
        liveScannedHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/live_scanned_histogram",
                "LiveScannedHistogram");
        colUpdateTimeDeltaHistogram = createColumnFamilyHistogram(
                "/column_family/metrics/col_update_time_delta_histogram",
                "ColUpdateTimeDeltaHistogram");
        coordinatorReadLatency = APIMetrics.newTimer("/column_family/metrics/coordinator/read/" + cfName,
                factory.createMetricName("CoordinatorReadLatency"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
        coordinatorScanLatency = APIMetrics.newTimer("/column_family/metrics/coordinator/scan/" + cfName,
                factory.createMetricName("CoordinatorScanLatency"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
        waitingOnFreeMemtableSpace = APIMetrics.newTimer("/column_family/metrics/waiting_on_free_memtable/" + cfName,
                factory.createMetricName("WaitingOnFreeMemtableSpace"),
                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);

        trueSnapshotsSize = createColumnFamilyGauge(
                "/column_family/metrics/true_snapshots_size", "SnapshotsSize");
        rowCacheHitOutOfRange = createColumnFamilyCounter(
                "/column_family/metrics/row_cache_hit_out_of_range",
                "RowCacheHitOutOfRange");
        rowCacheHit = createColumnFamilyCounter(
                "/column_family/metrics/row_cache_hit", "RowCacheHit");
        rowCacheMiss = createColumnFamilyCounter(
                "/column_family/metrics/row_cache_miss", "RowCacheMiss");

        casPrepare = new LatencyMetrics("/column_family/metrics/cas_prepare/"
                + cfName, factory, "CasPrepare");
        casPropose = new LatencyMetrics("/column_family/metrics/cas_propose/"
                + cfName, factory, "CasPropose");
        casCommit = new LatencyMetrics("/column_family/metrics/cas_commit/"
                + cfName, factory, "CasCommit");
        sstablesPerRead = new EstimatedHistogramWrapper("/column_family/metrics/sstables_per_read_histogram/" + cfName);
    }

    /**
     * Release all associated metrics.
     */
    public void release() {
        for (String name : all) {
            allColumnFamilyMetrics.get(name).remove(
                    Metrics.defaultRegistry().allMetrics()
                            .get(factory.createMetricName(name)));
            Metrics.defaultRegistry().removeMetric(
                    factory.createMetricName(name));
        }
        readLatency.release();
        writeLatency.release();
        rangeLatency.release();
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("EstimatedRowSizeHistogram"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("EstimatedColumnCountHistogram"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("KeyCacheHitRate"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("CoordinatorReadLatency"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("CoordinatorScanLatency"));
        Metrics.defaultRegistry().removeMetric(
                factory.createMetricName("WaitingOnFreeMemtableSpace"));
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected Gauge<Double> createColumnFamilyGaugeDouble(final String url,
            final String name) {
        Gauge<Double> gauge = new Gauge<Double>() {
            public Double value() {
                return c.getDoubleValue(url + "/" + cfName);
            }
        };
        return createColumnFamilyGauge(url, name, gauge);
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected Gauge<Long> createColumnFamilyGauge(final String url, final String name) {
        Gauge<Long> gauge = new Gauge<Long>() {
            public Long value() {
                return c.getLongValue(url + "/" + cfName);
            }
        };
        return createColumnFamilyGauge(url, name, gauge);
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected Gauge<Integer> createColumnFamilyGaugeInt(final String url,
            final String name) {
        Gauge<Integer> gauge = new Gauge<Integer>() {
            public Integer value() {
                return c.getIntValue(url + "/" + cfName);
            }
        };
        return createColumnFamilyGauge(url, name, gauge);
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge will merge each CF gauge by adding their
     * values
     */
    protected <T extends Number> Gauge<T> createColumnFamilyGauge(final String url,
            final String name, Gauge<T> gauge) {
        return createColumnFamilyGauge(name, gauge, new Gauge<Long>() {
            public Long value() {
                // This is an optimization: call once for all column families
                // instead of iterating over all of them
                return c.getLongValue(url);
            }
        });
    }

    /**
     * Create a gauge that will be part of a merged version of all column
     * families. The global gauge is defined as the globalGauge parameter
     */
    protected <G, T> Gauge<T> createColumnFamilyGauge(String name,
            Gauge<T> gauge, Gauge<G> globalGauge) {
        Gauge<T> cfGauge = APIMetrics.newGauge(factory.createMetricName(name),
                gauge);
        if (register(name, cfGauge)) {
            Metrics.newGauge(globalNameFactory.createMetricName(name),
                    globalGauge);
        }
        return cfGauge;
    }

    /**
     * Creates a counter that will also have a global counter that's the sum of
     * all counters across different column families
     */
    protected Counter createColumnFamilyCounter(final String url, final String name) {
        Counter cfCounter = APIMetrics.newCounter(url + "/" + cfName,
                factory.createMetricName(name));
        if (register(name, cfCounter)) {
            Metrics.newGauge(globalNameFactory.createMetricName(name),
                    new Gauge<Long>() {
                        public Long value() {
                            // This is an optimization: call once for all
                            // column families instead of iterating over all of
                            // them
                            return c.getLongValue(url);
                        }
                    });
        }
        return cfCounter;
    }

    /**
     * Create a histogram-like interface that will register both a CF, keyspace
     * and global level histogram and forward any updates to both
     */
    protected ColumnFamilyHistogram createColumnFamilyHistogram(String url,
            String name) {
        Histogram cfHistogram = APIMetrics.newHistogram(url + "/" + cfName,
                factory.createMetricName(name), true);
        register(name, cfHistogram);

        // TBD add keyspace and global histograms
        // keyspaceHistogram,
        // Metrics.newHistogram(globalNameFactory.createMetricName(name),
        // true));
        return new ColumnFamilyHistogram(cfHistogram, null, null);
    }

    /**
     * Registers a metric to be removed when unloading CF.
     *
     * @return true if first time metric with that name has been registered
     */
    private boolean register(String name, Metric metric) {
        boolean ret = allColumnFamilyMetrics.putIfAbsent(name,
                new HashSet<Metric>()) == null;
        allColumnFamilyMetrics.get(name).add(metric);
        all.add(name);
        return ret;
    }

    public long[] getRecentSSTablesPerRead() {
        return recentSSTablesPerRead
                .getBuckets(sstablesPerRead.getBuckets(false));
    }

    public class ColumnFamilyHistogram {
        public final Histogram[] all;
        public final Histogram cf;

        private ColumnFamilyHistogram(Histogram cf, Histogram keyspace,
|
|
||||||
Histogram global) {
|
|
||||||
this.cf = cf;
|
|
||||||
this.all = new Histogram[] { cf, keyspace, global };
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
class ColumnFamilyMetricNameFactory implements MetricNameFactory {
|
|
||||||
private final String keyspaceName;
|
|
||||||
private final String columnFamilyName;
|
|
||||||
private final boolean isIndex;
|
|
||||||
|
|
||||||
ColumnFamilyMetricNameFactory(ColumnFamilyStore cfs) {
|
|
||||||
this.keyspaceName = cfs.getKeyspace();
|
|
||||||
this.columnFamilyName = cfs.getColumnFamilyName();
|
|
||||||
isIndex = cfs.isIndex();
|
|
||||||
}
|
|
||||||
|
|
||||||
public MetricName createMetricName(String metricName) {
|
|
||||||
String groupName = ColumnFamilyMetrics.class.getPackage().getName();
|
|
||||||
String type = isIndex ? "IndexColumnFamily" : "ColumnFamily";
|
|
||||||
|
|
||||||
StringBuilder mbeanName = new StringBuilder();
|
|
||||||
mbeanName.append(groupName).append(":");
|
|
||||||
mbeanName.append("type=").append(type);
|
|
||||||
mbeanName.append(",keyspace=").append(keyspaceName);
|
|
||||||
mbeanName.append(",scope=").append(columnFamilyName);
|
|
||||||
mbeanName.append(",name=").append(metricName);
|
|
||||||
return new MetricName(groupName, type, metricName, keyspaceName
|
|
||||||
+ "." + columnFamilyName, mbeanName.toString());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static class AllColumnFamilyMetricNameFactory implements MetricNameFactory {
|
|
||||||
public MetricName createMetricName(String metricName) {
|
|
||||||
String groupName = ColumnFamilyMetrics.class.getPackage().getName();
|
|
||||||
StringBuilder mbeanName = new StringBuilder();
|
|
||||||
mbeanName.append(groupName).append(":");
|
|
||||||
mbeanName.append("type=ColumnFamily");
|
|
||||||
mbeanName.append(",name=").append(metricName);
|
|
||||||
return new MetricName(groupName, "ColumnFamily", metricName, "all",
|
|
||||||
mbeanName.toString());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
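The removed helpers all follow a single pattern: each metric registers a per-column-family MBean that polls the CF-qualified REST path, plus one shared global MBean that polls the same path without the CF suffix, so the server aggregates across all tables in a single call. A minimal sketch of that split in isolation, as an editor's illustration (the fake client and the URL are hypothetical stand-ins, not taken from this diff):

    import java.util.function.Function;
    import java.util.function.Supplier;

    public class MergedGaugeSketch {
        // Per-CF view polls the CF-qualified path; the global view polls the
        // bare path, letting the server aggregate over all column families.
        static Supplier<Long> perCf(Function<String, Long> client, String url, String cf) {
            return () -> client.apply(url + "/" + cf);
        }

        static Supplier<Long> global(Function<String, Long> client, String url) {
            return () -> client.apply(url);
        }

        public static void main(String[] args) {
            Function<String, Long> fakeClient = u -> (long) u.length(); // stand-in for APIClient
            System.out.println(perCf(fakeClient, "/column_family/metrics/read", "ks1:cf1").get());
            System.out.println(global(fakeClient, "/column_family/metrics/read").get());
        }
    }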
@@ -23,65 +23,38 @@
  */
 package org.apache.cassandra.metrics;
 
-import com.cloudius.urchin.api.APIClient;
-import com.cloudius.urchin.metrics.APIMetrics;
-import com.cloudius.urchin.metrics.DefaultNameFactory;
-import com.cloudius.urchin.metrics.MetricNameFactory;
-import com.yammer.metrics.core.Gauge;
-import com.yammer.metrics.core.Timer;
-
-import java.util.concurrent.TimeUnit;
+import javax.management.MalformedObjectNameException;
 
 /**
  * Metrics for commit log
  */
-public class CommitLogMetrics {
-    public static final MetricNameFactory factory = new DefaultNameFactory(
-            "CommitLog");
-    private APIClient c = new APIClient();
-
-    /** Number of completed tasks */
-    public final Gauge<Long> completedTasks;
-    /** Number of pending tasks */
-    public final Gauge<Long> pendingTasks;
-    /** Current size used by all the commit log segments */
-    public final Gauge<Long> totalCommitLogSize;
-    /**
-     * Time spent waiting for a CLS to be allocated - under normal conditions
-     * this should be zero
-     */
-    public final Timer waitingOnSegmentAllocation;
-    /**
-     * The time spent waiting on CL sync; for Periodic this only occurs when
-     * the sync is lagging its sync interval
-     */
-    public final Timer waitingOnCommit;
-
+public class CommitLogMetrics implements Metrics {
     public CommitLogMetrics() {
-        completedTasks = APIMetrics.newGauge(
-                factory.createMetricName("CompletedTasks"), new Gauge<Long>() {
-                    public Long value() {
-                        return c.getLongValue("/commitlog/metrics/completed_tasks");
-                    }
-                });
-        pendingTasks = APIMetrics.newGauge(
-                factory.createMetricName("PendingTasks"), new Gauge<Long>() {
-                    public Long value() {
-                        return c.getLongValue("/commitlog/metrics/pending_tasks");
-                    }
-                });
-        totalCommitLogSize = APIMetrics.newGauge(
-                factory.createMetricName("TotalCommitLogSize"),
-                new Gauge<Long>() {
-                    public Long value() {
-                        return c.getLongValue("/commitlog/metrics/total_commit_log_size");
-                    }
-                });
-        waitingOnSegmentAllocation = APIMetrics.newTimer("/commit_log/metrics/waiting_on_segment_allocation",
-                factory.createMetricName("WaitingOnSegmentAllocation"),
-                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
-        waitingOnCommit = APIMetrics.newTimer("/commit_log/metrics/waiting_on_commit",
-                factory.createMetricName("WaitingOnCommit"),
-                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
+    }
+
+    @Override
+    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
+        MetricNameFactory factory = new DefaultNameFactory("CommitLog");
+        /** Number of completed tasks */
+        registry.register(() -> registry.gauge("/commitlog/metrics/completed_tasks"),
+                factory.createMetricName("CompletedTasks"));
+        /** Number of pending tasks */
+        registry.register(() -> registry.gauge("/commitlog/metrics/pending_tasks"),
+                factory.createMetricName("PendingTasks"));
+        /** Current size used by all the commit log segments */
+        registry.register(() -> registry.gauge("/commitlog/metrics/total_commit_log_size"),
+                factory.createMetricName("TotalCommitLogSize"));
+        /**
+         * Time spent waiting for a CLS to be allocated - under normal
+         * conditions this should be zero
+         */
+        registry.register(() -> registry.timer("/commitlog/metrics/waiting_on_segment_allocation"),
+                factory.createMetricName("WaitingOnSegmentAllocation"));
+        /**
+         * The time spent waiting on CL sync; for Periodic this only occurs
+         * when the sync is lagging its sync interval
+         */
+        registry.register(() -> registry.timer("/commitlog/metrics/waiting_on_commit"),
+                factory.createMetricName("WaitingOnCommit"));
     }
 }
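Once registered this way, each metric is an ordinary MBean attribute. An editor's sketch of reading one of the commit log gauges over JMX: it assumes the group name is "org.apache.cassandra.metrics" (the GROUP_NAME constant is defined outside this excerpt) and uses the platform MBeanServer for simplicity, whereas scylla-jmx actually registers against its own JmxMBeanServer:

    import java.lang.management.ManagementFactory;
    import javax.management.MBeanServer;
    import javax.management.ObjectName;

    public class ReadCommitLogMetric {
        public static void main(String[] args) throws Exception {
            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
            // Name shape produced by DefaultNameFactory under the stated assumption.
            ObjectName name = new ObjectName("org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks");
            // JmxGaugeMBean exposes a single attribute, "Value".
            Object pending = server.getAttribute(name, "Value");
            System.out.println("Pending commit log tasks: " + pending);
        }
    }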
@@ -18,57 +18,59 @@
 
 /*
  * Copyright 2015 Cloudius Systems
  *
  * Modified by Cloudius Systems
  */
 package org.apache.cassandra.metrics;
 
-import java.util.concurrent.TimeUnit;
-
-import com.cloudius.urchin.api.APIClient;
-import com.cloudius.urchin.metrics.APIMetrics;
-import com.cloudius.urchin.metrics.DefaultNameFactory;
-import com.cloudius.urchin.metrics.MetricNameFactory;
-import com.yammer.metrics.core.Counter;
-import com.yammer.metrics.core.Gauge;
-import com.yammer.metrics.core.Meter;
+import jakarta.json.JsonArray;
+import jakarta.json.JsonObject;
+import java.util.HashMap;
+import java.util.Map;
+
+import javax.management.MalformedObjectNameException;
 
 /**
  * Metrics for compaction.
  */
-public class CompactionMetrics {
-    public static final MetricNameFactory factory = new DefaultNameFactory(
-            "Compaction");
-    private APIClient c = new APIClient();
-    /** Estimated number of compactions remaining to perform */
-    public final Gauge<Integer> pendingTasks;
-    /** Number of completed compactions since server [re]start */
-    public final Gauge<Long> completedTasks;
-    /** Total number of compactions since server [re]start */
-    public final Meter totalCompactionsCompleted;
-    /** Total number of bytes compacted since server [re]start */
-    public final Counter bytesCompacted;
-
+public class CompactionMetrics implements Metrics {
     public CompactionMetrics() {
-        pendingTasks = APIMetrics.newGauge(
-                factory.createMetricName("PendingTasks"), new Gauge<Integer>() {
-                    public Integer value() {
-                        return c.getIntValue("/compaction_manager/metrics/pending_tasks");
-                    }
-                });
-        completedTasks = APIMetrics.newGauge(
-                factory.createMetricName("CompletedTasks"), new Gauge<Long>() {
-                    public Long value() {
-                        return c.getLongValue("/compaction_manager/metrics/completed_tasks");
-                    }
-                });
-        totalCompactionsCompleted = APIMetrics.newMeter(
-                "/compaction_manager/metrics/total_compactions_completed",
-                factory.createMetricName("TotalCompactionsCompleted"),
-                "compaction completed", TimeUnit.SECONDS);
-        bytesCompacted = APIMetrics.newCounter(
-                "/compaction_manager/metrics/bytes_compacted",
-                factory.createMetricName("BytesCompacted"));
+    }
+
+    @Override
+    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
+        MetricNameFactory factory = new DefaultNameFactory("Compaction");
+        /** Estimated number of compactions remaining to perform */
+        registry.register(() -> registry.gauge(Integer.class, "/compaction_manager/metrics/pending_tasks"),
+                factory.createMetricName("PendingTasks"));
+        /** Number of completed compactions since server [re]start */
+        registry.register(() -> registry.gauge("/compaction_manager/metrics/completed_tasks"),
+                factory.createMetricName("CompletedTasks"));
+        /** Total number of compactions since server [re]start */
+        registry.register(() -> registry.meter("/compaction_manager/metrics/total_compactions_completed"),
+                factory.createMetricName("TotalCompactionsCompleted"));
+        /** Total number of bytes compacted since server [re]start */
+        registry.register(() -> registry.meter("/compaction_manager/metrics/bytes_compacted"),
+                factory.createMetricName("BytesCompacted"));
+
+        registry.register(() -> registry.gauge((client) -> {
+            Map<String, Map<String, Integer>> result = new HashMap<>();
+            JsonArray compactions = client.getJsonArray("compaction_manager/metrics/pending_tasks_by_table");
+
+            for (int i = 0; i < compactions.size(); i++) {
+                JsonObject c = compactions.getJsonObject(i);
+
+                String ks = c.getString("ks");
+                String cf = c.getString("cf");
+
+                if (!result.containsKey(ks)) {
+                    result.put(ks, new HashMap<>());
+                }
+
+                Map<String, Integer> map = result.get(ks);
+                map.put(cf, (int) (c.getJsonNumber("task").longValue()));
+            }
+            return result;
+        }), factory.createMetricName("PendingTasksByTableName"));
     }
 }
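The new PendingTasksByTableName gauge flattens a JSON array of per-table entries into a keyspace-to-table map. An editor's sketch of the assumed wire format, inferred from the getString("ks"), getString("cf") and getJsonNumber("task") calls above (the values are made up):

    import java.io.StringReader;
    import jakarta.json.Json;
    import jakarta.json.JsonArray;

    public class PendingTasksShape {
        public static void main(String[] args) {
            // Assumed payload shape for pending_tasks_by_table.
            String body = "[{\"ks\":\"ks1\",\"cf\":\"standard1\",\"task\":3},"
                    + "{\"ks\":\"ks1\",\"cf\":\"counter1\",\"task\":1}]";
            JsonArray compactions = Json.createReader(new StringReader(body)).readArray();
            // The gauge above folds this into {ks1={standard1=3, counter1=1}}.
            System.out.println(compactions.getJsonObject(0).getString("cf")); // standard1
        }
    }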
@@ -15,15 +15,10 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.cassandra.metrics;
 
-/*
- * Copyright 2015 Cloudius Systems
- *
- * Modified by Cloudius Systems
- */
-package com.cloudius.urchin.metrics;
-
-import com.yammer.metrics.core.MetricName;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
 
 /**
  * MetricNameFactory that generates default MetricName of metrics.
@@ -43,19 +38,14 @@ public class DefaultNameFactory implements MetricNameFactory {
         this.scope = scope;
     }
 
-    public MetricName createMetricName(String metricName) {
+    @Override
+    public ObjectName createMetricName(String metricName) throws MalformedObjectNameException {
         return createMetricName(type, metricName, scope);
     }
 
-    public static MetricName createMetricName(String type, String metricName,
-            String scope) {
-        return new MetricName(GROUP_NAME, type, metricName, scope,
-                createDefaultMBeanName(type, metricName, scope));
-    }
-
-    protected static String createDefaultMBeanName(String type, String name,
-            String scope) {
-        final StringBuilder nameBuilder = new StringBuilder();
+    public static ObjectName createMetricName(String type, String name, String scope)
+            throws MalformedObjectNameException {
+        StringBuilder nameBuilder = new StringBuilder();
         nameBuilder.append(GROUP_NAME);
         nameBuilder.append(":type=");
         nameBuilder.append(type);
@@ -67,6 +57,6 @@ public class DefaultNameFactory implements MetricNameFactory {
             nameBuilder.append(",name=");
             nameBuilder.append(name);
         }
-        return nameBuilder.toString();
+        return new ObjectName(nameBuilder.toString());
     }
 }
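After this change the factory emits real JMX ObjectNames of the form GROUP_NAME:type=<type>[,scope=<scope>],name=<name>; the scope-appending branch sits in the elided middle of the method. An editor's sketch of the expected result, assuming GROUP_NAME (defined outside this excerpt) is "org.apache.cassandra.metrics":

    import javax.management.ObjectName;

    public class NameCheck {
        public static void main(String[] args) throws Exception {
            // Expected output of DefaultNameFactory.createMetricName("CommitLog", "PendingTasks", null)
            // under the stated assumption about GROUP_NAME:
            ObjectName n = new ObjectName("org.apache.cassandra.metrics:type=CommitLog,name=PendingTasks");
            System.out.println(n.getDomain());            // org.apache.cassandra.metrics
            System.out.println(n.getKeyProperty("name")); // PendingTasks
        }
    }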
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+/*
+ * Copyright (C) 2015 ScyllaDB
+ */
+/*
+ * Modified by ScyllaDB
+ */
+
+package org.apache.cassandra.metrics;
+
+import javax.management.MalformedObjectNameException;
+
+import org.apache.cassandra.net.MessagingService;
+
+/**
+ * Metrics for dropped messages by verb.
+ */
+public class DroppedMessageMetrics implements Metrics {
+    private final MessagingService.Verb verb;
+
+    public DroppedMessageMetrics(MessagingService.Verb verb) {
+        this.verb = verb;
+    }
+
+    @Override
+    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
+        MetricNameFactory factory = new DefaultNameFactory("DroppedMessage", verb.toString());
+        /** Number of dropped messages */
+        // TODO: this API url does not exist. Add meter calls for verbs.
+        registry.register(() -> registry.meter("/messaging_service/messages/dropped/" + verb),
+                factory.createMetricName("Dropped"));
+    }
+}
@@ -1,55 +0,0 @@
-package org.apache.cassandra.metrics;
-
-/*
- * Copyright (C) 2015 ScyllaDB
- */
-
-/*
- * This file is part of Scylla.
- *
- * Scylla is free software: you can redistribute it and/or modify
- * it under the terms of the GNU Affero General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * Scylla is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Scylla. If not, see <http://www.gnu.org/licenses/>.
- */
-
-import javax.ws.rs.core.MultivaluedMap;
-
-import com.cloudius.urchin.api.APIClient;
-import com.cloudius.urchin.utils.EstimatedHistogram;
-
-public class EstimatedHistogramWrapper {
-    private APIClient c = new APIClient();
-    private String url;
-    private MultivaluedMap<String, String> queryParams;
-    private static final int DURATION = 50;
-    private int duration;
-
-    public EstimatedHistogramWrapper(String url, MultivaluedMap<String, String> queryParams, int duration) {
-        this.url = url;
-        this.queryParams = queryParams;
-        this.duration = duration;
-    }
-
-    public EstimatedHistogramWrapper(String url) {
-        this(url, null, DURATION);
-    }
-
-    public EstimatedHistogramWrapper(String url, int duration) {
-        this(url, null, duration);
-    }
-
-    public EstimatedHistogram get() {
-        return c.getEstimatedHistogram(url, queryParams, duration);
-    }
-
-    public long[] getBuckets(boolean reset) {
-        return get().getBuckets(reset);
-    }
-}
@@ -23,42 +23,19 @@
  */
 package org.apache.cassandra.metrics;
 
-import java.util.List;
-import java.util.concurrent.TimeUnit;
+import java.util.Arrays;
 
-import com.cloudius.urchin.metrics.APIMetrics;
-import com.cloudius.urchin.metrics.DefaultNameFactory;
-import com.cloudius.urchin.metrics.MetricNameFactory;
-import com.cloudius.urchin.utils.EstimatedHistogram;
-import com.cloudius.urchin.utils.RecentEstimatedHistogram;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-import com.yammer.metrics.core.Counter;
-import com.yammer.metrics.core.Timer;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
 
 /**
  * Metrics about latencies
  */
-public class LatencyMetrics {
-    /** Latency */
-    public final Timer latency;
-    /** Total latency in micro sec */
-    public final Counter totalLatency;
-
-    /** parent metrics to replicate any updates to **/
-    private List<LatencyMetrics> parents = Lists.newArrayList();
-
-    protected final MetricNameFactory factory;
+public class LatencyMetrics implements Metrics {
+    protected final MetricNameFactory[] factories;
     protected final String namePrefix;
-    @Deprecated public EstimatedHistogramWrapper totalLatencyHistogram;
-    /*
-     * It should not be called directly, use the getRecentLatencyHistogram
-     */
-    @Deprecated protected final RecentEstimatedHistogram recentLatencyHistogram = new RecentEstimatedHistogram();
-
-    protected long lastLatency;
-    protected long lastOpCount;
+    protected final String uri;
+    protected final String param;
 
     /**
      * Create LatencyMetrics with given group, type, and scope. Name prefix for
@@ -69,8 +46,8 @@ public class LatencyMetrics {
      * @param scope
      *            Scope
      */
-    public LatencyMetrics(String url, String type, String scope) {
-        this(url, type, "", scope);
+    public LatencyMetrics(String type, String scope, String uri) {
+        this(type, "", scope, uri, null);
     }
 
     /**
@@ -84,88 +61,35 @@ public class LatencyMetrics {
      * @param scope
      *            Scope of metrics
      */
-    public LatencyMetrics(String url, String type, String namePrefix,
-            String scope) {
-        this(url, new DefaultNameFactory(type, scope), namePrefix);
+    public LatencyMetrics(String type, String namePrefix, String scope, String uri, String param) {
+        this(namePrefix, uri, param, new DefaultNameFactory(type, scope));
     }
 
-    /**
-     * Create LatencyMetrics with given group, type, prefix to append to each
-     * metric name, and scope.
-     *
-     * @param factory
-     *            MetricName factory to use
-     * @param namePrefix
-     *            Prefix to append to each metric name
-     */
-    public LatencyMetrics(String url, MetricNameFactory factory,
-            String namePrefix) {
-        this(url, null, factory, namePrefix);
+    public LatencyMetrics(String namePrefix, String uri, MetricNameFactory... factories) {
+        this(namePrefix, uri, null, factories);
     }
 
-    public LatencyMetrics(String url, String paramName,
-            MetricNameFactory factory, String namePrefix) {
-        this.factory = factory;
+    public LatencyMetrics(String namePrefix, String uri, String param, MetricNameFactory... factories) {
+        this.factories = factories;
         this.namePrefix = namePrefix;
-        paramName = (paramName == null) ? "" : "/" + paramName;
-        latency = APIMetrics.newTimer(url + "/histogram" + paramName,
-                factory.createMetricName(namePrefix + "Latency"),
-                TimeUnit.MICROSECONDS, TimeUnit.SECONDS);
-        totalLatency = APIMetrics.newCounter(url + paramName,
-                factory.createMetricName(namePrefix + "TotalLatency"));
-        totalLatencyHistogram = new EstimatedHistogramWrapper(url + "/estimated_histogram" + paramName);
+        this.uri = uri;
+        this.param = param;
     }
 
-    /**
-     * Create LatencyMetrics with given group, type, prefix to append to each
-     * metric name, and scope. Any updates to this will also run on parent
-     *
-     * @param factory
-     *            MetricName factory to use
-     * @param namePrefix
-     *            Prefix to append to each metric name
-     * @param parents
-     *            any amount of parents to replicate updates to
-     */
-    public LatencyMetrics(String url, MetricNameFactory factory,
-            String namePrefix, LatencyMetrics... parents) {
-        this(url, factory, namePrefix);
-        this.parents.addAll(ImmutableList.copyOf(parents));
+    protected ObjectName[] names(String suffix) throws MalformedObjectNameException {
+        return Arrays.stream(factories).map(f -> {
+            try {
+                return f.createMetricName(namePrefix + suffix);
+            } catch (MalformedObjectNameException e) {
+                throw new RuntimeException(e); // dung...
+            }
+        }).toArray(size -> new ObjectName[size]);
     }
 
-    /** takes nanoseconds **/
-    public void addNano(long nanos) {
-        // convert to microseconds. 1 millionth
-        latency.update(nanos, TimeUnit.NANOSECONDS);
-        totalLatency.inc(nanos / 1000);
-        for (LatencyMetrics parent : parents) {
-            parent.addNano(nanos);
-        }
-    }
-
-    public void release() {
-        APIMetrics.defaultRegistry()
-                .removeMetric(factory.createMetricName(namePrefix + "Latency"));
-        APIMetrics.defaultRegistry().removeMetric(
-                factory.createMetricName(namePrefix + "TotalLatency"));
-    }
-
-    @Deprecated
-    public synchronized double getRecentLatency() {
-        long ops = latency.count();
-        long n = totalLatency.count();
-        if (ops == lastOpCount)
-            return 0;
-        try {
-            return ((double) n - lastLatency) / (ops - lastOpCount);
-        } finally {
-            lastLatency = n;
-            lastOpCount = ops;
-        }
-    }
-
-    public long[] getRecentLatencyHistogram() {
-        return recentLatencyHistogram.getBuckets(totalLatencyHistogram.getBuckets(false));
+    @Override
+    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
+        String paramName = (param == null) ? "" : "/" + param;
+        registry.register(() -> registry.timer(uri + "/moving_average_histogram" + paramName), names("Latency"));
+        registry.register(() -> registry.counter(uri + paramName), names("TotalLatency"));
     }
 }
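The names() helper above has to smuggle the checked MalformedObjectNameException out of a lambda, since java.util.function.Function cannot declare it; it rethrows as an unchecked RuntimeException inside the stream. The same workaround in isolation, as an editor's sketch (not scylla-jmx code):

    import java.util.Arrays;
    import java.util.function.Function;

    public class CheckedInStream {
        interface Parser { int parse(String s) throws Exception; } // checked thrower

        static Function<String, Integer> unchecked(Parser p) {
            return s -> {
                try {
                    return p.parse(s);             // call the checked API
                } catch (Exception e) {
                    throw new RuntimeException(e); // rethrow unchecked, as names() does
                }
            };
        }

        public static void main(String[] args) {
            int[] out = Arrays.stream(new String[] { "1", "2" })
                    .map(unchecked(Integer::parseInt))
                    .mapToInt(Integer::intValue).toArray();
            System.out.println(Arrays.toString(out)); // [1, 2]
        }
    }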
@@ -15,23 +15,26 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-/*
- * Copyright 2015 Cloudius Systems
- *
- * Modified by Cloudius Systems
- */
-package com.cloudius.urchin.metrics;
-
-import com.yammer.metrics.core.MetricName;
-
-public interface MetricNameFactory
-{
+package org.apache.cassandra.metrics;
+
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+
+/**
+ * Simplified version of the {@link Metrics} naming factory paradigm, simply
+ * generating {@link ObjectName} and nothing more.
+ *
+ * @author calle
+ */
+public interface MetricNameFactory {
     /**
      * Create a qualified name from given metric name.
      *
-     * @param metricName part of qualified name.
+     * @param metricName
+     *            part of qualified name.
      * @return new String with given metric name.
+     * @throws MalformedObjectNameException
      */
-    MetricName createMetricName(String metricName);
+    ObjectName createMetricName(String metricName) throws MalformedObjectNameException;
 }
38  src/main/java/org/apache/cassandra/metrics/Metrics.java  Normal file
@@ -0,0 +1,38 @@
+package org.apache.cassandra.metrics;
+
+import java.util.function.Function;
+
+import javax.management.MBeanServer;
+import javax.management.MalformedObjectNameException;
+
+/**
+ * Action interface for any type that encapsulates n metrics.
+ *
+ * @author calle
+ */
+public interface Metrics {
+    /**
+     * Implementors should issue
+     * {@link MetricsRegistry#register(java.util.function.Supplier, javax.management.ObjectName...)}
+     * for every {@link Metrics} they generate. This method is called in both
+     * the bind (create) and unbind (remove) phases, so an appropriate use of
+     * {@link Function} binding is advisable.
+     *
+     * @param registry
+     * @throws MalformedObjectNameException
+     */
+    void register(MetricsRegistry registry) throws MalformedObjectNameException;
+
+    /**
+     * Same as {@link #register(MetricsRegistry)}, but for {@link Metric}s that
+     * are "global" (i.e. static - not bound to an individual bean instance).
+     * This method is called whenever the first encapsulating MBean is
+     * added/removed from a {@link MBeanServer}.
+     *
+     * @param registry
+     * @throws MalformedObjectNameException
+     */
+    default void registerGlobals(MetricsRegistry registry) throws MalformedObjectNameException {
+    }
+}
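A minimal sketch of an implementor, following the same registration shape CommitLogMetrics and CompactionMetrics use above (editor's illustration; the "Example" type name and REST path are hypothetical):

    package org.apache.cassandra.metrics;

    import javax.management.MalformedObjectNameException;

    public class ExampleMetrics implements Metrics {
        @Override
        public void register(MetricsRegistry registry) throws MalformedObjectNameException {
            MetricNameFactory factory = new DefaultNameFactory("Example");
            // A Supplier is passed, not a bean, because register() runs on both
            // the bind and unbind passes and may need a fresh MBean each time.
            registry.register(() -> registry.gauge("/example/metrics/queue_length"),
                    factory.createMetricName("QueueLength"));
        }
    }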
813  src/main/java/org/apache/cassandra/metrics/MetricsRegistry.java  Normal file
@@ -0,0 +1,813 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.metrics;
+
+import static com.scylladb.jmx.api.APIClient.getReader;
+import static java.lang.Math.floor;
+import static java.util.logging.Level.SEVERE;
+
+import jakarta.json.JsonArray;
+import jakarta.json.JsonNumber;
+import jakarta.json.JsonObject;
+import java.util.Arrays;
+import java.util.Locale;
+import java.util.concurrent.TimeUnit;
+import java.util.function.BiFunction;
+import java.util.function.Function;
+import java.util.function.Supplier;
+import java.util.logging.Logger;
+
+import javax.management.InstanceAlreadyExistsException;
+import javax.management.MBeanRegistrationException;
+import javax.management.MBeanServer;
+import javax.management.NotCompliantMBeanException;
+import javax.management.ObjectName;
+
+import com.scylladb.jmx.api.APIClient;
+import com.sun.jmx.mbeanserver.JmxMBeanServer;
+
+/**
+ * Helper for integrating the 3.0 metrics API with 2.0.
+ * <p>
+ * The 3.0 API comes with poor JMX integration
+ * </p>
+ */
+public class MetricsRegistry {
+    private static final long CACHE_DURATION = 1000;
+    private static final long UPDATE_INTERVAL = 50;
+
+    private static final Logger logger = Logger.getLogger(MetricsRegistry.class.getName());
+
+    private final APIClient client;
+    private final JmxMBeanServer mBeanServer;
+
+    public MetricsRegistry(APIClient client, JmxMBeanServer mBeanServer) {
+        this.client = client;
+        this.mBeanServer = mBeanServer;
+    }
+
+    public MetricsRegistry(MetricsRegistry other) {
+        this(other.client, other.mBeanServer);
+    }
+
+    public MetricMBean gauge(String url) {
+        return gauge(Long.class, url);
+    }
+
+    public <T> MetricMBean gauge(Class<T> type, final String url) {
+        return gauge(getReader(type), url);
+    }
+
+    public <T> MetricMBean gauge(final BiFunction<APIClient, String, T> function, final String url) {
+        return gauge(c -> function.apply(c, url));
+    }
+
+    public <T> MetricMBean gauge(final Function<APIClient, T> function) {
+        return gauge(() -> function.apply(client));
+    }
+
+    private class JmxGauge implements JmxGaugeMBean {
+        private final Supplier<?> function;
+
+        public JmxGauge(Supplier<?> function) {
+            this.function = function;
+        }
+
+        @Override
+        public Object getValue() {
+            return function.get();
+        }
+    }
+
+    public <T> MetricMBean gauge(final Supplier<T> function) {
+        return new JmxGauge(function);
+    }
+
+    /**
+     * The default approach to register is to actually register/add to the
+     * {@link MBeanServer}. For the unbind phase, override here.
+     *
+     * @param bean
+     * @param objectNames
+     */
+    public void register(Supplier<MetricMBean> f, ObjectName... objectNames) {
+        MetricMBean bean = f.get();
+        for (ObjectName name : objectNames) {
+            try {
+                mBeanServer.getMBeanServerInterceptor().registerMBean(bean, name);
+            } catch (InstanceAlreadyExistsException | MBeanRegistrationException | NotCompliantMBeanException e) {
+                logger.log(SEVERE, "Could not register mbean", e);
+            }
+        }
+    }
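The gauge overloads above chain into one another: the Class form looks up a typed reader via APIClient.getReader, the BiFunction form binds the URL, and the Supplier form is the terminal one that JmxGauge wraps. An editor's sketch of the three entry points side by side (the path is a hypothetical example; this compiles against the class above):

    import org.apache.cassandra.metrics.MetricsRegistry;
    import org.apache.cassandra.metrics.MetricsRegistry.MetricMBean;

    public class GaugeForms {
        static void examples(MetricsRegistry registry) {
            MetricMBean asLong = registry.gauge("/x/metrics/count");               // Long reader by default
            MetricMBean asInt = registry.gauge(Integer.class, "/x/metrics/count"); // typed reader
            MetricMBean custom = registry.gauge(c -> c.getLongValue("/x/metrics/count")); // free-form
        }
    }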
+
+    private class JmxCounter implements JmxCounterMBean {
+        private final String url;
+
+        public JmxCounter(String url) {
+            super();
+            this.url = url;
+        }
+
+        @Override
+        public long getCount() {
+            return client.getLongValue(url);
+        }
+    }
+
+    public MetricMBean counter(final String url) {
+        if (url != null) {
+            return new JmxCounter(url);
+        }
+        return new JmxCounter(url) {
+            @Override
+            public long getCount() {
+                return 0;
+            }
+        };
+    }
+
+    private abstract class IntermediatelyUpdated {
+        private final long interval;
+        private final Supplier<JsonObject> supplier;
+        private long lastUpdate;
+
+        public IntermediatelyUpdated(String url, long interval) {
+            this.supplier = () -> client.getJsonObj(url, null);
+            this.interval = interval;
+        }
+
+        public IntermediatelyUpdated(Supplier<JsonObject> supplier, long interval) {
+            this.supplier = supplier;
+            this.interval = interval;
+        }
+
+        public abstract void update(JsonObject obj);
+
+        public final void update() {
+            long now = System.currentTimeMillis();
+            if (now - lastUpdate < interval) {
+                return;
+            }
+            try {
+                JsonObject obj = supplier.get();
+                update(obj);
+            } finally {
+                lastUpdate = now;
+            }
+        }
+    }
+
+    private static class Meter {
+        public final long count;
+        public final double oneMinuteRate;
+        public final double fiveMinuteRate;
+        public final double fifteenMinuteRate;
+        public final double meanRate;
+
+        public Meter(long count, double oneMinuteRate, double fiveMinuteRate, double fifteenMinuteRate,
+                double meanRate) {
+            this.count = count;
+            this.oneMinuteRate = oneMinuteRate;
+            this.fiveMinuteRate = fiveMinuteRate;
+            this.fifteenMinuteRate = fifteenMinuteRate;
+            this.meanRate = meanRate;
+        }
+
+        public Meter() {
+            this(0, 0, 0, 0, 0);
+        }
+
+        public Meter(JsonObject obj) {
+            JsonArray rates = obj.getJsonArray("rates");
+            oneMinuteRate = rates.getJsonNumber(0).doubleValue();
+            fiveMinuteRate = rates.getJsonNumber(1).doubleValue();
+            fifteenMinuteRate = rates.getJsonNumber(2).doubleValue();
+            meanRate = obj.getJsonNumber("mean_rate").doubleValue();
+            count = obj.getJsonNumber("count").longValue();
+        }
+    }
+
+    private static final TimeUnit RATE_UNIT = TimeUnit.SECONDS;
+    private static final TimeUnit DURATION_UNIT = TimeUnit.MICROSECONDS;
+    private static final TimeUnit API_DURATION_UNIT = TimeUnit.MICROSECONDS;
+    private static final double DURATION_FACTOR = 1.0 / API_DURATION_UNIT.convert(1, DURATION_UNIT);
+
+    private static double toDuration(double micro) {
+        return micro * DURATION_FACTOR;
+    }
+
+    private static String unitString(TimeUnit u) {
+        String s = u.toString().toLowerCase(Locale.US);
+        return s.substring(0, s.length() - 1);
+    }
+
+    private class JmxMeter extends IntermediatelyUpdated implements JmxMeterMBean {
+        private Meter meter = new Meter();
+
+        public JmxMeter(String url, long interval) {
+            super(url, interval);
+        }
+
+        public JmxMeter(Supplier<JsonObject> supplier, long interval) {
+            super(supplier, interval);
+        }
+
+        @Override
+        public void update(JsonObject obj) {
+            meter = new Meter(obj);
+        }
+
+        @Override
+        public long getCount() {
+            update();
+            return meter.count;
+        }
+
+        @Override
+        public double getMeanRate() {
+            update();
+            return meter.meanRate;
+        }
+
+        @Override
+        public double getOneMinuteRate() {
+            update();
+            return meter.oneMinuteRate;
+        }
+
+        @Override
+        public double getFiveMinuteRate() {
+            update();
+            return meter.fiveMinuteRate;
+        }
+
+        @Override
+        public double getFifteenMinuteRate() {
+            update();
+            return meter.fifteenMinuteRate;
+        }
+
+        @Override
+        public String getRateUnit() {
+            return "event/" + unitString(RATE_UNIT);
+        }
+    }
+
+    public MetricMBean meter(String url) {
+        return new JmxMeter(url, CACHE_DURATION);
+    }
+
+    private static long[] asLongArray(JsonArray a) {
+        return a.getValuesAs(JsonNumber.class).stream().mapToLong(n -> n.longValue()).toArray();
+    }
+
+    private static interface Samples {
+        default double getValue(double quantile) {
+            return 0;
+        }
+
+        default long[] getValues() {
+            return new long[0];
+        }
+    }
+
+    private static class BufferSamples implements Samples {
+        private final long[] samples;
+
+        public BufferSamples(long[] samples) {
+            this.samples = samples;
+            Arrays.sort(this.samples);
+        }
+
+        @Override
+        public long[] getValues() {
+            return samples;
+        }
+
+        @Override
+        public double getValue(double quantile) {
+            if (quantile < 0.0 || quantile > 1.0) {
+                throw new IllegalArgumentException(quantile + " is not in [0..1]");
+            }
+
+            if (samples.length == 0) {
+                return 0.0;
+            }
+
+            final double pos = quantile * (samples.length + 1);
+
+            if (pos < 1) {
+                return samples[0];
+            }
+
+            if (pos >= samples.length) {
+                return samples[samples.length - 1];
+            }
+
+            final double lower = samples[(int) pos - 1];
+            final double upper = samples[(int) pos];
+            return lower + (pos - floor(pos)) * (upper - lower);
+        }
+    }
+
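A worked check of the interpolation above: for sorted samples [10, 20, 30, 40] and quantile 0.5, pos = 0.5 * (4 + 1) = 2.5, so the estimate is samples[1] + 0.5 * (samples[2] - samples[1]) = 25. The same arithmetic, self-contained (editor's illustration mirroring BufferSamples.getValue):

    import java.util.Arrays;
    import static java.lang.Math.floor;

    public class QuantileCheck {
        static double value(long[] samples, double q) {
            Arrays.sort(samples);
            double pos = q * (samples.length + 1);
            if (pos < 1) return samples[0];
            if (pos >= samples.length) return samples[samples.length - 1];
            double lower = samples[(int) pos - 1], upper = samples[(int) pos];
            return lower + (pos - floor(pos)) * (upper - lower);
        }

        public static void main(String[] args) {
            System.out.println(value(new long[] { 10, 20, 30, 40 }, 0.5)); // 25.0
        }
    }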
+    private static class Histogram {
+        private final long count;
+        private final long min;
+        private final long max;
+        private final double mean;
+        private final double stdDev;
+
+        private final Samples samples;
+
+        public Histogram(long count, long min, long max, double mean, double stdDev, Samples samples) {
+            this.count = count;
+            this.min = min;
+            this.max = max;
+            this.mean = mean;
+            this.stdDev = stdDev;
+            this.samples = samples;
+        }
+
+        public Histogram() {
+            this(0, 0, 0, 0, 0, new Samples() {
+            });
+        }
+
+        public Histogram(JsonObject obj) {
+            this(obj.getJsonNumber("count").longValue(), obj.getJsonNumber("min").longValue(),
+                    obj.getJsonNumber("max").longValue(), obj.getJsonNumber("mean").doubleValue(),
+                    obj.getJsonNumber("variance").doubleValue(), new BufferSamples(getValues(obj)));
+        }
+
+        public Histogram(EstimatedHistogram h) {
+            this(h.count(), h.min(), h.max(), h.mean(), 0, h);
+        }
+
+        private static long[] getValues(JsonObject obj) {
+            JsonArray arr = obj.getJsonArray("sample");
+            if (arr != null) {
+                return asLongArray(arr);
+            }
+            return new long[0];
+        }
+
+        public long[] getValues() {
+            return samples.getValues();
+        }
+
+        // Origin (and previous iterations of scylla-jmx)
+        // uses biased/ExponentiallyDecaying measurements
+        // for the history & quantile resolution.
+        // However, for our use that is just gobbledygook, since
+        // we, at occasions of being asked, and when a certain time
+        // has passed, ask the actual scylla server for a
+        // "values" buffer. A buffer with no information whatsoever
+        // on how said values correlate to actual sampling
+        // time.
+        // So, applying time weights at this level is just
+        // wrong. We can just as well treat this as a uniform
+        // distribution.
+        // Obvious improvement: Send time/value tuples instead.
+        public double getValue(double quantile) {
+            return samples.getValue(quantile);
+        }
+
+        public long getCount() {
+            return count;
+        }
+
+        public long getMin() {
+            return min;
+        }
+
+        public long getMax() {
+            return max;
+        }
+
+        public double getMean() {
+            return mean;
+        }
+
+        public double getStdDev() {
+            return stdDev;
+        }
+    }
+
+    private static class EstimatedHistogram implements Samples {
+        /**
+         * The series of values to which the counts in `buckets` correspond: 1,
+         * 2, 3, 4, 5, 6, 7, 8, 10, 12, 14, 17, 20, etc. Thus, a `buckets` of
+         * [0, 0, 1, 10] would mean we had seen one value of 3 and 10 values of
+         * 4.
+         *
+         * The series starts at 1 and grows by 1.2 each time (rounding and
+         * removing duplicates). It goes from 1 to around 36M by default
+         * (creating 90+1 buckets), which will give us timing resolution from
+         * microseconds to 36 seconds, with less precision as the numbers get
+         * larger.
+         *
+         * Each bucket represents values from (previous bucket offset, current
+         * offset].
+         */
+        private final long[] bucketOffsets;
+        // buckets is one element longer than bucketOffsets -- the last element
+        // is values greater than the last offset
+        private long[] buckets;
+
+        public EstimatedHistogram(JsonObject obj) {
+            this(asLongArray(obj.getJsonArray("bucket_offsets")), asLongArray(obj.getJsonArray("buckets")));
+        }
+
+        public EstimatedHistogram(long[] offsets, long[] bucketData) {
+            assert bucketData.length == offsets.length + 1;
+            bucketOffsets = offsets;
+            buckets = bucketData;
+        }
+
+        /**
+         * @return the smallest value that could have been added to this
+         *         histogram
+         */
+        public long min() {
+            for (int i = 0; i < buckets.length; i++) {
+                if (buckets[i] > 0) {
+                    return i == 0 ? 0 : 1 + bucketOffsets[i - 1];
+                }
+            }
+            return 0;
+        }
+
+        /**
+         * @return the largest value that could have been added to this
+         *         histogram. If the histogram overflowed, returns
+         *         Long.MAX_VALUE.
+         */
+        public long max() {
+            int lastBucket = buckets.length - 1;
+            if (buckets[lastBucket] > 0) {
+                return Long.MAX_VALUE;
+            }
+
+            for (int i = lastBucket - 1; i >= 0; i--) {
+                if (buckets[i] > 0) {
+                    return bucketOffsets[i];
+                }
+            }
+            return 0;
+        }
+
+        @Override
+        public long[] getValues() {
+            return buckets;
+        }
+
+        /**
+         * @param percentile
+         * @return estimated value at given percentile
+         */
+        @Override
+        public double getValue(double percentile) {
+            assert percentile >= 0 && percentile <= 1.0;
+            int lastBucket = buckets.length - 1;
+            if (buckets[lastBucket] > 0) {
+                throw new IllegalStateException("Unable to compute when histogram overflowed");
+            }
+
+            long pcount = (long) Math.floor(count() * percentile);
+            if (pcount == 0) {
+                return 0;
+            }
+
+            long elements = 0;
+            for (int i = 0; i < lastBucket; i++) {
+                elements += buckets[i];
+                if (elements >= pcount) {
+                    return bucketOffsets[i];
+                }
+            }
+            return 0;
+        }
+
+        /**
+         * @return the mean histogram value (average of bucket offsets,
+         *         weighted by count)
+         * @throws IllegalStateException
+         *             if any values were greater than the largest bucket
+         *             threshold
+         */
+        public long mean() {
+            int lastBucket = buckets.length - 1;
+            if (buckets[lastBucket] > 0) {
+                throw new IllegalStateException("Unable to compute ceiling for max when histogram overflowed");
+            }
+
+            long elements = 0;
+            long sum = 0;
+            for (int i = 0; i < lastBucket; i++) {
+                long bCount = buckets[i];
+                elements += bCount;
+                sum += bCount * bucketOffsets[i];
+            }
+
+            return (long) Math.ceil((double) sum / elements);
+        }
+
+        /**
+         * @return the total number of non-zero values
+         */
+        public long count() {
+            return Arrays.stream(buckets).sum();
+        }
+
+        /**
+         * @return true if this histogram has overflowed -- that is, a value
+         *         larger than our largest bucket could bound was added
+         */
+        @SuppressWarnings("unused")
+        public boolean isOverflowed() {
+            return buckets[buckets.length - 1] > 0;
+        }
+    }
+
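The documented offset series (start at 1, grow by 1.2, round, force growth when rounding collapses) can be reproduced in a few lines. This is an editor's illustration of the series described in the comment, not code from the diff:

    public class BucketOffsets {
        public static void main(String[] args) {
            // Reproduces the documented series: 1, 2, 3, ..., 8, 10, 12, 14, 17, 20, ...
            long[] offsets = new long[90];
            long last = 1;
            offsets[0] = last;
            for (int i = 1; i < offsets.length; i++) {
                long next = Math.round(last * 1.2);
                if (next == last) {
                    next++; // rounding produced a duplicate; step by one instead
                }
                offsets[i] = next;
                last = next;
            }
            System.out.println(java.util.Arrays.toString(offsets));
        }
    }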
|
private class JmxHistogram extends IntermediatelyUpdated implements JmxHistogramMBean {
|
||||||
|
private Histogram histogram = new Histogram();
|
||||||
|
|
||||||
|
public JmxHistogram(String url, long interval) {
|
||||||
|
super(url, interval);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public void update(JsonObject obj) {
|
||||||
|
if (obj.containsKey("hist")) {
|
||||||
|
obj = obj.getJsonObject("hist");
|
||||||
|
}
|
||||||
|
if (obj.containsKey("buckets")) {
|
||||||
|
histogram = new Histogram(new EstimatedHistogram(obj));
|
||||||
|
} else {
|
||||||
|
histogram = new Histogram(obj);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getCount() {
|
||||||
|
update();
|
||||||
|
return histogram.getCount();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getMin() {
|
||||||
|
update();
|
||||||
|
return histogram.getMin();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public long getMax() {
|
||||||
|
update();
|
||||||
|
return histogram.getMax();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public double getMean() {
|
||||||
|
update();
|
||||||
|
return histogram.getMean();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public double getStdDev() {
|
||||||
|
update();
|
||||||
|
return histogram.getStdDev();
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public double get50thPercentile() {
|
||||||
|
update();
|
||||||
|
return histogram.getValue(.5);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public double get75thPercentile() {
|
||||||
|
update();
|
||||||
|
return histogram.getValue(.75);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public double get95thPercentile() {
|
||||||
|
update();
|
||||||
|
return histogram.getValue(.95);
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public double get98thPercentile() {
|
||||||
|
update();
|
||||||
|
return histogram.getValue(.98);
|
||||||
|
}
|
||||||
|
|
||||||
|
        @Override
        public double get99thPercentile() {
            update();
            return histogram.getValue(.99);
        }

        @Override
        public double get999thPercentile() {
            update();
            return histogram.getValue(.999);
        }

        @Override
        public long[] values() {
            update();
            return histogram.getValues();
        }
    }

    public MetricMBean histogram(String url, boolean considerZeroes) {
        return new JmxHistogram(url, UPDATE_INTERVAL);
    }

    private class JmxTimer extends JmxMeter implements JmxTimerMBean {
        private Histogram histogram = new Histogram();

        public JmxTimer(String url, long interval) {
            super(url, interval);
        }

        @Override
        public void update(JsonObject obj) {
            // TODO: this is not atomic.
            super.update(obj.getJsonObject("meter"));
            histogram = new Histogram(obj.getJsonObject("hist"));
        }

        @Override
        public double getMin() {
            update();
            return toDuration(histogram.getMin());
        }

        @Override
        public double getMax() {
            update();
            return toDuration(histogram.getMax());
        }

        @Override
        public double getMean() {
            update();
            return toDuration(histogram.getMean());
        }

        @Override
        public double getStdDev() {
            update();
            return toDuration(histogram.getStdDev());
        }

        @Override
        public double get50thPercentile() {
            update();
            return toDuration(histogram.getValue(.5));
        }

        @Override
        public double get75thPercentile() {
            update();
            return toDuration(histogram.getValue(.75));
        }

        @Override
        public double get95thPercentile() {
            update();
            return toDuration(histogram.getValue(.95));
        }

        @Override
        public double get98thPercentile() {
            update();
            return toDuration(histogram.getValue(.98));
        }

        @Override
        public double get99thPercentile() {
            update();
            return toDuration(histogram.getValue(.99));
        }

        @Override
        public double get999thPercentile() {
            update();
            return toDuration(histogram.getValue(.999));
        }

        @Override
        public long[] values() {
            update();
            return histogram.getValues();
        }

        @Override
        public String getDurationUnit() {
            update();
            return DURATION_UNIT.toString().toLowerCase(Locale.US);
        }
    }

    public MetricMBean timer(String url) {
        return new JmxTimer(url, UPDATE_INTERVAL);
    }

    public interface MetricMBean {
    }

    public static interface JmxGaugeMBean extends MetricMBean {
        Object getValue();
    }

    public interface JmxHistogramMBean extends MetricMBean {
        long getCount();

        long getMin();

        long getMax();

        double getMean();

        double getStdDev();

        double get50thPercentile();

        double get75thPercentile();

        double get95thPercentile();

        double get98thPercentile();

        double get99thPercentile();

        double get999thPercentile();

        long[] values();
    }

    public interface JmxCounterMBean extends MetricMBean {
        long getCount();
    }

    public interface JmxMeterMBean extends MetricMBean {
        long getCount();

        double getMeanRate();

        double getOneMinuteRate();

        double getFiveMinuteRate();

        double getFifteenMinuteRate();

        String getRateUnit();
    }

    public interface JmxTimerMBean extends JmxMeterMBean {
        double getMin();

        double getMax();

        double getMean();

        double getStdDev();

        double get50thPercentile();

        double get75thPercentile();

        double get95thPercentile();

        double get98thPercentile();

        double get99thPercentile();

        double get999thPercentile();

        long[] values();

        String getDurationUnit();
    }
}
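The MBean types above are plain JMX attribute views, so any standard JMX client can read them once registered. A minimal sketch, assuming the usual RMI connector is exposed and using an illustrative table-metric name (the host, port, and object name are placeholders, not values taken from this code):

    import javax.management.MBeanServerConnection;
    import javax.management.ObjectName;
    import javax.management.remote.JMXConnector;
    import javax.management.remote.JMXConnectorFactory;
    import javax.management.remote.JMXServiceURL;

    public class ReadTimerPercentile {
        public static void main(String[] args) throws Exception {
            // Hypothetical endpoint; 7199 is only the conventional Cassandra JMX port.
            JMXServiceURL url = new JMXServiceURL("service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi");
            try (JMXConnector jmxc = JMXConnectorFactory.connect(url)) {
                MBeanServerConnection conn = jmxc.getMBeanServerConnection();
                // Hypothetical metric name; real names come from the MetricNameFactory implementations below.
                ObjectName name = new ObjectName(
                        "org.apache.cassandra.metrics:type=Table,keyspace=ks,scope=tbl,name=CoordinatorReadLatency");
                // get99thPercentile() surfaces as the "99thPercentile" attribute.
                Double p99 = (Double) conn.getAttribute(name, "99thPercentile");
                System.out.println("p99 = " + p99);
            }
        }
    }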
@@ -23,27 +23,21 @@
  */
 package org.apache.cassandra.metrics;
 
-import com.cloudius.urchin.metrics.APIMetrics;
-import com.cloudius.urchin.metrics.DefaultNameFactory;
-import com.cloudius.urchin.metrics.MetricNameFactory;
-import com.yammer.metrics.core.Counter;
+import javax.management.MalformedObjectNameException;
 
 /**
  * Metrics related to Storage.
  */
-public class StorageMetrics {
-    private static final MetricNameFactory factory = new DefaultNameFactory(
-            "Storage");
-
-    public static final Counter load = APIMetrics.newCounter(
-            "/storage_service/metrics/load", factory.createMetricName("Load"));
-    public static final Counter exceptions = APIMetrics.newCounter(
-            "/storage_service/metrics/exceptions",
-            factory.createMetricName("Exceptions"));
-    public static final Counter totalHintsInProgress = APIMetrics.newCounter(
-            "/storage_service/metrics/hints_in_progress",
-            factory.createMetricName("TotalHintsInProgress"));
-    public static final Counter totalHints = APIMetrics.newCounter(
-            "/storage_service/metrics/total_hints",
-            factory.createMetricName("TotalHints"));
-}
+public class StorageMetrics implements Metrics {
+    @Override
+    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
+        MetricNameFactory factory = new DefaultNameFactory("Storage");
+        registry.register(() -> registry.counter("/storage_service/metrics/load"), factory.createMetricName("Load"));
+        registry.register(() -> registry.counter("/storage_service/metrics/exceptions"),
+                factory.createMetricName("Exceptions"));
+        registry.register(() -> registry.counter("/storage_service/metrics/hints_in_progress"),
+                factory.createMetricName("TotalHintsInProgress"));
+        registry.register(() -> registry.counter("/storage_service/metrics/total_hints"),
+                factory.createMetricName("TotalHints"));
+    }
+}
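The hunk above is representative of the refactor this series applies across the metric classes: eagerly-created static APIMetrics counters become a Metrics implementation whose register method binds counters lazily against a MetricsRegistry. A minimal sketch of the same pattern for a hypothetical metric group (the class name and REST URL are invented for illustration):

    public class CacheMetrics implements Metrics {
        @Override
        public void register(MetricsRegistry registry) throws MalformedObjectNameException {
            MetricNameFactory factory = new DefaultNameFactory("Cache");
            // Each registered counter polls the named REST endpoint when its MBean is read.
            registry.register(() -> registry.counter("/cache_service/metrics/hits"),
                    factory.createMetricName("Hits"));
        }
    }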
src/main/java/org/apache/cassandra/metrics/StreamingMetrics.java (new file, 111 lines)
@@ -0,0 +1,111 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Copyright 2015 ScyllaDB
 *
 * Modified by ScyllaDB
 */
package org.apache.cassandra.metrics;

import static java.util.Arrays.asList;
import static org.apache.cassandra.metrics.DefaultNameFactory.createMetricName;

import jakarta.json.JsonArray;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.EnumSet;
import java.util.HashSet;
import java.util.Set;

import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;
import javax.management.OperationsException;

import com.scylladb.jmx.api.APIClient;
import com.scylladb.jmx.metrics.APIMBean;
import com.scylladb.jmx.metrics.RegistrationChecker;
import com.scylladb.jmx.metrics.RegistrationMode;
import com.sun.jmx.mbeanserver.JmxMBeanServer;

/**
 * Metrics for streaming.
 */
public class StreamingMetrics {
    public static final String TYPE_NAME = "Streaming";

    private static final HashSet<ObjectName> globalNames;

    static {
        try {
            globalNames = new HashSet<ObjectName>(asList(createMetricName(TYPE_NAME, "ActiveOutboundStreams", null),
                    createMetricName(TYPE_NAME, "TotalIncomingBytes", null),
                    createMetricName(TYPE_NAME, "TotalOutgoingBytes", null)));
        } catch (MalformedObjectNameException e) {
            throw new Error(e);
        }
    }

    private StreamingMetrics() {
    }

    private static boolean isStreamingName(ObjectName n) {
        return TYPE_NAME.equals(n.getKeyProperty("type"));
    }

    public static RegistrationChecker createRegistrationChecker() {
        return new RegistrationChecker() {
            @Override
            protected void doCheck(APIClient client, JmxMBeanServer server, EnumSet<RegistrationMode> mode) throws OperationsException, UnknownHostException {
                Set<ObjectName> all = new HashSet<ObjectName>(globalNames);
                JsonArray streams = client.getJsonArray("/stream_manager/");
                for (int i = 0; i < streams.size(); i++) {
                    JsonArray sessions = streams.getJsonObject(i).getJsonArray("sessions");
                    for (int j = 0; j < sessions.size(); j++) {
                        String peer = sessions.getJsonObject(j).getString("peer");
                        String scope = InetAddress.getByName(peer).getHostAddress().replaceAll(":", ".");
                        all.add(createMetricName(TYPE_NAME, "IncomingBytes", scope));
                        all.add(createMetricName(TYPE_NAME, "OutgoingBytes", scope));
                    }
                }

                MetricsRegistry registry = new MetricsRegistry(client, server);
                APIMBean.checkRegistration(server, all, mode, StreamingMetrics::isStreamingName, n -> {
                    String scope = n.getKeyProperty("scope");
                    String name = n.getKeyProperty("name");

                    String url = null;
                    if ("ActiveOutboundStreams".equals(name)) {
                        url = "/stream_manager/metrics/outbound";
                    } else if ("IncomingBytes".equals(name) || "TotalIncomingBytes".equals(name)) {
                        url = "/stream_manager/metrics/incoming";
                    } else if ("OutgoingBytes".equals(name) || "TotalOutgoingBytes".equals(name)) {
                        url = "/stream_manager/metrics/outgoing";
                    }
                    if (url == null) {
                        throw new IllegalArgumentException();
                    }
                    if (scope != null) {
                        url = url + "/" + scope;
                    }
                    return registry.counter(url);
                });
            }
        };
    }
}
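The per-peer scope in doCheck() is just the peer's address with colons replaced by dots, so the object names the checker registers can be predicted from the session list. A standalone sketch of that mapping, using plain string formatting rather than the DefaultNameFactory used above (the key ordering shown is illustrative):

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class StreamingMetricName {
        // Mirrors the scope computation in doCheck() above.
        static String objectNameFor(String peer, String metric) throws UnknownHostException {
            String scope = InetAddress.getByName(peer).getHostAddress().replaceAll(":", ".");
            return String.format("org.apache.cassandra.metrics:type=Streaming,scope=%s,name=%s", scope, metric);
        }

        public static void main(String[] args) throws UnknownHostException {
            // Prints: org.apache.cassandra.metrics:type=Streaming,scope=127.0.0.1,name=IncomingBytes
            System.out.println(objectNameFor("127.0.0.1", "IncomingBytes"));
        }
    }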
src/main/java/org/apache/cassandra/metrics/TableMetrics.java (new file, 553 lines)
@@ -0,0 +1,553 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.cassandra.metrics;

import static com.scylladb.jmx.api.APIClient.getReader;

import java.io.InvalidObjectException;
import java.io.ObjectStreamException;
import java.util.Hashtable;
import java.util.function.BiFunction;
import java.util.function.Function;
import java.util.function.Supplier;

import javax.management.MalformedObjectNameException;
import javax.management.ObjectName;

import org.apache.cassandra.db.ColumnFamilyStore;

import com.scylladb.jmx.api.APIClient;

/**
 * Metrics for {@link ColumnFamilyStore}.
 */
public class TableMetrics implements Metrics {
    private final MetricNameFactory factory;
    private final MetricNameFactory aliasFactory;
    private static final MetricNameFactory globalFactory = new AllTableMetricNameFactory("Table");
    private static final MetricNameFactory globalAliasFactory = new AllTableMetricNameFactory("ColumnFamily");
    private static final LatencyMetrics[] globalLatency = new LatencyMetrics[] {
            new LatencyMetrics("Read", compose("read_latency"), globalFactory, globalAliasFactory),
            new LatencyMetrics("Write", compose("write_latency"), globalFactory, globalAliasFactory),
            new LatencyMetrics("Range", compose("range_latency"), globalFactory, globalAliasFactory), };

    private final String cfName;
    private final LatencyMetrics[] latencyMetrics;

    public TableMetrics(String keyspace, String columnFamily, boolean isIndex) {
        this.factory = new TableMetricNameFactory(keyspace, columnFamily, isIndex, "Table");
        this.aliasFactory = new TableMetricNameFactory(keyspace, columnFamily, isIndex, "ColumnFamily");
        this.cfName = keyspace + ":" + columnFamily;

        latencyMetrics = new LatencyMetrics[] {
                new LatencyMetrics("Read", compose("read_latency"), cfName, factory, aliasFactory),
                new LatencyMetrics("Write", compose("write_latency"), cfName, factory, aliasFactory),
                new LatencyMetrics("Range", compose("range_latency"), cfName, factory, aliasFactory),

                new LatencyMetrics("CasPrepare", compose("cas_prepare"), cfName, factory, aliasFactory),
                new LatencyMetrics("CasPropose", compose("cas_propose"), cfName, factory, aliasFactory),
                new LatencyMetrics("CasCommit", compose("cas_commit"), cfName, factory, aliasFactory), };
    }

    @Override
    public void register(MetricsRegistry registry) throws MalformedObjectNameException {
        Registry r = new Registry(registry, factory, aliasFactory, cfName);
        registerCommon(r);
        registerLocal(r);
    }

    @Override
    public void registerGlobals(MetricsRegistry registry) throws MalformedObjectNameException {
        Registry r = new Registry(registry, globalFactory, globalAliasFactory, null);
        registerCommon(r);
        for (LatencyMetrics l : globalLatency) {
            l.register(registry);
        }
    }

    private static String compose(String base, String name) {
        String s = "/column_family/metrics/" + base;
        return name != null ? s + "/" + name : s;
    }

    private static String compose(String base) {
        return compose(base, null);
    }

    /**
     * Creates metrics for given {@link ColumnFamilyStore}.
     *
     * @param cfs
     *            ColumnFamilyStore to measure metrics
     */
    static class Registry extends MetricsRegistry {
        @SuppressWarnings("unused")
        private Function<APIClient, Long> newGauge(final String url) {
            return newGauge(Long.class, url);
        }

        public <T> Function<APIClient, T> newGauge(BiFunction<APIClient, String, T> function, String url) {
            return c -> {
                return function.apply(c, url);
            };
        }

        private <T> Function<APIClient, T> newGauge(Class<T> type, final String url) {
            return newGauge(getReader(type), url);
        }

        final MetricNameFactory factory;
        final MetricNameFactory aliasFactory;
        final String cfName;
        final MetricsRegistry other;

        public Registry(MetricsRegistry other, MetricNameFactory factory, MetricNameFactory aliasFactory,
                String cfName) {
            super(other);
            this.other = other;
            this.cfName = cfName;
            this.factory = factory;
            this.aliasFactory = aliasFactory;
        }

        @Override
        public void register(Supplier<MetricMBean> f, ObjectName... objectNames) {
            other.register(f, objectNames);
        }

        public void createTableGauge(String name, String uri) throws MalformedObjectNameException {
            createTableGauge(name, name, uri);
        }

        public void createTableGauge(String name, String alias, String uri) throws MalformedObjectNameException {
            createTableGauge(Long.class, name, alias, uri);
        }

        public <T> void createTableGauge(Class<T> c, String name, String uri) throws MalformedObjectNameException {
            createTableGauge(c, c, name, name, uri);
        }

        public <T> void createTableGauge(Class<T> c, String name, String alias, String uri) throws MalformedObjectNameException {
            createTableGauge(c, name, alias, uri, getReader(c));
        }

        public <T> void createTableGauge(Class<T> c, String name, String uri, BiFunction<APIClient, String, T> f)
                throws MalformedObjectNameException {
            createTableGauge(c, name, name, uri, f);
        }

        public <T> void createTableGauge(Class<T> c, String name, String alias, String uri,
                BiFunction<APIClient, String, T> f) throws MalformedObjectNameException {
            register(() -> gauge(newGauge(f, compose(uri, cfName))), factory.createMetricName(name),
                    aliasFactory.createMetricName(alias));
        }

        private static <T> BiFunction<APIClient, String, T> getDummy(Class<T> type) {
            if (type == String.class) {
                return (c, s) -> type.cast("");
            } else if (type == Integer.class) {
                return (c, s) -> type.cast(0);
            } else if (type == Double.class) {
                return (c, s) -> type.cast(0.0);
            } else if (type == Long.class) {
                return (c, s) -> type.cast(0L);
            }
            throw new IllegalArgumentException(type.getName());
        }

        public <T> void createDummyTableGauge(Class<T> c, String name) throws MalformedObjectNameException {
            register(() -> gauge(newGauge(getDummy(c), null)), factory.createMetricName(name),
                    aliasFactory.createMetricName(name));
        }

        public <L, G> void createTableGauge(Class<L> c1, Class<G> c2, String name, String alias, String uri)
                throws MalformedObjectNameException {
            if (cfName != null) {
                createTableGauge(c1, name, alias, uri, getReader(c1));
            } else { // global case
                createTableGauge(c2, name, alias, uri, getReader(c2));
            }
        }

        public void createTableCounter(String name, String uri) throws MalformedObjectNameException {
            createTableCounter(name, name, uri);
        }

        public void createTableCounter(String name, String alias, String uri) throws MalformedObjectNameException {
            register(() -> counter(compose(uri, cfName)), factory.createMetricName(name),
                    aliasFactory.createMetricName(alias));
        }

        public void createDummyTableCounter(String name) throws MalformedObjectNameException {
            register(() -> counter(null), factory.createMetricName(name),
                    aliasFactory.createMetricName(name));
        }

        public void createTableHistogram(String name, String uri, boolean considerZeros)
                throws MalformedObjectNameException {
            createTableHistogram(name, name, uri, considerZeros);
        }

        public void createTableHistogram(String name, String alias, String uri, boolean considerZeros)
                throws MalformedObjectNameException {
            register(() -> histogram(compose(uri, cfName), considerZeros), factory.createMetricName(name),
                    aliasFactory.createMetricName(alias));
        }

        public void createTimer(String name, String uri) throws MalformedObjectNameException {
            register(() -> timer(compose(uri, cfName)), factory.createMetricName(name));
        }
    }

    private void registerLocal(Registry registry) throws MalformedObjectNameException {
        registry.createTableGauge(long[].class, "EstimatedPartitionSizeHistogram", "EstimatedRowSizeHistogram",
                "estimated_row_size_histogram", APIClient::getEstimatedHistogramAsLongArrValue);
        registry.createTableGauge("EstimatedPartitionCount", "EstimatedRowCount", "estimated_row_count");

        registry.createTableGauge(long[].class, "EstimatedColumnCountHistogram", "estimated_column_count_histogram",
                APIClient::getEstimatedHistogramAsLongArrValue);
        registry.createTableGauge(Double.class, "KeyCacheHitRate", "key_cache_hit_rate");

        registry.createTimer("CoordinatorReadLatency", "coordinator/read");
        registry.createTimer("CoordinatorScanLatency", "coordinator/scan");
        registry.createTimer("WaitingOnFreeMemtableSpace", "waiting_on_free_memtable");

        for (LatencyMetrics l : latencyMetrics) {
            l.register(registry);
        }

        // TODO: implement
        registry.createDummyTableCounter("DroppedMutations");
    }

    private static void registerCommon(Registry registry) throws MalformedObjectNameException {
        registry.createTableGauge("MemtableColumnsCount", "memtable_columns_count");
        registry.createTableGauge("MemtableOnHeapSize", "memtable_on_heap_size");
        registry.createTableGauge("MemtableOffHeapSize", "memtable_off_heap_size");
        registry.createTableGauge("MemtableLiveDataSize", "memtable_live_data_size");
        registry.createTableGauge("AllMemtablesHeapSize", "all_memtables_on_heap_size");
        registry.createTableGauge("AllMemtablesOffHeapSize", "all_memtables_off_heap_size");
        registry.createTableGauge("AllMemtablesLiveDataSize", "all_memtables_live_data_size");

        registry.createTableCounter("MemtableSwitchCount", "memtable_switch_count");

        registry.createTableHistogram("SSTablesPerReadHistogram", "sstables_per_read_histogram", true);
        registry.createTableGauge(Double.class, "CompressionRatio", "compression_ratio");

        registry.createTableCounter("PendingFlushes", "pending_flushes");

        registry.createTableGauge(Integer.class, Long.class, "PendingCompactions", "PendingCompactions",
                "pending_compactions");
        registry.createTableGauge(Integer.class, Long.class, "LiveSSTableCount", "LiveSSTableCount",
                "live_ss_table_count");

        registry.createTableCounter("LiveDiskSpaceUsed", "live_disk_space_used");
        registry.createTableCounter("TotalDiskSpaceUsed", "total_disk_space_used");
        registry.createTableGauge("MinPartitionSize", "MinRowSize", "min_row_size");
        registry.createTableGauge("MaxPartitionSize", "MaxRowSize", "max_row_size");
        registry.createTableGauge("MeanPartitionSize", "MeanRowSize", "mean_row_size");

        registry.createTableGauge("BloomFilterFalsePositives", "bloom_filter_false_positives");
        registry.createTableGauge("RecentBloomFilterFalsePositives", "recent_bloom_filter_false_positives");
        registry.createTableGauge(Double.class, "BloomFilterFalseRatio", "bloom_filter_false_ratio");
        registry.createTableGauge(Double.class, "RecentBloomFilterFalseRatio", "recent_bloom_filter_false_ratio");

        registry.createTableGauge("BloomFilterDiskSpaceUsed", "bloom_filter_disk_space_used");
        registry.createTableGauge("BloomFilterOffHeapMemoryUsed", "bloom_filter_off_heap_memory_used");
        registry.createTableGauge("IndexSummaryOffHeapMemoryUsed", "index_summary_off_heap_memory_used");
        registry.createTableGauge("CompressionMetadataOffHeapMemoryUsed", "compression_metadata_off_heap_memory_used");
        registry.createTableGauge("SpeculativeRetries", "speculative_retries");

        registry.createTableHistogram("TombstoneScannedHistogram", "tombstone_scanned_histogram", false);
        registry.createTableHistogram("LiveScannedHistogram", "live_scanned_histogram", false);
        registry.createTableHistogram("ColUpdateTimeDeltaHistogram", "col_update_time_delta_histogram", false);

        // We do not want to capture view mutation specific metrics for a view.
        // They only make sense to capture on the base table.
        // TODO: views
        // if (!cfs.metadata.isView())
        // {
        // viewLockAcquireTime = createTableTimer("ViewLockAcquireTime",
        // cfs.keyspace.metric.viewLockAcquireTime);
        // viewReadTime = createTableTimer("ViewReadTime",
        // cfs.keyspace.metric.viewReadTime);
        // }

        registry.createTableGauge("SnapshotsSize", "snapshots_size");
        registry.createTableCounter("RowCacheHitOutOfRange", "row_cache_hit_out_of_range");
        registry.createTableCounter("RowCacheHit", "row_cache_hit");
        registry.createTableCounter("RowCacheMiss", "row_cache_miss");

        // TODO: implement
        registry.createDummyTableGauge(Double.class, "PercentRepaired");
    }

    static class TableMetricObjectName extends javax.management.ObjectName {
        private final TableMetricStringNameFactory factory;
        private final String metricName;

        public TableMetricObjectName(TableMetricStringNameFactory factory, String metricName) throws MalformedObjectNameException {
            super("");
            this.factory = factory;
            this.metricName = metricName;
        }

        @Override
        public boolean isPropertyValuePattern(String property) {
            return false;
        }

        @Override
        public String getCanonicalName() {
            return factory.createMetricStringName(metricName);
        }

        @Override
        public String getDomain() {
            return factory.getDomain();
        }

        @Override
        public String getKeyProperty(String property) {
            if ("name".equals(property)) {
                return metricName;
            }
            return factory.getKeyProperty(property);
        }

        @Override
        public Hashtable<String, String> getKeyPropertyList() {
            Hashtable<String, String> res = factory.getKeyPropertyList();
            res.put("name", metricName);
            return res;
        }

        @Override
        public String getKeyPropertyListString() {
            return factory.getKeyPropertyListString(metricName);
        }

        @Override
        public String getCanonicalKeyPropertyListString() {
            return getKeyPropertyListString();
        }

        @Override
        public String toString() {
            return getCanonicalName();
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) {
                return true;
            }
            return getCanonicalName().equals(((ObjectName) o).getCanonicalName());
        }

        @Override
        public int hashCode() {
            return getCanonicalName().hashCode();
        }

        @Override
        public boolean apply(ObjectName name) {
            if (name.isDomainPattern() || name.isPropertyListPattern() || name.isPropertyValuePattern()) {
                return false;
            }
            return getCanonicalName().equals(name.getCanonicalName());
        }

        @Override
        public boolean isPattern() {
            return false;
        }

        @Override
        public boolean isDomainPattern() {
            return false;
        }

        @Override
        public boolean isPropertyPattern() {
            return false;
        }

        @Override
        public boolean isPropertyListPattern() {
            return false;
        }

        @Override
        public boolean isPropertyValuePattern() {
            return false;
        }

        /**
         * This type is not really serializable.
         * Replace it with a vanilla ObjectName.
         */
        private Object writeReplace() throws ObjectStreamException {
            try {
                return new ObjectName(getDomain(), getKeyPropertyList());
            } catch (MalformedObjectNameException e) {
                throw new InvalidObjectException(toString());
            }
        }
    }

    static interface TableMetricStringNameFactory {
        String createMetricStringName(String metricName);

        String getDomain();

        String getKeyProperty(String property);

        Hashtable<String, String> getKeyPropertyList();

        String getKeyPropertyListString(String metricName);
    }

    static class TableMetricNameFactory implements MetricNameFactory, TableMetricStringNameFactory {
        private final String keyspaceName;
        private final String tableName;
        private final boolean isIndex;
        private final String type;

        public TableMetricNameFactory(String keyspaceName, String tableName, boolean isIndex, String type) {
            this.keyspaceName = keyspaceName;
            this.tableName = tableName;
            this.isIndex = isIndex;
            this.type = type;
        }

        private void appendKeyPropertyListString(final StringBuilder sb, final String metricName) {
            String type = isIndex ? "Index" + this.type : this.type;
            // Order matters here - keys have to be sorted
            sb.append("keyspace=").append(keyspaceName);
            sb.append(",name=").append(metricName);
            sb.append(",scope=").append(tableName);
            sb.append(",type=").append(type);
        }

        @Override
        public String createMetricStringName(String metricName) {
            String groupName = TableMetrics.class.getPackage().getName();

            StringBuilder mbeanName = new StringBuilder();
            mbeanName.append(groupName).append(":");
            appendKeyPropertyListString(mbeanName, metricName);
            return mbeanName.toString();
        }

        @Override
        public String getDomain() {
            return TableMetrics.class.getPackage().getName();
        }

        @Override
        public String getKeyProperty(String property) {
            switch (property) {
            case "keyspace":
                return keyspaceName;
            case "scope":
                return tableName;
            case "type":
                return type;
            default:
                return null;
            }
        }

        @Override
        public Hashtable<String, String> getKeyPropertyList() {
            Hashtable<String, String> res = new Hashtable<>();
            res.put("keyspace", keyspaceName);
            res.put("scope", tableName);
            res.put("type", type);
            return res;
        }

        @Override
        public String getKeyPropertyListString(String metricName) {
            final StringBuilder sb = new StringBuilder();
            appendKeyPropertyListString(sb, metricName);
            return sb.toString();
        }

        @Override
        public ObjectName createMetricName(String metricName) throws MalformedObjectNameException {
            return new TableMetricObjectName(this, metricName);
        }
    }

    static class AllTableMetricNameFactory implements MetricNameFactory, TableMetricStringNameFactory {
        private final String type;

        public AllTableMetricNameFactory(String type) {
            this.type = type;
        }

        private void appendKeyPropertyListString(final StringBuilder sb, final String metricName) {
            // Order matters here - keys have to be sorted
            sb.append("name=").append(metricName);
            sb.append(",type=").append(type);
        }

        @Override
        public String createMetricStringName(String metricName) {
            String groupName = TableMetrics.class.getPackage().getName();
            StringBuilder mbeanName = new StringBuilder();
            mbeanName.append(groupName).append(":");
            appendKeyPropertyListString(mbeanName, metricName);
            return mbeanName.toString();
        }

        @Override
        public String getDomain() {
            return TableMetrics.class.getPackage().getName();
        }

        @Override
        public String getKeyProperty(String property) {
            switch (property) {
            case "type":
                return type;
            default:
                return null;
            }
        }

        @Override
        public Hashtable<String, String> getKeyPropertyList() {
            Hashtable<String, String> res = new Hashtable<>();
            res.put("type", type);
            return res;
        }

        @Override
        public String getKeyPropertyListString(String metricName) {
            final StringBuilder sb = new StringBuilder();
            appendKeyPropertyListString(sb, metricName);
            return sb.toString();
        }

        @Override
        public ObjectName createMetricName(String metricName) throws MalformedObjectNameException {
            return new TableMetricObjectName(this, metricName);
        }
    }

    public enum Sampler {
        READS, WRITES
    }
}
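register and registerGlobals are the two entry points of this class: the first binds the per-table MBeans for one keyspace/table pair, the second the cluster-wide "Table"/"ColumnFamily" aggregates. A minimal sketch of wiring them up, assuming an APIClient and JmxMBeanServer are available as in StreamingMetrics above ("ks" and "tbl" are placeholder names):

    package org.apache.cassandra.metrics;

    import javax.management.MalformedObjectNameException;

    import com.scylladb.jmx.api.APIClient;
    import com.sun.jmx.mbeanserver.JmxMBeanServer;

    public class RegisterTableMetricsExample {
        static void registerFor(APIClient client, JmxMBeanServer server) throws MalformedObjectNameException {
            MetricsRegistry registry = new MetricsRegistry(client, server);
            TableMetrics metrics = new TableMetrics("ks", "tbl", false /* not an index */);
            metrics.register(registry);        // per-table gauges, counters, histograms, timers
            metrics.registerGlobals(registry); // shared aggregates across all tables
        }
    }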
@@ -22,140 +22,255 @@
  */
 package org.apache.cassandra.net;
 
-import java.lang.management.ManagementFactory;
-import java.net.*;
-import java.util.*;
+import static java.util.Collections.emptyMap;
 
-import javax.management.MBeanServer;
-import javax.management.ObjectName;
+import jakarta.json.JsonArray;
+import jakarta.json.JsonObject;
+import java.net.UnknownHostException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
 
-import com.cloudius.urchin.api.APIClient;
+import org.apache.cassandra.metrics.DroppedMessageMetrics;
 
-public final class MessagingService implements MessagingServiceMBean {
+import com.scylladb.jmx.api.APIClient;
+import com.scylladb.jmx.metrics.MetricsMBean;
+
+public final class MessagingService extends MetricsMBean implements MessagingServiceMBean {
     public static final String MBEAN_NAME = "org.apache.cassandra.net:type=MessagingService";
-    private static final java.util.logging.Logger logger = java.util.logging.Logger
-            .getLogger(MessagingService.class.getName());
+    private static final Logger logger = Logger.getLogger(MessagingService.class.getName());
 
-    private APIClient c = new APIClient();
+    private Map<String, Long> resentTimeouts = new HashMap<String, Long>();
+    private long recentTimeoutCount;
 
-    private final ObjectName jmxObjectName;
+    /* All verb handler identifiers */
+    public enum Verb {
+        MUTATION, @Deprecated BINARY, READ_REPAIR, READ, REQUEST_RESPONSE, // client-initiated reads and writes
+        @Deprecated STREAM_INITIATE, @Deprecated STREAM_INITIATE_DONE, @Deprecated STREAM_REPLY, @Deprecated STREAM_REQUEST, RANGE_SLICE, @Deprecated BOOTSTRAP_TOKEN, @Deprecated TREE_REQUEST, @Deprecated TREE_RESPONSE, @Deprecated JOIN, GOSSIP_DIGEST_SYN, GOSSIP_DIGEST_ACK, GOSSIP_DIGEST_ACK2, @Deprecated DEFINITIONS_ANNOUNCE, DEFINITIONS_UPDATE, TRUNCATE, SCHEMA_CHECK, @Deprecated INDEX_SCAN, REPLICATION_FINISHED, INTERNAL_RESPONSE, // responses to internal calls
+        COUNTER_MUTATION, @Deprecated STREAMING_REPAIR_REQUEST, @Deprecated STREAMING_REPAIR_RESPONSE, SNAPSHOT, // Similar to nt snapshot
+        MIGRATION_REQUEST, GOSSIP_SHUTDOWN, _TRACE, // dummy verb so we can use MS.droppedMessages
+        ECHO, REPAIR_MESSAGE,
+        // use as padding for backwards compatibility where a previous version
+        // needs to validate a verb from the future.
+        PAXOS_PREPARE, PAXOS_PROPOSE, PAXOS_COMMIT, PAGED_RANGE,
+        // remember to add new verbs at the end, since we serialize by ordinal
+        UNUSED_1, UNUSED_2, UNUSED_3,;
+    }
 
     public void log(String str) {
-        System.out.println(str);
-        logger.info(str);
+        logger.finest(str);
     }
 
-    public MessagingService() {
-        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
-        try {
-            jmxObjectName = new ObjectName(MBEAN_NAME);
-            mbs.registerMBean(this, jmxObjectName);
-            // mbs.registerMBean(StreamManager.instance, new ObjectName(
-            // StreamManager.OBJECT_NAME));
-        } catch (Exception e) {
-            throw new RuntimeException(e);
-        }
-    }
-
-    static MessagingService instance = new MessagingService();
-
-    public static MessagingService getInstance() {
-        return instance;
+    public MessagingService(APIClient client) {
+        super(MBEAN_NAME, client,
+                Stream.of(Verb.values()).map(v -> new DroppedMessageMetrics(v)).collect(Collectors.toList()));
     }
 
     /**
      * Pending tasks for Command(Mutations, Read etc) TCP Connections
      */
+    @Override
     public Map<String, Integer> getCommandPendingTasks() {
         log(" getCommandPendingTasks()");
-        return c.getMapStringIntegerValue("/messaging_service/messages/pending");
+        return client.getMapStringIntegerValue("/messaging_service/messages/pending");
     }
 
     /**
      * Completed tasks for Command(Mutations, Read etc) TCP Connections
      */
+    @Override
     public Map<String, Long> getCommandCompletedTasks() {
-        System.out.println("getCommandCompletedTasks!");
-        Map<String, Long> res = c
-                .getListMapStringLongValue("/messaging_service/messages/sent");
+        log("getCommandCompletedTasks()");
+        Map<String, Long> res = client.getListMapStringLongValue("/messaging_service/messages/sent");
         return res;
     }
 
     /**
      * Dropped tasks for Command(Mutations, Read etc) TCP Connections
      */
+    @Override
     public Map<String, Long> getCommandDroppedTasks() {
         log(" getCommandDroppedTasks()");
-        return c.getMapStringLongValue("");
+        return client.getMapStringLongValue("/messaging_service/messages/dropped");
     }
 
     /**
      * Pending tasks for Response(GOSSIP & RESPONSE) TCP Connections
      */
+    @Override
     public Map<String, Integer> getResponsePendingTasks() {
         log(" getResponsePendingTasks()");
-        return c.getMapStringIntegerValue("");
+        return client.getMapStringIntegerValue("/messaging_service/messages/respond_pending");
     }
 
     /**
      * Completed tasks for Response(GOSSIP & RESPONSE) TCP Connections
      */
+    @Override
     public Map<String, Long> getResponseCompletedTasks() {
         log(" getResponseCompletedTasks()");
-        return c.getMapStringLongValue("");
+        return client.getMapStringLongValue("/messaging_service/messages/respond_completed");
     }
 
     /**
      * dropped message counts for server lifetime
      */
+    @Override
     public Map<String, Integer> getDroppedMessages() {
         log(" getDroppedMessages()");
-        return c.getMapStringIntegerValue("");
+        Map<String, Integer> res = new HashMap<String, Integer>();
+        JsonArray arr = client.getJsonArray("/messaging_service/messages/dropped_by_ver");
+        for (int i = 0; i < arr.size(); i++) {
+            JsonObject obj = arr.getJsonObject(i);
+            res.put(obj.getString("verb"), obj.getInt("count"));
+        }
+        return res;
     }
 
+    private Map<String, Integer> recent;
+
     /**
      * dropped message counts since last called
      */
+    @Override
     public Map<String, Integer> getRecentlyDroppedMessages() {
         log(" getRecentlyDroppedMessages()");
-        return c.getMapStringIntegerValue("");
+
+        Map<String, Integer> dropped = getDroppedMessages(), result = new HashMap<>(dropped), old = recent;
+
+        recent = dropped;
+
+        if (old != null) {
+            for (Map.Entry<String, Integer> e : old.entrySet()) {
+                result.put(e.getKey(), result.get(e.getKey()) - e.getValue());
+            }
+        }
+
+        return result;
     }
 
     /**
      * Total number of timeouts happened on this node
      */
+    @Override
     public long getTotalTimeouts() {
         log(" getTotalTimeouts()");
-        return c.getLongValue("");
+        Map<String, Long> timeouts = getTimeoutsPerHost();
+        long res = 0;
+        for (Entry<String, Long> t : timeouts.entrySet()) {
+            res += t.getValue();
+        }
+        return res;
     }
 
     /**
      * Number of timeouts per host
      */
+    @Override
     public Map<String, Long> getTimeoutsPerHost() {
         log(" getTimeoutsPerHost()");
-        return c.getMapStringLongValue("");
+        return client.getMapStringLongValue("/messaging_service/messages/timeout");
     }
 
     /**
      * Number of timeouts since last check.
      */
+    @Override
     public long getRecentTotalTimouts() {
         log(" getRecentTotalTimouts()");
-        return c.getLongValue("");
+        long timeoutCount = getTotalTimeouts();
+        long recent = timeoutCount - recentTimeoutCount;
+        recentTimeoutCount = timeoutCount;
+        return recent;
     }
 
     /**
      * Number of timeouts since last check per host.
      */
+    @Override
     public Map<String, Long> getRecentTimeoutsPerHost() {
         log(" getRecentTimeoutsPerHost()");
-        return c.getMapStringLongValue("");
+        Map<String, Long> timeouts = getTimeoutsPerHost();
+        Map<String, Long> result = new HashMap<String, Long>();
+        for (Entry<String, Long> e : timeouts.entrySet()) {
+            long res = e.getValue().longValue()
+                    - ((resentTimeouts.containsKey(e.getKey())) ? (resentTimeouts.get(e.getKey())).longValue() : 0);
+            resentTimeouts.put(e.getKey(), e.getValue());
+            result.put(e.getKey(), res);
+        }
+        return result;
     }
 
+    @Override
     public int getVersion(String address) throws UnknownHostException {
         log(" getVersion(String address) throws UnknownHostException");
-        return c.getIntValue("");
+        return client.getIntValue("");
     }
+
+    @Override
+    public Map<String, Integer> getLargeMessagePendingTasks() {
+        // TODO: implement for realsies
+        return getCommandPendingTasks();
+    }
+
+    @Override
+    public Map<String, Long> getLargeMessageCompletedTasks() {
+        // TODO: implement for realsies
+        return getCommandCompletedTasks();
+    }
+
+    @Override
+    public Map<String, Long> getLargeMessageDroppedTasks() {
+        // TODO: implement for realsies
+        return getCommandDroppedTasks();
+    }
+
+    @Override
+    public Map<String, Integer> getSmallMessagePendingTasks() {
+        // TODO: implement for realsies
+        return getResponsePendingTasks();
+    }
+
+    @Override
+    public Map<String, Long> getSmallMessageCompletedTasks() {
+        // TODO: implement for realsies
+        return getResponseCompletedTasks();
+    }
+
+    @Override
+    public Map<String, Long> getSmallMessageDroppedTasks() {
+        // TODO: implement for realsies
+        return emptyMap();
+    }
+
+    @Override
+    public Map<String, Integer> getGossipMessagePendingTasks() {
+        // TODO: implement for realsies
+        return emptyMap();
+    }
+
+    @Override
+    public Map<String, Long> getGossipMessageCompletedTasks() {
+        // TODO: implement for realsies
+        return emptyMap();
+    }
+
+    @Override
+    public Map<String, Long> getGossipMessageDroppedTasks() {
+        // TODO: implement for realsies
+        return emptyMap();
+    }
 }
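The rewrite also changes how the service is wired up: the old constructor registered itself with the platform MBeanServer, while the new one takes an APIClient and leaves registration to the MetricsMBean base class, which additionally attaches a DroppedMessageMetrics per Verb. A sketch of the new usage (construction and configuration of the APIClient is assumed, not shown by this diff):

    import java.util.Map;

    import org.apache.cassandra.net.MessagingService;

    import com.scylladb.jmx.api.APIClient;

    public class MessagingServiceExample {
        static void dumpRecentDrops(APIClient client) {
            MessagingService service = new MessagingService(client);
            // Delta since the previous call, computed from the lifetime counts above.
            Map<String, Integer> recentlyDropped = service.getRecentlyDroppedMessages();
            recentlyDropped.forEach((verb, count) -> System.out.println(verb + ": " + count));
        }
    }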
Some files were not shown because too many files have changed in this diff.