Fix metrics

parent d4488a5042
commit 72fbd17768
@@ -97,7 +97,6 @@ public class EmbeddedDB implements RocksDBSyncAPI, InternalConnection, Closeable
 	private final MetricsManager metrics;
 	private final String name;
 	private final List<Meter> meters = new ArrayList<>();
-	private final Timer loadTimer;
 	private final Timer openTransactionTimer;
 	private final Timer closeTransactionTimer;
 	private final Timer closeFailedUpdateTimer;
@@ -114,6 +113,7 @@ public class EmbeddedDB implements RocksDBSyncAPI, InternalConnection, Closeable
 	private final Timer subsequentTimer;
 	private final Timer reduceRangeTimer;
 	private final Timer getRangeTimer;
+	private final RocksDBStatistics rocksDBStatistics;
 	private Path tempSSTsPath;
 
 	public EmbeddedDB(@Nullable Path path, String name, @Nullable Path embeddedConfigPath) throws IOException {
@@ -128,7 +128,7 @@ public class EmbeddedDB implements RocksDBSyncAPI, InternalConnection, Closeable
 		DatabaseConfig config = ConfigParser.parse(embeddedConfigPath);
 
 		this.metrics = new MetricsManager(config);
-		this.loadTimer = createTimer(Tags.empty());
+		Timer loadTimer = createTimer(Tags.of("action", "load"));
 		this.openTransactionTimer = createActionTimer(OpenTransaction.class);
 		this.closeTransactionTimer = createActionTimer(CloseTransaction.class);
 		this.closeFailedUpdateTimer = createActionTimer(CloseFailedUpdate.class);
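Note on the change above: the load timer now carries an explicit "action" tag instead of an empty tag set, so it can be told apart from the per-action timers in the same registry. The helpers createTimer and createActionTimer are defined elsewhere in EmbeddedDB and are not part of this diff; the following is only a hedged sketch, assuming they wrap Micrometer's Timer builder, register against the MetricsManager registry, and track the resulting meter so close() can release it. The metric name "rockserver.requests" is invented here for illustration.

// Hypothetical sketch of the helpers used above; names and details are assumptions,
// not the actual EmbeddedDB implementation.
private Timer createTimer(Tags tags) {
	var timer = Timer
			.builder("rockserver.requests")    // assumed metric name
			.tag("database", name)
			.tags(tags)                        // e.g. Tags.of("action", "load")
			.register(metrics.getRegistry());
	meters.add(timer);                         // tracked so close() can release it
	return timer;
}

private Timer createActionTimer(Class<?> action) {
	return createTimer(Tags.of("action", action.getSimpleName()));
}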
@@ -153,6 +153,7 @@ public class EmbeddedDB implements RocksDBSyncAPI, InternalConnection, Closeable
 		this.dbOptions = loadedDb.dbOptions();
 		this.refs = loadedDb.refs();
 		this.cache = loadedDb.cache();
+		this.rocksDBStatistics = new RocksDBStatistics(name, dbOptions.statistics(), metrics, cache);
 		try {
 			int readCap = Objects.requireNonNullElse(config.parallelism().read(), Runtime.getRuntime().availableProcessors());
 			int writeCap = Objects.requireNonNullElse(config.parallelism().write(), Runtime.getRuntime().availableProcessors());
@@ -345,6 +346,7 @@ public class EmbeddedDB implements RocksDBSyncAPI, InternalConnection, Closeable
 		for (Meter meter : meters) {
 			meter.close();
 		}
+		rocksDBStatistics.close();
 		if (metrics != null) {
 			metrics.close();
 		}
@@ -0,0 +1,94 @@
+package it.cavallium.rockserver.core.impl;
+
+import io.micrometer.core.instrument.Counter;
+import io.micrometer.core.instrument.MultiGauge;
+import io.micrometer.core.instrument.MultiGauge.Row;
+import io.micrometer.core.instrument.Tags;
+import io.micrometer.core.instrument.util.NamedThreadFactory;
+import java.util.Arrays;
+import java.util.EnumMap;
+import java.util.List;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+import org.jetbrains.annotations.Nullable;
+import org.rocksdb.Cache;
+import org.rocksdb.HistogramType;
+import org.rocksdb.Statistics;
+import org.rocksdb.TickerType;
+
+public class RocksDBStatistics {
+
+	private final Statistics statistics;
+	private final MetricsManager metrics;
+	private final EnumMap<TickerType, Counter> tickerMap;
+	private final EnumMap<HistogramType, MultiGauge> histogramMap;
+	private final ScheduledExecutorService executor;
+	private final ScheduledFuture<?> scheduledTask;
+	private final MultiGauge cacheStats;
+
+	public RocksDBStatistics(String name, Statistics statistics, MetricsManager metrics, @Nullable Cache cache) {
+		this.statistics = statistics;
+		this.metrics = metrics;
+		this.tickerMap = new EnumMap<>(Arrays
+				.stream(TickerType.values())
+				.collect(Collectors.toMap(Function.identity(),
+						tickerType -> io.micrometer.core.instrument.Counter
+								.builder("rocksdb.statistics")
+								.tag("database", name)
+								.tag("ticker_name", tickerType.name())
+								.register(metrics.getRegistry())
+				)));
+		this.histogramMap = new EnumMap<>(Arrays
+				.stream(HistogramType.values())
+				.collect(Collectors.toMap(Function.identity(),
+						histogramType -> MultiGauge
+								.builder("rocksdb.statistics")
+								.tag("database", name)
+								.tag("histogram_name", histogramType.name())
+								.register(metrics.getRegistry())
+				)));
+		this.cacheStats = MultiGauge
+				.builder("rocksdb.cache")
+				.tag("database", name)
+				.register(metrics.getRegistry());
+
+		this.executor = Executors.newSingleThreadScheduledExecutor(new NamedThreadFactory("rocksdb-statistics"));
+		this.scheduledTask = executor.scheduleAtFixedRate(() -> {
+			tickerMap.forEach(((tickerType, counter) -> {
+				var tickerCount = statistics.getAndResetTickerCount(tickerType);
+				counter.increment(tickerCount);
+			}));
+
+			histogramMap.forEach((histogramType, multiGauge) -> {
+				var histogramData = statistics.getHistogramData(histogramType);
+				multiGauge.register(List.of(
+						Row.of(Tags.of("field", "average"), histogramData.getAverage()),
+						Row.of(Tags.of("field", "count"), histogramData.getCount()),
+						Row.of(Tags.of("field", "max"), histogramData.getMax()),
+						Row.of(Tags.of("field", "min"), histogramData.getMin()),
+						Row.of(Tags.of("field", "median"), histogramData.getMedian()),
+						Row.of(Tags.of("field", "percentile95"), histogramData.getPercentile95()),
+						Row.of(Tags.of("field", "percentile99"), histogramData.getPercentile99()),
+						Row.of(Tags.of("field", "standard_deviation"), histogramData.getStandardDeviation()),
+						Row.of(Tags.of("field", "sum"), histogramData.getSum())
+				), true);
+			});
+
+			if (cache != null) {
+				cacheStats.register(List.of(
+						Row.of(Tags.of("field", "usage"), cache.getUsage()),
+						Row.of(Tags.of("field", "pinned_usage"), cache.getPinnedUsage())
+				), true);
+			}
+		}, 10, 60, TimeUnit.SECONDS);
+	}
+
+	public void close() {
+		scheduledTask.cancel(false);
+		executor.close();
+	}
+}
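A brief note on the polling pattern in the new class: every 60 seconds (after a 10-second initial delay) the task drains each RocksDB ticker with getAndResetTickerCount and feeds the delta into a monotonic Micrometer Counter, while histogram and cache figures are re-published as MultiGauge rows with overwrite = true, so each poll replaces the previous snapshot. The sketch below is a minimal, self-contained illustration of one poll of that same pattern against a plain SimpleMeterRegistry; it is not part of the commit, and the registry wiring differs from MetricsManager.

package example;

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.MultiGauge;
import io.micrometer.core.instrument.MultiGauge.Row;
import io.micrometer.core.instrument.Tags;
import io.micrometer.core.instrument.simple.SimpleMeterRegistry;
import java.util.List;
import org.rocksdb.HistogramType;
import org.rocksdb.RocksDB;
import org.rocksdb.Statistics;
import org.rocksdb.TickerType;

public class StatisticsPollExample {

	public static void main(String[] args) {
		RocksDB.loadLibrary();
		var registry = new SimpleMeterRegistry();
		// In the commit, the Statistics handle comes from the options built in RocksDBLoader.
		try (var statistics = new Statistics()) {
			// One ticker drained into a monotonic counter, as RocksDBStatistics does per TickerType.
			Counter blockCacheMiss = Counter.builder("rocksdb.statistics")
					.tag("database", "example")
					.tag("ticker_name", TickerType.BLOCK_CACHE_MISS.name())
					.register(registry);
			blockCacheMiss.increment(statistics.getAndResetTickerCount(TickerType.BLOCK_CACHE_MISS));

			// One histogram re-published as gauge rows; overwrite = true replaces the previous rows.
			MultiGauge getHistogram = MultiGauge.builder("rocksdb.statistics")
					.tag("database", "example")
					.tag("histogram_name", HistogramType.DB_GET.name())
					.register(registry);
			var data = statistics.getHistogramData(HistogramType.DB_GET);
			getHistogram.register(List.of(
					Row.of(Tags.of("field", "average"), data.getAverage()),
					Row.of(Tags.of("field", "percentile99"), data.getPercentile99())
			), true);

			registry.getMeters().forEach(meter -> System.out.println(meter.getId()));
		}
	}
}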
@@ -356,6 +356,12 @@ public class RocksDBLoader {
 		refs.add(options);
 		options.setParanoidChecks(PARANOID_CHECKS);
 		options.setSkipCheckingSstFileSizesOnDbOpen(true);
+
+		var statistics = new Statistics();
+		refs.add(statistics);
+		statistics.setStatsLevel(StatsLevel.EXCEPT_TIME_FOR_MUTEX);
+		options.setStatistics(statistics);
+
 		if (!databaseOptions.global().unorderedWrite()) {
 			options.setEnablePipelinedWrite(true);
 		}
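The Statistics object attached here is the same handle that EmbeddedDB later hands to RocksDBStatistics via dbOptions.statistics(). EXCEPT_TIME_FOR_MUTEX collects tickers and histograms but skips the stats that would require timing work inside the DB mutex, which keeps the overhead low. Below is a hedged, standalone sketch of the same wiring against a directly opened database; the path and the example ticker are made up for illustration and are not part of the commit.

package example;

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.Statistics;
import org.rocksdb.StatsLevel;
import org.rocksdb.TickerType;

public class StatisticsWiringExample {

	public static void main(String[] args) throws RocksDBException {
		RocksDB.loadLibrary();
		try (var options = new Options().setCreateIfMissing(true);
				var statistics = new Statistics()) {
			statistics.setStatsLevel(StatsLevel.EXCEPT_TIME_FOR_MUTEX);
			options.setStatistics(statistics);
			// "/tmp/statistics-example" is an arbitrary path used only for this sketch.
			try (var db = RocksDB.open(options, "/tmp/statistics-example")) {
				db.put("key".getBytes(), "value".getBytes());
				db.get("key".getBytes());
				// The same counters that RocksDBStatistics drains periodically.
				System.out.println("memtable hits: " + statistics.getTickerCount(TickerType.MEMTABLE_HIT));
			}
		}
	}
}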