Speed optimizations

Andrea Cavalli 2021-03-22 20:02:19 +01:00
parent 62b53c1399
commit 3e7ee104ff
8 changed files with 34 additions and 17 deletions


@@ -187,7 +187,7 @@ public class SpeedExample {
return test("MapDictionaryDeep::at::put (same key, same value, " + batchSize + " times)",
tempDb()
.flatMap(db -> db.getDictionary("testmap", UpdateMode.DISALLOW).map(dict -> Tuples.of(db, dict)))
-.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ser, ssg))),
+.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, ssg))),
tuple -> Flux.range(0, batchSize).flatMap(n -> Mono
.defer(() -> Mono
.fromRunnable(() -> {
@@ -214,7 +214,7 @@ public class SpeedExample {
return test("MapDictionaryDeep::putValueAndGetPrevious (same key, same value, " + batchSize + " times)",
tempDb()
.flatMap(db -> db.getDictionary("testmap", UpdateMode.DISALLOW).map(dict -> Tuples.of(db, dict)))
-.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ser, ssg))),
+.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, ssg))),
tuple -> Flux.range(0, batchSize).flatMap(n -> Mono
.defer(() -> Mono
.fromRunnable(() -> {
@@ -233,7 +233,7 @@ public class SpeedExample {
}
private static Mono<Void> testPutValue(int valSize) {
-var ssg = new SubStageGetterSingleBytes();
+var ssg = Serializer.noop();
var ser = SerializerFixedBinaryLength.noop(4);
var itemKey = new byte[]{0, 1, 2, 3};
var newValue = new byte[valSize];
@@ -243,7 +243,7 @@ public class SpeedExample {
return test("MapDictionaryDeep::putValue (same key, same value, " + valSize + " bytes, " + batchSize + " times)",
tempDb()
.flatMap(db -> db.getDictionary("testmap", UpdateMode.DISALLOW).map(dict -> Tuples.of(db, dict)))
-.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ser, ssg))),
+.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, ssg))),
tuple -> Flux.range(0, batchSize).flatMap(n -> Mono
.defer(() -> Mono
.fromRunnable(() -> {
@@ -270,7 +270,7 @@ public class SpeedExample {
return test("MapDictionaryDeep::updateValue (same key, alternating value, " + valSize + " bytes, " + batchSize + " times)",
tempDb()
.flatMap(db -> db.getDictionary("testmap", UpdateMode.ALLOW).map(dict -> Tuples.of(db, dict)))
-.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ser, ssg))),
+.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, ssg))),
tuple -> Flux.range(0, batchSize).flatMap(n -> Mono
.defer(() -> tuple.getT2().updateValue(itemKey, (old) -> {
if (old.isPresent()) {
@@ -297,7 +297,7 @@ public class SpeedExample {
return test("MapDictionaryDeep::putMulti (batch of " + batchSize + " entries)",
tempDb()
.flatMap(db -> db.getDictionary("testmap", UpdateMode.DISALLOW).map(dict -> Tuples.of(db, dict)))
-.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ser, ssg))),
+.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, ssg))),
tuple -> Mono.defer(() -> tuple.getT2().putMulti(putMultiFlux)),
numRepeats,
tuple -> Mono

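The six hunks above switch every benchmark from DatabaseMapDictionaryDeep.simple(...) to DatabaseMapDictionary.simple(...), with a plain no-op value serializer replacing the SubStageGetterSingleBytes sub-stage getter, so each test measures the flat single-level dictionary directly. The repeat-the-operation-N-times shape they rely on is plain Reactor; below is a minimal standalone sketch of that timing pattern, assuming only reactor-core on the classpath (doSomething is a hypothetical stand-in for the dictionary call being measured):

import java.time.Duration;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public class RepeatBenchSketch {
    static void doSomething() { /* hypothetical stand-in for dict.putValue(key, value) */ }

    public static void main(String[] args) {
        int batchSize = 10_000;
        long start = System.nanoTime();
        // Same shape as the tests above: defer each run so the work happens
        // per subscription, then repeat it batchSize times.
        Flux.range(0, batchSize)
                .flatMap(n -> Mono.defer(() -> Mono.fromRunnable(RepeatBenchSketch::doSomething)))
                .blockLast(Duration.ofMinutes(5));
        long elapsedMs = (System.nanoTime() - start) / 1_000_000;
        System.out.println(batchSize + " runs in " + elapsedMs + " ms");
    }
}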

@@ -178,16 +178,26 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
public Flux<Entry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot) {
return dictionary
.getRangeKeys(resolveSnapshot(snapshot), range)
-.map(keySuffix -> Map.entry(deserializeSuffix(stripPrefix(keySuffix)),
+.map(key -> Map.entry(deserializeSuffix(stripPrefix(key)),
new DatabaseSingleMapped<>(
new DatabaseSingle<>(dictionary,
-toKey(stripPrefix(keySuffix)),
+toKey(stripPrefix(key)),
Serializer.noop()),
valueSerializer
)
));
}
+@Override
+public Flux<Entry<T, U>> getAllValues(@Nullable CompositeSnapshot snapshot) {
+return dictionary
+.getRange(resolveSnapshot(snapshot), range)
+.map(serializedEntry -> Map.entry(
+deserializeSuffix(stripPrefix(serializedEntry.getKey())),
+valueSerializer.deserialize(serializedEntry.getValue())
+));
+}
@Override
public Flux<Entry<T, U>> setAllValuesAndGetPrevious(Flux<Entry<T, U>> entries) {
return dictionary

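The new getAllValues override streams keys and values out of a single getRange scan and deserializes them inline, instead of falling back to the inherited path that builds a DatabaseSingle stage per key and issues a separate point read for each value. A minimal sketch of that single-pass shape, assuming reactor-core, with hypothetical deserializeKey/deserializeValue helpers standing in for the suffix and value serializers:

import java.util.Map;
import reactor.core.publisher.Flux;

public class RangeScanSketch {
    static String deserializeKey(byte[] k) { return new String(k); }   // hypothetical helper
    static String deserializeValue(byte[] v) { return new String(v); } // hypothetical helper

    // One range scan yields both halves of each entry; no second read per key.
    static Flux<Map.Entry<String, String>> getAllValues(Flux<Map.Entry<byte[], byte[]>> range) {
        return range.map(e -> Map.entry(deserializeKey(e.getKey()), deserializeValue(e.getValue())));
    }

    public static void main(String[] args) {
        Flux<Map.Entry<byte[], byte[]>> range =
                Flux.just(Map.entry("k1".getBytes(), "v1".getBytes()));
        getAllValues(range).subscribe(e -> System.out.println(e.getKey() + " -> " + e.getValue()));
    }
}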

@@ -614,6 +614,7 @@ public class LLLocalDictionary implements LLDictionary {
prefixLength,
range,
resolveSnapshot(snapshot),
+true,
"getRangeKeysGrouped"
).flux().subscribeOn(dbScheduler);
}
@@ -746,7 +747,7 @@ public class LLLocalDictionary implements LLDictionary {
// readOpts.setIgnoreRangeDeletions(true);
readOpts.setFillCache(false);
-readOpts.setReadaheadSize(2 * 1024 * 1024);
+//readOpts.setReadaheadSize(2 * 1024 * 1024);
try (CappedWriteBatch writeBatch = new CappedWriteBatch(db,
CAPPED_WRITE_BATCH_CAP,
RESERVED_WRITE_BATCH_SIZE,
@@ -925,7 +926,7 @@ public class LLLocalDictionary implements LLDictionary {
private long exactSizeAll(@Nullable LLSnapshot snapshot) {
var readOpts = resolveSnapshot(snapshot);
readOpts.setFillCache(false);
-readOpts.setReadaheadSize(2 * 1024 * 1024);
+//readOpts.setReadaheadSize(2 * 1024 * 1024);
readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED);
if (PARALLEL_EXACT_SIZE) {

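These hunks pass true for the grouped-key iterator's new canFillCache parameter (introduced further down) and comment out the 2 MiB readahead hint in both the batched-write scan and the exact-size scan. Both knobs live on RocksDB's ReadOptions; a minimal sketch of the two settings, assuming rocksdbjni on the classpath:

import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;

public class ReadOptionsSketch {
    public static void main(String[] args) {
        RocksDB.loadLibrary();
        try (ReadOptions readOpts = new ReadOptions()) {
            // Bulk scans should not evict hot data from the block cache.
            readOpts.setFillCache(false);
            // Readahead helps long sequential scans on spinning disks, but it
            // costs extra I/O when the scan is short or already cached; this
            // commit disables it rather than paying that cost everywhere.
            // readOpts.setReadaheadSize(2 * 1024 * 1024);
        }
    }
}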

@@ -16,7 +16,7 @@ public class LLLocalGroupedEntryReactiveRocksIterator extends
LLRange range,
ReadOptions readOptions,
String debugName) {
-super(db, cfh, prefixLength, range, readOptions, true, debugName);
+super(db, cfh, prefixLength, range, readOptions, false, true, debugName);
}
@Override


@@ -13,7 +13,7 @@ public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReacti
LLRange range,
ReadOptions readOptions,
String debugName) {
-super(db, cfh, prefixLength, range, readOptions, false, debugName);
+super(db, cfh, prefixLength, range, readOptions, true, false, debugName);
}
@Override


@@ -22,6 +22,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
private final int prefixLength;
private final LLRange range;
private final ReadOptions readOptions;
+private final boolean canFillCache;
private final boolean readValues;
private final String debugName;
@@ -30,6 +31,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
int prefixLength,
LLRange range,
ReadOptions readOptions,
+boolean canFillCache,
boolean readValues,
String debugName) {
this.db = db;
@@ -37,6 +39,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
this.prefixLength = prefixLength;
this.range = range;
this.readOptions = readOptions;
+this.canFillCache = canFillCache;
this.readValues = readValues;
this.debugName = debugName;
}
@@ -47,7 +50,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
return Flux
.generate(() -> {
var readOptions = new ReadOptions(this.readOptions);
-readOptions.setFillCache(range.hasMin() && range.hasMax());
+readOptions.setFillCache(canFillCache && range.hasMin() && range.hasMax());
Slice sliceMin;
Slice sliceMax;
if (range.hasMin()) {

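Previously the grouped iterator derived fillCache purely from whether the range was bounded; the new canFillCache constructor flag lets each call site veto caching outright (the entry iterator above passes false, the key iterator passes true). A minimal sketch of that gating, assuming rocksdbjni, with a hypothetical Range record standing in for the library's LLRange:

import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;

public class FillCacheGateSketch {
    // Hypothetical stand-in for the library's LLRange.
    record Range(boolean hasMin, boolean hasMax) {}

    static ReadOptions forScan(ReadOptions base, Range range, boolean canFillCache) {
        ReadOptions readOptions = new ReadOptions(base);
        // Cache blocks only when the caller allows it AND the range is
        // bounded, so unbounded scans can never flood the block cache.
        readOptions.setFillCache(canFillCache && range.hasMin() && range.hasMax());
        return readOptions;
    }

    public static void main(String[] args) {
        RocksDB.loadLibrary();
        try (ReadOptions base = new ReadOptions();
             ReadOptions scan = forScan(base, new Range(true, true), true)) {
            System.out.println("fillCache = " + scan.fillCache());
        }
    }
}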

@@ -20,6 +20,7 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
private final int prefixLength;
private final LLRange range;
private final ReadOptions readOptions;
+private final boolean canFillCache;
private final String debugName;
public LLLocalKeyPrefixReactiveRocksIterator(RocksDB db,
@@ -27,12 +28,14 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
int prefixLength,
LLRange range,
ReadOptions readOptions,
+boolean canFillCache,
String debugName) {
this.db = db;
this.cfh = cfh;
this.prefixLength = prefixLength;
this.range = range;
this.readOptions = readOptions;
+this.canFillCache = canFillCache;
this.debugName = debugName;
}
@@ -42,8 +45,8 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
.generate(() -> {
var readOptions = new ReadOptions(this.readOptions);
if (!range.hasMin() || !range.hasMax()) {
-readOptions.setReadaheadSize(2 * 1024 * 1024);
-readOptions.setFillCache(false);
+//readOptions.setReadaheadSize(2 * 1024 * 1024);
+readOptions.setFillCache(canFillCache);
}
Slice sliceMin;
Slice sliceMax;

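For the prefix iterator the unbounded-range branch changes behavior: instead of forcing fillCache(false) plus 2 MiB of readahead, it now honors the caller's canFillCache choice and skips readahead. A minimal sketch of the new branch, assuming rocksdbjni, with the same hypothetical Range stand-in as above:

import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;

public class UnboundedScanSketch {
    record Range(boolean hasMin, boolean hasMax) {} // hypothetical stand-in for LLRange

    static void tuneForPrefixScan(ReadOptions readOptions, Range range, boolean canFillCache) {
        if (!range.hasMin() || !range.hasMax()) {
            // Old behavior: force fillCache(false) and set 2 MiB readahead here.
            // New behavior: no readahead, and caching follows the caller's flag.
            readOptions.setFillCache(canFillCache);
        }
    }

    public static void main(String[] args) {
        RocksDB.loadLibrary();
        try (ReadOptions readOptions = new ReadOptions()) {
            tuneForPrefixScan(readOptions, new Range(false, true), true);
            System.out.println("fillCache = " + readOptions.fillCache());
        }
    }
}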

@@ -91,7 +91,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
if (lowMemory) {
this.dbScheduler = lowMemorySupplier.get();
} else {
-this.dbScheduler = Schedulers.newBoundedElastic(6,
+this.dbScheduler = Schedulers.newBoundedElastic(Math.max(8, Runtime.getRuntime().availableProcessors()),
Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE,
"db-" + name,
60,
@@ -209,7 +209,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
//options.setUseDirectReads(true);
//options.setUseDirectIoForFlushAndCompaction(true);
//options.setWritableFileMaxBufferSize(1024 * 1024); // 1MB by default
-options.setCompactionReadaheadSize(2 * 1024 * 1024); // recommend at least 2MB
+//options.setCompactionReadaheadSize(2 * 1024 * 1024); // recommend at least 2MB
final BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
if (lowMemory) {
// LOW MEMORY
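
The scheduler that runs blocking database calls grows from a fixed 6 threads to at least 8, scaling with the host's core count, and compaction readahead is commented out alongside the other readahead hints. A minimal sketch of that scheduler setup, assuming reactor-core; "db-example" is a placeholder name:

import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class DbSchedulerSketch {
    public static void main(String[] args) {
        // Cap worker threads at max(8, CPU count) so blocking RocksDB calls
        // can proceed in parallel on large machines instead of queueing
        // behind a fixed pool of 6.
        Scheduler dbScheduler = Schedulers.newBoundedElastic(
                Math.max(8, Runtime.getRuntime().availableProcessors()),
                Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE,
                "db-example",
                60,    // idle thread TTL in seconds
                true); // daemon threads
        String result = Mono.fromCallable(() -> "result of a blocking db call")
                .subscribeOn(dbScheduler)
                .block();
        System.out.println(result);
        dbScheduler.dispose();
    }
}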