Speed optimizations

commit 3e7ee104ff (parent 62b53c1399)
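In summary: the SpeedExample benchmarks now exercise the flat DatabaseMapDictionary instead of DatabaseMapDictionaryDeep; DatabaseMapDictionary gains a getAllValues method that deserializes entries from a single range scan; the RocksDB iterators gain a canFillCache flag so unbounded scans stay out of the block cache; forced 2 MiB readahead is dropped from scans and explicit compaction readahead is disabled; and the database scheduler is sized to the machine's core count instead of a fixed 6 threads.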
SpeedExample.java

@@ -187,7 +187,7 @@ public class SpeedExample {
 		return test("MapDictionaryDeep::at::put (same key, same value, " + batchSize + " times)",
 				tempDb()
 						.flatMap(db -> db.getDictionary("testmap", UpdateMode.DISALLOW).map(dict -> Tuples.of(db, dict)))
-						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ser, ssg))),
+						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, ssg))),
 				tuple -> Flux.range(0, batchSize).flatMap(n -> Mono
 						.defer(() -> Mono
 								.fromRunnable(() -> {
@@ -214,7 +214,7 @@ public class SpeedExample {
 		return test("MapDictionaryDeep::putValueAndGetPrevious (same key, same value, " + batchSize + " times)",
 				tempDb()
 						.flatMap(db -> db.getDictionary("testmap", UpdateMode.DISALLOW).map(dict -> Tuples.of(db, dict)))
-						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ser, ssg))),
+						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, ssg))),
 				tuple -> Flux.range(0, batchSize).flatMap(n -> Mono
 						.defer(() -> Mono
 								.fromRunnable(() -> {
@@ -233,7 +233,7 @@ public class SpeedExample {
 	}
 
 	private static Mono<Void> testPutValue(int valSize) {
-		var ssg = new SubStageGetterSingleBytes();
+		var ssg = Serializer.noop();
 		var ser = SerializerFixedBinaryLength.noop(4);
 		var itemKey = new byte[]{0, 1, 2, 3};
 		var newValue = new byte[valSize];
@@ -243,7 +243,7 @@ public class SpeedExample {
 		return test("MapDictionaryDeep::putValue (same key, same value, " + valSize + " bytes, " + batchSize + " times)",
 				tempDb()
 						.flatMap(db -> db.getDictionary("testmap", UpdateMode.DISALLOW).map(dict -> Tuples.of(db, dict)))
-						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ser, ssg))),
+						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, ssg))),
 				tuple -> Flux.range(0, batchSize).flatMap(n -> Mono
 						.defer(() -> Mono
 								.fromRunnable(() -> {
@@ -270,7 +270,7 @@ public class SpeedExample {
 		return test("MapDictionaryDeep::updateValue (same key, alternating value, " + valSize + " bytes, " + batchSize + " times)",
 				tempDb()
 						.flatMap(db -> db.getDictionary("testmap", UpdateMode.ALLOW).map(dict -> Tuples.of(db, dict)))
-						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ser, ssg))),
+						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, ssg))),
 				tuple -> Flux.range(0, batchSize).flatMap(n -> Mono
 						.defer(() -> tuple.getT2().updateValue(itemKey, (old) -> {
 							if (old.isPresent()) {
@@ -297,7 +297,7 @@ public class SpeedExample {
 		return test("MapDictionaryDeep::putMulti (batch of " + batchSize + " entries)",
 				tempDb()
 						.flatMap(db -> db.getDictionary("testmap", UpdateMode.DISALLOW).map(dict -> Tuples.of(db, dict)))
-						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ser, ssg))),
+						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, ssg))),
 				tuple -> Mono.defer(() -> tuple.getT2().putMulti(putMultiFlux)),
 				numRepeats,
 				tuple -> Mono
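Note: every benchmark body above uses the same Reactor pattern, Flux.range(0, batchSize).flatMap(n -> Mono.defer(() -> Mono.fromRunnable(...))), so the measured operation is rebuilt and re-executed once per element. A minimal, self-contained sketch of that pattern (the counter and class name here are illustrative, not from the repository):

import java.util.concurrent.atomic.AtomicLong;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public class DeferBenchSketch {
	public static void main(String[] args) {
		int batchSize = 1000;
		AtomicLong counter = new AtomicLong();
		Flux.range(0, batchSize)
				// Mono.defer re-creates the inner Mono per element, so the
				// Runnable runs once for each of the batchSize subscriptions.
				.flatMap(n -> Mono.defer(() -> Mono.fromRunnable(counter::incrementAndGet)))
				.blockLast();
		System.out.println("executed " + counter.get() + " operations");
	}
}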
DatabaseMapDictionary.java

@@ -178,16 +178,26 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U, DatabaseStageEntry<U>> {
 	public Flux<Entry<T, DatabaseStageEntry<U>>> getAllStages(@Nullable CompositeSnapshot snapshot) {
 		return dictionary
 				.getRangeKeys(resolveSnapshot(snapshot), range)
-				.map(keySuffix -> Map.entry(deserializeSuffix(stripPrefix(keySuffix)),
+				.map(key -> Map.entry(deserializeSuffix(stripPrefix(key)),
 						new DatabaseSingleMapped<>(
 								new DatabaseSingle<>(dictionary,
-										toKey(stripPrefix(keySuffix)),
+										toKey(stripPrefix(key)),
 										Serializer.noop()),
 								valueSerializer
 						)
 				));
 	}
 
+	@Override
+	public Flux<Entry<T, U>> getAllValues(@Nullable CompositeSnapshot snapshot) {
+		return dictionary
+				.getRange(resolveSnapshot(snapshot), range)
+				.map(serializedEntry -> Map.entry(
+						deserializeSuffix(stripPrefix(serializedEntry.getKey())),
+						valueSerializer.deserialize(serializedEntry.getValue())
+				));
+	}
+
 	@Override
 	public Flux<Entry<T, U>> setAllValuesAndGetPrevious(Flux<Entry<T, U>> entries) {
 		return dictionary
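The new getAllValues streams complete entries from one getRange scan and deserializes key and value in a single map step, instead of materializing a DatabaseSingle stage per key the way getAllStages does. A self-contained sketch of that mapping shape (the byte layouts and deserializers here are illustrative stand-ins, not the repository's Serializer API):

import java.nio.charset.StandardCharsets;
import java.util.Map;
import reactor.core.publisher.Flux;

public class GetAllValuesSketch {
	// Illustrative stand-in for deserializeSuffix: 4-byte big-endian int keys.
	static int deserializeKey(byte[] b) {
		return ((b[0] & 0xFF) << 24) | ((b[1] & 0xFF) << 16) | ((b[2] & 0xFF) << 8) | (b[3] & 0xFF);
	}

	public static void main(String[] args) {
		// Stand-in for dictionary.getRange(...): raw key/value pairs.
		Flux<Map.Entry<byte[], byte[]>> range = Flux.just(
				Map.entry(new byte[] {0, 0, 0, 1}, "one".getBytes(StandardCharsets.UTF_8)),
				Map.entry(new byte[] {0, 0, 0, 2}, "two".getBytes(StandardCharsets.UTF_8)));
		// Same shape as getAllValues: deserialize both sides in one map step,
		// with no per-key read back into the database.
		range.map(e -> Map.entry(deserializeKey(e.getKey()), new String(e.getValue(), StandardCharsets.UTF_8)))
				.doOnNext(e -> System.out.println(e.getKey() + " -> " + e.getValue()))
				.blockLast();
	}
}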
LLLocalDictionary.java

@@ -614,6 +614,7 @@ public class LLLocalDictionary implements LLDictionary {
 				prefixLength,
 				range,
 				resolveSnapshot(snapshot),
+				true,
 				"getRangeKeysGrouped"
 		).flux().subscribeOn(dbScheduler);
 	}
@@ -746,7 +747,7 @@ public class LLLocalDictionary implements LLDictionary {
 
 			// readOpts.setIgnoreRangeDeletions(true);
 			readOpts.setFillCache(false);
-			readOpts.setReadaheadSize(2 * 1024 * 1024);
+			//readOpts.setReadaheadSize(2 * 1024 * 1024);
 			try (CappedWriteBatch writeBatch = new CappedWriteBatch(db,
 					CAPPED_WRITE_BATCH_CAP,
 					RESERVED_WRITE_BATCH_SIZE,
@@ -925,7 +926,7 @@ public class LLLocalDictionary implements LLDictionary {
 	private long exactSizeAll(@Nullable LLSnapshot snapshot) {
 		var readOpts = resolveSnapshot(snapshot);
 		readOpts.setFillCache(false);
-		readOpts.setReadaheadSize(2 * 1024 * 1024);
+		//readOpts.setReadaheadSize(2 * 1024 * 1024);
 		readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED);
 
 		if (PARALLEL_EXACT_SIZE) {
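Both scans above keep fillCache off but no longer force a 2 MiB readahead, leaving that to RocksDB's own heuristics. A small self-contained sketch of the resulting ReadOptions shape (assumes rocksdbjni on the classpath; the values are the ones from the code above):

import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;

public class ScanReadOptionsSketch {
	public static void main(String[] args) {
		RocksDB.loadLibrary();
		try (ReadOptions readOpts = new ReadOptions()) {
			readOpts.setFillCache(false); // bulk scans must not evict hot blocks
			//readOpts.setReadaheadSize(2 * 1024 * 1024); // disabled by this commit
			System.out.println("fillCache = " + readOpts.fillCache()
					+ ", readahead = " + readOpts.readaheadSize());
		}
	}
}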
LLLocalGroupedEntryReactiveRocksIterator.java

@@ -16,7 +16,7 @@ public class LLLocalGroupedEntryReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator
 			LLRange range,
 			ReadOptions readOptions,
 			String debugName) {
-		super(db, cfh, prefixLength, range, readOptions, true, debugName);
+		super(db, cfh, prefixLength, range, readOptions, false, true, debugName);
 	}
 
 	@Override
LLLocalGroupedKeyReactiveRocksIterator.java

@@ -13,7 +13,7 @@ public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator
 			LLRange range,
 			ReadOptions readOptions,
 			String debugName) {
-		super(db, cfh, prefixLength, range, readOptions, false, debugName);
+		super(db, cfh, prefixLength, range, readOptions, true, false, debugName);
 	}
 
 	@Override
LLLocalGroupedReactiveRocksIterator.java

@@ -22,6 +22,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
 	private final int prefixLength;
 	private final LLRange range;
 	private final ReadOptions readOptions;
+	private final boolean canFillCache;
 	private final boolean readValues;
 	private final String debugName;
 
@@ -30,6 +31,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
 			int prefixLength,
 			LLRange range,
 			ReadOptions readOptions,
+			boolean canFillCache,
 			boolean readValues,
 			String debugName) {
 		this.db = db;
@@ -37,6 +39,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
 		this.prefixLength = prefixLength;
 		this.range = range;
 		this.readOptions = readOptions;
+		this.canFillCache = canFillCache;
 		this.readValues = readValues;
 		this.debugName = debugName;
 	}
@@ -47,7 +50,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
 		return Flux
 				.generate(() -> {
 					var readOptions = new ReadOptions(this.readOptions);
-					readOptions.setFillCache(range.hasMin() && range.hasMax());
+					readOptions.setFillCache(canFillCache && range.hasMin() && range.hasMax());
 					Slice sliceMin;
 					Slice sliceMax;
 					if (range.hasMin()) {
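The gating logic is in one place: an iterator may populate the block cache only when its subclass opted in (canFillCache) and the range is bounded on both ends, so unbounded grouped scans stay out of the cache. A standalone sketch of the same condition (assumes rocksdbjni on the classpath; hasMin/hasMax are stand-ins for LLRange's accessors):

import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;

public class FillCacheGateSketch {
	public static void main(String[] args) {
		RocksDB.loadLibrary();
		boolean canFillCache = false;          // e.g. the grouped-entry iterator passes false
		boolean hasMin = true, hasMax = false; // stand-ins for range.hasMin()/range.hasMax()
		try (ReadOptions readOptions = new ReadOptions()) {
			// Cache only bounded ranges, and only when the caller opted in.
			readOptions.setFillCache(canFillCache && hasMin && hasMax);
			System.out.println("fillCache = " + readOptions.fillCache());
		}
	}
}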
LLLocalKeyPrefixReactiveRocksIterator.java

@@ -20,6 +20,7 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
 	private final int prefixLength;
 	private final LLRange range;
 	private final ReadOptions readOptions;
+	private final boolean canFillCache;
 	private final String debugName;
 
 	public LLLocalKeyPrefixReactiveRocksIterator(RocksDB db,
@@ -27,12 +28,14 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
 			int prefixLength,
 			LLRange range,
 			ReadOptions readOptions,
+			boolean canFillCache,
 			String debugName) {
 		this.db = db;
 		this.cfh = cfh;
 		this.prefixLength = prefixLength;
 		this.range = range;
 		this.readOptions = readOptions;
+		this.canFillCache = canFillCache;
 		this.debugName = debugName;
 	}
 
@@ -42,8 +45,8 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
 				.generate(() -> {
 					var readOptions = new ReadOptions(this.readOptions);
 					if (!range.hasMin() || !range.hasMax()) {
-						readOptions.setReadaheadSize(2 * 1024 * 1024);
-						readOptions.setFillCache(false);
+						//readOptions.setReadaheadSize(2 * 1024 * 1024);
+						readOptions.setFillCache(canFillCache);
 					}
 					Slice sliceMin;
 					Slice sliceMax;
LLLocalKeyValueDatabase.java

@@ -91,7 +91,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
 		if (lowMemory) {
 			this.dbScheduler = lowMemorySupplier.get();
 		} else {
-			this.dbScheduler = Schedulers.newBoundedElastic(6,
+			this.dbScheduler = Schedulers.newBoundedElastic(Math.max(8, Runtime.getRuntime().availableProcessors()),
 					Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE,
 					"db-" + name,
 					60,
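The fixed six-thread cap becomes core-count-aware: at least 8 workers, and one per available processor on larger machines. A self-contained sketch of the same sizing rule using Reactor's four-argument newBoundedElastic overload (the scheduler name here is illustrative):

import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class DbSchedulerSketch {
	public static void main(String[] args) {
		// Same sizing rule as the change above.
		int threadCap = Math.max(8, Runtime.getRuntime().availableProcessors());
		Scheduler dbScheduler = Schedulers.newBoundedElastic(threadCap,
				Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE,
				"db-example", // illustrative name
				60);          // TTL in seconds for idle workers
		System.out.println("bounded elastic cap = " + threadCap);
		dbScheduler.dispose();
	}
}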
@@ -209,7 +209,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
 		//options.setUseDirectReads(true);
 		//options.setUseDirectIoForFlushAndCompaction(true);
 		//options.setWritableFileMaxBufferSize(1024 * 1024); // 1MB by default
-		options.setCompactionReadaheadSize(2 * 1024 * 1024); // recommend at least 2MB
+		//options.setCompactionReadaheadSize(2 * 1024 * 1024); // recommend at least 2MB
 		final BlockBasedTableConfig tableOptions = new BlockBasedTableConfig();
 		if (lowMemory) {
 			// LOW MEMORY
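Explicit compaction readahead is switched off along with the scan readahead above; RocksDB's guidance recommends roughly 2 MB mainly for spinning disks, so on SSDs the extra buffering is mostly overhead. A sketch showing the knob and its default (assumes rocksdbjni on the classpath):

import org.rocksdb.Options;
import org.rocksdb.RocksDB;

public class CompactionReadaheadSketch {
	public static void main(String[] args) {
		RocksDB.loadLibrary();
		try (Options options = new Options()) {
			//options.setCompactionReadaheadSize(2 * 1024 * 1024); // disabled by this commit
			System.out.println("compactionReadaheadSize = " + options.compactionReadaheadSize());
		}
	}
}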