Update Example.java, LLDictionary.java, and 6 more files...

parent 023bc3b0dd
commit dbca36b3aa
Example.java

@@ -5,8 +5,8 @@ import it.cavallium.dbengine.database.Column;
 import it.cavallium.dbengine.database.LLKeyValueDatabase;
 import it.cavallium.dbengine.database.collections.DatabaseMapDictionary;
 import it.cavallium.dbengine.database.collections.DatabaseMapDictionaryDeep;
-import it.cavallium.dbengine.database.collections.SerializerFixedBinaryLength;
 import it.cavallium.dbengine.database.collections.Serializer;
+import it.cavallium.dbengine.database.collections.SerializerFixedBinaryLength;
 import it.cavallium.dbengine.database.collections.SubStageGetterSingleBytes;
 import it.cavallium.dbengine.database.disk.LLLocalDatabaseConnection;
 import java.io.IOException;
@@ -21,6 +21,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Locale;
 import java.util.concurrent.CompletionException;
+import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Function;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
@@ -32,7 +33,7 @@ import reactor.util.function.Tuples;
 public class Example {

 	private static final boolean printPreviousValue = false;
-	private static final int numRepeats = 100;
+	private static final int numRepeats = 1000;
 	private static final int batchSize = 10000;

 	public static void main(String[] args) throws InterruptedException {
@@ -52,8 +53,8 @@
		 */

-		testPutMulti()
-				.then(rangeTestPutMulti())
+		rangeTestPutMultiProgressive()
+				.then(rangeTestPutMultiSame())
				.subscribeOn(Schedulers.parallel())
				.blockOptional();
	}
@@ -145,11 +146,14 @@
 				tempDb()
 						.flatMap(db -> db.getDictionary("testmap").map(dict -> Tuples.of(db, dict)))
 						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionaryDeep.simple(dict, ssg, ser))),
-				tuple -> Mono
-						.defer(() -> tuple.getT2().putMulti(putMultiFlux)
-						),
+				tuple -> Mono.defer(() -> tuple.getT2().putMulti(putMultiFlux)),
 				numRepeats,
-				tuple -> tuple.getT1().close());
+				tuple -> Mono
+						.fromRunnable(() -> System.out.println("Calculating size"))
+						.then(tuple.getT2().size(null, false))
+						.doOnNext(s -> System.out.println("Size after: " + s))
+						.then(tuple.getT1().close())
+				);
 	}

 	private static Mono<Void> rangeTestAtPut() {
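The refactored harness calls above now measure the map's size before closing the database, which turns each benchmark into a cheap correctness check as well. A minimal sketch of that "log, measure, then close" teardown shape, using a plain ConcurrentHashMap as a stand-in for the database tuple (class and variable names here are illustrative, not dbengine API):

```java
import java.util.concurrent.ConcurrentHashMap;
import reactor.core.publisher.Mono;

public class TeardownShape {
	public static void main(String[] args) {
		var store = new ConcurrentHashMap<Integer, Integer>();
		store.put(1, 2);

		Mono<Void> teardown = Mono
				.fromRunnable(() -> System.out.println("Calculating size"))
				.then(Mono.fromCallable(() -> (long) store.size()))   // stands in for getT2().size(null, false)
				.doOnNext(s -> System.out.println("Size after: " + s))
				.then(Mono.<Void>fromRunnable(store::clear));         // stands in for getT1().close()

		teardown.block(); // prints the size, then releases the resource
	}
}
```

`then(...)` discards the upstream value, so the chain reads in order: log, measure, print, release.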
@@ -227,23 +231,56 @@
 				tuple -> tuple.getT1().close());
 	}

-	private static Mono<Void> rangeTestPutMulti() {
+	private static Mono<Void> rangeTestPutMultiSame() {
 		var ser = SerializerFixedBinaryLength.noop(4);
 		var vser = Serializer.noop();
 		HashMap<byte[], byte[]> keysToPut = new HashMap<>();
 		for (int i = 0; i < batchSize; i++) {
 			keysToPut.put(Ints.toByteArray(i * 3), Ints.toByteArray(i * 11));
 		}
-		var putMultiFlux = Flux.fromIterable(keysToPut.entrySet());
 		return test("MapDictionary::putMulti (batch of " + batchSize + " entries)",
 				tempDb()
 						.flatMap(db -> db.getDictionary("testmap").map(dict -> Tuples.of(db, dict)))
 						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, vser))),
 				tuple -> Mono
-						.defer(() -> tuple.getT2().putMulti(putMultiFlux)
+						.defer(() -> tuple.getT2().putMulti(Flux.fromIterable(keysToPut.entrySet()))
 						),
 				numRepeats,
-				tuple -> tuple.getT1().close());
+				tuple -> Mono
+						.fromRunnable(() -> System.out.println("Calculating size"))
+						.then(tuple.getT2().size(null, false))
+						.doOnNext(s -> System.out.println("Size after: " + s))
+						.then(tuple.getT1().close())
+				);
+	}
+
+	private static Mono<Void> rangeTestPutMultiProgressive() {
+		var ser = SerializerFixedBinaryLength.noop(4);
+		var vser = Serializer.noop();
+		AtomicInteger ai = new AtomicInteger(0);
+		return test("MapDictionary::putMulti (batch of " + batchSize + " entries)",
+				tempDb()
+						.flatMap(db -> db.getDictionary("testmap").map(dict -> Tuples.of(db, dict)))
+						.map(tuple -> tuple.mapT2(dict -> DatabaseMapDictionary.simple(dict, ser, vser))),
+				tuple -> Mono
+						.defer(() -> {
+							var aiv = ai.incrementAndGet();
+							HashMap<byte[], byte[]> keysToPut = new HashMap<>();
+							for (int i = 0; i < batchSize; i++) {
+								keysToPut.put(
+										Ints.toByteArray(i * 3 + (batchSize * aiv)),
+										Ints.toByteArray(i * 11 + (batchSize * aiv))
+								);
+							}
+							return tuple.getT2().putMulti(Flux.fromIterable(keysToPut.entrySet()));
+						}),
+				numRepeats,
+				tuple -> Mono
+						.fromRunnable(() -> System.out.println("Calculating size"))
+						.then(tuple.getT2().size(null, false))
+						.doOnNext(s -> System.out.println("Size after: " + s))
+						.then(tuple.getT1().close())
+				);
 	}

 	private static <U> Mono<? extends LLKeyValueDatabase> tempDb() {
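The two new benchmarks differ only in their key schedules: rangeTestPutMultiSame re-puts the same batchSize keys on every repeat, so the final size should stay at batchSize, while rangeTestPutMultiProgressive shifts keys and values by batchSize * aiv per repeat; with the constants above the batches never collide, so the size should grow by batchSize per repeat. A sketch of the arithmetic with tiny stand-in constants (int keys instead of Ints.toByteArray for readability):

```java
import java.util.HashMap;

// Key-schedule sketch for the two benchmarks above (batchSize is tiny
// here just to print the idea).
public class KeySchedule {
	static final int batchSize = 4;

	public static void main(String[] args) {
		// "Same": every repeat writes identical keys -> size stays at batchSize
		HashMap<Integer, Integer> same = new HashMap<>();
		for (int repeat = 0; repeat < 3; repeat++) {
			for (int i = 0; i < batchSize; i++) {
				same.put(i * 3, i * 11);
			}
		}
		System.out.println("same size = " + same.size()); // 4

		// "Progressive": the window shifts by batchSize * aiv -> disjoint batches
		HashMap<Integer, Integer> progressive = new HashMap<>();
		for (int aiv = 1; aiv <= 3; aiv++) { // ai.incrementAndGet() per repeat
			for (int i = 0; i < batchSize; i++) {
				progressive.put(i * 3 + (batchSize * aiv), i * 11 + (batchSize * aiv));
			}
		}
		System.out.println("progressive size = " + progressive.size()); // 12 = 3 * batchSize
	}
}
```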
@@ -282,18 +319,20 @@
 		Duration WAIT_TIME_END = Duration.ofSeconds(5);
 		return Mono
 				.delay(WAIT_TIME)
+				.doOnSuccess(s -> {
+					System.out.println("----------------------------------------------------------------------");
+					System.out.println(name);
+				})
 				.then(Mono.fromRunnable(() -> instantInit.tryEmitValue(now())))
 				.then(setup)
 				.doOnSuccess(s -> instantInitTest.tryEmitValue(now()))
-				.flatMap(a ->Mono.defer(() -> test.apply(a)).repeat(numRepeats)
+				.flatMap(a -> Mono.defer(() -> test.apply(a)).repeat(numRepeats - 1)
 						.then()
 						.doOnSuccess(s -> instantEndTest.tryEmitValue(now()))
 						.then(close.apply(a)))
 				.doOnSuccess(s -> instantEnd.tryEmitValue(now()))
 				.then(Mono.zip(instantInit.asMono(), instantInitTest.asMono(), instantEndTest.asMono(), instantEnd.asMono()))
 				.doOnSuccess(tuple -> {
-					System.out.println("----------------------------------------------------------------------");
-					System.out.println(name);
 					System.out.println(
 							"\t - Executed " + DecimalFormat.getInstance(Locale.ITALY).format((numRepeats * batchSize)) + " times:");
 					System.out.println("\t - Test time: " + DecimalFormat
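The repeat(numRepeats) to repeat(numRepeats - 1) change fixes an off-by-one: Reactor's repeat(n) re-subscribes n additional times, so the source runs n + 1 times in total. A standalone check:

```java
import java.util.concurrent.atomic.AtomicInteger;
import reactor.core.publisher.Mono;

public class RepeatCount {
	public static void main(String[] args) {
		AtomicInteger runs = new AtomicInteger();
		Mono.fromRunnable(runs::incrementAndGet)
				.repeat(3)     // 3 extra subscriptions -> 4 runs in total
				.blockLast();
		System.out.println(runs.get()); // prints 4

		runs.set(0);
		Mono.fromRunnable(runs::incrementAndGet)
				.repeat(3 - 1) // the "numRepeats - 1" pattern: exactly 3 runs
				.blockLast();
		System.out.println(runs.get()); // prints 3
	}
}
```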
LLDictionary.java

@@ -1,5 +1,6 @@
 package it.cavallium.dbengine.database;

+import java.util.List;
 import java.util.Map.Entry;
 import java.util.function.Function;
 import org.jetbrains.annotations.Nullable;
@@ -22,21 +23,30 @@ public interface LLDictionary extends LLKeyValueDatabaseStructure {

 	Flux<Entry<byte[], byte[]>> getRange(@Nullable LLSnapshot snapshot, LLRange range);

+	Flux<List<Entry<byte[], byte[]>>> getRangeGrouped(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength);
+
 	Flux<byte[]> getRangeKeys(@Nullable LLSnapshot snapshot, LLRange range);

+	Flux<List<byte[]>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength);
+
 	Flux<Entry<byte[], byte[]>> setRange(LLRange range, Flux<Entry<byte[], byte[]>> entries, boolean getOldValues);

 	default Mono<Void> replaceRange(LLRange range, boolean canKeysChange, Function<Entry<byte[], byte[]>, Mono<Entry<byte[], byte[]>>> entriesReplacer) {
-		Flux<Entry<byte[], byte[]>> replacedFlux = this.getRange(null, range).flatMap(entriesReplacer);
+		return Mono.defer(() -> {
 			if (canKeysChange) {
 				return this
-						.setRange(range, replacedFlux, false)
+						.setRange(range, this
+								.getRange(null, range)
+								.flatMap(entriesReplacer), false)
 						.then();
 			} else {
 				return this
-						.putMulti(replacedFlux, false)
+						.putMulti(this
+								.getRange(null, range)
+								.flatMap(entriesReplacer), false)
 						.then();
 			}
+		});
 	}

 	Mono<Boolean> isRangeEmpty(@Nullable LLSnapshot snapshot, LLRange range);
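Wrapping replaceRange's body in Mono.defer moves the canKeysChange branch and the getRange(...) assembly to subscription time, so every subscriber gets a freshly built pipeline instead of one assembled when replaceRange was called. The eager-vs-deferred difference in isolation (buildPipeline is a hypothetical stand-in for the getRange/flatMap assembly):

```java
import java.util.concurrent.atomic.AtomicInteger;
import reactor.core.publisher.Mono;

public class DeferLaziness {
	static final AtomicInteger assemblies = new AtomicInteger();

	// Stand-in for "this.getRange(null, range).flatMap(entriesReplacer)":
	// counting assemblies makes the eager/lazy difference visible.
	static Mono<String> buildPipeline() {
		assemblies.incrementAndGet();
		return Mono.just("replaced");
	}

	public static void main(String[] args) {
		Mono<String> eager = buildPipeline();                         // assembled here, once
		Mono<String> lazy = Mono.defer(DeferLaziness::buildPipeline); // assembled per subscribe

		eager.block();
		eager.block();
		lazy.block();
		lazy.block();
		// 1 assembly for 'eager' (the same cold mono is merely re-subscribed),
		// 2 assemblies for 'lazy': each subscribe re-runs buildPipeline()
		System.out.println(assemblies.get()); // prints 3
	}
}
```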
DatabaseMapDictionary.java

@@ -73,8 +73,7 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
 	}

 	private Entry<byte[], byte[]> stripPrefix(Entry<byte[], byte[]> entry) {
-		byte[] keySuffix = stripPrefix(entry.getKey());
-		return Map.entry(keySuffix, entry.getValue());
+		return Map.entry(stripPrefix(entry.getKey()), entry.getValue());
 	}

 	@Override
@@ -89,7 +88,7 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,

 	@Override
 	public Mono<Long> size(@Nullable CompositeSnapshot snapshot, boolean fast) {
-		return dictionary.sizeRange(resolveSnapshot(snapshot), range, true);
+		return dictionary.sizeRange(resolveSnapshot(snapshot), range, fast);
 	}

 	@Override
@@ -174,9 +173,10 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,

 	@Override
 	public Flux<Entry<T, U>> setAllValuesAndGetPrevious(Flux<Entry<T, U>> entries) {
-		var serializedEntries = entries
-				.map(entry -> Map.entry(toKey(serializeSuffix(entry.getKey())), serialize(entry.getValue())));
-		return dictionary.setRange(range, serializedEntries, true)
+		return dictionary
+				.setRange(range,
+						entries.map(entry ->
+								Map.entry(toKey(serializeSuffix(entry.getKey())), serialize(entry.getValue()))), true)
 				.map(entry -> Map.entry(deserializeSuffix(stripPrefix(entry.getKey())), deserialize(entry.getValue())));
 	}

DatabaseMapDictionaryDeep.java

@@ -9,11 +9,11 @@ import java.util.Map;
 import java.util.Map.Entry;
 import org.jetbrains.annotations.Nullable;
 import reactor.core.publisher.Flux;
-import reactor.core.publisher.GroupedFlux;
 import reactor.core.publisher.Mono;
 import reactor.util.function.Tuples;

 // todo: implement optimized methods
+@SuppressWarnings("Convert2MethodRef")
 public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implements DatabaseStageMap<T, U, US> {

 	public static final byte[] EMPTY_BYTES = new byte[0];
@@ -196,34 +196,40 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
 	@Override
 	public Mono<US> at(@Nullable CompositeSnapshot snapshot, T keySuffix) {
 		byte[] keySuffixData = serializeSuffix(keySuffix);
-		Flux<byte[]> rangeKeys = this
-				.dictionary.getRangeKeys(resolveSnapshot(snapshot), toExtRange(keySuffixData)
-		);
 		return this.subStageGetter
-				.subStage(dictionary, snapshot, toKeyWithoutExt(keySuffixData), rangeKeys);
+				.subStage(dictionary,
+						snapshot,
+						toKeyWithoutExt(keySuffixData),
+						this.dictionary.getRangeKeys(resolveSnapshot(snapshot), toExtRange(keySuffixData))
+				);
 	}

 	@Override
 	public Flux<Entry<T, US>> getAllStages(@Nullable CompositeSnapshot snapshot) {
-		Flux<GroupedFlux<byte[], byte[]>> groupedFlux = dictionary
-				.getRangeKeys(resolveSnapshot(snapshot), range)
-				.groupBy(this::removeExtFromFullKey);
-		return groupedFlux
-				.flatMap(rangeKeys -> this.subStageGetter
-						.subStage(dictionary, snapshot, rangeKeys.key(), rangeKeys)
-						.map(us -> Map.entry(this.deserializeSuffix(this.stripPrefix(rangeKeys.key())), us))
+		return dictionary
+				.getRangeKeysGrouped(resolveSnapshot(snapshot), range, keyPrefix.length + keySuffixLength)
+				.flatMap(rangeKeys -> {
+					//System.out.println(Thread.currentThread() + "\tkReceived range key flux");
+					byte[] groupKeyWithoutExt = removeExtFromFullKey(rangeKeys.get(0));
+					byte[] groupSuffix = this.stripPrefix(groupKeyWithoutExt);
+					return this.subStageGetter
+							.subStage(dictionary, snapshot, groupKeyWithoutExt, Flux.fromIterable(rangeKeys))
+							//.doOnSuccess(s -> System.out.println(Thread.currentThread() + "\tObtained stage for a key"))
+							.map(us -> Map.entry(this.deserializeSuffix(groupSuffix), us));
+							//.doOnSuccess(s -> System.out.println(Thread.currentThread() + "\tMapped stage for a key"));
+				}
 				);
+				//.doOnNext(s -> System.out.println(Thread.currentThread() + "\tNext stage"))
 	}

 	@Override
 	public Flux<Entry<T, U>> setAllValuesAndGetPrevious(Flux<Entry<T, U>> entries) {
-		var newValues = entries
-				.flatMap(entry -> at(null, entry.getKey()).map(us -> Tuples.of(us, entry.getValue())))
-				.flatMap(tuple -> tuple.getT1().set(tuple.getT2()));
-
 		return getAllStages(null)
 				.flatMap(stage -> stage.getValue().get(null).map(val -> Map.entry(stage.getKey(), val)))
-				.concatWith(newValues.then(Mono.empty()));
+				.concatWith(entries
+						.flatMap(entry -> at(null, entry.getKey()).map(us -> Tuples.of(us, entry.getValue())))
+						.flatMap(tuple -> tuple.getT1().set(tuple.getT2()))
+						.then(Mono.empty()));
 	}

 //todo: temporary wrapper. convert the whole class to buffers
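getAllStages no longer uses groupBy: because range keys arrive in sorted order, keys sharing a prefix are adjacent, so the storage layer can emit each group as one List<byte[]> (getRangeKeysGrouped) without groupBy's per-group buffering, where unconsumed GroupedFlux groups can stall the pipeline. The commit hand-rolls the grouping inside the RocksDB iterator loop (see LLLocalDictionary below); a compact sketch of the same contract using Reactor's bufferUntilChanged, assuming sorted input and a fixed prefix length:

```java
import java.util.Arrays;
import java.util.List;
import reactor.core.publisher.Flux;

public class PrefixGrouping {
	// Groups a *sorted* key stream into runs sharing their first
	// prefixLength bytes -- the contract getRangeKeysGrouped provides.
	// (Illustration only: the commit implements this inside the RocksDB
	// iterator loop, not with bufferUntilChanged.)
	static Flux<List<byte[]>> groupByPrefix(Flux<byte[]> sortedKeys, int prefixLength) {
		return sortedKeys.bufferUntilChanged(
				key -> Arrays.copyOf(key, prefixLength), // prefix token
				Arrays::equals);                         // compare tokens by content
	}

	public static void main(String[] args) {
		Flux<byte[]> keys = Flux.just(
				new byte[] {0, 1, 9},
				new byte[] {0, 1, 7},
				new byte[] {0, 2, 5});
		groupByPrefix(keys, 2)
				.map(List::size)
				.subscribe(System.out::println); // prints 2, then 1
	}
}
```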
DatabaseStageMap.java

@@ -30,7 +30,7 @@ public interface DatabaseStageMap<T, U, US extends DatabaseStage<U>> extends Dat
 	}

 	default Mono<U> putValueAndGetPrevious(T key, U value) {
-		return at(null, key).flatMap(v -> v.setAndGetPrevious(value));
+		return at(null, key).single().flatMap(v -> v.setAndGetPrevious(value));
 	}

 	default Mono<Boolean> putValueAndGetStatus(T key, U value) {
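The added .single() makes an unexpectedly empty at(null, key) fail loudly with NoSuchElementException instead of silently skipping the write:

```java
import reactor.core.publisher.Mono;

public class SingleGuard {
	public static void main(String[] args) {
		// Happy path: the stage returned by at(null, key) passes through
		System.out.println(Mono.just("stage").single().block()); // stage

		// Empty path: the put now fails loudly instead of being dropped
		Mono.empty()
				.single()
				.subscribe(
						v -> System.out.println("value: " + v),
						e -> System.out.println("error: " + e)); // NoSuchElementException
	}
}
```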
@@ -76,21 +76,19 @@ public interface DatabaseStageMap<T, U, US extends DatabaseStage<U>> extends Dat
 	}

 	default Mono<Void> replaceAllValues(boolean canKeysChange, Function<Entry<T, U>, Mono<Entry<T, U>>> entriesReplacer) {
-		Flux<Entry<T, U>> replacedFlux = this
-				.getAllValues(null)
-				.flatMap(entriesReplacer);
+		return Mono.defer(() -> {
 			if (canKeysChange) {
-				return this
-						.setAllValues(replacedFlux)
-						.then();
+				return this.setAllValues(this.getAllValues(null).flatMap(entriesReplacer)).then();
 			} else {
-				return replacedFlux
+				return this
+						.getAllValues(null)
+						.flatMap(entriesReplacer)
 						.flatMap(replacedEntry -> this
 								.at(null, replacedEntry.getKey())
-								.map(entry -> entry.set(replacedEntry.getValue()))
-						)
+								.map(entry -> entry.set(replacedEntry.getValue())))
 						.then();
 			}
+		});
 	}

 	default Mono<Void> replaceAll(Function<Entry<T, US>, Mono<Void>> entriesReplacer) {
SubStageGetterSingle.java

@@ -20,12 +20,25 @@ public class SubStageGetterSingle<T> implements SubStageGetter<T, DatabaseStageE
 			@Nullable CompositeSnapshot snapshot,
 			byte[] keyPrefix,
 			Flux<byte[]> keyFlux) {
-		return keyFlux.singleOrEmpty().flatMap(key -> Mono.fromCallable(() -> {
+		//System.out.println(Thread.currentThread() + "subStageGetterSingle1");
+		return keyFlux
+				.singleOrEmpty()
+				.flatMap(key -> Mono
+						.<DatabaseStageEntry<T>>fromCallable(() -> {
+							//System.out.println(Thread.currentThread() + "subStageGetterSingle2");
 							if (!Arrays.equals(keyPrefix, key)) {
 								throw new IndexOutOfBoundsException("Found more than one element!");
 							}
 							return null;
-		})).thenReturn(new DatabaseSingle<>(dictionary, keyPrefix, serializer));
+						})
+				)
+				.then(Mono.fromSupplier(() -> {
+					//System.out.println(Thread.currentThread() + "subStageGetterSingle3");
+					return new DatabaseSingle<T>(dictionary,
+							keyPrefix,
+							serializer
+					);
+				}));
 	}

 //todo: temporary wrapper. convert the whole class to buffers
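Besides the logging hooks, the reshaped chain swaps thenReturn(new DatabaseSingle<>(...)) for .then(Mono.fromSupplier(() -> new DatabaseSingle<T>(...))): thenReturn needs its value at assembly time, while fromSupplier constructs it once per subscription. The difference in isolation:

```java
import reactor.core.publisher.Mono;

public class EagerVsLazyValue {
	static String build(String tag) {
		System.out.println("constructed " + tag);
		return tag;
	}

	public static void main(String[] args) {
		// thenReturn: build("eager") runs right here, at assembly time
		Mono<String> eager = Mono.empty().thenReturn(build("eager"));

		// then(fromSupplier): build("lazy") runs only when subscribed
		Mono<String> lazy = Mono.empty().then(Mono.fromSupplier(() -> build("lazy")));

		System.out.println("assembled; subscribing now:");
		eager.block(); // nothing new printed: the value already exists
		lazy.block();  // "constructed lazy" printed here
	}
}
```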
LLLocalDictionary.java

@@ -9,7 +9,6 @@ import it.unimi.dsi.fastutil.objects.ObjectArrayList;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
@@ -27,7 +26,6 @@ import org.rocksdb.RocksIterator;
 import org.rocksdb.Snapshot;
 import org.rocksdb.WriteOptions;
 import org.warp.commonutils.concurrency.atomicity.NotAtomic;
-import org.warp.commonutils.type.VariableWrapper;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 import reactor.core.scheduler.Scheduler;
@@ -159,31 +157,34 @@ public class LLLocalDictionary implements LLDictionary {

 	@Override
 	public Mono<byte[]> put(byte[] key, byte[] value, LLDictionaryResultType resultType) {
-		Mono<byte[]> response = getPrevValue(key, resultType);
-		return Mono
+		return getPrevValue(key, resultType)
+				.concatWith(Mono
 						.fromCallable(() -> {
 							db.put(cfh, key, value);
 							return null;
 						})
 						.onErrorMap(IOException::new)
 						.subscribeOn(dbScheduler)
-				.then(response);
+						.then(Mono.empty())
+				).singleOrEmpty();
 	}

 	@Override
 	public Mono<byte[]> remove(byte[] key, LLDictionaryResultType resultType) {
-		Mono<byte[]> response = getPrevValue(key, resultType);
-		return Mono
+		return getPrevValue(key, resultType)
+				.concatWith(Mono
 						.fromCallable(() -> {
 							db.delete(cfh, key);
 							return null;
 						})
 						.onErrorMap(IOException::new)
 						.subscribeOn(dbScheduler)
-				.then(response);
+						.then(Mono.empty())
+				).singleOrEmpty();
 	}

 	private Mono<byte[]> getPrevValue(byte[] key, LLDictionaryResultType resultType) {
+		return Mono.defer(() -> {
 			switch (resultType) {
 				case VALUE_CHANGED:
 					return containsKey(null, key).single().map(LLUtils::booleanToResponse);
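This is a behavioral fix, not a style change. The old chain subscribed to response only after the write callable completed, so getPrevValue observed the value that had just been written. getPrevValue(...).concatWith(write).singleOrEmpty() subscribes to the read first, then the write, and forwards the at-most-one previous value. The ordering in miniature (a plain map standing in for RocksDB):

```java
import java.util.concurrent.ConcurrentHashMap;
import reactor.core.publisher.Mono;

public class ReadThenWrite {
	static final ConcurrentHashMap<String, String> db = new ConcurrentHashMap<>();

	public static void main(String[] args) {
		db.put("k", "old");

		Mono<String> prev = Mono.fromCallable(() -> db.get("k"));
		Mono<String> write = Mono.fromCallable(() -> {
			db.put("k", "new");
			return null; // like the db.put(...) callable: completes empty
		});

		// Old shape was effectively write.then(prev): it read AFTER writing.
		// New shape: read first, then write, keep the single read result.
		String result = prev
				.concatWith(write)
				.singleOrEmpty()
				.block();
		System.out.println(result);      // old  (the previous value)
		System.out.println(db.get("k")); // new  (the write still happened)
	}
}
```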
@@ -208,6 +209,7 @@ public class LLLocalDictionary implements LLDictionary {
 				default:
 					return Mono.error(new IllegalStateException("Unexpected value: " + resultType));
 			}
+		});
 	}

 	@Override
@@ -242,11 +244,12 @@ public class LLLocalDictionary implements LLDictionary {
 	public Flux<Entry<byte[], byte[]>> putMulti(Flux<Entry<byte[], byte[]>> entries, boolean getOldValues) {
 		return entries
 				.window(Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP))
-				.publishOn(dbScheduler)
 				.flatMap(Flux::collectList)
 				.flatMap(entriesWindow -> this
 						.getMulti(null, Flux.fromIterable(entriesWindow).map(Entry::getKey))
+						.publishOn(dbScheduler)
 						.concatWith(Mono.fromCallable(() -> {
+							//System.out.println(Thread.currentThread()+"\tTest");
 							var batch = new CappedWriteBatch(db,
 									CAPPED_WRITE_BATCH_CAP,
 									RESERVED_WRITE_BATCH_SIZE,
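Moving publishOn(dbScheduler) below getMulti(...) narrows what runs on the database scheduler: publishOn only switches the thread for operators downstream of it, so windowing and batching now stay on the caller's thread. The placement rule in isolation:

```java
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

public class PublishOnPlacement {
	public static void main(String[] args) {
		var dbScheduler = Schedulers.newSingle("db");

		Flux.range(0, 2)
				.doOnNext(i -> System.out.println("upstream   on " + Thread.currentThread().getName()))
				.publishOn(dbScheduler) // thread switch HERE: only operators below hop
				.doOnNext(i -> System.out.println("downstream on " + Thread.currentThread().getName()))
				.blockLast();

		dbScheduler.dispose();
	}
}
```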
@@ -311,11 +314,26 @@ public class LLLocalDictionary implements LLDictionary {

 	@Override
 	public Flux<Entry<byte[], byte[]>> getRange(@Nullable LLSnapshot snapshot, LLRange range) {
+		return Flux.defer(() -> {
 			if (range.isSingle()) {
 				return getRangeSingle(snapshot, range.getMin());
 			} else {
 				return getRangeMulti(snapshot, range);
 			}
+		});
+	}
+
+	@Override
+	public Flux<List<Entry<byte[], byte[]>>> getRangeGrouped(@Nullable LLSnapshot snapshot,
+			LLRange range,
+			int prefixLength) {
+		return Flux.defer(() -> {
+			if (range.isSingle()) {
+				return getRangeSingle(snapshot, range.getMin()).map(List::of);
+			} else {
+				return getRangeMultiGrouped(snapshot, range, prefixLength);
+			}
+		});
 	}

 	private Flux<Entry<byte[],byte[]>> getRangeSingle(LLSnapshot snapshot, byte[] key) {
@@ -326,67 +344,138 @@ public class LLLocalDictionary implements LLDictionary {
 	}

 	private Flux<Entry<byte[],byte[]>> getRangeMulti(LLSnapshot snapshot, LLRange range) {
-		return Mono
-				.fromCallable(() -> {
-					var iter = db.newIterator(cfh, resolveSnapshot(snapshot));
+		return Flux
+				.<Entry<byte[], byte[]>>push(sink -> {
+					//System.out.println(Thread.currentThread() + "\tPreparing Read rande item");
+					try (var rocksIterator = db.newIterator(cfh, resolveSnapshot(snapshot))) {
 						if (range.hasMin()) {
-							iter.seek(range.getMin());
+							rocksIterator.seek(range.getMin());
 						} else {
-							iter.seekToFirst();
+							rocksIterator.seekToFirst();
 						}
-					return iter;
-				})
-				.subscribeOn(dbScheduler)
-				.flatMapMany(rocksIterator -> Flux
-						.<Entry<byte[], byte[]>>fromIterable(() -> {
-							VariableWrapper<byte[]> nextKey = new VariableWrapper<>(null);
-							VariableWrapper<byte[]> nextValue = new VariableWrapper<>(null);
-							return new Iterator<>() {
-								@Override
-								public boolean hasNext() {
-									assert nextKey.var == null;
-									assert nextValue.var == null;
-									if (!rocksIterator.isValid()) {
-										nextKey.var = null;
-										nextValue.var = null;
-										return false;
-									}
-									var key = rocksIterator.key();
-									var value = rocksIterator.value();
+						byte[] key;
+						while (rocksIterator.isValid()) {
+							key = rocksIterator.key();
 							if (range.hasMax() && Arrays.compareUnsigned(key, range.getMax()) > 0) {
-								nextKey.var = null;
-								nextValue.var = null;
-								return false;
+								break;
 							}
-							nextKey.var = key;
-							nextValue.var = value;
-							return true;
+							//System.out.println(Thread.currentThread() + "\tRead rande item");
+							sink.next(Map.entry(key, rocksIterator.value()));
+							rocksIterator.next();
+						}
+					} finally {
+						//System.out.println(Thread.currentThread() + "\tFinish Read rande item");
+						sink.complete();
+					}
+				})
+				.subscribeOn(dbScheduler);
 	}

-							@Override
-							public Entry<byte[], byte[]> next() {
-								var key = nextKey.var;
-								var val = nextValue.var;
-								assert key != null;
-								assert val != null;
-								nextKey.var = null;
-								nextValue.var = null;
-								return Map.entry(key, val);
+	private Flux<List<Entry<byte[],byte[]>>> getRangeMultiGrouped(LLSnapshot snapshot, LLRange range, int prefixLength) {
+		return Flux
+				.<List<Entry<byte[], byte[]>>>push(sink -> {
+					//System.out.println(Thread.currentThread() + "\tPreparing Read rande item");
+					try (var rocksIterator = db.newIterator(cfh, resolveSnapshot(snapshot))) {
+						if (range.hasMin()) {
+							rocksIterator.seek(range.getMin());
+						} else {
+							rocksIterator.seekToFirst();
+						}
+						byte[] firstGroupKey = null;
+						List<Entry<byte[], byte[]>> currentGroupValues = new ArrayList<>();
+
+						byte[] key;
+						while (rocksIterator.isValid()) {
+							key = rocksIterator.key();
+							if (firstGroupKey == null) { // Fix first value
+								firstGroupKey = key;
+							}
+							if (range.hasMax() && Arrays.compareUnsigned(key, range.getMax()) > 0) {
+								break;
+							}
+							if (Arrays.equals(firstGroupKey, 0, prefixLength, key, 0, prefixLength)) {
+								currentGroupValues.add(Map.entry(key, rocksIterator.value()));
+							} else {
+								if (!currentGroupValues.isEmpty()) {
+									//System.out.println(Thread.currentThread() + "\tRead rande item");
+									sink.next(currentGroupValues);
+								}
+								firstGroupKey = key;
+								currentGroupValues = new ArrayList<>();
+							}
+							rocksIterator.next();
+						}
+						if (!currentGroupValues.isEmpty()) {
+							//System.out.println(Thread.currentThread() + "\tRead rande item");
+							sink.next(currentGroupValues);
+						}
+					} finally {
+						//System.out.println(Thread.currentThread() + "\tFinish Read rande item");
+						sink.complete();
 					}
-						};
 				})
-				.doFinally(signalType -> rocksIterator.close())
-				.subscribeOn(dbScheduler)
-		);
+				.subscribeOn(dbScheduler);
 	}

 	@Override
 	public Flux<byte[]> getRangeKeys(@Nullable LLSnapshot snapshot, LLRange range) {
+		return Flux.defer(() -> {
 			if (range.isSingle()) {
-				return getRangeKeysSingle(snapshot, range.getMin());
+				//System.out.println(Thread.currentThread() + "getRangeKeys single");
+				return getRangeKeysSingle(snapshot, range.getMin()).doOnTerminate(() -> {}/*System.out.println(Thread.currentThread() + "getRangeKeys single end")*/);
 			} else {
+				//System.out.println(Thread.currentThread() + "getRangeKeys multi");
 				return getRangeKeysMulti(snapshot, range);
 			}
+		});
+	}
+
+	@Override
+	public Flux<List<byte[]>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength) {
+		return Flux
+				.<List<byte[]>>push(sink -> {
+					//System.out.println(Thread.currentThread() + "\tPreparing Read rande item");
+					try (var rocksIterator = db.newIterator(cfh, resolveSnapshot(snapshot))) {
+						if (range.hasMin()) {
+							rocksIterator.seek(range.getMin());
+						} else {
+							rocksIterator.seekToFirst();
+						}
+						byte[] firstGroupKey = null;
+						List<byte[]> currentGroupValues = new ArrayList<>();
+
+						byte[] key;
+						while (rocksIterator.isValid()) {
+							key = rocksIterator.key();
+							if (firstGroupKey == null) { // Fix first value
+								firstGroupKey = key;
+							}
+							if (range.hasMax() && Arrays.compareUnsigned(key, range.getMax()) > 0) {
+								break;
+							}
+							if (Arrays.equals(firstGroupKey, 0, prefixLength, key, 0, prefixLength)) {
+								currentGroupValues.add(key);
+							} else {
+								if (!currentGroupValues.isEmpty()) {
+									//System.out.println(Thread.currentThread() + "\tRead rande item");
+									sink.next(currentGroupValues);
+								}
+								firstGroupKey = key;
+								currentGroupValues = new ArrayList<>();
+								currentGroupValues.add(key);
+							}
+							rocksIterator.next();
+						}
+						if (!currentGroupValues.isEmpty()) {
+							//System.out.println(Thread.currentThread() + "\tRead rande item");
+							sink.next(currentGroupValues);
+						}
+					} finally {
+						//System.out.println(Thread.currentThread() + "\tFinish Read rande item");
+						sink.complete();
+					}
+				})
+				.subscribeOn(dbScheduler);
 	}

 	private Flux<byte[]> getRangeKeysSingle(LLSnapshot snapshot, byte[] key) {
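The core change in this file: the Flux.fromIterable-over-Iterator adapters (with VariableWrapper look-ahead state and a doFinally to close the iterator) become Flux.push producer loops that seek, emit while the iterator is valid, and close the RocksIterator via try-with-resources before completing the sink. Note that the sink.onRequest hook is a no-op, so backpressure is absorbed by buffering rather than by pausing the loop. One hedged observation: in getRangeMultiGrouped the else branch starts a new group without adding the current entry to it, while getRangeKeysGrouped does currentGroupValues.add(key) after the reset, so the grouped-entries variant looks like it drops the first entry of every group after the first. The push-loop shape in miniature, with a toy cursor standing in for RocksIterator:

```java
import java.util.Iterator;
import java.util.List;
import reactor.core.publisher.Flux;
import reactor.core.scheduler.Schedulers;

public class PushCursor {
	// Minimal stand-in for a RocksIterator-style cursor that must be closed.
	static class Cursor implements AutoCloseable {
		final Iterator<String> it = List.of("a", "b", "c").iterator();
		boolean isValid() { return it.hasNext(); }
		String next() { return it.next(); }
		public void close() { System.out.println("cursor closed"); }
	}

	public static void main(String[] args) {
		Flux.<String>push(sink -> {
			try (Cursor cursor = new Cursor()) {      // like try (var rocksIterator = ...)
				while (cursor.isValid()) {
					sink.next(cursor.next());         // emit while the cursor is valid
				}
			} finally {
				sink.complete();                      // always complete, as in the diff
			}
		})
		.subscribeOn(Schedulers.boundedElastic())     // dbScheduler in the real code
		.doOnNext(System.out::println)
		.blockLast();
	}
}
```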
@@ -398,56 +487,40 @@ public class LLLocalDictionary implements LLDictionary {
 	}

 	private Flux<byte[]> getRangeKeysMulti(LLSnapshot snapshot, LLRange range) {
-		return Mono
-				.fromCallable(() -> {
-					var iter = db.newIterator(cfh, resolveSnapshot(snapshot));
+		return Flux
+				.<byte[]>push(sink -> {
+					//System.out.println(Thread.currentThread() + "\tkPreparing Read rande item");
+					try (var rocksIterator = db.newIterator(cfh, resolveSnapshot(snapshot))) {
 						if (range.hasMin()) {
-							iter.seek(range.getMin());
+							rocksIterator.seek(range.getMin());
 						} else {
-							iter.seekToFirst();
+							rocksIterator.seekToFirst();
 						}
-					return iter;
-				})
-				.subscribeOn(dbScheduler)
-				.flatMapMany(rocksIterator -> Flux
-						.<byte[]>fromIterable(() -> {
-							VariableWrapper<byte[]> nextKey = new VariableWrapper<>(null);
-							return new Iterator<>() {
-								@Override
-								public boolean hasNext() {
-									assert nextKey.var == null;
-									if (!rocksIterator.isValid()) {
-										nextKey.var = null;
-										return false;
-									}
-									var key = rocksIterator.key();
-									var value = rocksIterator.value();
+						byte[] key;
+						sink.onRequest(l -> {}/*System.out.println(Thread.currentThread() + "\tkRequested " + l)*/);
+						while (rocksIterator.isValid()) {
+							key = rocksIterator.key();
 							if (range.hasMax() && Arrays.compareUnsigned(key, range.getMax()) > 0) {
-								nextKey.var = null;
-								return false;
+								break;
 							}
-							nextKey.var = key;
-							return true;
+							//System.out.println(Thread.currentThread() + "\tkRead rande item");
+							sink.next(key);
+							rocksIterator.next();
 						}
-							@Override
-							public byte[] next() {
-								var key = nextKey.var;
-								assert key != null;
-								nextKey.var = null;
-								return key;
+					} finally {
+						//System.out.println(Thread.currentThread() + "\tkFinish Read rande item");
+						sink.complete();
 					}
-						};
+					//System.out.println(Thread.currentThread() + "\tkFinish end Read rande item");
 				})
-				.doFinally(signalType -> rocksIterator.close())
-				.subscribeOn(dbScheduler)
-		);
+				.subscribeOn(dbScheduler);
 	}

 	@Override
 	public Flux<Entry<byte[], byte[]>> setRange(LLRange range,
 			Flux<Entry<byte[], byte[]>> entries,
 			boolean getOldValues) {
+		return Flux.defer(() -> {
 			if (range.isAll()) {
 				return clear().thenMany(Flux.empty());
 			} else {
@@ -497,6 +570,7 @@ public class LLLocalDictionary implements LLDictionary {
 							}))
 					.onErrorMap(IOException::new);
 			}
+		});
 	}

 	public Mono<Void> clear() {
LLLocalLuceneIndex.java

@@ -319,6 +319,8 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {
 					.unicast()
 					.onBackpressureBuffer(new ArrayBlockingQueue<>(1000));

+			luceneScheduler.schedule(() -> {
+				try {
 					streamSearcher.search(indexSearcher,
 							query,
 							limit,
@@ -337,6 +339,12 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {
 							throw new EmissionException(result);
 						}
 					});
+					topKeysSink.tryEmitComplete();
+				} catch (IOException e) {
+					topKeysSink.tryEmitError(e);
+					totalHitsCountSink.tryEmitError(e);
+				}
+			});

 			return new LLSearchResult(totalHitsCountSink.asMono(), Flux.just(topKeysSink.asFlux()));
 		}).subscribeOn(luceneScheduler)
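Both search methods get the same treatment: the blocking streamSearcher.search(...) now runs inside luceneScheduler.schedule(...), and the sinks are terminated explicitly, tryEmitComplete on success and tryEmitError on IOException, so a failed search no longer leaves topKeysSink hanging. The blocking-to-sink bridge in miniature (blockingSearch is a hypothetical stand-in):

```java
import java.util.function.Consumer;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Sinks;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;

public class BlockingBridge {
	// Stand-in for streamSearcher.search(...): pushes results to a callback
	// and may throw (IOException in the real code).
	static void blockingSearch(Consumer<String> onHit) throws Exception {
		onHit.accept("doc1");
		onHit.accept("doc2");
	}

	public static void main(String[] args) {
		Scheduler luceneScheduler = Schedulers.newSingle("lucene");
		Sinks.Many<String> topKeysSink = Sinks.many().unicast().onBackpressureBuffer();

		luceneScheduler.schedule(() -> {
			try {
				blockingSearch(topKeysSink::tryEmitNext);
				topKeysSink.tryEmitComplete(); // success: close the stream
			} catch (Exception e) {
				topKeysSink.tryEmitError(e);   // failure: fail it instead of hanging
			}
		});

		Flux<String> results = topKeysSink.asFlux();
		results.doOnNext(System.out::println).blockLast();
		luceneScheduler.dispose();
	}
}
```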
@@ -374,6 +382,8 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {
 					.unicast()
 					.onBackpressureBuffer(new ArrayBlockingQueue<>(PagedStreamSearcher.MAX_ITEMS_PER_PAGE));

+			luceneScheduler.schedule(() -> {
+				try {
 					streamSearcher.search(indexSearcher,
 							query,
 							limit,
@@ -392,6 +402,12 @@ public class LLLocalLuceneIndex implements LLLuceneIndex {
 							throw new EmissionException(result);
 						}
 					});
+					topKeysSink.tryEmitComplete();
+				} catch (IOException e) {
+					topKeysSink.tryEmitError(e);
+					totalHitsCountSink.tryEmitError(e);
+				}
+			});

 			return new LLSearchResult(totalHitsCountSink.asMono(), Flux.just(topKeysSink.asFlux()));
 		}).subscribeOn(luceneScheduler)