Andrea Cavalli 2021-08-28 22:42:51 +02:00
parent 0c17af2ae5
commit 03b5876001
28 changed files with 963 additions and 567 deletions

View File

@@ -377,17 +377,17 @@
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-core</artifactId>
<version>3.4.8</version>
<version>3.4.9</version>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-tools</artifactId>
<version>3.4.8</version>
<version>3.4.9</version>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-test</artifactId>
<version>3.4.8</version>
<version>3.4.9</version>
</dependency>
<dependency>
<groupId>org.novasearch</groupId>

View File

@@ -14,5 +14,4 @@ public record DatabaseOptions(Map<String, String> extraFlags,
boolean allowMemoryMapping,
boolean allowNettyDirect,
boolean useNettyDirect,
boolean enableDbAssertionsWhenUsingAssertions,
int maxOpenFiles) {}

View File

@@ -71,23 +71,23 @@ public interface LLDictionary extends LLKeyValueDatabaseStructure {
return getMulti(snapshot, keys, false);
}
Flux<Entry<ByteBuf, ByteBuf>> putMulti(Flux<Entry<ByteBuf, ByteBuf>> entries, boolean getOldValues);
Flux<LLEntry> putMulti(Flux<LLEntry> entries, boolean getOldValues);
<X> Flux<ExtraKeyOperationResult<ByteBuf, X>> updateMulti(Flux<Tuple2<ByteBuf, X>> entries,
BiSerializationFunction<ByteBuf, X, ByteBuf> updateFunction);
Flux<Entry<ByteBuf, ByteBuf>> getRange(@Nullable LLSnapshot snapshot, Mono<LLRange> range, boolean existsAlmostCertainly);
Flux<LLEntry> getRange(@Nullable LLSnapshot snapshot, Mono<LLRange> range, boolean existsAlmostCertainly);
default Flux<Entry<ByteBuf, ByteBuf>> getRange(@Nullable LLSnapshot snapshot, Mono<LLRange> range) {
default Flux<LLEntry> getRange(@Nullable LLSnapshot snapshot, Mono<LLRange> range) {
return getRange(snapshot, range, false);
}
Flux<List<Entry<ByteBuf, ByteBuf>>> getRangeGrouped(@Nullable LLSnapshot snapshot,
Flux<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
Mono<LLRange> range,
int prefixLength,
boolean existsAlmostCertainly);
default Flux<List<Entry<ByteBuf, ByteBuf>>> getRangeGrouped(@Nullable LLSnapshot snapshot,
default Flux<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
Mono<LLRange> range,
int prefixLength) {
return getRangeGrouped(snapshot, range, prefixLength, false);
@@ -101,11 +101,11 @@ public interface LLDictionary extends LLKeyValueDatabaseStructure {
Flux<BadBlock> badBlocks(Mono<LLRange> range);
Mono<Void> setRange(Mono<LLRange> range, Flux<Entry<ByteBuf, ByteBuf>> entries);
Mono<Void> setRange(Mono<LLRange> range, Flux<LLEntry> entries);
default Mono<Void> replaceRange(Mono<LLRange> range,
boolean canKeysChange,
Function<Entry<ByteBuf, ByteBuf>, Mono<Entry<ByteBuf, ByteBuf>>> entriesReplacer,
Function<LLEntry, Mono<LLEntry>> entriesReplacer,
boolean existsAlmostCertainly) {
return Mono.defer(() -> {
if (canKeysChange) {
@@ -126,7 +126,7 @@ public interface LLDictionary extends LLKeyValueDatabaseStructure {
default Mono<Void> replaceRange(Mono<LLRange> range,
boolean canKeysChange,
Function<Entry<ByteBuf, ByteBuf>, Mono<Entry<ByteBuf, ByteBuf>>> entriesReplacer) {
Function<LLEntry, Mono<LLEntry>> entriesReplacer) {
return replaceRange(range, canKeysChange, entriesReplacer, false);
}
@@ -134,9 +134,9 @@ public interface LLDictionary extends LLKeyValueDatabaseStructure {
Mono<Long> sizeRange(@Nullable LLSnapshot snapshot, Mono<LLRange> range, boolean fast);
Mono<Entry<ByteBuf, ByteBuf>> getOne(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
Mono<LLEntry> getOne(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
Mono<ByteBuf> getOneKey(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
Mono<Entry<ByteBuf, ByteBuf>> removeOne(Mono<LLRange> range);
Mono<LLEntry> removeOne(Mono<LLRange> range);
}

View File

@@ -0,0 +1,74 @@
package it.cavallium.dbengine.database;
import io.netty.buffer.ByteBuf;
import io.netty.util.IllegalReferenceCountException;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
public class LLEntry {
private static final Logger logger = LoggerFactory.getLogger(LLEntry.class);
private final AtomicInteger refCnt = new AtomicInteger(1);
private final ByteBuf key;
private final ByteBuf value;
public LLEntry(ByteBuf key, ByteBuf value) {
try {
this.key = key.retain();
this.value = value.retain();
} finally {
key.release();
value.release();
}
}
public ByteBuf getKey() {
if (refCnt.get() <= 0) {
throw new IllegalReferenceCountException(refCnt.get());
}
return key;
}
public ByteBuf getValue() {
if (refCnt.get() <= 0) {
throw new IllegalReferenceCountException(refCnt.get());
}
return value;
}
public void retain() {
if (refCnt.getAndIncrement() <= 0) {
throw new IllegalReferenceCountException(refCnt.get(), 1);
}
key.retain();
value.retain();
}
public void release() {
if (refCnt.decrementAndGet() < 0) {
throw new IllegalReferenceCountException(refCnt.get(), -1);
}
if (key.refCnt() > 0) {
key.release();
}
if (value.refCnt() > 0) {
value.release();
}
}
public boolean isReleased() {
return refCnt.get() <= 0;
}
@Override
protected void finalize() throws Throwable {
if (refCnt.get() > 0) {
logger.warn(this.getClass().getName() + "::release has not been called!");
}
super.finalize();
}
}
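
A note on the contract above: the constructor retains each buffer and then releases the reference it was handed, so an LLEntry takes ownership of exactly one reference per buffer. A minimal usage sketch (illustrative only; alloc and process() are placeholder names, not part of this commit):

// The caller passes retained duplicates because LLEntry consumes one
// reference to each buffer; release() then balances the entry's own use.
ByteBuf key = alloc.buffer().writeBytes(new byte[] {1});
ByteBuf value = alloc.buffer().writeBytes(new byte[] {2});
LLEntry entry = new LLEntry(key.retain(), value.retain());
try {
    process(entry.getKey(), entry.getValue()); // valid while refCnt > 0
} finally {
    entry.release(); // releases the entry's reference to key and value
    key.release();   // drop the caller's own references
    value.release();
}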

View File

@@ -22,6 +22,7 @@ import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.function.Function;
import java.util.function.ToIntFunction;
@@ -42,13 +43,19 @@ import org.apache.lucene.search.SortedNumericSortField;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.RocksDB;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.warp.commonutils.functional.IOFunction;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuple3;
@SuppressWarnings("unused")
public class LLUtils {
private static final Logger logger = LoggerFactory.getLogger(LLUtils.class);
private static final byte[] RESPONSE_TRUE = new byte[]{1};
private static final byte[] RESPONSE_FALSE = new byte[]{0};
private static final byte[] RESPONSE_TRUE_BUF = new byte[]{1};
@@ -514,35 +521,154 @@ public class LLUtils {
}
public static <T> Mono<T> handleDiscard(Mono<T> mono) {
return mono.doOnDiscard(Map.Entry.class, e -> {
if (e.getKey() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
bb.release();
}
}
if (e.getValue() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
bb.release();
}
}
});
return mono
.doOnDiscard(Object.class, obj -> {
if (obj instanceof ReferenceCounted o) {
discardRefCounted(o);
} else if (obj instanceof Entry o) {
discardEntry(o);
} else if (obj instanceof Collection o) {
discardCollection(o);
} else if (obj instanceof Tuple3 o) {
discardTuple3(o);
} else if (obj instanceof Tuple2 o) {
discardTuple2(o);
} else if (obj instanceof LLEntry o) {
discardLLEntry(o);
} else if (obj instanceof LLRange o) {
discardLLRange(o);
} else if (obj instanceof Delta o) {
discardDelta(o);
} else if (obj instanceof Map o) {
discardMap(o);
}
});
// todo: check if the single object discard hook is more performant
/*
.doOnDiscard(ReferenceCounted.class, LLUtils::discardRefCounted)
.doOnDiscard(Map.Entry.class, LLUtils::discardEntry)
.doOnDiscard(Collection.class, LLUtils::discardCollection)
.doOnDiscard(Tuple2.class, LLUtils::discardTuple2)
.doOnDiscard(Tuple3.class, LLUtils::discardTuple3)
.doOnDiscard(LLEntry.class, LLUtils::discardLLEntry)
.doOnDiscard(LLRange.class, LLUtils::discardLLRange)
.doOnDiscard(Delta.class, LLUtils::discardDelta)
.doOnDiscard(Map.class, LLUtils::discardMap);
*/
}
public static <T> Flux<T> handleDiscard(Flux<T> mono) {
return mono
.doOnDiscard(Object.class, obj -> {
if (obj instanceof ReferenceCounted o) {
discardRefCounted(o);
} else if (obj instanceof Entry o) {
discardEntry(o);
} else if (obj instanceof Collection o) {
discardCollection(o);
} else if (obj instanceof Tuple3 o) {
discardTuple3(o);
} else if (obj instanceof Tuple2 o) {
discardTuple2(o);
} else if (obj instanceof LLEntry o) {
discardLLEntry(o);
} else if (obj instanceof LLRange o) {
discardLLRange(o);
} else if (obj instanceof Delta o) {
discardDelta(o);
} else if (obj instanceof Map o) {
discardMap(o);
} else {
System.err.println(obj.getClass().getName());
}
});
// todo: check if the single object discard hook is more performant
/*
.doOnDiscard(ReferenceCounted.class, LLUtils::discardRefCounted)
.doOnDiscard(Map.Entry.class, LLUtils::discardEntry)
.doOnDiscard(Collection.class, LLUtils::discardCollection);
.doOnDiscard(Collection.class, LLUtils::discardCollection)
.doOnDiscard(Tuple2.class, LLUtils::discardTuple2)
.doOnDiscard(Tuple3.class, LLUtils::discardTuple3)
.doOnDiscard(LLEntry.class, LLUtils::discardLLEntry)
.doOnDiscard(LLRange.class, LLUtils::discardLLRange)
.doOnDiscard(Delta.class, LLUtils::discardDelta)
.doOnDiscard(Map.class, LLUtils::discardMap);
*/
}
private static void discardLLEntry(LLEntry entry) {
logger.trace("Releasing discarded ByteBuf");
entry.release();
}
private static void discardLLRange(LLRange range) {
logger.trace("Releasing discarded ByteBuf");
range.release();
}
private static void discardEntry(Map.Entry<?, ?> e) {
if (e.getKey() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
if (e.getValue() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
}
private static void discardTuple2(Tuple2<?, ?> e) {
if (e.getT1() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
if (e.getT2() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
}
private static void discardTuple3(Tuple3<?, ?, ?> e) {
if (e.getT1() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
} else if (e.getT1() instanceof Optional opt) {
if (opt.isPresent() && opt.get() instanceof ByteBuf bb) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
if (e.getT2() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
} else if (e.getT1() instanceof Optional opt) {
if (opt.isPresent() && opt.get() instanceof ByteBuf bb) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
if (e.getT3() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
} else if (e.getT1() instanceof Optional opt) {
if (opt.isPresent() && opt.get() instanceof ByteBuf bb) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
@@ -550,6 +676,7 @@ public class LLUtils {
private static void discardRefCounted(ReferenceCounted referenceCounted) {
if (referenceCounted.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
referenceCounted.release();
}
}
@@ -558,16 +685,19 @@ public class LLUtils {
for (Object o : collection) {
if (o instanceof ReferenceCounted referenceCounted) {
if (referenceCounted.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
referenceCounted.release();
}
} else if (o instanceof Map.Entry entry) {
if (entry.getKey() instanceof ReferenceCounted bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
if (entry.getValue() instanceof ReferenceCounted bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
@@ -576,4 +706,42 @@ public class LLUtils {
}
}
}
private static void discardDelta(Delta<?> delta) {
if (delta.previous() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
if (delta.current() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
}
}
private static void discardMap(Map<?, ?> map) {
for (Entry<?, ?> entry : map.entrySet()) {
boolean hasByteBuf = false;
if (entry.getKey() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
hasByteBuf = true;
}
if (entry.getValue() instanceof ByteBuf bb) {
if (bb.refCnt() > 0) {
logger.trace("Releasing discarded ByteBuf");
bb.release();
}
hasByteBuf = true;
}
if (!hasByteBuf) {
break;
}
}
}
}
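
The hooks above exist because Reactor drops elements silently on filtering, cancellation, and backpressure overflow; for reference-counted payloads, the doOnDiscard callback is the last place where the count can still be balanced. A minimal sketch of the pattern (standard reactor-core operator; bufferFlux is a placeholder):

// Buffers dropped by filter() never reach the subscriber; the discard
// hook releases them so their reference counts still reach zero.
Flux<ByteBuf> safe = bufferFlux
    .filter(buf -> buf.readableBytes() > 0)
    .doOnDiscard(ByteBuf.class, buf -> {
        if (buf.refCnt() > 0) {
            buf.release();
        }
    });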

View File

@@ -14,7 +14,11 @@ public class DatabaseEmpty {
public static final Serializer<Nothing, ByteBuf> NOTHING_SERIALIZER = new Serializer<>() {
@Override
public @NotNull Nothing deserialize(@NotNull ByteBuf serialized) {
return NOTHING;
try {
return NOTHING;
} finally {
serialized.release();
}
}
@Override

View File

@@ -7,6 +7,7 @@ import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.ExtraKeyOperationResult;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLDictionaryResultType;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.UpdateReturnMode;
@@ -15,6 +16,7 @@ import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import java.nio.ByteBuffer;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;
@@ -28,6 +30,7 @@ import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.SynchronousSink;
import reactor.util.function.Tuple2;
import reactor.util.function.Tuple3;
import reactor.util.function.Tuples;
/**
@@ -87,6 +90,8 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
sink.next(Map.entry(key, value));
} catch (SerializationException ex) {
sink.error(ex);
} finally {
entry.release();
}
})
.collectMap(Entry::getKey, Entry::getValue, HashMap::new)
@@ -101,7 +106,7 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
.fromIterable(Collections.unmodifiableMap(value).entrySet())
.handle((entry, sink) -> {
try {
sink.next(Map.entry(this.toKey(serializeSuffix(entry.getKey())),
sink.next(new LLEntry(this.toKey(serializeSuffix(entry.getKey())),
valueSerializer.serialize(entry.getValue())));
} catch (SerializationException e) {
sink.error(e);
@@ -151,26 +156,18 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
@Override
public Mono<Void> putValue(T keySuffix, U value) {
return Mono
.using(
() -> serializeSuffix(keySuffix),
keySuffixBuf -> Mono
.using(
() -> toKey(keySuffixBuf.retain()),
keyBuf -> Mono
.using(() -> valueSerializer.serialize(value),
valueBuf -> dictionary
.put(LLUtils.lazyRetain(keyBuf),
LLUtils.lazyRetain(valueBuf),
LLDictionaryResultType.VOID)
.doOnNext(ReferenceCounted::release),
ReferenceCounted::release
),
ReferenceCounted::release
),
return Mono.using(() -> serializeSuffix(keySuffix),
keySuffixBuf -> Mono.using(() -> toKey(keySuffixBuf.retain()),
keyBuf -> Mono.using(() -> valueSerializer.serialize(value),
valueBuf -> dictionary
.put(LLUtils.lazyRetain(keyBuf), LLUtils.lazyRetain(valueBuf), LLDictionaryResultType.VOID)
.doOnNext(ReferenceCounted::release),
ReferenceCounted::release
),
ReferenceCounted::release
)
.then();
),
ReferenceCounted::release
).then();
}
@Override
@@ -340,35 +337,43 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
@Override
public Flux<Entry<T, Optional<U>>> getMulti(@Nullable CompositeSnapshot snapshot, Flux<T> keys, boolean existsAlmostCertainly) {
return dictionary
.getMulti(resolveSnapshot(snapshot), keys.flatMap(keySuffix -> Mono.fromCallable(() -> {
ByteBuf keySuffixBuf = serializeSuffix(keySuffix);
return dictionary.getMulti(resolveSnapshot(snapshot), keys.flatMap(keySuffix -> Mono.fromCallable(() -> {
ByteBuf keySuffixBuf = serializeSuffix(keySuffix);
try {
var key = toKey(keySuffixBuf.retain());
try {
return Tuples.of(keySuffix, toKey(keySuffixBuf.retain()));
return Tuples.of(keySuffix, key.retain());
} finally {
keySuffixBuf.release();
key.release();
}
})), existsAlmostCertainly)
.flatMapSequential(entry -> {
entry.getT2().release();
return Mono.fromCallable(() -> {
Optional<U> valueOpt;
if (entry.getT3().isPresent()) {
valueOpt = Optional.of(valueSerializer.deserialize(entry.getT3().get()));
} else {
valueOpt = Optional.empty();
} finally {
keySuffixBuf.release();
}
})), existsAlmostCertainly).flatMapSequential(entry -> {
entry.getT2().release();
return Mono.fromCallable(() -> {
Optional<U> valueOpt;
if (entry.getT3().isPresent()) {
var buf = entry.getT3().get();
try {
valueOpt = Optional.of(valueSerializer.deserialize(buf.retain()));
} finally {
buf.release();
}
return Map.entry(entry.getT1(), valueOpt);
});
} else {
valueOpt = Optional.empty();
}
return Map.entry(entry.getT1(), valueOpt);
});
}).transform(LLUtils::handleDiscard);
}
private Entry<ByteBuf, ByteBuf> serializeEntry(T key, U value) throws SerializationException {
private LLEntry serializeEntry(T key, U value) throws SerializationException {
ByteBuf serializedKey = toKey(serializeSuffix(key));
try {
ByteBuf serializedValue = valueSerializer.serialize(value);
try {
return Map.entry(serializedKey.retain(), serializedValue.retain());
return new LLEntry(serializedKey.retain(), serializedValue.retain());
} finally {
serializedValue.release();
}
@@ -380,20 +385,21 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
@Override
public Mono<Void> putMulti(Flux<Entry<T, U>> entries) {
var serializedEntries = entries
.flatMap(entry -> Mono
.fromCallable(() -> serializeEntry(entry.getKey(), entry.getValue()))
.doOnDiscard(Entry.class, uncastedEntry -> {
if (uncastedEntry.getKey() instanceof ByteBuf byteBuf) {
byteBuf.release();
}
if (uncastedEntry.getValue() instanceof ByteBuf byteBuf) {
byteBuf.release();
}
})
);
.<LLEntry>handle((entry, sink) -> {
try {
sink.next(serializeEntry(entry.getKey(), entry.getValue()));
} catch (SerializationException e) {
sink.error(e);
}
});
return dictionary
.putMulti(serializedEntries, false)
.then();
.then()
.doOnDiscard(LLEntry.class, entry -> {
if (!entry.isReleased()) {
entry.release();
}
});
}
@Override
@@ -455,21 +461,33 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
return dictionary
.getRange(resolveSnapshot(snapshot), rangeMono)
.<Entry<T, U>>handle((serializedEntry, sink) -> {
ByteBuf key = serializedEntry.getKey();
ByteBuf value = serializedEntry.getValue();
try {
sink.next(Map.entry(
deserializeSuffix(stripPrefix(serializedEntry.getKey(), false)),
valueSerializer.deserialize(serializedEntry.getValue())
));
ByteBuf keySuffix = stripPrefix(key.retain(), false);
try {
sink.next(Map.entry(deserializeSuffix(keySuffix.retain()),
valueSerializer.deserialize(value.retain())));
} finally {
keySuffix.release();
}
} catch (SerializationException e) {
sink.error(e);
} finally {
key.release();
value.release();
}
})
.doOnDiscard(Entry.class, uncastedEntry -> {
if (uncastedEntry.getKey() instanceof ByteBuf byteBuf) {
byteBuf.release();
if (byteBuf.refCnt() > 0) {
byteBuf.release();
}
}
if (uncastedEntry.getValue() instanceof ByteBuf byteBuf) {
byteBuf.release();
if (byteBuf.refCnt() > 0) {
byteBuf.release();
}
}
});
}
@@ -481,8 +499,22 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
b -> getAllValues(null),
b -> dictionary.setRange(rangeMono, entries.handle((entry, sink) -> {
try {
ByteBuf serializedValue = valueSerializer.serialize(entry.getValue());
sink.next(Map.entry(toKey(serializeSuffix(entry.getKey())), serializedValue));
ByteBuf serializedKeySuffix = serializeSuffix(entry.getKey());
try {
ByteBuf serializedKey = toKey(serializedKeySuffix);
try {
ByteBuf serializedValue = valueSerializer.serialize(entry.getValue());
try {
sink.next(new LLEntry(serializedKey.retain(), serializedValue.retain()));
} finally {
serializedValue.release();
}
} finally {
serializedKey.release();
}
} finally {
serializedKeySuffix.release();
}
} catch (SerializationException e) {
sink.error(e);
}
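
The nested Mono.using chains in this file all follow one ownership shape; a condensed sketch, with serializeKey(), serializeValue() and put() as hypothetical stand-ins for the real calls:

// Each using() scopes one ByteBuf to the subscription: the cleanup
// callback runs on completion, error and cancellation alike.
Mono<Void> write = Mono.using(
    () -> serializeKey(),
    key -> Mono.using(
        () -> serializeValue(),
        value -> put(key.retain(), value.retain()),
        ReferenceCounted::release   // always release the value buffer
    ),
    ReferenceCounted::release       // always release the key buffer
).then();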

View File

@@ -22,6 +22,7 @@ import java.util.Map.Entry;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;
// todo: implement optimized methods (which?)
public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implements DatabaseStageMap<T, U, US> {
@@ -393,25 +394,10 @@
return Mono.using(
() -> serializeSuffix(keySuffix),
keySuffixData -> {
Flux<ByteBuf> debuggingKeysFlux = Mono.<List<ByteBuf>>defer(() -> {
if (LLLocalDictionary.DEBUG_PREFIXES_WHEN_ASSERTIONS_ARE_ENABLED
&& this.subStageGetter.needsDebuggingKeyFlux()) {
return Flux
.using(
() -> toExtRange(keySuffixData.retain()),
extRangeBuf -> this.dictionary
.getRangeKeys(resolveSnapshot(snapshot), LLUtils.lazyRetainRange(extRangeBuf)),
LLRange::release
)
.collectList();
} else {
return Mono.just(List.of());
}
}).flatMapIterable(it -> it);
return Mono.using(
() -> toKeyWithoutExt(keySuffixData.retain()),
keyWithoutExt -> this.subStageGetter
.subStage(dictionary, snapshot, LLUtils.lazyRetain(keyWithoutExt), debuggingKeysFlux),
.subStage(dictionary, snapshot, LLUtils.lazyRetain(keyWithoutExt)),
ReferenceCounted::release
);
},
@@ -433,87 +419,43 @@
@Override
public Flux<Entry<T, US>> getAllStages(@Nullable CompositeSnapshot snapshot) {
return Flux
.defer(() -> {
if (LLLocalDictionary.DEBUG_PREFIXES_WHEN_ASSERTIONS_ARE_ENABLED && this.subStageGetter.needsDebuggingKeyFlux()) {
return dictionary
.getRangeKeysGrouped(resolveSnapshot(snapshot), rangeMono, keyPrefixLength + keySuffixLength)
.concatMap(rangeKeys -> Flux
.using(
() -> {
assert this.subStageGetter.isMultiKey() || rangeKeys.size() == 1;
ByteBuf groupKeyWithExt = rangeKeys.get(0).retainedSlice();
ByteBuf groupKeyWithoutExt = removeExtFromFullKey(groupKeyWithExt.retain(), true);
ByteBuf groupSuffix = this.stripPrefix(groupKeyWithoutExt.retain(), true);
return new GroupBuffers(groupKeyWithExt, groupKeyWithoutExt, groupSuffix);
},
buffers -> Mono
.fromCallable(() -> {
assert subStageKeysConsistency(buffers.groupKeyWithExt.readableBytes());
return null;
})
.then(this.subStageGetter
.subStage(dictionary,
snapshot,
LLUtils.lazyRetain(buffers.groupKeyWithoutExt),
Flux.fromIterable(rangeKeys).map(ByteBuf::retain)
)
.<Entry<T, US>>handle((us, sink) -> {
try {
var deserializedSuffix = this.deserializeSuffix(buffers.groupSuffix.retain());
sink.next(Map.entry(deserializedSuffix, us));
} catch (SerializationException ex) {
sink.error(ex);
}
})
),
buffers -> {
buffers.groupSuffix.release();
buffers.groupKeyWithoutExt.release();
buffers.groupKeyWithExt.release();
}
)
.doAfterTerminate(() -> {
for (ByteBuf rangeKey : rangeKeys) {
rangeKey.release();
}
})
)
.doOnDiscard(Collection.class, discardedCollection -> {
for (Object o : discardedCollection) {
if (o instanceof ByteBuf byteBuf) {
byteBuf.release();
.defer(() -> dictionary.getRangeKeyPrefixes(resolveSnapshot(snapshot), rangeMono, keyPrefixLength + keySuffixLength))
.flatMapSequential(groupKeyWithoutExt -> Mono
.using(
() -> {
try {
var groupSuffix = this.stripPrefix(groupKeyWithoutExt.retain(), true);
try {
assert subStageKeysConsistency(groupKeyWithoutExt.readableBytes() + keyExtLength);
return Tuples.of(groupKeyWithoutExt.retain(), groupSuffix.retain());
} finally {
groupSuffix.release();
}
} finally {
groupKeyWithoutExt.release();
}
});
} else {
return Flux
.defer(() -> dictionary.getRangeKeyPrefixes(resolveSnapshot(snapshot), rangeMono, keyPrefixLength + keySuffixLength))
.flatMapSequential(groupKeyWithoutExt -> Mono
.using(
() -> {
var groupSuffix = this.stripPrefix(groupKeyWithoutExt.retain(), true);
assert subStageKeysConsistency(groupKeyWithoutExt.readableBytes() + keyExtLength);
return groupSuffix;
},
groupSuffix -> this.subStageGetter
.subStage(dictionary,
snapshot,
LLUtils.lazyRetain(groupKeyWithoutExt),
Flux.empty()
)
.<Entry<T, US>>handle((us, sink) -> {
try {
sink.next(Map.entry(this.deserializeSuffix(groupSuffix.retain()), us));
} catch (SerializationException ex) {
sink.error(ex);
}
}),
ReferenceCounted::release
},
groupKeyWithoutExtAndGroupSuffix -> this.subStageGetter
.subStage(dictionary,
snapshot,
LLUtils.lazyRetain(groupKeyWithoutExtAndGroupSuffix.getT1())
)
);
}
});
.<Entry<T, US>>handle((us, sink) -> {
try {
sink.next(Map.entry(this.deserializeSuffix(groupKeyWithoutExtAndGroupSuffix.getT2().retain()), us));
} catch (SerializationException ex) {
sink.error(ex);
}
}),
entry -> {
entry.getT1().release();
entry.getT2().release();
}
)
)
.transform(LLUtils::handleDiscard);
}
private boolean subStageKeysConsistency(int totalKeyLength) {

View File

@@ -13,10 +13,7 @@ public interface SubStageGetter<U, US extends DatabaseStage<U>> {
Mono<US> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> prefixKey,
@Nullable Flux<ByteBuf> debuggingKeyFlux);
Mono<ByteBuf> prefixKey);
boolean isMultiKey();
boolean needsDebuggingKeyFlux();
}
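
The debugging-key verification removed from this interface and from the implementations below was gated on whether the JVM runs with assertions enabled. For reference, the detection idiom the deleted static initializers used, the same shape as the ASSERTIONS_ENABLED constant that the LLLocalDictionary changes rely on:

// The assignment inside assert executes only under -ea, so the constant
// records at class-load time whether assertions are enabled.
private static final boolean ASSERTIONS_ENABLED;
static {
    boolean enabled = false;
    //noinspection AssertWithSideEffects
    assert enabled = true;
    ASSERTIONS_ENABLED = enabled;
}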

View File

@@ -16,38 +16,25 @@ import reactor.core.publisher.Mono;
public class SubStageGetterHashMap<T, U, TH> implements
SubStageGetter<Map<T, U>, DatabaseMapDictionaryHashed<T, U, TH>> {
private static final boolean assertsEnabled;
static {
boolean assertsEnabledTmp = false;
//noinspection AssertWithSideEffects
assert assertsEnabledTmp = true;
//noinspection ConstantConditions
assertsEnabled = assertsEnabledTmp;
}
private final Serializer<T, ByteBuf> keySerializer;
private final Serializer<U, ByteBuf> valueSerializer;
private final Function<T, TH> keyHashFunction;
private final SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer;
private final boolean enableAssertionsWhenUsingAssertions;
public SubStageGetterHashMap(Serializer<T, ByteBuf> keySerializer,
Serializer<U, ByteBuf> valueSerializer,
Function<T, TH> keyHashFunction,
SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer,
boolean enableAssertionsWhenUsingAssertions) {
SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer) {
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
this.keyHashFunction = keyHashFunction;
this.keyHashSerializer = keyHashSerializer;
this.enableAssertionsWhenUsingAssertions = enableAssertionsWhenUsingAssertions;
}
@Override
public Mono<DatabaseMapDictionaryHashed<T, U, TH>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> prefixKeyMono,
@Nullable Flux<ByteBuf> debuggingKeysFlux) {
Mono<ByteBuf> prefixKeyMono) {
return Mono.usingWhen(
prefixKeyMono,
prefixKey -> Mono
@@ -59,24 +46,7 @@ public class SubStageGetterHashMap<T, U, TH> implements
keyHashFunction,
keyHashSerializer
)
)
.transform(mono -> {
if (debuggingKeysFlux != null) {
return debuggingKeysFlux.handle((key, sink) -> {
try {
if (key.readableBytes() != prefixKey.readableBytes() + getKeyHashBinaryLength()) {
sink.error(new IndexOutOfBoundsException());
} else {
sink.complete();
}
} finally {
key.release();
}
}).then(mono);
} else {
return mono;
}
}),
),
prefixKey -> Mono.fromRunnable(prefixKey::release)
);
}
@@ -86,11 +56,6 @@ public class SubStageGetterHashMap<T, U, TH> implements
return true;
}
@Override
public boolean needsDebuggingKeyFlux() {
return assertsEnabled && enableAssertionsWhenUsingAssertions;
}
public int getKeyHashBinaryLength() {
return keyHashSerializer.getSerializedBinaryLength();
}

View File

@@ -16,35 +16,22 @@ import reactor.core.publisher.Mono;
public class SubStageGetterHashSet<T, TH> implements
SubStageGetter<Map<T, Nothing>, DatabaseSetDictionaryHashed<T, TH>> {
private static final boolean assertsEnabled;
static {
boolean assertsEnabledTmp = false;
//noinspection AssertWithSideEffects
assert assertsEnabledTmp = true;
//noinspection ConstantConditions
assertsEnabled = assertsEnabledTmp;
}
private final Serializer<T, ByteBuf> keySerializer;
private final Function<T, TH> keyHashFunction;
private final SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer;
private final boolean enableAssertionsWhenUsingAssertions;
public SubStageGetterHashSet(Serializer<T, ByteBuf> keySerializer,
Function<T, TH> keyHashFunction,
SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer,
boolean enableAssertionsWhenUsingAssertions) {
SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer) {
this.keySerializer = keySerializer;
this.keyHashFunction = keyHashFunction;
this.keyHashSerializer = keyHashSerializer;
this.enableAssertionsWhenUsingAssertions = enableAssertionsWhenUsingAssertions;
}
@Override
public Mono<DatabaseSetDictionaryHashed<T, TH>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> prefixKeyMono,
@Nullable Flux<ByteBuf> debuggingKeysFlux) {
Mono<ByteBuf> prefixKeyMono) {
return Mono.usingWhen(prefixKeyMono,
prefixKey -> Mono
.fromSupplier(() -> DatabaseSetDictionaryHashed
@@ -54,24 +41,7 @@ public class SubStageGetterHashSet<T, TH> implements
keyHashFunction,
keyHashSerializer
)
)
.transform(mono -> {
if (debuggingKeysFlux != null) {
return debuggingKeysFlux.handle((key, sink) -> {
try {
if (key.readableBytes() != prefixKey.readableBytes() + getKeyHashBinaryLength()) {
sink.error(new IndexOutOfBoundsException());
} else {
sink.complete();
}
} finally {
key.release();
}
}).then(mono);
} else {
return mono;
}
}),
),
prefixKey -> Mono.fromRunnable(prefixKey::release)
);
}
@@ -81,11 +51,6 @@ public class SubStageGetterHashSet<T, TH> implements
return true;
}
@Override
public boolean needsDebuggingKeyFlux() {
return assertsEnabled && enableAssertionsWhenUsingAssertions;
}
public int getKeyHashBinaryLength() {
return keyHashSerializer.getSerializedBinaryLength();
}

View File

@@ -14,31 +14,19 @@ import reactor.core.publisher.Mono;
public class SubStageGetterMap<T, U> implements SubStageGetter<Map<T, U>, DatabaseMapDictionary<T, U>> {
private static final boolean assertsEnabled;
static {
boolean assertsEnabledTmp = false;
//noinspection AssertWithSideEffects
assert assertsEnabledTmp = true;
//noinspection ConstantConditions
assertsEnabled = assertsEnabledTmp;
}
private final SerializerFixedBinaryLength<T, ByteBuf> keySerializer;
private final Serializer<U, ByteBuf> valueSerializer;
private final boolean enableAssertionsWhenUsingAssertions;
public SubStageGetterMap(SerializerFixedBinaryLength<T, ByteBuf> keySerializer,
Serializer<U, ByteBuf> valueSerializer, boolean enableAssertionsWhenUsingAssertions) {
Serializer<U, ByteBuf> valueSerializer) {
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
this.enableAssertionsWhenUsingAssertions = enableAssertionsWhenUsingAssertions;
}
@Override
public Mono<DatabaseMapDictionary<T, U>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> prefixKeyMono,
@Nullable Flux<ByteBuf> debuggingKeysFlux) {
Mono<ByteBuf> prefixKeyMono) {
return Mono.usingWhen(prefixKeyMono,
prefixKey -> Mono
.fromSupplier(() -> DatabaseMapDictionary
@@ -47,24 +35,7 @@ public class SubStageGetterMap<T, U> implements SubStageGetter<Map<T, U>, Databa
keySerializer,
valueSerializer
)
)
.transform(mono -> {
if (debuggingKeysFlux != null) {
return debuggingKeysFlux.handle((key, sink) -> {
try {
if (key.readableBytes() != prefixKey.readableBytes() + getKeyBinaryLength()) {
sink.error(new IndexOutOfBoundsException());
} else {
sink.complete();
}
} finally {
key.release();
}
}).then(mono);
} else {
return mono;
}
}),
),
prefixKey -> Mono.fromRunnable(prefixKey::release)
);
}
@@ -74,11 +45,6 @@ public class SubStageGetterMap<T, U> implements SubStageGetter<Map<T, U>, Databa
return true;
}
@Override
public boolean needsDebuggingKeyFlux() {
return assertsEnabled && enableAssertionsWhenUsingAssertions;
}
public int getKeyBinaryLength() {
return keySerializer.getSerializedBinaryLength();
}

View File

@@ -14,28 +14,17 @@ import reactor.core.publisher.Mono;
public class SubStageGetterMapDeep<T, U, US extends DatabaseStage<U>> implements
SubStageGetter<Map<T, U>, DatabaseMapDictionaryDeep<T, U, US>> {
private static final boolean assertsEnabled;
static {
boolean assertsEnabledTmp = false;
//noinspection AssertWithSideEffects
assert assertsEnabledTmp = true;
//noinspection ConstantConditions
assertsEnabled = assertsEnabledTmp;
}
private final SubStageGetter<U, US> subStageGetter;
private final SerializerFixedBinaryLength<T, ByteBuf> keySerializer;
private final int keyExtLength;
private final boolean enableAssertionsWhenUsingAssertions;
public SubStageGetterMapDeep(SubStageGetter<U, US> subStageGetter,
SerializerFixedBinaryLength<T, ByteBuf> keySerializer,
int keyExtLength, boolean enableAssertionsWhenUsingAssertions) {
int keyExtLength) {
this.subStageGetter = subStageGetter;
this.keySerializer = keySerializer;
this.keyExtLength = keyExtLength;
assert keyExtConsistency();
this.enableAssertionsWhenUsingAssertions = enableAssertionsWhenUsingAssertions;
}
@@ -52,8 +41,7 @@
@Override
public Mono<DatabaseMapDictionaryDeep<T, U, US>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> prefixKeyMono,
@Nullable Flux<ByteBuf> debuggingKeysFlux) {
Mono<ByteBuf> prefixKeyMono) {
return Mono.usingWhen(prefixKeyMono,
prefixKey -> Mono
.fromSupplier(() -> DatabaseMapDictionaryDeep
@@ -63,24 +51,7 @@ public class SubStageGetterMapDeep<T, U, US extends DatabaseStage<U>> implements
subStageGetter,
keyExtLength
)
)
.transform(mono -> {
if (debuggingKeysFlux != null) {
return debuggingKeysFlux.handle((key, sink) -> {
try {
if (key.readableBytes() != prefixKey.readableBytes() + getKeyBinaryLength()) {
sink.error(new IndexOutOfBoundsException());
} else {
sink.complete();
}
} finally {
key.release();
}
}).then(mono);
} else {
return mono;
}
}),
),
prefixKey -> Mono.fromRunnable(prefixKey::release)
);
}
@@ -90,11 +61,6 @@ public class SubStageGetterMapDeep<T, U, US extends DatabaseStage<U>> implements
return true;
}
@Override
public boolean needsDebuggingKeyFlux() {
return assertsEnabled && enableAssertionsWhenUsingAssertions;
}
private Mono<Void> checkKeyFluxConsistency(ByteBuf prefixKey, List<ByteBuf> keys) {
return Mono
.fromCallable(() -> {

View File

@@ -14,49 +14,19 @@ import reactor.core.publisher.Mono;
public class SubStageGetterSet<T> implements SubStageGetter<Map<T, Nothing>, DatabaseSetDictionary<T>> {
private static final boolean assertsEnabled;
static {
boolean assertsEnabledTmp = false;
//noinspection AssertWithSideEffects
assert assertsEnabledTmp = true;
//noinspection ConstantConditions
assertsEnabled = assertsEnabledTmp;
}
private final SerializerFixedBinaryLength<T, ByteBuf> keySerializer;
private final boolean enableAssertionsWhenUsingAssertions;
public SubStageGetterSet(SerializerFixedBinaryLength<T, ByteBuf> keySerializer,
boolean enableAssertionsWhenUsingAssertions) {
public SubStageGetterSet(SerializerFixedBinaryLength<T, ByteBuf> keySerializer) {
this.keySerializer = keySerializer;
this.enableAssertionsWhenUsingAssertions = enableAssertionsWhenUsingAssertions;
}
@Override
public Mono<DatabaseSetDictionary<T>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> prefixKeyMono,
@Nullable Flux<ByteBuf> debuggingKeysFlux) {
Mono<ByteBuf> prefixKeyMono) {
return Mono.usingWhen(prefixKeyMono,
prefixKey -> Mono
.fromSupplier(() -> DatabaseSetDictionary.tail(dictionary, prefixKey.retain(), keySerializer))
.transform(mono -> {
if (debuggingKeysFlux != null) {
return debuggingKeysFlux.handle((key, sink) -> {
try {
if (key.readableBytes() != prefixKey.readableBytes() + getKeyBinaryLength()) {
sink.error(new IndexOutOfBoundsException());
} else {
sink.complete();
}
} finally {
key.release();
}
}).then(mono);
} else {
return mono;
}
}),
.fromSupplier(() -> DatabaseSetDictionary.tail(dictionary, prefixKey.retain(), keySerializer)),
prefixKey -> Mono.fromRunnable(prefixKey::release)
);
}
@@ -66,11 +36,6 @@ public class SubStageGetterSet<T> implements SubStageGetter<Map<T, Nothing>, Dat
return true;
}
@Override
public boolean needsDebuggingKeyFlux() {
return assertsEnabled && enableAssertionsWhenUsingAssertions;
}
public int getKeyBinaryLength() {
return keySerializer.getSerializedBinaryLength();
}

View File

@@ -13,15 +13,6 @@ import reactor.core.publisher.Mono;
public class SubStageGetterSingle<T> implements SubStageGetter<T, DatabaseStageEntry<T>> {
private static final boolean assertsEnabled;
static {
boolean assertsEnabledTmp = false;
//noinspection AssertWithSideEffects
assert assertsEnabledTmp = true;
//noinspection ConstantConditions
assertsEnabled = assertsEnabledTmp;
}
private final Serializer<T, ByteBuf> serializer;
public SubStageGetterSingle(Serializer<T, ByteBuf> serializer) {
@@ -31,29 +22,11 @@ public class SubStageGetterSingle<T> implements SubStageGetter<T, DatabaseStageE
@Override
public Mono<DatabaseStageEntry<T>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> keyPrefixMono,
@Nullable Flux<ByteBuf> debuggingKeysFlux) {
Mono<ByteBuf> keyPrefixMono) {
return Mono.usingWhen(
keyPrefixMono,
keyPrefix -> Mono
.<DatabaseStageEntry<T>>fromSupplier(() -> new DatabaseSingle<>(dictionary, keyPrefix.retain(), serializer))
.transform(mono -> {
if (debuggingKeysFlux != null) {
return debuggingKeysFlux.handle((key, sink) -> {
try {
if (needsDebuggingKeyFlux() && !LLUtils.equals(keyPrefix, key)) {
sink.error(new IndexOutOfBoundsException("Found more than one element!"));
} else {
sink.complete();
}
} finally {
key.release();
}
}).then(mono);
} else {
return mono;
}
}),
.<DatabaseStageEntry<T>>fromSupplier(() -> new DatabaseSingle<>(dictionary, keyPrefix.retain(), serializer)),
keyPrefix -> Mono.fromRunnable(keyPrefix::release)
);
}
@@ -63,9 +36,4 @@ public class SubStageGetterSingle<T> implements SubStageGetter<T, DatabaseStageE
return false;
}
@Override
public boolean needsDebuggingKeyFlux() {
return assertsEnabled;
}
}

View File

@@ -12,6 +12,7 @@ import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.ExtraKeyOperationResult;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLDictionaryResultType;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
@@ -88,7 +89,6 @@ public class LLLocalDictionary implements LLDictionary {
* now it's true to avoid crashes during iterations on completely corrupted files
*/
static final boolean VERIFY_CHECKSUMS_WHEN_NOT_NEEDED = true;
public static final boolean DEBUG_PREFIXES_WHEN_ASSERTIONS_ARE_ENABLED = true;
/**
* Default: true. Use false to debug problems with windowing.
*/
@@ -218,9 +218,9 @@ public class LLLocalDictionary implements LLDictionary {
return list;
}
private IntArrayList getLockIndicesEntries(List<Entry<ByteBuf, ByteBuf>> keys) {
private IntArrayList getLockIndicesEntries(List<LLEntry> keys) {
var list = new IntArrayList(keys.size());
for (Entry<ByteBuf, ByteBuf> key : keys) {
for (LLEntry key : keys) {
list.add(getLockIndex(key.getKey()));
}
return list;
@@ -290,7 +290,7 @@
throw new RocksDBException("Key buffer must be direct");
}
ByteBuffer keyNioBuffer = LLUtils.toDirect(key);
assert !databaseOptions.enableDbAssertionsWhenUsingAssertions() || keyNioBuffer.isDirect();
assert keyNioBuffer.isDirect();
// Create a direct result buffer because RocksDB works only with direct buffers
ByteBuf resultBuf = alloc.directBuffer(INITIAL_DIRECT_READ_BYTE_BUF_SIZE_BYTES);
try {
@@ -300,17 +300,15 @@
do {
// Create the result nio buffer to pass to RocksDB
resultNioBuf = resultBuf.nioBuffer(0, resultBuf.capacity());
if (databaseOptions.enableDbAssertionsWhenUsingAssertions()) {
assert keyNioBuffer.isDirect();
assert resultNioBuf.isDirect();
}
assert keyNioBuffer.isDirect();
assert resultNioBuf.isDirect();
valueSize = db.get(cfh,
Objects.requireNonNullElse(readOptions, EMPTY_READ_OPTIONS),
keyNioBuffer.position(0),
resultNioBuf
);
if (valueSize != RocksDB.NOT_FOUND) {
if (databaseOptions.enableDbAssertionsWhenUsingAssertions()) {
if (ASSERTIONS_ENABLED) {
// todo: check if position is equal to data that have been read
// todo: check if limit is equal to value size or data that have been read
assert valueSize <= 0 || resultNioBuf.limit() > 0;
@@ -408,11 +406,11 @@
throw new RocksDBException("Value buffer must be direct");
}
var keyNioBuffer = LLUtils.toDirect(key);
assert !databaseOptions.enableDbAssertionsWhenUsingAssertions() || keyNioBuffer.isDirect();
assert keyNioBuffer.isDirect();
var valueNioBuffer = LLUtils.toDirect(value);
assert !databaseOptions.enableDbAssertionsWhenUsingAssertions() || valueNioBuffer.isDirect();
assert valueNioBuffer.isDirect();
db.put(cfh, validWriteOptions, keyNioBuffer, valueNioBuffer);
} else {
db.put(cfh, validWriteOptions, LLUtils.toArray(key), LLUtils.toArray(value));
@@ -750,8 +748,7 @@
newData = updater.apply(prevDataToSendToUpdater == null
? null
: prevDataToSendToUpdater.retain());
assert !databaseOptions.enableDbAssertionsWhenUsingAssertions()
|| prevDataToSendToUpdater == null
assert prevDataToSendToUpdater == null
|| prevDataToSendToUpdater.readerIndex() == 0
|| !prevDataToSendToUpdater.isReadable();
} finally {
@@ -886,7 +883,7 @@
.single()
.map(LLUtils::booleanToResponseByteBuffer)
.doAfterTerminate(() -> {
assert !databaseOptions.enableDbAssertionsWhenUsingAssertions() || key.refCnt() > 0;
assert key.refCnt() > 0;
});
case PREVIOUS_VALUE -> Mono
.fromCallable(() -> {
@@ -912,7 +909,7 @@
try {
return dbGet(cfh, null, key.retain(), true);
} finally {
assert !databaseOptions.enableDbAssertionsWhenUsingAssertions() || key.refCnt() > 0;
assert key.refCnt() > 0;
}
}
} else {
@@ -1005,8 +1002,7 @@
.doAfterTerminate(() -> keyBufsWindow.forEach(ReferenceCounted::release));
}, 2) // Max concurrency is 2 to read data while preparing the next segment
.doOnDiscard(Entry.class, discardedEntry -> {
//noinspection unchecked
var entry = (Entry<ByteBuf, ByteBuf>) discardedEntry;
var entry = (LLEntry) discardedEntry;
entry.getKey().release();
entry.getValue().release();
})
@@ -1019,14 +1015,14 @@
}
@Override
public Flux<Entry<ByteBuf, ByteBuf>> putMulti(Flux<Entry<ByteBuf, ByteBuf>> entries, boolean getOldValues) {
public Flux<LLEntry> putMulti(Flux<LLEntry> entries, boolean getOldValues) {
return entries
.buffer(Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP))
.flatMapSequential(ew -> Mono
.using(
() -> ew,
entriesWindow -> Mono
.<Entry<ByteBuf, ByteBuf>>fromCallable(() -> {
.<LLEntry>fromCallable(() -> {
Iterable<StampedLock> locks;
ArrayList<Long> stamps;
if (updateMode == UpdateMode.ALLOW) {
@@ -1047,13 +1043,15 @@
MAX_WRITE_BATCH_SIZE,
BATCH_WRITE_OPTIONS
);
for (Entry<ByteBuf, ByteBuf> entry : entriesWindow) {
batch.put(cfh, entry.getKey().retain(), entry.getValue().retain());
for (LLEntry entry : entriesWindow) {
var k = entry.getKey().retain();
var v = entry.getValue().retain();
batch.put(cfh, k, v);
}
batch.writeToDbAndClose();
batch.close();
} else {
for (Entry<ByteBuf, ByteBuf> entry : entriesWindow) {
for (LLEntry entry : entriesWindow) {
db.put(cfh, EMPTY_WRITE_OPTIONS, entry.getKey().nioBuffer(), entry.getValue().nioBuffer());
}
}
@@ -1077,8 +1075,7 @@
return this
.getMulti(null, Flux
.fromIterable(entriesWindow)
.map(Entry::getKey)
.map(ByteBuf::retain)
.map(entry -> entry.getKey().retain())
.map(buf -> Tuples.of(obj, buf)), false)
.publishOn(dbScheduler)
.then(transformer);
@@ -1087,9 +1084,8 @@
}
}),
entriesWindow -> {
for (Entry<ByteBuf, ByteBuf> entry : entriesWindow) {
entry.getKey().release();
entry.getValue().release();
for (LLEntry entry : entriesWindow) {
entry.release();
}
}
), 2) // Max concurrency is 2 to read data while preparing the next segment
@@ -1244,7 +1240,7 @@
}
@Override
public Flux<Entry<ByteBuf, ByteBuf>> getRange(@Nullable LLSnapshot snapshot,
public Flux<LLEntry> getRange(@Nullable LLSnapshot snapshot,
Mono<LLRange> rangeMono,
boolean existsAlmostCertainly) {
return Flux.usingWhen(rangeMono,
@@ -1260,7 +1256,7 @@
}
@Override
public Flux<List<Entry<ByteBuf, ByteBuf>>> getRangeGrouped(@Nullable LLSnapshot snapshot,
public Flux<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
Mono<LLRange> rangeMono,
int prefixLength, boolean existsAlmostCertainly) {
return Flux.usingWhen(rangeMono,
@@ -1276,18 +1272,18 @@
);
}
private Flux<Entry<ByteBuf, ByteBuf>> getRangeSingle(LLSnapshot snapshot,
private Flux<LLEntry> getRangeSingle(LLSnapshot snapshot,
Mono<ByteBuf> keyMono,
boolean existsAlmostCertainly) {
return Flux.usingWhen(keyMono,
key -> this
.get(snapshot, Mono.just(key).map(ByteBuf::retain), existsAlmostCertainly)
.map(value -> Map.entry(key.retain(), value)),
.map(value -> new LLEntry(key.retain(), value)),
key -> Mono.fromRunnable(key::release)
);
).transform(LLUtils::handleDiscard);
}
private Flux<Entry<ByteBuf, ByteBuf>> getRangeMulti(LLSnapshot snapshot, Mono<LLRange> rangeMono) {
private Flux<LLEntry> getRangeMulti(LLSnapshot snapshot, Mono<LLRange> rangeMono) {
return Flux.usingWhen(rangeMono,
range -> Flux.using(
() -> new LLLocalEntryReactiveRocksIterator(db, alloc, cfh, range.retain(),
@@ -1299,7 +1295,7 @@
);
}
private Flux<List<Entry<ByteBuf, ByteBuf>>> getRangeMultiGrouped(LLSnapshot snapshot, Mono<LLRange> rangeMono, int prefixLength) {
private Flux<List<LLEntry>> getRangeMultiGrouped(LLSnapshot snapshot, Mono<LLRange> rangeMono, int prefixLength) {
return Flux.usingWhen(rangeMono,
range -> Flux.using(
() -> new LLLocalGroupedEntryReactiveRocksIterator(db, alloc, cfh, prefixLength, range.retain(),
@@ -1436,7 +1432,7 @@
}
@Override
public Mono<Void> setRange(Mono<LLRange> rangeMono, Flux<Entry<ByteBuf, ByteBuf>> entries) {
public Mono<Void> setRange(Mono<LLRange> rangeMono, Flux<LLEntry> entries) {
return Mono.usingWhen(rangeMono,
range -> {
if (USE_WINDOW_IN_SET_RANGE) {
@@ -1520,17 +1516,14 @@
)
.flatMap(keysWindowFlux -> keysWindowFlux
.collectList()
.doOnDiscard(Entry.class, discardedEntry -> {
//noinspection unchecked
var entry = (Entry<ByteBuf, ByteBuf>) discardedEntry;
entry.getKey().release();
entry.getValue().release();
})
.flatMap(entriesList -> Mono
.<Void>fromCallable(() -> {
try {
if (!USE_WRITE_BATCHES_IN_SET_RANGE) {
for (Entry<ByteBuf, ByteBuf> entry : entriesList) {
for (LLEntry entry : entriesList) {
assert !entry.isReleased();
assert entry.getKey().refCnt() > 0;
assert entry.getValue().refCnt() > 0;
db.put(cfh, EMPTY_WRITE_OPTIONS, entry.getKey().nioBuffer(), entry.getValue().nioBuffer());
}
} else if (USE_CAPPED_WRITE_BATCH_IN_SET_RANGE) {
@@ -1540,14 +1533,20 @@
MAX_WRITE_BATCH_SIZE,
BATCH_WRITE_OPTIONS
)) {
for (Entry<ByteBuf, ByteBuf> entry : entriesList) {
for (LLEntry entry : entriesList) {
assert !entry.isReleased();
assert entry.getKey().refCnt() > 0;
assert entry.getValue().refCnt() > 0;
batch.put(cfh, entry.getKey().retain(), entry.getValue().retain());
}
batch.writeToDbAndClose();
}
} else {
try (var batch = new WriteBatch(RESERVED_WRITE_BATCH_SIZE)) {
for (Entry<ByteBuf, ByteBuf> entry : entriesList) {
for (LLEntry entry : entriesList) {
assert !entry.isReleased();
assert entry.getKey().refCnt() > 0;
assert entry.getValue().refCnt() > 0;
batch.put(cfh, LLUtils.toArray(entry.getKey()), LLUtils.toArray(entry.getValue()));
}
db.write(EMPTY_WRITE_OPTIONS, batch);
@@ -1556,9 +1555,9 @@
}
return null;
} finally {
for (Entry<ByteBuf, ByteBuf> entry : entriesList) {
entry.getKey().release();
entry.getValue().release();
for (LLEntry entry : entriesList) {
assert !entry.isReleased();
entry.release();
}
}
})
@@ -1903,7 +1902,7 @@
}
@Override
public Mono<Entry<ByteBuf, ByteBuf>> getOne(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono) {
public Mono<LLEntry> getOne(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono) {
return Mono.usingWhen(rangeMono,
range -> runOnDb(() -> {
try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) {
@ -1940,7 +1939,7 @@ public class LLLocalDictionary implements LLDictionary {
try {
ByteBuf value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value);
try {
return Map.entry(key.retain(), value.retain());
return new LLEntry(key, value);
} finally {
value.release();
}
@@ -2123,7 +2122,7 @@
}
@Override
public Mono<Entry<ByteBuf, ByteBuf>> removeOne(Mono<LLRange> rangeMono) {
public Mono<LLEntry> removeOne(Mono<LLRange> rangeMono) {
return Mono.usingWhen(rangeMono,
range -> runOnDb(() -> {
try (var readOpts = new ReadOptions(getReadOptions(null))) {
@ -2161,7 +2160,7 @@ public class LLLocalDictionary implements LLDictionary {
ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key);
ByteBuf value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value);
dbDelete(cfh, null, key);
return Map.entry(key, value);
return new LLEntry(key, value);
} finally {
maxBound.release();
}
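
A condensed sketch of the plain-WriteBatch fallback that putMulti and setRange above share (org.rocksdb.WriteBatch API; entries, cfh and writeOptions are placeholders):

// Copying into byte[] detaches the batch from the ByteBuf lifecycle, so
// every entry can be released as soon as the batch has been written.
try (var batch = new WriteBatch()) {
    for (LLEntry entry : entries) {
        batch.put(cfh, LLUtils.toArray(entry.getKey()), LLUtils.toArray(entry.getValue()));
    }
    db.write(writeOptions, batch);
}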

View File

@@ -2,6 +2,7 @@ package it.cavallium.dbengine.database.disk;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLRange;
import java.util.Map;
import java.util.Map.Entry;
@@ -9,7 +10,7 @@ import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksIterator<Entry<ByteBuf, ByteBuf>> {
public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksIterator<LLEntry> {
public LLLocalEntryReactiveRocksIterator(RocksDB db,
ByteBufAllocator alloc,
@@ -22,7 +23,7 @@ public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksItera
}
@Override
public Entry<ByteBuf, ByteBuf> getEntry(ByteBuf key, ByteBuf value) {
return Map.entry(key, value);
public LLEntry getEntry(ByteBuf key, ByteBuf value) {
return new LLEntry(key, value);
}
}

View File

@@ -2,6 +2,7 @@ package it.cavallium.dbengine.database.disk;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLRange;
import java.util.Map;
import java.util.Map.Entry;
@@ -10,7 +11,7 @@ import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
public class LLLocalGroupedEntryReactiveRocksIterator extends
LLLocalGroupedReactiveRocksIterator<Entry<ByteBuf, ByteBuf>> {
LLLocalGroupedReactiveRocksIterator<LLEntry> {
public LLLocalGroupedEntryReactiveRocksIterator(RocksDB db, ByteBufAllocator alloc, ColumnFamilyHandle cfh,
int prefixLength,
@@ -22,7 +23,7 @@ public class LLLocalGroupedEntryReactiveRocksIterator extends
}
@Override
public Entry<ByteBuf, ByteBuf> getEntry(ByteBuf key, ByteBuf value) {
return Map.entry(key, value);
public LLEntry getEntry(ByteBuf key, ByteBuf value) {
return new LLEntry(key, value);
}
}

View File

@@ -469,12 +469,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
private ColumnFamilyHandle getCfh(byte[] columnName) throws RocksDBException {
ColumnFamilyHandle cfh = handles.get(Column.special(Column.toString(columnName)));
if (databaseOptions.enableDbAssertionsWhenUsingAssertions()) {
//noinspection RedundantIfStatement
if (!enableColumnsBug) {
assert Arrays.equals(cfh.getName(), columnName);
}
}
assert enableColumnsBug || Arrays.equals(cfh.getName(), columnName);
return cfh;
}

View File

@@ -7,10 +7,12 @@ import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.ExtraKeyOperationResult;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLDictionaryResultType;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.disk.ReleasableSlice;
import it.cavallium.dbengine.database.serialization.BiSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
@@ -245,7 +247,7 @@ public class LLMemoryDictionary implements LLDictionary {
}
@Override
public Flux<Entry<ByteBuf, ByteBuf>> putMulti(Flux<Entry<ByteBuf, ByteBuf>> entries, boolean getOldValues) {
public Flux<LLEntry> putMulti(Flux<LLEntry> entries, boolean getOldValues) {
return entries
.handle((entry, sink) -> {
var key = entry.getKey();
@@ -255,7 +257,7 @@
if (v == null || !getOldValues) {
sink.complete();
} else {
sink.next(Map.entry(key.retain(), kk(v)));
sink.next(new LLEntry(key.retain(), kk(v)));
}
} finally {
key.release();
@@ -271,7 +273,7 @@
}
@Override
public Flux<Entry<ByteBuf, ByteBuf>> getRange(@Nullable LLSnapshot snapshot,
public Flux<LLEntry> getRange(@Nullable LLSnapshot snapshot,
Mono<LLRange> rangeMono,
boolean existsAlmostCertainly) {
return Flux.usingWhen(rangeMono,
@@ -280,13 +282,13 @@
return Mono.fromCallable(() -> {
var element = snapshots.get(resolveSnapshot(snapshot))
.get(k(range.getSingle()));
return Map.entry(range.getSingle().retain(), kk(element));
return new LLEntry(range.getSingle().retain(), kk(element));
}).flux();
} else {
return Mono
.fromCallable(() -> mapSlice(snapshot, range))
.flatMapMany(map -> Flux.fromIterable(map.entrySet()))
.map(entry -> Map.entry(kk(entry.getKey()), kk(entry.getValue())));
.map(entry -> new LLEntry(kk(entry.getKey()), kk(entry.getValue())));
}
},
range -> Mono.fromRunnable(range::release)
@@ -294,7 +296,7 @@
}
@Override
public Flux<List<Entry<ByteBuf, ByteBuf>>> getRangeGrouped(@Nullable LLSnapshot snapshot,
public Flux<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
Mono<LLRange> rangeMono,
int prefixLength,
boolean existsAlmostCertainly) {
@@ -333,8 +335,16 @@
@Override
public Flux<ByteBuf> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono, int prefixLength) {
return getRangeKeys(snapshot, rangeMono)
.distinctUntilChanged(k -> k.slice(k.readerIndex(), prefixLength), LLUtils::equals)
.map(k -> k.slice(k.readerIndex(), prefixLength));
.distinctUntilChanged(k -> k.slice(k.readerIndex(), prefixLength), (a, b) -> {
if (LLUtils.equals(a, b)) {
b.release();
return true;
} else {
return false;
}
})
.map(k -> k.slice(k.readerIndex(), prefixLength))
.transform(LLUtils::handleDiscard);
}
@Override
@@ -343,7 +353,7 @@
}
@Override
public Mono<Void> setRange(Mono<LLRange> rangeMono, Flux<Entry<ByteBuf, ByteBuf>> entries) {
public Mono<Void> setRange(Mono<LLRange> rangeMono, Flux<LLEntry> entries) {
return Mono.error(new UnsupportedOperationException("Not implemented"));
}
@@ -361,7 +371,7 @@
}
@Override
public Mono<Entry<ByteBuf, ByteBuf>> getOne(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono) {
public Mono<LLEntry> getOne(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono) {
return Mono.error(new UnsupportedOperationException("Not implemented"));
}
@@ -371,7 +381,7 @@
}
@Override
public Mono<Entry<ByteBuf, ByteBuf>> removeOne(Mono<LLRange> rangeMono) {
public Mono<LLEntry> removeOne(Mono<LLRange> rangeMono) {
return Mono.error(new UnsupportedOperationException("Not implemented"));
}
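
Note: this file's hunks replace java.util.Map.Entry pairs with the dedicated LLEntry type. A minimal sketch of the idea, assuming an LLEntry roughly shaped like the calls above — the (ByteBuf, ByteBuf) constructor and getKey() appear in this diff, while getValue() and release() are illustrative assumptions:

import io.netty.buffer.ByteBuf;

// Hedged sketch only: the real it.cavallium.dbengine.database.LLEntry is not
// shown in this commit and may differ. Unlike Map.entry, a dedicated pair type
// can own its reference-counted buffers and release both in one call.
public final class LLEntry {
    private final ByteBuf key;   // owned by this entry
    private final ByteBuf value; // owned by this entry

    public LLEntry(ByteBuf key, ByteBuf value) {
        this.key = key;
        this.value = value;
    }

    public ByteBuf getKey() {
        return key;
    }

    public ByteBuf getValue() {
        return value;
    }

    // Decrements both refcounts exactly once when the entry is discarded.
    public void release() {
        key.release();
        value.release();
    }
}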

View File

@ -1,9 +1,14 @@
package it.cavallium.dbengine;
import static org.junit.jupiter.api.Assertions.assertEquals;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.PoolArenaMetric;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.UnpooledByteBufAllocator;
import it.cavallium.dbengine.database.Column;
import it.cavallium.dbengine.database.LLDatabaseConnection;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLKeyValueDatabase;
import it.cavallium.dbengine.database.UpdateMode;
@ -35,47 +40,129 @@ import reactor.core.scheduler.Schedulers;
public class DbTestUtils {
public static final ByteBufAllocator ALLOCATOR = new PooledByteBufAllocator(true);
private volatile static ByteBufAllocator POOLED_ALLOCATOR = null;
public static synchronized ByteBufAllocator getUncachedAllocator() {
try {
ensureNoLeaks(POOLED_ALLOCATOR);
} catch (Throwable ex) {
POOLED_ALLOCATOR = null;
}
if (POOLED_ALLOCATOR == null) {
POOLED_ALLOCATOR = new PooledByteBufAllocator(false, 1, 0, 8192, 11, 0, 0, true);
}
return POOLED_ALLOCATOR;
}
public static synchronized ByteBufAllocator getUncachedAllocatorUnsafe() {
return POOLED_ALLOCATOR;
}
public static final AtomicInteger dbId = new AtomicInteger(0);
@SuppressWarnings("SameParameterValue")
private static int getActiveBuffers(ByteBufAllocator allocator) {
int directActive = 0, directAlloc = 0, directDealloc = 0;
if (allocator instanceof PooledByteBufAllocator alloc) {
for (PoolArenaMetric arena : alloc.directArenas()) {
directActive += arena.numActiveAllocations();
directAlloc += arena.numAllocations();
directDealloc += arena.numDeallocations();
}
} else if (allocator instanceof UnpooledByteBufAllocator alloc) {
directActive += alloc.metric().usedDirectMemory();
} else {
throw new UnsupportedOperationException();
}
System.out.println("directActive " + directActive + " directAlloc " + directAlloc + " directDealloc " + directDealloc);
return directActive;
}
@SuppressWarnings("SameParameterValue")
private static int getActiveHeapBuffers(ByteBufAllocator allocator) {
int heapActive = 0, heapAlloc = 0, heapDealloc = 0;
if (allocator instanceof PooledByteBufAllocator alloc) {
for (PoolArenaMetric arena : alloc.heapArenas()) {
heapActive += arena.numActiveAllocations();
heapAlloc += arena.numAllocations();
heapDealloc += arena.numDeallocations();
}
} else if (allocator instanceof UnpooledByteBufAllocator alloc) {
heapActive += alloc.metric().usedHeapMemory();
} else {
throw new UnsupportedOperationException();
}
System.out.println("heapActive " + heapActive + " heapAlloc " + heapAlloc + " heapDealloc " + heapDealloc);
return heapActive;
}
public static <U> Flux<U> tempDb(Function<LLKeyValueDatabase, Publisher<U>> action) {
var wrkspcPath = Path.of("/tmp/.cache/tempdb-" + dbId.incrementAndGet() + "/");
return Flux.usingWhen(Mono
.<LLKeyValueDatabase>fromCallable(() -> {
if (Files.exists(wrkspcPath)) {
Files.walk(wrkspcPath).sorted(Comparator.reverseOrder()).forEach(file -> {
try {
Files.delete(file);
} catch (IOException ex) {
throw new CompletionException(ex);
}
});
}
Files.createDirectories(wrkspcPath);
return null;
})
.subscribeOn(Schedulers.boundedElastic())
.then(new LLLocalDatabaseConnection(DbTestUtils.ALLOCATOR, wrkspcPath).connect())
.flatMap(conn -> conn.getDatabase("testdb",
List.of(Column.dictionary("testmap"), Column.special("ints"), Column.special("longs")),
new DatabaseOptions(Map.of(), true, false, true, false, true, true, true, true, -1)
)),
action,
db -> db.close().then(Mono.fromCallable(() -> {
if (Files.exists(wrkspcPath)) {
Files.walk(wrkspcPath).sorted(Comparator.reverseOrder()).forEach(file -> {
try {
Files.delete(file);
} catch (IOException ex) {
throw new CompletionException(ex);
}
});
}
return null;
}).subscribeOn(Schedulers.boundedElastic()))
return Flux.usingWhen(openTempDb(),
tempDb -> action.apply(tempDb.db()),
DbTestUtils::closeTempDb
);
}
public static record TempDb(ByteBufAllocator allocator, LLDatabaseConnection connection, LLKeyValueDatabase db,
Path path) {}
public static Mono<TempDb> openTempDb() {
return Mono.defer(() -> {
var wrkspcPath = Path.of("/tmp/.cache/tempdb-" + dbId.incrementAndGet() + "/");
var alloc = getUncachedAllocator();
return Mono
.<LLKeyValueDatabase>fromCallable(() -> {
if (Files.exists(wrkspcPath)) {
Files.walk(wrkspcPath).sorted(Comparator.reverseOrder()).forEach(file -> {
try {
Files.delete(file);
} catch (IOException ex) {
throw new CompletionException(ex);
}
});
}
Files.createDirectories(wrkspcPath);
return null;
})
.subscribeOn(Schedulers.boundedElastic())
.then(new LLLocalDatabaseConnection(alloc, wrkspcPath).connect())
.flatMap(conn -> conn
.getDatabase("testdb",
List.of(Column.dictionary("testmap"), Column.special("ints"), Column.special("longs")),
new DatabaseOptions(Map.of(), true, false, true, false, true, true, true, -1)
)
.map(db -> new TempDb(alloc, conn, db, wrkspcPath))
);
});
}
public static Mono<Void> closeTempDb(TempDb tempDb) {
return tempDb.db().close().then(tempDb.connection().disconnect()).then(Mono.fromCallable(() -> {
ensureNoLeaks(tempDb.allocator());
if (tempDb.allocator() instanceof PooledByteBufAllocator pooledByteBufAllocator) {
pooledByteBufAllocator.trimCurrentThreadCache();
pooledByteBufAllocator.freeThreadLocalCache();
}
if (Files.exists(tempDb.path())) {
Files.walk(tempDb.path()).sorted(Comparator.reverseOrder()).forEach(file -> {
try {
Files.delete(file);
} catch (IOException ex) {
throw new CompletionException(ex);
}
});
}
return null;
}).subscribeOn(Schedulers.boundedElastic())).then();
}
public static void ensureNoLeaks(ByteBufAllocator allocator) {
if (allocator != null) {
assertEquals(0, getActiveBuffers(allocator));
assertEquals(0, getActiveHeapBuffers(allocator));
}
}
public static Mono<? extends LLDictionary> tempDictionary(LLKeyValueDatabase database, UpdateMode updateMode) {
return tempDictionary(database, "testmap", updateMode);
}
@ -98,13 +185,13 @@ public class DbTestUtils {
int keyBytes) {
if (dbType == DbType.MAP) {
return DatabaseMapDictionary.simple(dictionary,
SerializerFixedBinaryLength.utf8(DbTestUtils.ALLOCATOR, keyBytes),
Serializer.utf8(DbTestUtils.ALLOCATOR)
SerializerFixedBinaryLength.utf8(dictionary.getAllocator(), keyBytes),
Serializer.utf8(dictionary.getAllocator())
);
} else {
return DatabaseMapDictionaryHashed.simple(dictionary,
Serializer.utf8(DbTestUtils.ALLOCATOR),
Serializer.utf8(DbTestUtils.ALLOCATOR),
Serializer.utf8(dictionary.getAllocator()),
Serializer.utf8(dictionary.getAllocator()),
s -> (short) s.hashCode(),
new SerializerFixedBinaryLength<>() {
@Override
@ -126,7 +213,7 @@ public class DbTestUtils {
@Override
public @NotNull ByteBuf serialize(@NotNull Short deserialized) {
var out = DbTestUtils.ALLOCATOR.directBuffer(Short.BYTES);
var out = dictionary.getAllocator().directBuffer(Short.BYTES);
try {
out.writeShort(deserialized);
out.writerIndex(Short.BYTES);
@ -140,33 +227,31 @@ public class DbTestUtils {
}
}
public static <T, U> DatabaseMapDictionaryDeep<String, Map<String, String>,
public static DatabaseMapDictionaryDeep<String, Map<String, String>,
DatabaseMapDictionary<String, String>> tempDatabaseMapDictionaryDeepMap(
LLDictionary dictionary,
int key1Bytes,
int key2Bytes) {
return DatabaseMapDictionaryDeep.deepTail(dictionary,
SerializerFixedBinaryLength.utf8(DbTestUtils.ALLOCATOR, key1Bytes),
SerializerFixedBinaryLength.utf8(dictionary.getAllocator(), key1Bytes),
key2Bytes,
new SubStageGetterMap<>(SerializerFixedBinaryLength.utf8(DbTestUtils.ALLOCATOR, key2Bytes),
Serializer.utf8(DbTestUtils.ALLOCATOR),
true
new SubStageGetterMap<>(SerializerFixedBinaryLength.utf8(dictionary.getAllocator(), key2Bytes),
Serializer.utf8(dictionary.getAllocator())
)
);
}
public static <T, U> DatabaseMapDictionaryDeep<String, Map<String, String>,
public static DatabaseMapDictionaryDeep<String, Map<String, String>,
DatabaseMapDictionaryHashed<String, String, Integer>> tempDatabaseMapDictionaryDeepMapHashMap(
LLDictionary dictionary,
int key1Bytes) {
return DatabaseMapDictionaryDeep.deepTail(dictionary,
SerializerFixedBinaryLength.utf8(DbTestUtils.ALLOCATOR, key1Bytes),
SerializerFixedBinaryLength.utf8(dictionary.getAllocator(), key1Bytes),
Integer.BYTES,
new SubStageGetterHashMap<>(Serializer.utf8(DbTestUtils.ALLOCATOR),
Serializer.utf8(DbTestUtils.ALLOCATOR),
new SubStageGetterHashMap<>(Serializer.utf8(dictionary.getAllocator()),
Serializer.utf8(dictionary.getAllocator()),
String::hashCode,
SerializerFixedBinaryLength.intSerializer(DbTestUtils.ALLOCATOR),
true
SerializerFixedBinaryLength.intSerializer(dictionary.getAllocator())
)
);
}
@ -174,10 +259,10 @@ public class DbTestUtils {
public static <T, U> DatabaseMapDictionaryHashed<String, String, Integer> tempDatabaseMapDictionaryHashMap(
LLDictionary dictionary) {
return DatabaseMapDictionaryHashed.simple(dictionary,
Serializer.utf8(DbTestUtils.ALLOCATOR),
Serializer.utf8(DbTestUtils.ALLOCATOR),
Serializer.utf8(dictionary.getAllocator()),
Serializer.utf8(dictionary.getAllocator()),
String::hashCode,
SerializerFixedBinaryLength.intSerializer(DbTestUtils.ALLOCATOR)
SerializerFixedBinaryLength.intSerializer(dictionary.getAllocator())
);
}
}
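
Note: openTempDb, closeTempDb and ensureNoLeaks above give tests a checked allocator lifecycle: getUncachedAllocator() hands out a cache-free pooled allocator so the arena counters reflect live buffers, and closeTempDb asserts those counters are back to zero. A minimal usage sketch of these helpers (the new TestLLDictionaryLeaks file below drives the same calls from @BeforeEach/@AfterEach):

// Sketch: open a throwaway database, use it, and fail the test on buffer leaks.
TempDb tempDb = Objects.requireNonNull(DbTestUtils.openTempDb().block(), "TempDB");
try {
    LLKeyValueDatabase db = tempDb.db();
    // ... exercise the database ...
} finally {
    // Closes db and connection, trims pooled thread caches, deletes the
    // temp directory, and asserts no pooled buffers are still active.
    DbTestUtils.closeTempDb(tempDb).block();
}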

View File

@ -75,7 +75,7 @@ public class OldDatabaseTests {
.map(dictionary -> DatabaseMapDictionaryDeep.deepTail(dictionary,
new FixedStringSerializer(3),
4,
new SubStageGetterMap<>(new FixedStringSerializer(4), Serializer.noop(), true)
new SubStageGetterMap<>(new FixedStringSerializer(4), Serializer.noop())
))
.flatMap(collection -> Flux
.fromIterable(originalSuperKeys)
@ -135,7 +135,7 @@ public class OldDatabaseTests {
.then(new LLLocalDatabaseConnection(PooledByteBufAllocator.DEFAULT, wrkspcPath).connect())
.flatMap(conn -> conn.getDatabase("testdb",
List.of(Column.dictionary("testmap")),
new DatabaseOptions(Map.of(), true, false, true, false, true, true, true, true, -1)
new DatabaseOptions(Map.of(), true, false, true, false, true, true, true, -1)
));
}
@ -159,14 +159,14 @@ public class OldDatabaseTests {
.map(dictionary -> DatabaseMapDictionaryDeep.deepTail(dictionary,
new FixedStringSerializer(3),
4,
new SubStageGetterMap<>(new FixedStringSerializer(4), Serializer.noop(), true)
new SubStageGetterMap<>(new FixedStringSerializer(4), Serializer.noop())
)),
db
.getDictionary("testmap", UpdateMode.DISALLOW)
.map(dictionary -> DatabaseMapDictionaryDeep.deepTail(dictionary,
new FixedStringSerializer(6),
7,
new SubStageGetterMap<>(new FixedStringSerializer(7), Serializer.noop(), true)
new SubStageGetterMap<>(new FixedStringSerializer(7), Serializer.noop())
))
)
.single()

View File

@ -1,5 +1,8 @@
package it.cavallium.dbengine;
import static it.cavallium.dbengine.DbTestUtils.ensureNoLeaks;
import static it.cavallium.dbengine.DbTestUtils.getUncachedAllocator;
import static it.cavallium.dbengine.DbTestUtils.getUncachedAllocatorUnsafe;
import static it.cavallium.dbengine.DbTestUtils.tempDb;
import static it.cavallium.dbengine.DbTestUtils.tempDictionary;
@ -7,6 +10,8 @@ import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.UpdateMode;
import java.util.Arrays;
import java.util.stream.Stream;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@ -18,6 +23,16 @@ public class TestDictionary {
return Arrays.stream(UpdateMode.values()).map(Arguments::of);
}
@BeforeEach
public void beforeEach() {
ensureNoLeaks(getUncachedAllocator());
}
@AfterEach
public void afterEach() {
ensureNoLeaks(getUncachedAllocatorUnsafe());
}
@ParameterizedTest
@MethodSource("provideArgumentsCreate")
public void testCreate(UpdateMode updateMode) {

View File

@ -2,6 +2,7 @@ package it.cavallium.dbengine;
import static it.cavallium.dbengine.DbTestUtils.*;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateMode;
import java.util.Arrays;
import java.util.List;
@ -13,6 +14,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@ -74,6 +77,16 @@ public class TestDictionaryMap {
.map(fullTuple -> Arguments.of(fullTuple.getT1(), fullTuple.getT2(), fullTuple.getT3(), fullTuple.getT4(), fullTuple.getT5()));
}
@BeforeEach
public void beforeEach() {
ensureNoLeaks(getUncachedAllocator());
}
@AfterEach
public void afterEach() {
ensureNoLeaks(getUncachedAllocatorUnsafe());
}
@ParameterizedTest
@MethodSource("provideArgumentsPut")
public void testPut(DbType dbType, UpdateMode updateMode, String key, String value, boolean shouldFail) {
@ -338,6 +351,7 @@ public class TestDictionaryMap {
)
.filter(k -> k.getValue().isPresent())
.map(k -> Map.entry(k.getKey(), k.getValue().orElseThrow()))
.transform(LLUtils::handleDiscard)
));
if (shouldFail) {
stpVer.verifyError();
@ -390,6 +404,7 @@ public class TestDictionaryMap {
)
.doAfterTerminate(map::release)
)
.transform(LLUtils::handleDiscard)
));
if (shouldFail) {
stpVer.verifyError();
@ -527,6 +542,7 @@ public class TestDictionaryMap {
)
.doAfterTerminate(map::release)
)
.transform(LLUtils::handleDiscard)
));
if (shouldFail) {
stpVer.verifyError();
@ -555,6 +571,7 @@ public class TestDictionaryMap {
)
.doAfterTerminate(map::release)
)
.transform(LLUtils::handleDiscard)
));
if (shouldFail) {
stpVer.verifyError();
@ -588,6 +605,7 @@ public class TestDictionaryMap {
)
.doAfterTerminate(map::release)
)
.transform(LLUtils::handleDiscard)
));
if (shouldFail) {
stpVer.verifyError();
@ -616,6 +634,7 @@ public class TestDictionaryMap {
.doAfterTerminate(map::release)
)
.flatMap(val -> shouldFail ? Mono.empty() : Mono.just(val))
.transform(LLUtils::handleDiscard)
));
if (shouldFail) {
stpVer.verifyError();
@ -627,7 +646,6 @@ public class TestDictionaryMap {
@ParameterizedTest
@MethodSource("provideArgumentsPutMulti")
public void testPutMultiClear(DbType dbType, UpdateMode updateMode, Map<String, String> entries, boolean shouldFail) {
var remainingEntries = new ConcurrentHashMap<Entry<String, String>, Boolean>().keySet(true);
Step<Boolean> stpVer = StepVerifier
.create(tempDb(db -> tempDictionary(db, updateMode)
.map(dict -> tempDatabaseMapDictionaryMap(dict, dbType, 5))
@ -642,6 +660,7 @@ public class TestDictionaryMap {
.doAfterTerminate(map::release)
)
.flatMap(val -> shouldFail ? Mono.empty() : Mono.just(val))
.transform(LLUtils::handleDiscard)
));
if (shouldFail) {
stpVer.verifyError();
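
Note: the .transform(LLUtils::handleDiscard) calls added in this file attach cleanup for elements Reactor drops without emitting them (filtered-out items, cancelled prefetch). The body of LLUtils.handleDiscard is not part of this commit; a typical discard hook, under that assumption, looks like:

import io.netty.buffer.ByteBuf;
import io.netty.util.ReferenceCountUtil;
import java.util.Map;
import reactor.core.publisher.Flux;

// Hedged sketch, not the actual LLUtils.handleDiscard (the real method is also
// applied to Mono elsewhere in this commit). doOnDiscard releases
// reference-counted elements that would otherwise leak when dropped.
public static <T> Flux<T> handleDiscard(Flux<T> flux) {
    return flux
            .doOnDiscard(ByteBuf.class, ReferenceCountUtil::safeRelease)
            .doOnDiscard(Map.Entry.class, entry -> {
                // safeRelease is a no-op for non-refcounted keys/values
                ReferenceCountUtil.safeRelease(entry.getKey());
                ReferenceCountUtil.safeRelease(entry.getValue());
            });
}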

View File

@ -1,9 +1,13 @@
package it.cavallium.dbengine;
import static it.cavallium.dbengine.DbTestUtils.ensureNoLeaks;
import static it.cavallium.dbengine.DbTestUtils.getUncachedAllocator;
import static it.cavallium.dbengine.DbTestUtils.getUncachedAllocatorUnsafe;
import static it.cavallium.dbengine.DbTestUtils.tempDatabaseMapDictionaryDeepMap;
import static it.cavallium.dbengine.DbTestUtils.tempDb;
import static it.cavallium.dbengine.DbTestUtils.tempDictionary;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateMode;
import java.util.Arrays;
import java.util.Map;
@ -13,6 +17,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@ -140,6 +146,16 @@ public class TestDictionaryMapDeep {
.toStream();
}
@BeforeEach
public void beforeEach() {
ensureNoLeaks(getUncachedAllocator());
}
@AfterEach
public void afterEach() {
ensureNoLeaks(getUncachedAllocatorUnsafe());
}
@ParameterizedTest
@MethodSource("provideArgumentsSet")
public void testSetValueGetValue(UpdateMode updateMode, String key, Map<String, String> value, boolean shouldFail) {
@ -520,35 +536,31 @@ public class TestDictionaryMapDeep {
if (updateMode != UpdateMode.ALLOW_UNSAFE && !isTestBadKeysEnabled()) {
return;
}
var stpVer = StepVerifier
.create(tempDb(db -> tempDictionary(db, updateMode)
.map(dict -> tempDatabaseMapDictionaryDeepMap(dict, 5, 6))
.flatMapMany(map -> Flux
.concat(
map.updateValue(key, old -> {
assert old == null;
return Map.of("error?", "error.");
}).then(map.getValue(null, key)),
map.updateValue(key, false, old -> {
assert Objects.equals(old, Map.of("error?", "error."));
return Map.of("error?", "error.");
}).then(map.getValue(null, key)),
map.updateValue(key, true, old -> {
assert Objects.equals(old, Map.of("error?", "error."));
return Map.of("error?", "error.");
}).then(map.getValue(null, key)),
map.updateValue(key, true, old -> {
assert Objects.equals(old, Map.of("error?", "error."));
return value;
}).then(map.getValue(null, key)),
map.updateValue(key, true, old -> {
assert Objects.equals(old, value);
return value;
}).then(map.getValue(null, key))
)
.doAfterTerminate(map::release)
)
));
var stpVer = StepVerifier.create(tempDb(db -> tempDictionary(db, updateMode)
.map(dict -> tempDatabaseMapDictionaryDeepMap(dict, 5, 6))
.flatMapMany(map -> Flux.concat(
map.updateValue(key, old -> {
assert old == null;
return Map.of("error?", "error.");
}).then(map.getValue(null, key)),
map.updateValue(key, false, old -> {
assert Objects.equals(old, Map.of("error?", "error."));
return Map.of("error?", "error.");
}).then(map.getValue(null, key)),
map.updateValue(key, true, old -> {
assert Objects.equals(old, Map.of("error?", "error."));
return Map.of("error?", "error.");
}).then(map.getValue(null, key)),
map.updateValue(key, true, old -> {
assert Objects.equals(old, Map.of("error?", "error."));
return value;
}).then(map.getValue(null, key)),
map.updateValue(key, true, old -> {
assert Objects.equals(old, value);
return value;
}).then(map.getValue(null, key))
).doAfterTerminate(map::release))
));
if (updateMode != UpdateMode.ALLOW_UNSAFE || shouldFail) {
stpVer.verifyError();
} else {
@ -795,6 +807,7 @@ public class TestDictionaryMapDeep {
)
.doAfterTerminate(map::release);
})
.transform(LLUtils::handleDiscard)
));
if (shouldFail) {
stpVer.verifyError();

View File

@ -1,5 +1,8 @@
package it.cavallium.dbengine;
import static it.cavallium.dbengine.DbTestUtils.ensureNoLeaks;
import static it.cavallium.dbengine.DbTestUtils.getUncachedAllocator;
import static it.cavallium.dbengine.DbTestUtils.getUncachedAllocatorUnsafe;
import static it.cavallium.dbengine.DbTestUtils.tempDatabaseMapDictionaryDeepMapHashMap;
import static it.cavallium.dbengine.DbTestUtils.tempDb;
import static it.cavallium.dbengine.DbTestUtils.tempDictionary;
@ -13,6 +16,8 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
@ -94,6 +99,16 @@ public class TestDictionaryMapDeepHashMap {
.toStream();
}
@BeforeEach
public void beforeEach() {
ensureNoLeaks(getUncachedAllocator());
}
@AfterEach
public void afterEach() {
ensureNoLeaks(getUncachedAllocatorUnsafe());
}
@ParameterizedTest
@MethodSource("provideArgumentsPut")
public void testAtPutValueGetAllValues(UpdateMode updateMode, String key1, String key2, String value, boolean shouldFail) {

View File

@ -0,0 +1,217 @@
package it.cavallium.dbengine;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
import io.netty.buffer.ByteBuf;
import it.cavallium.dbengine.DbTestUtils.TempDb;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLDictionaryResultType;
import it.cavallium.dbengine.database.LLKeyValueDatabase;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.UpdateReturnMode;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.Objects;
import java.util.concurrent.Flow.Publisher;
import java.util.stream.Stream;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
public class TestLLDictionaryLeaks {
private TempDb tempDb;
private LLKeyValueDatabase db;
@BeforeEach
public void beforeEach() {
tempDb = Objects.requireNonNull(DbTestUtils.openTempDb().block(), "TempDB");
db = tempDb.db();
}
public static Stream<Arguments> provideArguments() {
return Arrays.stream(UpdateMode.values()).map(Arguments::of);
}
public static Stream<Arguments> providePutArguments() {
var updateModes = Arrays.stream(UpdateMode.values());
return updateModes.flatMap(updateMode -> {
var resultTypes = Arrays.stream(LLDictionaryResultType.values());
return resultTypes.map(resultType -> Arguments.of(updateMode, resultType));
});
}
public static Stream<Arguments> provideUpdateArguments() {
var updateModes = Arrays.stream(UpdateMode.values());
return updateModes.flatMap(updateMode -> {
var resultTypes = Arrays.stream(UpdateReturnMode.values());
return resultTypes.map(resultType -> Arguments.of(updateMode, resultType));
});
}
private LLDictionary getDict(UpdateMode updateMode) {
var dict = DbTestUtils.tempDictionary(db, updateMode).block();
var key1 = Mono.fromCallable(() -> fromString("test-key-1"));
var key2 = Mono.fromCallable(() -> fromString("test-key-2"));
var key3 = Mono.fromCallable(() -> fromString("test-key-3"));
var key4 = Mono.fromCallable(() -> fromString("test-key-4"));
var value = Mono.fromCallable(() -> fromString("test-value"));
dict.put(key1, value, LLDictionaryResultType.VOID).block();
dict.put(key2, value, LLDictionaryResultType.VOID).block();
dict.put(key3, value, LLDictionaryResultType.VOID).block();
dict.put(key4, value, LLDictionaryResultType.VOID).block();
return dict;
}
private ByteBuf fromString(String s) {
var sb = s.getBytes(StandardCharsets.UTF_8);
var b = db.getAllocator().buffer(sb.length);
b.writeBytes(sb); // copy the UTF-8 bytes into the buffer
return b;
}
private void run(Flux<?> publisher) {
publisher.subscribeOn(Schedulers.immediate()).blockLast();
}
private void runVoid(Mono<Void> publisher) {
publisher.then().subscribeOn(Schedulers.immediate()).block();
}
private <T> T run(Mono<T> publisher) {
return publisher.subscribeOn(Schedulers.immediate()).block();
}
private <T> T run(boolean shouldFail, Mono<T> publisher) {
return publisher.subscribeOn(Schedulers.immediate()).transform(mono -> {
if (shouldFail) {
return mono.onErrorResume(ex -> Mono.empty());
} else {
return mono;
}
}).block();
}
private void runVoid(boolean shouldFail, Mono<Void> publisher) {
publisher.then().subscribeOn(Schedulers.immediate()).transform(mono -> {
if (shouldFail) {
return mono.onErrorResume(ex -> Mono.empty());
} else {
return mono;
}
}).block();
}
@Test
public void testNoOp() {
}
@ParameterizedTest
@MethodSource("provideArguments")
public void testGetDict(UpdateMode updateMode) {
var dict = getDict(updateMode);
}
@ParameterizedTest
@MethodSource("provideArguments")
public void testGetColumnName(UpdateMode updateMode) {
var dict = getDict(updateMode);
dict.getColumnName();
}
@ParameterizedTest
@MethodSource("provideArguments")
public void testGetAllocator(UpdateMode updateMode) {
var dict = getDict(updateMode);
dict.getAllocator();
}
@ParameterizedTest
@MethodSource("provideArguments")
public void testGet(UpdateMode updateMode) {
var dict = getDict(updateMode);
var key = Mono.fromCallable(() -> fromString("test"));
runVoid(dict.get(null, key).then().transform(LLUtils::handleDiscard));
runVoid(dict.get(null, key, true).then().transform(LLUtils::handleDiscard));
runVoid(dict.get(null, key, false).then().transform(LLUtils::handleDiscard));
}
@ParameterizedTest
@MethodSource("providePutArguments")
public void testPut(UpdateMode updateMode, LLDictionaryResultType resultType) {
var dict = getDict(updateMode);
var key = Mono.fromCallable(() -> fromString("test-key"));
var value = Mono.fromCallable(() -> fromString("test-value"));
runVoid(dict.put(key, value, resultType).then());
}
@ParameterizedTest
@MethodSource("provideArguments")
public void testGetUpdateMode(UpdateMode updateMode) {
var dict = getDict(updateMode);
assertEquals(updateMode, run(dict.getUpdateMode()));
}
@ParameterizedTest
@MethodSource("provideUpdateArguments")
public void testUpdate(UpdateMode updateMode, UpdateReturnMode updateReturnMode) {
var dict = getDict(updateMode);
var key = Mono.fromCallable(() -> fromString("test-key"));
runVoid(updateMode == UpdateMode.DISALLOW,
dict.update(key, old -> old, updateReturnMode, true).then().transform(LLUtils::handleDiscard)
);
runVoid(updateMode == UpdateMode.DISALLOW,
dict.update(key, old -> old, updateReturnMode, false).then().transform(LLUtils::handleDiscard)
);
runVoid(updateMode == UpdateMode.DISALLOW,
dict.update(key, old -> old, updateReturnMode).then().transform(LLUtils::handleDiscard)
);
}
@ParameterizedTest
@MethodSource("provideArguments")
public void testUpdateAndGetDelta(UpdateMode updateMode) {
var dict = getDict(updateMode);
var key = Mono.fromCallable(() -> fromString("test-key"));
runVoid(updateMode == UpdateMode.DISALLOW,
dict.updateAndGetDelta(key, old -> old, true).then().transform(LLUtils::handleDiscard)
);
runVoid(updateMode == UpdateMode.DISALLOW,
dict.updateAndGetDelta(key, old -> old, false).then().transform(LLUtils::handleDiscard)
);
runVoid(updateMode == UpdateMode.DISALLOW,
dict.updateAndGetDelta(key, old -> old).then().transform(LLUtils::handleDiscard)
);
}
@ParameterizedTest
@MethodSource("provideArguments")
public void testClear(UpdateMode updateMode) {
var dict = getDict(updateMode);
runVoid(dict.clear());
}
@ParameterizedTest
@MethodSource("providePutArguments")
public void testRemove(UpdateMode updateMode, LLDictionaryResultType resultType) {
var dict = getDict(updateMode);
var key = Mono.fromCallable(() -> fromString("test-key"));
runVoid(dict.remove(key, resultType).then());
}
@AfterEach
public void afterEach() {
DbTestUtils.closeTempDb(tempDb).block();
}
}
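
Note: run/runVoid subscribe on Schedulers.immediate(), which keeps allocation and release on the test thread so the pooled-arena counters inspected by ensureNoLeaks stay deterministic; the shouldFail overloads swallow the expected error so teardown still runs. A quick round-trip sketch for the fromString helper above (illustrative only):

// Sketch: every buffer a test creates must be released before afterEach runs.
ByteBuf buf = fromString("test-key");
try {
    assertEquals("test-key", buf.toString(StandardCharsets.UTF_8));
} finally {
    buf.release();
}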

View File

@ -1,11 +1,16 @@
package it.cavallium.dbengine;
import static it.cavallium.dbengine.DbTestUtils.ensureNoLeaks;
import static it.cavallium.dbengine.DbTestUtils.getUncachedAllocator;
import static it.cavallium.dbengine.DbTestUtils.getUncachedAllocatorUnsafe;
import static it.cavallium.dbengine.DbTestUtils.tempDb;
import it.cavallium.dbengine.database.LLKeyValueDatabase;
import it.cavallium.dbengine.database.collections.DatabaseInt;
import it.cavallium.dbengine.database.collections.DatabaseLong;
import java.util.stream.Stream;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
@ -34,6 +39,16 @@ public class TestSingletons {
);
}
@BeforeEach
public void beforeEach() {
ensureNoLeaks(getUncachedAllocator());
}
@AfterEach
public void afterEach() {
ensureNoLeaks(getUncachedAllocatorUnsafe());
}
@Test
public void testCreateInteger() {
StepVerifier