From 3b55e8bd24977e6fa3c722b0933eea14ddd4ca14 Mon Sep 17 00:00:00 2001
From: Andrea Cavalli
Date: Sun, 29 Aug 2021 23:18:03 +0200
Subject: [PATCH] (unfinished) Netty 5 refactoring

---
 pom.xml | 9 +-
 .../dbengine/client/CompositeDatabase.java | 4 +-
 .../dbengine/client/MappedSerializer.java | 12 +-
 .../client/MappedSerializerFixedLength.java | 12 +-
 .../it/cavallium/dbengine/database/Delta.java | 39 +-
 .../database/LLDatabaseConnection.java | 4 +-
 .../cavallium/dbengine/database/LLDelta.java | 129 +
 .../dbengine/database/LLDictionary.java | 86 +-
 .../cavallium/dbengine/database/LLEntry.java | 163 +-
 .../dbengine/database/LLKeyValueDatabase.java | 4 +-
 .../cavallium/dbengine/database/LLRange.java | 248 +-
 .../cavallium/dbengine/database/LLUtils.java | 554 ++--
 .../dbengine/database/SafeCloseable.java | 7 +
 .../database/collections/DatabaseEmpty.java | 10 +-
 .../collections/DatabaseMapDictionary.java | 65 +-
 .../DatabaseMapDictionaryDeep.java | 392 ++-
 .../DatabaseMapDictionaryHashed.java | 59 +-
 .../collections/DatabaseSetDictionary.java | 12 +-
 .../DatabaseSetDictionaryHashed.java | 18 +-
 .../database/collections/DatabaseSingle.java | 40 +-
 .../collections/DatabaseSingleMapped.java | 2 +-
 .../database/collections/SubStageGetter.java | 5 +-
 .../collections/SubStageGetterHashMap.java | 16 +-
 .../collections/SubStageGetterHashSet.java | 12 +-
 .../collections/SubStageGetterMap.java | 12 +-
 .../collections/SubStageGetterMapDeep.java | 14 +-
 .../collections/SubStageGetterSet.java | 8 +-
 .../collections/SubStageGetterSingle.java | 13 +-
 .../SubStageGetterSingleBytes.java | 4 +-
 .../collections/ValueWithHashSerializer.java | 43 +-
 .../collections/ValuesSetSerializer.java | 35 +-
 .../disk/LLLocalDatabaseConnection.java | 8 +-
 .../database/disk/LLLocalDictionary.java | 2260 +++++++++--------
 .../LLLocalEntryReactiveRocksIterator.java | 15 +-
 ...ocalGroupedEntryReactiveRocksIterator.java | 15 +-
 ...LLocalGroupedKeyReactiveRocksIterator.java | 15 +-
 .../LLLocalGroupedReactiveRocksIterator.java | 21 +-
 ...LLLocalKeyPrefixReactiveRocksIterator.java | 19 +-
 .../disk/LLLocalKeyReactiveRocksIterator.java | 15 +-
 .../disk/LLLocalKeyValueDatabase.java | 8 +-
 .../disk/LLLocalReactiveRocksIterator.java | 41 +-
 .../database/disk/ReleasableSlice.java | 11 +-
 .../memory/LLMemoryDatabaseConnection.java | 2 +-
 .../database/memory/LLMemoryDictionary.java | 40 +-
 .../memory/LLMemoryKeyValueDatabase.java | 4 +-
 .../database/memory/LLMemorySingleton.java | 12 +-
 .../database/serialization/Codec.java | 4 +-
 .../serialization/CodecSerializer.java | 16 +-
 .../database/serialization/Serializer.java | 55 +-
 .../SerializerFixedBinaryLength.java | 94 +-
 .../lucene/RandomFieldComparator.java | 2 +-
 .../lucene/searcher/TopDocsSearcher.java | 1 -
 .../dbengine/netty/JMXNettyMonitoring.java | 4 +-
 .../netty/JMXNettyMonitoringManager.java | 2 +-
 .../netty/JMXPooledNettyMonitoring.java | 2 +-
 .../java/org/rocksdb/CappedWriteBatch.java | 29 +-
 56 files changed, 2457 insertions(+), 2269 deletions(-)
 create mode 100644 src/main/java/it/cavallium/dbengine/database/LLDelta.java
 create mode 100644 src/main/java/it/cavallium/dbengine/database/SafeCloseable.java

diff --git a/pom.xml b/pom.xml
index b9dd848..c740646 100644
--- a/pom.xml
+++ b/pom.xml
@@ -32,6 +32,13 @@
 				<enabled>false</enabled>
 			</releases>
 			<snapshots>
 				<enabled>true</enabled>
 			</snapshots>
 		</repository>
+		<repository>
+			<id>netty5-snapshots</id>
+			<name>Netty 5 snapshots</name>
+			<url>https://oss.sonatype.org/content/repositories/snapshots</url>
+			<releases><enabled>true</enabled></releases>
+			<snapshots><enabled>true</enabled></snapshots>
+		</repository>
@@ -245,7 +252,7 @@
 		<dependency>
 			<groupId>io.netty</groupId>
 			<artifactId>netty-buffer</artifactId>
-			<version>4.1.63.Final</version>
+			<version>5.0.0.Final-SNAPSHOT</version>
 		</dependency>
 		<dependency>
 			<groupId>javax.xml.bind</groupId>
diff --git a/src/main/java/it/cavallium/dbengine/client/CompositeDatabase.java b/src/main/java/it/cavallium/dbengine/client/CompositeDatabase.java
index 3e97b1c..cc8c66b 100644
--- a/src/main/java/it/cavallium/dbengine/client/CompositeDatabase.java
+++ b/src/main/java/it/cavallium/dbengine/client/CompositeDatabase.java
@@ -1,6 +1,6 @@
 package it.cavallium.dbengine.client;
 
-import io.netty.buffer.ByteBufAllocator;
+import io.netty.buffer.api.BufferAllocator;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
@@ -18,7 +18,7 @@ public interface CompositeDatabase {
 	 */
 	Mono<Void> releaseSnapshot(CompositeSnapshot snapshot);
 
-	ByteBufAllocator getAllocator();
+	BufferAllocator getAllocator();
 
 	/**
 	 * Find corrupted items
diff --git a/src/main/java/it/cavallium/dbengine/client/MappedSerializer.java b/src/main/java/it/cavallium/dbengine/client/MappedSerializer.java
index de2ff04..5b3a0dd 100644
--- a/src/main/java/it/cavallium/dbengine/client/MappedSerializer.java
+++ b/src/main/java/it/cavallium/dbengine/client/MappedSerializer.java
@@ -1,23 +1,23 @@
 package it.cavallium.dbengine.client;
 
-import io.netty.buffer.ByteBuf;
+import io.netty.buffer.api.Buffer;
 import it.cavallium.dbengine.database.serialization.SerializationException;
 import it.cavallium.dbengine.database.serialization.Serializer;
 import org.jetbrains.annotations.NotNull;
 
-public class MappedSerializer<A, B> implements Serializer<B, ByteBuf> {
+public class MappedSerializer<A, B> implements Serializer<B, Buffer> {
 
-	private final Serializer<A, ByteBuf> serializer;
+	private final Serializer<A, Buffer> serializer;
 	private final Mapper<A, B> keyMapper;
 
-	public MappedSerializer(Serializer<A, ByteBuf> serializer,
+	public MappedSerializer(Serializer<A, Buffer> serializer,
 			Mapper<A, B> keyMapper) {
 		this.serializer = serializer;
 		this.keyMapper = keyMapper;
 	}
 
 	@Override
-	public @NotNull B deserialize(@NotNull ByteBuf serialized) throws SerializationException {
+	public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
 		try {
 			return keyMapper.map(serializer.deserialize(serialized.retain()));
 		} finally {
@@ -26,7 +26,7 @@ public class MappedSerializer<A, B> implements Serializer<B, ByteBuf> {
 	}
 
 	@Override
-	public @NotNull ByteBuf serialize(@NotNull B deserialized) throws SerializationException {
+	public @NotNull Buffer serialize(@NotNull B deserialized) throws SerializationException {
 		return serializer.serialize(keyMapper.unmap(deserialized));
 	}
 }
diff --git a/src/main/java/it/cavallium/dbengine/client/MappedSerializerFixedLength.java b/src/main/java/it/cavallium/dbengine/client/MappedSerializerFixedLength.java
index f2d2508..e6b28b0 100644
--- a/src/main/java/it/cavallium/dbengine/client/MappedSerializerFixedLength.java
+++ b/src/main/java/it/cavallium/dbengine/client/MappedSerializerFixedLength.java
@@ -1,23 +1,23 @@
 package it.cavallium.dbengine.client;
 
-import io.netty.buffer.ByteBuf;
+import io.netty.buffer.api.Buffer;
 import it.cavallium.dbengine.database.serialization.SerializationException;
 import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
 import org.jetbrains.annotations.NotNull;
 
-public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryLength<B, ByteBuf> {
+public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryLength<B, Buffer> {
 
-	private final SerializerFixedBinaryLength<A, ByteBuf> fixedLengthSerializer;
+	private final SerializerFixedBinaryLength<A, Buffer> fixedLengthSerializer;
 	private final Mapper<A, B> keyMapper;
 
-	public MappedSerializerFixedLength(SerializerFixedBinaryLength<A, ByteBuf> fixedLengthSerializer,
+	public MappedSerializerFixedLength(SerializerFixedBinaryLength<A, Buffer> fixedLengthSerializer,
 			Mapper<A, B> keyMapper) {
 		this.fixedLengthSerializer = fixedLengthSerializer;
 		this.keyMapper = keyMapper;
 	}
 
 	@Override
-	public @NotNull B deserialize(@NotNull ByteBuf serialized) throws SerializationException {
+	public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
 		try {
 			return keyMapper.map(fixedLengthSerializer.deserialize(serialized.retain()));
 		} finally {
@@ -26,7 +26,7 @@ public class MappedSerializerFixedLength implements SerializerFixedBinaryL
 	}
 
 	@Override
-	public @NotNull ByteBuf serialize(@NotNull B deserialized) throws SerializationException {
+	public @NotNull Buffer serialize(@NotNull B deserialized) throws SerializationException {
 		return fixedLengthSerializer.serialize(keyMapper.unmap(deserialized));
 	}
diff --git a/src/main/java/it/cavallium/dbengine/database/Delta.java b/src/main/java/it/cavallium/dbengine/database/Delta.java
index 7546717..425b7f6 100644
--- a/src/main/java/it/cavallium/dbengine/database/Delta.java
+++ b/src/main/java/it/cavallium/dbengine/database/Delta.java
@@ -3,9 +3,46 @@ package it.cavallium.dbengine.database;
 import java.util.Objects;
 import org.jetbrains.annotations.Nullable;
 
-public record Delta<T>(@Nullable T previous, @Nullable T current) {
+public class Delta<T> {
+
+	private final @Nullable T previous;
+	private final @Nullable T current;
+
+	public Delta(@Nullable T previous, @Nullable T current) {
+		this.previous = previous;
+		this.current = current;
+	}
 
 	public boolean isModified() {
 		return !Objects.equals(previous, current);
 	}
+
+	public @Nullable T previous() {
+		return previous;
+	}
+
+	public @Nullable T current() {
+		return current;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (obj == this)
+			return true;
+		if (obj == null || obj.getClass() != this.getClass())
+			return false;
+		var that = (Delta<?>) obj;
+		return Objects.equals(this.previous, that.previous) && Objects.equals(this.current, that.current);
+	}
+
+	@Override
+	public int hashCode() {
+		return Objects.hash(previous, current);
+	}
+
+	@Override
+	public String toString() {
+		return "Delta[" + "previous=" + previous + ", " + "current=" + current + ']';
+	}
+
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java b/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java
index 39709b0..defeefa 100644
--- a/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java
+++ b/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java
@@ -1,6 +1,6 @@
 package it.cavallium.dbengine.database;
 
-import io.netty.buffer.ByteBufAllocator;
+import io.netty.buffer.api.BufferAllocator;
 import it.cavallium.dbengine.client.DatabaseOptions;
 import it.cavallium.dbengine.client.IndicizerAnalyzers;
 import it.cavallium.dbengine.client.IndicizerSimilarities;
@@ -11,7 +11,7 @@ import reactor.core.publisher.Mono;
 
 @SuppressWarnings("UnusedReturnValue")
 public interface LLDatabaseConnection {
 
-	ByteBufAllocator getAllocator();
+	BufferAllocator getAllocator();
 
 	Mono connect();
 
diff --git a/src/main/java/it/cavallium/dbengine/database/LLDelta.java b/src/main/java/it/cavallium/dbengine/database/LLDelta.java
new file mode 100644
index 0000000..8d4feaf
--- /dev/null
+++ b/src/main/java/it/cavallium/dbengine/database/LLDelta.java
@@ -0,0 +1,129 @@
+package it.cavallium.dbengine.database;
+
+import io.netty.buffer.api.Buffer;
+import io.netty.buffer.api.Drop;
+import io.netty.buffer.api.Owned;
+import io.netty.buffer.api.Send;
+import io.netty.buffer.api.internal.ResourceSupport;
+import java.util.StringJoiner;
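LLDelta below is the first of several ResourceSupport subclasses this patch adds (LLEntry and LLRange follow the same shape), and they all rely on the same Netty 5 ownership protocol: send() invalidates the sender's handle, receive() grants sole ownership, and close() releases the memory exactly once. A minimal sketch of that protocol, not part of the patch, using only the io.netty.buffer.api types imported above and assuming the Buffer accessors mirror ByteBuf's (writeInt/readInt); the class name is hypothetical:

	import io.netty.buffer.api.Buffer;
	import io.netty.buffer.api.BufferAllocator;
	import io.netty.buffer.api.Send;

	final class OwnershipSketch {

		// Moves a buffer by value between owners, e.g. across threads.
		static void transfer(BufferAllocator alloc) {
			Buffer buf = alloc.allocate(Integer.BYTES).writeInt(42);
			Send<Buffer> sent = buf.send();          // 'buf' is no longer accessible here
			try (Buffer received = sent.receive()) { // the receiver becomes the sole owner
				assert received.readInt() == 42;
			}                                        // close() frees the memory exactly once
		}
	}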
+import org.jetbrains.annotations.Nullable;
+
+public class LLDelta extends ResourceSupport<LLDelta, LLDelta> {
+	@Nullable
+	private final Buffer previous;
+	@Nullable
+	private final Buffer current;
+
+	private LLDelta(@Nullable Send<Buffer> previous, @Nullable Send<Buffer> current, Drop<LLDelta> drop) {
+		super(new LLDelta.CloseOnDrop(drop));
+		assert isAllAccessible();
+		this.previous = previous != null ? previous.receive().makeReadOnly() : null;
+		this.current = current != null ? current.receive().makeReadOnly() : null;
+	}
+
+	private boolean isAllAccessible() {
+		assert previous == null || previous.isAccessible();
+		assert current == null || current.isAccessible();
+		assert this.isAccessible();
+		assert this.isOwned();
+		return true;
+	}
+
+	public static LLDelta of(Send<Buffer> min, Send<Buffer> max) {
+		return new LLDelta(min, max, d -> {});
+	}
+
+	public Send<Buffer> previous() {
+		ensureOwned();
+		return previous != null ? previous.copy().send() : null;
+	}
+
+	public Send<Buffer> current() {
+		ensureOwned();
+		return current != null ? current.copy().send() : null;
+	}
+
+	public boolean isModified() {
+		return !LLUtils.equals(previous, current);
+	}
+
+	private void ensureOwned() {
+		assert isAllAccessible();
+		if (!isOwned()) {
+			if (!isAccessible()) {
+				throw this.createResourceClosedException();
+			} else {
+				throw new IllegalStateException("Resource not owned");
+			}
+		}
+	}
+
+	@Override
+	public boolean equals(Object o) {
+		if (this == o) {
+			return true;
+		}
+		if (o == null || getClass() != o.getClass()) {
+			return false;
+		}
+		LLDelta LLDelta = (LLDelta) o;
+		return LLUtils.equals(previous, LLDelta.previous) && LLUtils.equals(current, LLDelta.current);
+	}
+
+	@Override
+	public int hashCode() {
+		int result = LLUtils.hashCode(previous);
+		result = 31 * result + LLUtils.hashCode(current);
+		return result;
+	}
+
+	@Override
+	public String toString() {
+		return new StringJoiner(", ", LLDelta.class.getSimpleName() + "[", "]")
+				.add("min=" + LLUtils.toString(previous))
+				.add("max=" + LLUtils.toString(current))
+				.toString();
+	}
+
+	public LLDelta copy() {
+		ensureOwned();
+		return new LLDelta(previous != null ? previous.copy().send() : null,
+				current != null ? current.copy().send() : null,
+				d -> {}
+		);
+	}
+
+	@Override
+	protected RuntimeException createResourceClosedException() {
+		return new IllegalStateException("Closed");
+	}
+
+	@Override
+	protected Owned<LLDelta> prepareSend() {
+		Send<Buffer> minSend;
+		Send<Buffer> maxSend;
+		minSend = this.previous != null ? this.previous.send() : null;
+		maxSend = this.current != null ?
this.current.send() : null; + return drop -> new LLDelta(minSend, maxSend, drop); + } + + private static class CloseOnDrop implements Drop { + + private final Drop delegate; + + public CloseOnDrop(Drop drop) { + this.delegate = drop; + } + + @Override + public void drop(LLDelta obj) { + if (obj.previous != null) { + obj.previous.close(); + } + if (obj.current != null) { + obj.current.close(); + } + delegate.drop(obj); + } + } +} diff --git a/src/main/java/it/cavallium/dbengine/database/LLDictionary.java b/src/main/java/it/cavallium/dbengine/database/LLDictionary.java index 5e2c135..4f18cdc 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/LLDictionary.java @@ -1,7 +1,8 @@ package it.cavallium.dbengine.database; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.database.serialization.BiSerializationFunction; import it.cavallium.dbengine.database.serialization.SerializationFunction; @@ -23,89 +24,90 @@ public interface LLDictionary extends LLKeyValueDatabaseStructure { String getColumnName(); - ByteBufAllocator getAllocator(); + BufferAllocator getAllocator(); - Mono get(@Nullable LLSnapshot snapshot, Mono key, boolean existsAlmostCertainly); + Mono> get(@Nullable LLSnapshot snapshot, Mono> key, boolean existsAlmostCertainly); - default Mono get(@Nullable LLSnapshot snapshot, Mono key) { + default Mono> get(@Nullable LLSnapshot snapshot, Mono> key) { return get(snapshot, key, false); } - Mono put(Mono key, Mono value, LLDictionaryResultType resultType); + Mono> put(Mono> key, Mono> value, LLDictionaryResultType resultType); Mono getUpdateMode(); - default Mono update(Mono key, - SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater, + default Mono> update(Mono> key, + SerializationFunction<@Nullable Send, @Nullable Send> updater, UpdateReturnMode updateReturnMode, boolean existsAlmostCertainly) { return this .updateAndGetDelta(key, updater, existsAlmostCertainly) - .transform(prev -> LLUtils.resolveDelta(prev, updateReturnMode)); + .transform(prev -> LLUtils.resolveLLDelta(prev, updateReturnMode)); } - default Mono update(Mono key, - SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater, + default Mono> update(Mono> key, + SerializationFunction<@Nullable Send, @Nullable Send> updater, UpdateReturnMode returnMode) { return update(key, updater, returnMode, false); } - Mono> updateAndGetDelta(Mono key, - SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater, + Mono updateAndGetDelta(Mono> key, + SerializationFunction<@Nullable Send, @Nullable Send> updater, boolean existsAlmostCertainly); - default Mono> updateAndGetDelta(Mono key, - SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater) { + default Mono updateAndGetDelta(Mono> key, + SerializationFunction<@Nullable Send, @Nullable Send> updater) { return updateAndGetDelta(key, updater, false); } Mono clear(); - Mono remove(Mono key, LLDictionaryResultType resultType); + Mono> remove(Mono> key, LLDictionaryResultType resultType); - Flux>> getMulti(@Nullable LLSnapshot snapshot, - Flux> keys, + Flux, Optional>>> getMulti(@Nullable LLSnapshot snapshot, + Flux>> keys, boolean existsAlmostCertainly); - default Flux>> getMulti(@Nullable LLSnapshot snapshot, Flux> keys) { + default Flux, Optional>>> 
getMulti(@Nullable LLSnapshot snapshot, + Flux>> keys) { return getMulti(snapshot, keys, false); } - Flux putMulti(Flux entries, boolean getOldValues); + Flux> putMulti(Flux> entries, boolean getOldValues); - Flux> updateMulti(Flux> entries, - BiSerializationFunction updateFunction); + Flux, X>> updateMulti(Flux, X>> entries, + BiSerializationFunction, X, Send> updateFunction); - Flux getRange(@Nullable LLSnapshot snapshot, Mono range, boolean existsAlmostCertainly); + Flux> getRange(@Nullable LLSnapshot snapshot, Mono> range, boolean existsAlmostCertainly); - default Flux getRange(@Nullable LLSnapshot snapshot, Mono range) { + default Flux> getRange(@Nullable LLSnapshot snapshot, Mono> range) { return getRange(snapshot, range, false); } - Flux> getRangeGrouped(@Nullable LLSnapshot snapshot, - Mono range, + Flux>> getRangeGrouped(@Nullable LLSnapshot snapshot, + Mono> range, int prefixLength, boolean existsAlmostCertainly); - default Flux> getRangeGrouped(@Nullable LLSnapshot snapshot, - Mono range, + default Flux>> getRangeGrouped(@Nullable LLSnapshot snapshot, + Mono> range, int prefixLength) { return getRangeGrouped(snapshot, range, prefixLength, false); } - Flux getRangeKeys(@Nullable LLSnapshot snapshot, Mono range); + Flux> getRangeKeys(@Nullable LLSnapshot snapshot, Mono> range); - Flux> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, Mono range, int prefixLength); + Flux>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, Mono> range, int prefixLength); - Flux getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono range, int prefixLength); + Flux> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono> range, int prefixLength); - Flux badBlocks(Mono range); + Flux badBlocks(Mono> range); - Mono setRange(Mono range, Flux entries); + Mono setRange(Mono> range, Flux> entries); - default Mono replaceRange(Mono range, + default Mono replaceRange(Mono> range, boolean canKeysChange, - Function> entriesReplacer, + Function, Mono>> entriesReplacer, boolean existsAlmostCertainly) { return Mono.defer(() -> { if (canKeysChange) { @@ -124,19 +126,19 @@ public interface LLDictionary extends LLKeyValueDatabaseStructure { }); } - default Mono replaceRange(Mono range, + default Mono replaceRange(Mono> range, boolean canKeysChange, - Function> entriesReplacer) { + Function, Mono>> entriesReplacer) { return replaceRange(range, canKeysChange, entriesReplacer, false); } - Mono isRangeEmpty(@Nullable LLSnapshot snapshot, Mono range); + Mono isRangeEmpty(@Nullable LLSnapshot snapshot, Mono> range); - Mono sizeRange(@Nullable LLSnapshot snapshot, Mono range, boolean fast); + Mono sizeRange(@Nullable LLSnapshot snapshot, Mono> range, boolean fast); - Mono getOne(@Nullable LLSnapshot snapshot, Mono range); + Mono> getOne(@Nullable LLSnapshot snapshot, Mono> range); - Mono getOneKey(@Nullable LLSnapshot snapshot, Mono range); + Mono> getOneKey(@Nullable LLSnapshot snapshot, Mono> range); - Mono removeOne(Mono range); + Mono> removeOne(Mono> range); } diff --git a/src/main/java/it/cavallium/dbengine/database/LLEntry.java b/src/main/java/it/cavallium/dbengine/database/LLEntry.java index 3a7a46a..e5d73b9 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLEntry.java +++ b/src/main/java/it/cavallium/dbengine/database/LLEntry.java @@ -1,74 +1,127 @@ package it.cavallium.dbengine.database; -import io.netty.buffer.ByteBuf; -import io.netty.util.IllegalReferenceCountException; -import java.util.Map; -import java.util.concurrent.atomic.AtomicInteger; -import org.warp.commonutils.log.Logger; -import 
org.warp.commonutils.log.LoggerFactory; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.Owned; +import io.netty.buffer.api.Send; +import io.netty.buffer.api.internal.ResourceSupport; +import java.util.StringJoiner; +import org.jetbrains.annotations.NotNull; -public class LLEntry { +public class LLEntry extends ResourceSupport { + @NotNull + private final Buffer key; + @NotNull + private final Buffer value; - private static final Logger logger = LoggerFactory.getLogger(LLEntry.class); - - private final AtomicInteger refCnt = new AtomicInteger(1); - - private final ByteBuf key; - private final ByteBuf value; - - public LLEntry(ByteBuf key, ByteBuf value) { - try { - this.key = key.retain(); - this.value = value.retain(); - } finally { - key.release(); - value.release(); - } + private LLEntry(Send key, Send value, Drop drop) { + super(new LLEntry.CloseOnDrop(drop)); + assert isAllAccessible(); + this.key = key.receive().makeReadOnly(); + this.value = value.receive().makeReadOnly(); } - public ByteBuf getKey() { - if (refCnt.get() <= 0) { - throw new IllegalReferenceCountException(refCnt.get()); - } + private boolean isAllAccessible() { + assert key.isAccessible(); + assert value.isAccessible(); + assert this.isAccessible(); + assert this.isOwned(); + return true; + } + + public static LLEntry of(Send key, Send value) { + return new LLEntry(key, value, d -> {}); + } + + public Send getKey() { + ensureOwned(); + return key.copy().send(); + } + + public Buffer getKeyUnsafe() { return key; } - public ByteBuf getValue() { - if (refCnt.get() <= 0) { - throw new IllegalReferenceCountException(refCnt.get()); - } + public Send getValue() { + ensureOwned(); + return value.copy().send(); + } + + + public Buffer getValueUnsafe() { return value; } - public void retain() { - if (refCnt.getAndIncrement() <= 0) { - throw new IllegalReferenceCountException(refCnt.get(), 1); + private void ensureOwned() { + assert isAllAccessible(); + if (!isOwned()) { + if (!isAccessible()) { + throw this.createResourceClosedException(); + } else { + throw new IllegalStateException("Resource not owned"); + } } - key.retain(); - value.retain(); - } - - public void release() { - if (refCnt.decrementAndGet() < 0) { - throw new IllegalReferenceCountException(refCnt.get(), -1); - } - if (key.refCnt() > 0) { - key.release(); - } - if (value.refCnt() > 0) { - value.release(); - } - } - - public boolean isReleased() { - return refCnt.get() <= 0; } @Override - protected void finalize() throws Throwable { - if (refCnt.get() > 0) { - logger.warn(this.getClass().getName() + "::release has not been called!"); + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + LLEntry LLEntry = (LLEntry) o; + return LLUtils.equals(key, LLEntry.key) && LLUtils.equals(value, LLEntry.value); + } + + @Override + public int hashCode() { + int result = LLUtils.hashCode(key); + result = 31 * result + LLUtils.hashCode(value); + return result; + } + + @Override + public String toString() { + return new StringJoiner(", ", LLEntry.class.getSimpleName() + "[", "]") + .add("key=" + LLUtils.toString(key)) + .add("value=" + LLUtils.toString(value)) + .toString(); + } + + public LLEntry copy() { + ensureOwned(); + return new LLEntry(key.copy().send(), value.copy().send(), d -> {}); + } + + @Override + protected RuntimeException createResourceClosedException() { + return new IllegalStateException("Closed"); + } + + @Override + 
protected Owned prepareSend() { + Send keySend; + Send valueSend; + keySend = this.key.send(); + valueSend = this.value.send(); + return drop -> new LLEntry(keySend, valueSend, drop); + } + + private static class CloseOnDrop implements Drop { + + private final Drop delegate; + + public CloseOnDrop(Drop drop) { + this.delegate = drop; + } + + @Override + public void drop(LLEntry obj) { + obj.key.close(); + obj.value.close(); + delegate.drop(obj); } - super.finalize(); } } diff --git a/src/main/java/it/cavallium/dbengine/database/LLKeyValueDatabase.java b/src/main/java/it/cavallium/dbengine/database/LLKeyValueDatabase.java index 568f5c6..61023e3 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLKeyValueDatabase.java +++ b/src/main/java/it/cavallium/dbengine/database/LLKeyValueDatabase.java @@ -2,7 +2,7 @@ package it.cavallium.dbengine.database; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.BufferAllocator; import it.cavallium.dbengine.database.collections.DatabaseInt; import it.cavallium.dbengine.database.collections.DatabaseLong; import java.nio.charset.StandardCharsets; @@ -46,7 +46,7 @@ public interface LLKeyValueDatabase extends LLSnapshottable, LLKeyValueDatabaseS Mono verifyChecksum(); - ByteBufAllocator getAllocator(); + BufferAllocator getAllocator(); Mono close(); } diff --git a/src/main/java/it/cavallium/dbengine/database/LLRange.java b/src/main/java/it/cavallium/dbengine/database/LLRange.java index c31831d..3bb8fd1 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLRange.java +++ b/src/main/java/it/cavallium/dbengine/database/LLRange.java @@ -1,117 +1,146 @@ package it.cavallium.dbengine.database; import static io.netty.buffer.Unpooled.wrappedBuffer; -import static io.netty.buffer.Unpooled.wrappedUnmodifiableBuffer; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufUtil; -import io.netty.util.IllegalReferenceCountException; -import java.util.Arrays; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.Owned; +import io.netty.buffer.api.Send; +import io.netty.buffer.api.internal.ResourceSupport; import java.util.StringJoiner; -import java.util.concurrent.atomic.AtomicInteger; /** * Range of data, from min (inclusive),to max (exclusive) */ -public class LLRange { +public class LLRange extends ResourceSupport { - private static final LLRange RANGE_ALL = new LLRange(null, null, false); - private final ByteBuf min; - private final ByteBuf max; - private final boolean releasable; - private final AtomicInteger refCnt = new AtomicInteger(1); + private static final LLRange RANGE_ALL = new LLRange(null, null, null, d -> {}); + private Buffer min; + private Buffer max; + private Buffer single; - private LLRange(ByteBuf min, ByteBuf max, boolean releasable) { - assert min == null || min.refCnt() > 0; - assert max == null || max.refCnt() > 0; - this.min = min; - this.max = max; - this.releasable = releasable; + private LLRange(Send min, Send max, Send single, Drop drop) { + super(new CloseOnDrop(drop)); + assert isAllAccessible(); + assert single == null || (min == null && max == null); + this.min = min != null ? min.receive().makeReadOnly() : null; + this.max = max != null ? max.receive().makeReadOnly() : null; + this.single = single != null ? 
single.receive().makeReadOnly() : null; + } + + private boolean isAllAccessible() { + assert min == null || min.isAccessible(); + assert max == null || max.isAccessible(); + assert single == null || single.isAccessible(); + assert this.isAccessible(); + assert this.isOwned(); + return true; } public static LLRange all() { - return RANGE_ALL; + return RANGE_ALL.copy(); } - public static LLRange from(ByteBuf min) { - return new LLRange(min, null, true); + public static LLRange from(Send min) { + return new LLRange(min, null, null, d -> {}); } - public static LLRange to(ByteBuf max) { - return new LLRange(null, max, true); + public static LLRange to(Send max) { + return new LLRange(null, max, null, d -> {}); } - public static LLRange single(ByteBuf single) { - try { - return new LLRange(single.retain(), single.retain(), true); - } finally { - single.release(); - } + public static LLRange single(Send single) { + return new LLRange(null, null, single, d -> {}); } - public static LLRange of(ByteBuf min, ByteBuf max) { - return new LLRange(min, max, true); + public static LLRange of(Send min, Send max) { + return new LLRange(min, max, null, d -> {}); } public boolean isAll() { - checkReleased(); - assert min == null || min.refCnt() > 0; - assert max == null || max.refCnt() > 0; - return min == null && max == null; + ensureOwned(); + return min == null && max == null && single == null; } public boolean isSingle() { - checkReleased(); - assert min == null || min.refCnt() > 0; - assert max == null || max.refCnt() > 0; - if (min == null || max == null) return false; - return LLUtils.equals(min, max); + ensureOwned(); + return single != null; } public boolean hasMin() { - checkReleased(); - assert min == null || min.refCnt() > 0; - assert max == null || max.refCnt() > 0; - return min != null; + ensureOwned(); + return min != null || single != null; } - public ByteBuf getMin() { - checkReleased(); - assert min == null || min.refCnt() > 0; - assert max == null || max.refCnt() > 0; - assert min != null; - return min; + public Send getMin() { + ensureOwned(); + if (min != null) { + return min.copy().send(); + } else if (single != null) { + return single.copy().send(); + } else { + return null; + } + } + + public Buffer getMinUnsafe() { + ensureOwned(); + if (min != null) { + return min; + } else if (single != null) { + return single; + } else { + return null; + } } public boolean hasMax() { - checkReleased(); - assert min == null || min.refCnt() > 0; - assert max == null || max.refCnt() > 0; - return max != null; + ensureOwned(); + return max != null || single != null; } - public ByteBuf getMax() { - checkReleased(); - assert min == null || min.refCnt() > 0; - assert max == null || max.refCnt() > 0; - assert max != null; - return max; - } - - public ByteBuf getSingle() { - checkReleased(); - assert min == null || min.refCnt() > 0; - assert max == null || max.refCnt() > 0; - assert isSingle(); - return min; - } - - private void checkReleased() { - if (!releasable) { - return; + public Send getMax() { + ensureOwned(); + if (max != null) { + return max.copy().send(); + } else if (single != null) { + return single.copy().send(); + } else { + return null; } - if (refCnt.get() <= 0) { - throw new IllegalReferenceCountException(0); + } + + public Buffer getMaxUnsafe() { + ensureOwned(); + if (max != null) { + return max; + } else if (single != null) { + return single; + } else { + return null; + } + } + + public Send getSingle() { + ensureOwned(); + assert isSingle(); + return single != null ? 
single.copy().send() : null; + } + + public Buffer getSingleUnsafe() { + ensureOwned(); + assert isSingle(); + return single; + } + + private void ensureOwned() { + assert isAllAccessible(); + if (!isOwned()) { + if (!isAccessible()) { + throw this.createResourceClosedException(); + } else { + throw new IllegalStateException("Resource not owned"); + } } } @@ -142,34 +171,53 @@ public class LLRange { .toString(); } - public LLRange retain() { - if (!releasable) { - return this; - } - if (refCnt.updateAndGet(refCnt -> refCnt <= 0 ? 0 : (refCnt + 1)) <= 0) { - throw new IllegalReferenceCountException(0, 1); - } - if (min != null) { - min.retain(); - } - if (max != null) { - max.retain(); - } - return this; + public LLRange copy() { + ensureOwned(); + return new LLRange(min != null ? min.copy().send() : null, + max != null ? max.copy().send() : null, + single != null ? single.copy().send(): null, + d -> {} + ); } - public void release() { - if (!releasable) { - return; + @Override + protected RuntimeException createResourceClosedException() { + return new IllegalStateException("Closed"); + } + + @Override + protected Owned prepareSend() { + Send minSend; + Send maxSend; + Send singleSend; + minSend = this.min != null ? this.min.send() : null; + maxSend = this.max != null ? this.max.send() : null; + singleSend = this.single != null ? this.single.send() : null; + this.makeInaccessible(); + return drop -> new LLRange(minSend, maxSend, singleSend, drop); + } + + private void makeInaccessible() { + this.min = null; + this.max = null; + this.single = null; + } + + private static class CloseOnDrop implements Drop { + + private final Drop delegate; + + public CloseOnDrop(Drop drop) { + this.delegate = drop; } - if (refCnt.decrementAndGet() < 0) { - throw new IllegalReferenceCountException(0, -1); - } - if (min != null) { - min.release(); - } - if (max != null) { - max.release(); + + @Override + public void drop(LLRange obj) { + if (obj.min != null) obj.min.close(); + if (obj.max != null) obj.max.close(); + if (obj.single != null) obj.single.close(); + obj.makeInaccessible(); + delegate.drop(obj); } } } diff --git a/src/main/java/it/cavallium/dbengine/database/LLUtils.java b/src/main/java/it/cavallium/dbengine/database/LLUtils.java index 9c16c7e..77e537f 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLUtils.java +++ b/src/main/java/it/cavallium/dbengine/database/LLUtils.java @@ -2,21 +2,17 @@ package it.cavallium.dbengine.database; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.ByteBufUtil; -import io.netty.buffer.CompositeByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.util.AbstractReferenceCounted; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.CompositeBuffer; +import io.netty.buffer.api.Send; import io.netty.util.IllegalReferenceCountException; -import io.netty.util.ReferenceCounted; -import it.cavallium.dbengine.database.disk.ReleasableSlice; import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.SerializationFunction; import it.cavallium.dbengine.lucene.RandomSortField; import java.nio.ByteBuffer; +import java.nio.charset.Charset; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.List; import java.util.Map; @@ -24,7 +20,8 @@ import java.util.Map.Entry; 
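The reworked LLRange above replaces retain()/release() with the same send/receive protocol: the factory methods take ownership of their Send arguments, and getMin()/getMax()/getSingle() hand back defensive copies. A hypothetical call site, not taken from the patch, assuming LLRange keeps the close() it inherits from ResourceSupport; the class name is an invention for illustration:

	import io.netty.buffer.api.Buffer;
	import io.netty.buffer.api.BufferAllocator;
	import it.cavallium.dbengine.database.LLRange;

	final class RangeSketch {

		// Builds a single-key range; ownership of the key buffer moves into the range.
		static void singleKey(BufferAllocator alloc, byte[] key) {
			Buffer keyBuf = alloc.allocate(key.length).writeBytes(key);
			try (LLRange range = LLRange.single(keyBuf.send())) {
				// getSingle() returns a copy, so the range itself stays usable afterwards
				try (Buffer single = range.getSingle().receive()) {
					assert single.readableBytes() == key.length;
				}
			} // closing the range drops min, max and single
		}
	}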
import java.util.Objects; import java.util.Optional; import java.util.concurrent.Callable; -import java.util.function.Function; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; import java.util.function.ToIntFunction; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; @@ -45,7 +42,6 @@ import org.jetbrains.annotations.Nullable; import org.rocksdb.RocksDB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.warp.commonutils.functional.IOFunction; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; import reactor.util.function.Tuple2; @@ -56,6 +52,7 @@ public class LLUtils { private static final Logger logger = LoggerFactory.getLogger(LLUtils.class); + private static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.allocateDirect(0); private static final byte[] RESPONSE_TRUE = new byte[]{1}; private static final byte[] RESPONSE_FALSE = new byte[]{0}; private static final byte[] RESPONSE_TRUE_BUF = new byte[]{1}; @@ -73,12 +70,10 @@ public class LLUtils { return response[0] == 1; } - public static boolean responseToBoolean(ByteBuf response) { - try { + public static boolean responseToBoolean(Buffer response) { + try (response) { assert response.readableBytes() == 1; - return response.getByte(response.readerIndex()) == 1; - } finally { - response.release(); + return response.getByte(response.readerOffset()) == 1; } } @@ -86,8 +81,8 @@ public class LLUtils { return bool ? RESPONSE_TRUE : RESPONSE_FALSE; } - public static ByteBuf booleanToResponseByteBuffer(boolean bool) { - return Unpooled.wrappedBuffer(booleanToResponse(bool)); + public static Buffer booleanToResponseByteBuffer(BufferAllocator alloc, boolean bool) { + return alloc.allocate(1).writeByte(bool ? 
(byte) 1 : 0); } @Nullable @@ -171,9 +166,9 @@ public class LLUtils { return new it.cavallium.dbengine.database.LLKeyScore(hit.docId(), hit.score(), hit.key()); } - public static String toStringSafe(ByteBuf key) { + public static String toStringSafe(Buffer key) { try { - if (key.refCnt() > 0) { + if (key.isAccessible()) { return toString(key); } else { return "(released)"; @@ -183,11 +178,11 @@ public class LLUtils { } } - public static String toString(ByteBuf key) { + public static String toString(Buffer key) { if (key == null) { return "null"; } else { - int startIndex = key.readerIndex(); + int startIndex = key.readerOffset(); int iMax = key.readableBytes() - 1; int iLimit = 128; if (iMax <= -1) { @@ -213,111 +208,117 @@ public class LLUtils { } } - public static boolean equals(ByteBuf a, ByteBuf b) { + public static boolean equals(Buffer a, Buffer b) { if (a == null && b == null) { return true; } else if (a != null && b != null) { - return ByteBufUtil.equals(a, b); + var aCur = a.openCursor(); + var bCur = b.openCursor(); + if (aCur.bytesLeft() != bCur.bytesLeft()) { + return false; + } + while (aCur.readByte() && bCur.readByte()) { + if (aCur.getByte() != bCur.getByte()) { + return false; + } + } + return true; } else { return false; } } - public static byte[] toArray(ByteBuf key) { - if (key.hasArray()) { - return Arrays.copyOfRange(key.array(), key.arrayOffset() + key.readerIndex(), key.arrayOffset() + key.writerIndex()); - } else { - byte[] keyBytes = new byte[key.readableBytes()]; - key.getBytes(key.readerIndex(), keyBytes, 0, key.readableBytes()); - return keyBytes; - } + public static byte[] toArray(Buffer key) { + byte[] array = new byte[key.readableBytes()]; + key.copyInto(key.readerOffset(), array, 0, key.readableBytes()); + return array; } - public static List toArray(List input) { + public static List toArray(List input) { List result = new ArrayList<>(input.size()); - for (ByteBuf byteBuf : input) { + for (Buffer byteBuf : input) { result.add(toArray(byteBuf)); } return result; } - public static int hashCode(ByteBuf buf) { - return buf == null ? 
0 : buf.hashCode(); + public static int hashCode(Buffer buf) { + if (buf == null) + return 0; + + int result = 1; + var cur = buf.openCursor(); + while (cur.readByte()) { + var element = cur.getByte(); + result = 31 * result + element; + } + + return result; } + /** + * + * @return null if size is equal to RocksDB.NOT_FOUND + */ @Nullable - public static ByteBuf readNullableDirectNioBuffer(ByteBufAllocator alloc, ToIntFunction reader) { - ByteBuf buffer = alloc.directBuffer(); - ByteBuf directBuffer = null; + public static Buffer readNullableDirectNioBuffer(BufferAllocator alloc, ToIntFunction reader) { + Buffer buffer = alloc.allocate(4096); ByteBuffer nioBuffer; int size; - Boolean mustBeCopied = null; do { - if (mustBeCopied == null || !mustBeCopied) { - nioBuffer = LLUtils.toDirectFast(buffer); - if (nioBuffer != null) { - nioBuffer.limit(nioBuffer.capacity()); - } - } else { - nioBuffer = null; - } - if ((mustBeCopied != null && mustBeCopied) || nioBuffer == null) { - directBuffer = buffer; - nioBuffer = directBuffer.nioBuffer(0, directBuffer.capacity()); - mustBeCopied = true; - } else { - mustBeCopied = false; - } - try { - assert nioBuffer.isDirect(); - size = reader.applyAsInt(nioBuffer); - if (size != RocksDB.NOT_FOUND) { - if (mustBeCopied) { - buffer.writerIndex(0).writeBytes(nioBuffer); - } - if (size == nioBuffer.limit()) { - buffer.setIndex(0, size); - return buffer; - } else { - assert size > nioBuffer.limit(); - assert nioBuffer.limit() > 0; - buffer.capacity(size); - } - } - } finally { - if (nioBuffer != null) { - nioBuffer = null; - } - if(directBuffer != null) { - directBuffer.release(); - directBuffer = null; + nioBuffer = LLUtils.toDirect(buffer); + nioBuffer.limit(nioBuffer.capacity()); + assert nioBuffer.isDirect(); + size = reader.applyAsInt(nioBuffer); + if (size != RocksDB.NOT_FOUND) { + if (size == nioBuffer.limit()) { + buffer.readerOffset(0).writerOffset(size); + return buffer; + } else { + assert size > nioBuffer.limit(); + assert nioBuffer.limit() > 0; + buffer.ensureWritable(size); } } } while (size != RocksDB.NOT_FOUND); + + // Return null if size is equal to RocksDB.NOT_FOUND return null; } @Nullable - public static ByteBuffer toDirectFast(ByteBuf buffer) { - ByteBuffer result = buffer.nioBuffer(0, buffer.capacity()); - if (result.isDirect()) { - result.limit(buffer.writerIndex()); + public static ByteBuffer toDirectFast(Buffer buffer) { + int readableComponents = buffer.countReadableComponents(); + if (readableComponents > 0) { + AtomicReference byteBufferReference = new AtomicReference<>(null); + buffer.forEachReadable(0, (index, component) -> { + byteBufferReference.setPlain(component.readableBuffer()); + return false; + }); + ByteBuffer byteBuffer = byteBufferReference.getPlain(); + if (byteBuffer != null && byteBuffer.isDirect()) { + byteBuffer.limit(buffer.writerOffset()); - assert result.isDirect(); - assert result.capacity() == buffer.capacity(); - assert buffer.readerIndex() == result.position(); - assert result.limit() - result.position() == buffer.readableBytes(); + assert byteBuffer.isDirect(); + assert byteBuffer.capacity() == buffer.capacity(); + assert buffer.readerOffset() == byteBuffer.position(); + assert byteBuffer.limit() - byteBuffer.position() == buffer.readableBytes(); - return result; + return byteBuffer; + } else { + return null; + } + } else if (readableComponents == 0) { + return EMPTY_BYTE_BUFFER; } else { return null; } } - public static ByteBuffer toDirect(ByteBuf buffer) { + public static ByteBuffer toDirect(Buffer buffer) 
{ ByteBuffer result = toDirectFast(buffer); if (result == null) { - throw new IllegalArgumentException("The supplied ByteBuf is not direct " + throw new IllegalArgumentException("The supplied Buffer is not direct " + "(if it's a CompositeByteBuf it must be consolidated before)"); } assert result.isDirect(); @@ -325,9 +326,9 @@ public class LLUtils { } /* - public static ByteBuf toDirectCopy(ByteBuf buffer) { + public static Buffer toDirectCopy(Buffer buffer) { try { - ByteBuf directCopyBuf = buffer.alloc().buffer(buffer.capacity(), buffer.maxCapacity()); + Buffer directCopyBuf = buffer.alloc().buffer(buffer.capacity(), buffer.maxCapacity()); directCopyBuf.writeBytes(buffer, 0, buffer.writerIndex()); return directCopyBuf; } finally { @@ -336,26 +337,14 @@ public class LLUtils { } */ - public static ByteBuf convertToDirectByteBuf(ByteBufAllocator alloc, ByteBuf buffer) { - ByteBuf result; - ByteBuf directCopyBuf = alloc.buffer(buffer.capacity(), buffer.maxCapacity()); - directCopyBuf.writeBytes(buffer, 0, buffer.writerIndex()); - directCopyBuf.readerIndex(buffer.readerIndex()); - result = directCopyBuf; - assert result.isDirect(); - assert result.capacity() == buffer.capacity(); - assert buffer.readerIndex() == result.readerIndex(); - return result; - } - - public static ByteBuf fromByteArray(ByteBufAllocator alloc, byte[] array) { - ByteBuf result = alloc.buffer(array.length); + public static Buffer fromByteArray(BufferAllocator alloc, byte[] array) { + Buffer result = alloc.allocate(array.length); result.writeBytes(array); return result; } @NotNull - public static ByteBuf readDirectNioBuffer(ByteBufAllocator alloc, ToIntFunction reader) { + public static Buffer readDirectNioBuffer(BufferAllocator alloc, ToIntFunction reader) { var buffer = readNullableDirectNioBuffer(alloc, reader); if (buffer == null) { throw new IllegalStateException("A non-nullable buffer read operation tried to return a \"not found\" element"); @@ -363,81 +352,54 @@ public class LLUtils { return buffer; } - public static ByteBuf compositeBuffer(ByteBufAllocator alloc, ByteBuf buffer) { - return buffer; - } - - public static ByteBuf compositeBuffer(ByteBufAllocator alloc, ByteBuf buffer1, ByteBuf buffer2) { - try { - if (buffer1.readableBytes() == 0) { - return compositeBuffer(alloc, buffer2.retain()); - } else if (buffer2.readableBytes() == 0) { - return compositeBuffer(alloc, buffer1.retain()); - } - CompositeByteBuf result = alloc.compositeBuffer(2); - try { - result.addComponent(true, buffer1.retain()); - result.addComponent(true, buffer2.retain()); - return result.consolidate().retain(); - } finally { - result.release(); - } - } finally { - buffer1.release(); - buffer2.release(); + public static Send compositeBuffer(BufferAllocator alloc, Send buffer) { + try (var composite = buffer.receive().compact()) { + assert composite.countReadableComponents() == 1 || composite.countReadableComponents() == 0; + return composite.send(); } } - public static ByteBuf compositeBuffer(ByteBufAllocator alloc, ByteBuf buffer1, ByteBuf buffer2, ByteBuf buffer3) { - try { - if (buffer1.readableBytes() == 0) { - return compositeBuffer(alloc, buffer2.retain(), buffer3.retain()); - } else if (buffer2.readableBytes() == 0) { - return compositeBuffer(alloc, buffer1.retain(), buffer3.retain()); - } else if (buffer3.readableBytes() == 0) { - return compositeBuffer(alloc, buffer1.retain(), buffer2.retain()); + public static Send compositeBuffer(BufferAllocator alloc, Send buffer1, Send buffer2) { + try (buffer1) { + try (buffer2) { + try (var 
composite = CompositeBuffer.compose(alloc, buffer1, buffer2).compact()) { + assert composite.countReadableComponents() == 1 || composite.countReadableComponents() == 0; + return composite.send(); + } } - CompositeByteBuf result = alloc.compositeBuffer(3); - try { - result.addComponent(true, buffer1.retain()); - result.addComponent(true, buffer2.retain()); - result.addComponent(true, buffer3.retain()); - return result.consolidate().retain(); - } finally { - result.release(); - } - } finally { - buffer1.release(); - buffer2.release(); - buffer3.release(); } } - public static ByteBuf compositeBuffer(ByteBufAllocator alloc, ByteBuf... buffers) { - try { - switch (buffers.length) { - case 0: - return alloc.buffer(0); - case 1: - return compositeBuffer(alloc, buffers[0].retain().retain()); - case 2: - return compositeBuffer(alloc, buffers[0].retain(), buffers[1].retain()); - case 3: - return compositeBuffer(alloc, buffers[0].retain(), buffers[1].retain(), buffers[2].retain()); - default: - CompositeByteBuf result = alloc.compositeBuffer(buffers.length); - try { - for (ByteBuf buffer : buffers) { - result.addComponent(true, buffer.retain()); - } - return result.consolidate().retain(); - } finally { - result.release(); + public static Send compositeBuffer(BufferAllocator alloc, Send buffer1, Send buffer2, Send buffer3) { + try (buffer1) { + try (buffer2) { + try (buffer3) { + try (var composite = CompositeBuffer.compose(alloc, buffer1, buffer2, buffer3).compact()) { + assert composite.countReadableComponents() == 1 || composite.countReadableComponents() == 0; + return composite.send(); } + } } + } + } + + public static Send compositeBuffer(BufferAllocator alloc, Send... buffers) { + try { + return switch (buffers.length) { + case 0 -> alloc.allocate(0).send(); + case 1 -> compositeBuffer(alloc, buffers[0]); + case 2 -> compositeBuffer(alloc, buffers[0], buffers[1]); + case 3 -> compositeBuffer(alloc, buffers[0], buffers[1], buffers[2]); + default -> { + try (var composite = CompositeBuffer.compose(alloc, buffers).compact()) { + assert composite.countReadableComponents() == 1 || composite.countReadableComponents() == 0; + yield composite.send(); + } + } + }; } finally { - for (ByteBuf buffer : buffers) { - buffer.release(); + for (Send buffer : buffers) { + buffer.close(); } } } @@ -467,6 +429,33 @@ public class LLUtils { }); } + public static Mono> resolveLLDelta(Mono prev, UpdateReturnMode updateReturnMode) { + return prev.handle((delta, sink) -> { + try (delta) { + switch (updateReturnMode) { + case GET_NEW_VALUE -> { + var current = delta.current(); + if (current != null) { + sink.next(current); + } else { + sink.complete(); + } + } + case GET_OLD_VALUE -> { + var previous = delta.previous(); + if (previous != null) { + sink.next(previous); + } else { + sink.complete(); + } + } + case NOTHING -> sink.complete(); + default -> sink.error(new IllegalStateException()); + } + } + }); + } + public static Mono> mapDelta(Mono> mono, SerializationFunction<@NotNull T, @Nullable U> mapper) { return mono.handle((delta, sink) -> { @@ -492,38 +481,57 @@ public class LLUtils { }); } + public static Mono> mapLLDelta(Mono mono, + SerializationFunction<@NotNull Send, @Nullable U> mapper) { + return mono.handle((delta, sink) -> { + try { + try (Send prev = delta.previous()) { + try (Send curr = delta.current()) { + U newPrev; + U newCurr; + if (prev != null) { + newPrev = mapper.apply(prev); + } else { + newPrev = null; + } + if (curr != null) { + newCurr = mapper.apply(curr); + } else { + newCurr = null; + } + 
sink.next(new Delta<>(newPrev, newCurr)); + } + } + } catch (SerializationException ex) { + sink.error(ex); + } + }); + } + public static boolean isDeltaChanged(Delta delta) { return !Objects.equals(delta.previous(), delta.current()); } - public static Mono lazyRetain(ByteBuf buf) { - return Mono.just(buf).map(ByteBuf::retain); + public static Mono> lazyRetain(Buffer buf) { + return Mono.just(buf).map(b -> b.copy().send()); } - public static Mono lazyRetainRange(LLRange range) { - return Mono.just(range).map(LLRange::retain); + public static Mono> lazyRetainRange(LLRange range) { + return Mono.just(range).map(r -> r.copy().send()); } - public static Mono lazyRetain(Callable bufCallable) { - return Mono.fromCallable(bufCallable).cacheInvalidateIf(byteBuf -> { - // Retain if the value has been cached previously - byteBuf.retain(); - return false; - }); + public static Mono> lazyRetain(Callable> bufCallable) { + return Mono.fromCallable(bufCallable); } - public static Mono lazyRetainRange(Callable rangeCallable) { - return Mono.fromCallable(rangeCallable).cacheInvalidateIf(range -> { - // Retain if the value has been cached previously - range.retain(); - return false; - }); + public static Mono> lazyRetainRange(Callable> rangeCallable) { + return Mono.fromCallable(rangeCallable); } public static Mono handleDiscard(Mono mono) { return mono .doOnDiscard(Object.class, obj -> { - if (obj instanceof ReferenceCounted o) { + if (obj instanceof SafeCloseable o) { discardRefCounted(o); } else if (obj instanceof Entry o) { discardEntry(o); @@ -539,13 +547,15 @@ public class LLUtils { discardLLRange(o); } else if (obj instanceof Delta o) { discardDelta(o); + } else if (obj instanceof Send o) { + discardSend(o); } else if (obj instanceof Map o) { discardMap(o); } }); // todo: check if the single object discard hook is more performant /* - .doOnDiscard(ReferenceCounted.class, LLUtils::discardRefCounted) + .doOnDiscard(SafeCloseable.class, LLUtils::discardRefCounted) .doOnDiscard(Map.Entry.class, LLUtils::discardEntry) .doOnDiscard(Collection.class, LLUtils::discardCollection) .doOnDiscard(Tuple2.class, LLUtils::discardTuple2) @@ -553,6 +563,7 @@ public class LLUtils { .doOnDiscard(LLEntry.class, LLUtils::discardLLEntry) .doOnDiscard(LLRange.class, LLUtils::discardLLRange) .doOnDiscard(Delta.class, LLUtils::discardDelta) + .doOnDiscard(Send.class, LLUtils::discardSend) .doOnDiscard(Map.class, LLUtils::discardMap); */ @@ -561,7 +572,7 @@ public class LLUtils { public static Flux handleDiscard(Flux mono) { return mono .doOnDiscard(Object.class, obj -> { - if (obj instanceof ReferenceCounted o) { + if (obj instanceof SafeCloseable o) { discardRefCounted(o); } else if (obj instanceof Entry o) { discardEntry(o); @@ -577,15 +588,15 @@ public class LLUtils { discardLLRange(o); } else if (obj instanceof Delta o) { discardDelta(o); + } else if (obj instanceof Send o) { + discardSend(o); } else if (obj instanceof Map o) { discardMap(o); - } else { - System.err.println(obj.getClass().getName()); } }); // todo: check if the single object discard hook is more performant /* - .doOnDiscard(ReferenceCounted.class, LLUtils::discardRefCounted) + .doOnDiscard(SafeCloseable.class, LLUtils::discardRefCounted) .doOnDiscard(Map.Entry.class, LLUtils::discardEntry) .doOnDiscard(Collection.class, LLUtils::discardCollection) .doOnDiscard(Tuple2.class, LLUtils::discardTuple2) @@ -593,113 +604,78 @@ public class LLUtils { .doOnDiscard(LLEntry.class, LLUtils::discardLLEntry) .doOnDiscard(LLRange.class, LLUtils::discardLLRange) 
.doOnDiscard(Delta.class, LLUtils::discardDelta) + .doOnDiscard(Send.class, LLUtils::discardSend) .doOnDiscard(Map.class, LLUtils::discardMap); */ } private static void discardLLEntry(LLEntry entry) { - logger.trace("Releasing discarded ByteBuf"); - entry.release(); + logger.trace("Releasing discarded Buffer"); + entry.close(); } private static void discardLLRange(LLRange range) { - logger.trace("Releasing discarded ByteBuf"); - range.release(); + logger.trace("Releasing discarded Buffer"); + range.close(); } private static void discardEntry(Map.Entry e) { - if (e.getKey() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (e.getKey() instanceof Buffer bb) { + bb.close(); } - if (e.getValue() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (e.getValue() instanceof Buffer bb) { + bb.close(); } } private static void discardTuple2(Tuple2 e) { - if (e.getT1() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (e.getT1() instanceof Buffer bb) { + bb.close(); } - if (e.getT2() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (e.getT2() instanceof Buffer bb) { + bb.close(); } } private static void discardTuple3(Tuple3 e) { - if (e.getT1() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (e.getT1() instanceof Buffer bb) { + bb.close(); } else if (e.getT1() instanceof Optional opt) { - if (opt.isPresent() && opt.get() instanceof ByteBuf bb) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); + if (opt.isPresent() && opt.get() instanceof Buffer bb) { + bb.close(); } } - if (e.getT2() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (e.getT2() instanceof Buffer bb) { + bb.close(); } else if (e.getT1() instanceof Optional opt) { - if (opt.isPresent() && opt.get() instanceof ByteBuf bb) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); + if (opt.isPresent() && opt.get() instanceof Buffer bb) { + bb.close(); } } - if (e.getT3() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (e.getT3() instanceof Buffer bb) { + bb.close(); } else if (e.getT1() instanceof Optional opt) { - if (opt.isPresent() && opt.get() instanceof ByteBuf bb) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); + if (opt.isPresent() && opt.get() instanceof Buffer bb) { + bb.close(); } } } - private static void discardRefCounted(ReferenceCounted referenceCounted) { - if (referenceCounted.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - referenceCounted.release(); - } + private static void discardRefCounted(SafeCloseable safeCloseable) { + safeCloseable.close(); } private static void discardCollection(Collection collection) { for (Object o : collection) { - if (o instanceof ReferenceCounted referenceCounted) { - if (referenceCounted.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - referenceCounted.release(); - } + if (o instanceof SafeCloseable safeCloseable) { + safeCloseable.close(); } else if (o instanceof Map.Entry entry) { - if (entry.getKey() instanceof ReferenceCounted bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); 
- } + if (entry.getKey() instanceof SafeCloseable bb) { + bb.close(); } - if (entry.getValue() instanceof ReferenceCounted bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (entry.getValue() instanceof SafeCloseable bb) { + bb.close(); } } else { break; @@ -708,35 +684,27 @@ public class LLUtils { } private static void discardDelta(Delta delta) { - if (delta.previous() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (delta.previous() instanceof Buffer bb) { + bb.close(); } - if (delta.current() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (delta.current() instanceof Buffer bb) { + bb.close(); } } + private static void discardSend(Send send) { + send.close(); + } + private static void discardMap(Map map) { for (Entry entry : map.entrySet()) { boolean hasByteBuf = false; - if (entry.getKey() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (entry.getKey() instanceof Buffer bb) { + bb.close(); hasByteBuf = true; } - if (entry.getValue() instanceof ByteBuf bb) { - if (bb.refCnt() > 0) { - logger.trace("Releasing discarded ByteBuf"); - bb.release(); - } + if (entry.getValue() instanceof Buffer bb) { + bb.close(); hasByteBuf = true; } if (!hasByteBuf) { @@ -744,4 +712,28 @@ public class LLUtils { } } } + + public static boolean isDirect(Buffer key) { + if (key.countReadableComponents() == 1) { + return key.forEachReadable(0, (index, component) -> component.readableBuffer().isDirect()) >= 0; + } else { + return false; + } + } + + public static String deserializeString(Send bufferSend, int readerOffset, int length, Charset charset) { + try (var buffer = bufferSend.receive()) { + byte[] bytes = new byte[Math.min(length, buffer.readableBytes())]; + buffer.copyInto(readerOffset, bytes, 0, length); + return new String(bytes, charset); + } + } + + public static int utf8MaxBytes(String deserialized) { + return deserialized.length() * 3; + } + + public static void writeString(Buffer buf, String deserialized, Charset charset) { + buf.writeBytes(deserialized.getBytes(charset)); + } } diff --git a/src/main/java/it/cavallium/dbengine/database/SafeCloseable.java b/src/main/java/it/cavallium/dbengine/database/SafeCloseable.java new file mode 100644 index 0000000..565726c --- /dev/null +++ b/src/main/java/it/cavallium/dbengine/database/SafeCloseable.java @@ -0,0 +1,7 @@ +package it.cavallium.dbengine.database; + +public interface SafeCloseable extends AutoCloseable { + + @Override + void close(); +} diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java index f9006b1..92cc1fd 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.serialization.Serializer; import java.util.function.Function; @@ -11,9 +11,9 @@ public class DatabaseEmpty { @SuppressWarnings({"unused", "InstantiationOfUtilityClass"}) public static final Nothing NOTHING = new Nothing(); - public static final Serializer 
NOTHING_SERIALIZER = new Serializer<>() { + public static final Serializer NOTHING_SERIALIZER = new Serializer<>() { @Override - public @NotNull Nothing deserialize(@NotNull ByteBuf serialized) { + public @NotNull Nothing deserialize(@NotNull Buffer serialized) { try { return NOTHING; } finally { @@ -22,7 +22,7 @@ public class DatabaseEmpty { } @Override - public @NotNull ByteBuf serialize(@NotNull Nothing deserialized) { + public @NotNull Buffer serialize(@NotNull Nothing deserialized) { return EMPTY_BUFFER; } }; @@ -33,7 +33,7 @@ public class DatabaseEmpty { private DatabaseEmpty() { } - public static DatabaseStageEntry create(LLDictionary dictionary, ByteBuf key) { + public static DatabaseStageEntry create(LLDictionary dictionary, Buffer key) { return new DatabaseSingle<>(dictionary, key, NOTHING_SERIALIZER); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java index fd10eff..21c3ffa 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java @@ -1,6 +1,7 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Send; import io.netty.util.ReferenceCounted; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.Delta; @@ -38,40 +39,38 @@ import reactor.util.function.Tuples; */ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep> { - private final Serializer valueSerializer; + private final Serializer> valueSerializer; protected DatabaseMapDictionary(LLDictionary dictionary, - ByteBuf prefixKey, - SerializerFixedBinaryLength keySuffixSerializer, - Serializer valueSerializer) { + Send prefixKey, + SerializerFixedBinaryLength> keySuffixSerializer, + Serializer> valueSerializer) { // Do not retain or release or use the prefixKey here super(dictionary, prefixKey, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0); this.valueSerializer = valueSerializer; } public static DatabaseMapDictionary simple(LLDictionary dictionary, - SerializerFixedBinaryLength keySerializer, - Serializer valueSerializer) { - return new DatabaseMapDictionary<>(dictionary, dictionary.getAllocator().buffer(0), keySerializer, valueSerializer); + SerializerFixedBinaryLength> keySerializer, + Serializer> valueSerializer) { + return new DatabaseMapDictionary<>(dictionary, dictionary.getAllocator().allocate(0).send(), keySerializer, valueSerializer); } public static DatabaseMapDictionary tail(LLDictionary dictionary, - ByteBuf prefixKey, - SerializerFixedBinaryLength keySuffixSerializer, - Serializer valueSerializer) { + Send prefixKey, + SerializerFixedBinaryLength> keySuffixSerializer, + Serializer> valueSerializer) { return new DatabaseMapDictionary<>(dictionary, prefixKey, keySuffixSerializer, valueSerializer); } - private ByteBuf toKey(ByteBuf suffixKey) { - try { + private Send toKey(Send suffixKeyToSend) { + try (var suffixKey = suffixKeyToSend.receive()) { assert suffixKeyConsistency(suffixKey.readableBytes()); - return LLUtils.compositeBuffer(dictionary.getAllocator(), keyPrefix.retain(), suffixKey.retain()); - } finally { - suffixKey.release(); + return LLUtils.compositeBuffer(dictionary.getAllocator(), keyPrefix.copy().send(), suffixKey.send()); } } - private void deserializeValue(ByteBuf value, 
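
Serializer generics are being rewritten throughout this patch so that serialized values travel as Send<Buffer> rather than ByteBuf. As a self-contained illustration of that shape (a hypothetical standalone codec, not the project's Serializer interface, whose exact signature is still in flux here), a fixed-length long codec could look like:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;

final class LongCodecSketch {

	private final BufferAllocator alloc;

	LongCodecSketch(BufferAllocator alloc) {
		this.alloc = alloc;
	}

	// Serialization hands ownership of the freshly written buffer to the caller.
	Send<Buffer> serialize(long value) {
		Buffer buf = alloc.allocate(Long.BYTES);
		buf.writeLong(value);
		return buf.send();
	}

	// Deserialization takes ownership and releases the buffer when done.
	long deserialize(Send<Buffer> serialized) {
		try (Buffer buf = serialized.receive()) {
			return buf.readLong();
		}
	}
}
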
SynchronousSink sink) { + private void deserializeValue(Send value, SynchronousSink sink) { try { sink.next(valueSerializer.deserialize(value)); } catch (SerializationException ex) { @@ -202,7 +201,7 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep getSerializedUpdater(SerializationFunction<@Nullable U, @Nullable U> updater) { + public SerializationFunction<@Nullable Buffer, @Nullable Buffer> getSerializedUpdater(SerializationFunction<@Nullable U, @Nullable U> updater) { return oldSerialized -> { try { U result; @@ -224,7 +223,7 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep BiSerializationFunction<@Nullable ByteBuf, X, @Nullable ByteBuf> getSerializedUpdater( + public BiSerializationFunction<@Nullable Buffer, X, @Nullable Buffer> getSerializedUpdater( BiSerializationFunction<@Nullable U, X, @Nullable U> updater) { return (oldSerialized, extra) -> { try { @@ -336,7 +335,7 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep>> getMulti(@Nullable CompositeSnapshot snapshot, Flux keys, boolean existsAlmostCertainly) { return dictionary.getMulti(resolveSnapshot(snapshot), keys.flatMap(keySuffix -> Mono.fromCallable(() -> { - ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + Buffer keySuffixBuf = serializeSuffix(keySuffix); try { var key = toKey(keySuffixBuf.retain()); try { @@ -367,9 +366,9 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep extends DatabaseMapDictionaryDeep Flux> updateMulti(Flux> entries, BiSerializationFunction<@Nullable U, X, @Nullable U> updater) { - Flux> serializedEntries = entries + Flux> serializedEntries = entries .flatMap(entry -> Mono .fromCallable(() -> Tuples.of(serializeSuffix(entry.getT1()), entry.getT2())) ) .doOnDiscard(Tuple2.class, uncastedEntry -> { - if (uncastedEntry.getT1() instanceof ByteBuf byteBuf) { + if (uncastedEntry.getT1() instanceof Buffer byteBuf) { byteBuf.release(); } - if (uncastedEntry.getT2() instanceof ByteBuf byteBuf) { + if (uncastedEntry.getT2() instanceof Buffer byteBuf) { byteBuf.release(); } }); @@ -435,7 +434,7 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep { try { - ByteBuf keySuffixWithExt = stripPrefix(key.retain(), false); + Buffer keySuffixWithExt = stripPrefix(key.retain(), false); try { sink.next(Map.entry(deserializeSuffix(keySuffixWithExt.retainedSlice()), new DatabaseSingleMapped<>(new DatabaseSingle<>(dictionary, @@ -459,10 +458,10 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep>handle((serializedEntry, sink) -> { - ByteBuf key = serializedEntry.getKey(); - ByteBuf value = serializedEntry.getValue(); + Buffer key = serializedEntry.getKey(); + Buffer value = serializedEntry.getValue(); try { - ByteBuf keySuffix = stripPrefix(key.retain(), false); + Buffer keySuffix = stripPrefix(key.retain(), false); try { sink.next(Map.entry(deserializeSuffix(keySuffix.retain()), valueSerializer.deserialize(value.retain()))); @@ -477,12 +476,12 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep { - if (uncastedEntry.getKey() instanceof ByteBuf byteBuf) { + if (uncastedEntry.getKey() instanceof Buffer byteBuf) { if (byteBuf.refCnt() > 0) { byteBuf.release(); } } - if (uncastedEntry.getValue() instanceof ByteBuf byteBuf) { + if (uncastedEntry.getValue() instanceof Buffer byteBuf) { if (byteBuf.refCnt() > 0) { byteBuf.release(); } @@ -496,9 +495,9 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep { try { - ByteBuf serializedKey = 
toKey(serializeSuffix(entry.getKey())); + Buffer serializedKey = toKey(serializeSuffix(entry.getKey())); try { - ByteBuf serializedValue = valueSerializer.serialize(entry.getValue()); + Buffer serializedValue = valueSerializer.serialize(entry.getValue()); try { sink.next(new LLEntry(serializedKey.retain(), serializedValue.retain())); } finally { diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryDeep.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryDeep.java index add1919..1d3c716 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryDeep.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryDeep.java @@ -1,7 +1,9 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Resource; +import io.netty.buffer.api.Send; import io.netty.util.IllegalReferenceCountException; import io.netty.util.ReferenceCounted; import it.cavallium.dbengine.client.BadBlock; @@ -28,178 +30,155 @@ import reactor.util.function.Tuples; public class DatabaseMapDictionaryDeep> implements DatabaseStageMap { protected final LLDictionary dictionary; - private final ByteBufAllocator alloc; + private final BufferAllocator alloc; protected final SubStageGetter subStageGetter; - protected final SerializerFixedBinaryLength keySuffixSerializer; - protected final ByteBuf keyPrefix; + protected final SerializerFixedBinaryLength> keySuffixSerializer; + protected final Buffer keyPrefix; protected final int keyPrefixLength; protected final int keySuffixLength; protected final int keyExtLength; protected final LLRange range; - protected final Mono rangeMono; + protected final Mono> rangeMono; private volatile boolean released; - private static ByteBuf incrementPrefix(ByteBufAllocator alloc, ByteBuf originalKey, int prefixLength) { - try { + private static Send incrementPrefix(BufferAllocator alloc, Send originalKeySend, int prefixLength) { + try (var originalKey = originalKeySend.receive()) { assert originalKey.readableBytes() >= prefixLength; - ByteBuf copiedBuf = alloc.buffer(originalKey.writerIndex(), originalKey.writerIndex() + 1); - try { + try (Buffer copiedBuf = alloc.allocate(originalKey.writerOffset())) { boolean overflowed = true; final int ff = 0xFF; int writtenBytes = 0; - copiedBuf.writerIndex(prefixLength); + copiedBuf.writerOffset(prefixLength); for (int i = prefixLength - 1; i >= 0; i--) { int iByte = originalKey.getUnsignedByte(i); if (iByte != ff) { - copiedBuf.setByte(i, iByte + 1); + copiedBuf.setUnsignedByte(i, iByte + 1); writtenBytes++; overflowed = false; break; } else { - copiedBuf.setByte(i, 0x00); + copiedBuf.setUnsignedByte(i, 0x00); writtenBytes++; overflowed = true; } } assert prefixLength - writtenBytes >= 0; if (prefixLength - writtenBytes > 0) { - copiedBuf.setBytes(0, originalKey, 0, (prefixLength - writtenBytes)); + originalKey.copyInto(0, copiedBuf, 0, (prefixLength - writtenBytes)); } - copiedBuf.writerIndex(copiedBuf.capacity()); + copiedBuf.writerOffset(copiedBuf.capacity()); - if (originalKey.writerIndex() - prefixLength > 0) { - copiedBuf.setBytes(prefixLength, originalKey, prefixLength, originalKey.writerIndex() - prefixLength); + if (originalKey.writerOffset() - prefixLength > 0) { + originalKey.copyInto(prefixLength, copiedBuf, prefixLength, originalKey.writerOffset() 
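
DatabaseMapDictionary, completed above, addresses each entry at prefix ∥ suffix, with a fixed-length suffix so that all of a map's keys sort contiguously and can be range-scanned. A byte-level illustration with plain arrays (hypothetical helper, not project code):

import java.nio.ByteBuffer;
import java.util.Arrays;

final class CompositeKeySketch {

	// An entry {suffix -> value} of a map with key prefix P is stored at P ∥ suffix.
	static byte[] toKey(byte[] prefix, byte[] suffix, int suffixLength) {
		if (suffix.length != suffixLength) {
			throw new IllegalArgumentException("Suffix must be exactly " + suffixLength + " bytes");
		}
		return ByteBuffer.allocate(prefix.length + suffix.length).put(prefix).put(suffix).array();
	}

	public static void main(String[] args) {
		byte[] prefix = {0x10, 0x20};
		// Every key of this map falls in [prefix ∥ 00 00, incrementPrefix(prefix) ∥ 00 00),
		// which is exactly the LLRange that DatabaseMapDictionaryDeep computes below.
		System.out.println(Arrays.toString(toKey(prefix, new byte[] {0x00, 0x01}, 2)));
	}
}
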
- prefixLength); } if (overflowed) { - for (int i = 0; i < copiedBuf.writerIndex(); i++) { - copiedBuf.setByte(i, 0xFF); + for (int i = 0; i < copiedBuf.writerOffset(); i++) { + copiedBuf.setUnsignedByte(i, 0xFF); } - copiedBuf.writeZero(1); + copiedBuf.writeByte((byte) 0x00); } - return copiedBuf.retain(); - } finally { - copiedBuf.release(); + return copiedBuf.send(); } - } finally { - originalKey.release(); } } - static ByteBuf firstRangeKey(ByteBufAllocator alloc, - ByteBuf prefixKey, + static Send firstRangeKey(BufferAllocator alloc, + Send prefixKey, int prefixLength, int suffixLength, int extLength) { return zeroFillKeySuffixAndExt(alloc, prefixKey, prefixLength, suffixLength, extLength); } - static ByteBuf nextRangeKey(ByteBufAllocator alloc, - ByteBuf prefixKey, + static Send nextRangeKey(BufferAllocator alloc, + Send prefixKey, int prefixLength, int suffixLength, int extLength) { - try { - ByteBuf nonIncremented = zeroFillKeySuffixAndExt(alloc, prefixKey.retain(), prefixLength, suffixLength, extLength); - try { - return incrementPrefix(alloc, nonIncremented.retain(), prefixLength); - } finally { - nonIncremented.release(); + try (prefixKey) { + try (Send nonIncremented = zeroFillKeySuffixAndExt(alloc, prefixKey, prefixLength, suffixLength, + extLength)) { + return incrementPrefix(alloc, nonIncremented, prefixLength); } - } finally { - prefixKey.release(); } } - protected static ByteBuf zeroFillKeySuffixAndExt(ByteBufAllocator alloc, - ByteBuf prefixKey, + protected static Send zeroFillKeySuffixAndExt(BufferAllocator alloc, + Send prefixKeySend, int prefixLength, int suffixLength, int extLength) { - try { + try (var prefixKey = prefixKeySend.receive()) { assert prefixKey.readableBytes() == prefixLength; assert suffixLength > 0; assert extLength >= 0; - ByteBuf zeroSuffixAndExt = alloc.buffer(suffixLength + extLength, suffixLength + extLength); - try { - zeroSuffixAndExt.writeZero(suffixLength + extLength); - ByteBuf result = LLUtils.compositeBuffer(alloc, prefixKey.retain(), zeroSuffixAndExt.retain()); - try { - return result.retain(); - } finally { - result.release(); + try (Buffer zeroSuffixAndExt = alloc.allocate(suffixLength + extLength)) { + for (int i = 0; i < suffixLength + extLength; i++) { + zeroSuffixAndExt.writeByte((byte) 0x0); + } + try (Send result = LLUtils.compositeBuffer(alloc, prefixKey.send(), zeroSuffixAndExt.send())) { + return result; } - } finally { - zeroSuffixAndExt.release(); } - } finally { - prefixKey.release(); } } - static ByteBuf firstRangeKey( - ByteBufAllocator alloc, - ByteBuf prefixKey, - ByteBuf suffixKey, + static Send firstRangeKey( + BufferAllocator alloc, + Send prefixKey, + Send suffixKey, int prefixLength, int suffixLength, int extLength) { return zeroFillKeyExt(alloc, prefixKey, suffixKey, prefixLength, suffixLength, extLength); } - static ByteBuf nextRangeKey( - ByteBufAllocator alloc, - ByteBuf prefixKey, - ByteBuf suffixKey, + static Send nextRangeKey( + BufferAllocator alloc, + Send prefixKey, + Send suffixKey, int prefixLength, int suffixLength, int extLength) { - try { - ByteBuf nonIncremented = zeroFillKeyExt(alloc, - prefixKey.retain(), - suffixKey.retain(), - prefixLength, - suffixLength, - extLength - ); - try { - return incrementPrefix(alloc, nonIncremented.retain(), prefixLength + suffixLength); - } finally { - nonIncremented.release(); - } - } finally { - prefixKey.release(); - suffixKey.release(); + try (Send nonIncremented = zeroFillKeyExt(alloc, + prefixKey, + suffixKey, + prefixLength, + suffixLength, + extLength + )) { + 
return incrementPrefix(alloc, nonIncremented, prefixLength + suffixLength); } } - protected static ByteBuf zeroFillKeyExt( - ByteBufAllocator alloc, - ByteBuf prefixKey, - ByteBuf suffixKey, + protected static Send zeroFillKeyExt( + BufferAllocator alloc, + Send prefixKeySend, + Send suffixKeySend, int prefixLength, int suffixLength, int extLength) { - try { - assert prefixKey.readableBytes() == prefixLength; - assert suffixKey.readableBytes() == suffixLength; - assert suffixLength > 0; - assert extLength >= 0; - ByteBuf result = LLUtils.compositeBuffer(alloc, - prefixKey.retain(), - suffixKey.retain(), - alloc.buffer(extLength, extLength).writeZero(extLength) - ); - try { - assert result.readableBytes() == prefixLength + suffixLength + extLength; - return result.retain(); - } finally { - result.release(); + try (var prefixKey = prefixKeySend.receive()) { + try (var suffixKey = suffixKeySend.receive()) { + assert prefixKey.readableBytes() == prefixLength; + assert suffixKey.readableBytes() == suffixLength; + assert suffixLength > 0; + assert extLength >= 0; + + try (var ext = alloc.allocate(extLength)) { + for (int i = 0; i < extLength; i++) { + ext.writeByte((byte) 0); + } + + try (Buffer result = LLUtils.compositeBuffer(alloc, prefixKey.send(), suffixKey.send(), ext.send()) + .receive()) { + assert result.readableBytes() == prefixLength + suffixLength + extLength; + return result.send(); + } + } } - } finally { - prefixKey.release(); - suffixKey.release(); } } @@ -208,22 +187,18 @@ public class DatabaseMapDictionaryDeep> implem */ @Deprecated public static DatabaseMapDictionaryDeep> simple(LLDictionary dictionary, - SerializerFixedBinaryLength keySerializer, + SerializerFixedBinaryLength> keySerializer, SubStageGetterSingle subStageGetter) { - return new DatabaseMapDictionaryDeep<>(dictionary, - dictionary.getAllocator().buffer(0), - keySerializer, - subStageGetter, - 0 - ); + return new DatabaseMapDictionaryDeep<>(dictionary, dictionary.getAllocator().allocate(0).send(), + keySerializer, subStageGetter, 0); } public static > DatabaseMapDictionaryDeep deepTail(LLDictionary dictionary, - SerializerFixedBinaryLength keySerializer, + SerializerFixedBinaryLength> keySerializer, int keyExtLength, SubStageGetter subStageGetter) { return new DatabaseMapDictionaryDeep<>(dictionary, - dictionary.getAllocator().buffer(0), + dictionary.getAllocator().allocate(0).send(), keySerializer, subStageGetter, keyExtLength @@ -231,56 +206,45 @@ public class DatabaseMapDictionaryDeep> implem } public static > DatabaseMapDictionaryDeep deepIntermediate(LLDictionary dictionary, - ByteBuf prefixKey, - SerializerFixedBinaryLength keySuffixSerializer, + Send prefixKey, + SerializerFixedBinaryLength> keySuffixSerializer, SubStageGetter subStageGetter, int keyExtLength) { return new DatabaseMapDictionaryDeep<>(dictionary, prefixKey, keySuffixSerializer, subStageGetter, keyExtLength); } protected DatabaseMapDictionaryDeep(LLDictionary dictionary, - ByteBuf prefixKey, - SerializerFixedBinaryLength keySuffixSerializer, + Send prefixKey, + SerializerFixedBinaryLength> keySuffixSerializer, SubStageGetter subStageGetter, int keyExtLength) { - try { - this.dictionary = dictionary; - this.alloc = dictionary.getAllocator(); - this.subStageGetter = subStageGetter; - this.keySuffixSerializer = keySuffixSerializer; - assert prefixKey.refCnt() > 0; - this.keyPrefix = prefixKey.retain(); - assert keyPrefix.refCnt() > 0; - this.keyPrefixLength = keyPrefix.readableBytes(); - this.keySuffixLength = 
keySuffixSerializer.getSerializedBinaryLength(); - this.keyExtLength = keyExtLength; - ByteBuf firstKey = firstRangeKey(alloc, - keyPrefix.retain(), + this.dictionary = dictionary; + this.alloc = dictionary.getAllocator(); + this.subStageGetter = subStageGetter; + this.keySuffixSerializer = keySuffixSerializer; + this.keyPrefix = prefixKey.receive(); + assert keyPrefix.isAccessible(); + this.keyPrefixLength = keyPrefix.readableBytes(); + this.keySuffixLength = keySuffixSerializer.getSerializedBinaryLength(); + this.keyExtLength = keyExtLength; + try (Buffer firstKey = firstRangeKey(alloc, + keyPrefix.copy().send(), + keyPrefixLength, + keySuffixLength, + keyExtLength + ).receive()) { + try (Buffer nextRangeKey = nextRangeKey(alloc, + keyPrefix.copy().send(), keyPrefixLength, keySuffixLength, keyExtLength - ); - try { - ByteBuf nextRangeKey = nextRangeKey(alloc, - keyPrefix.retain(), - keyPrefixLength, - keySuffixLength, - keyExtLength - ); - try { - assert keyPrefix.refCnt() > 0; - assert keyPrefixLength == 0 || !LLUtils.equals(firstKey, nextRangeKey); - this.range = keyPrefixLength == 0 ? LLRange.all() : LLRange.of(firstKey.retain(), nextRangeKey.retain()); - this.rangeMono = LLUtils.lazyRetainRange(this.range); - assert subStageKeysConsistency(keyPrefixLength + keySuffixLength + keyExtLength); - } finally { - nextRangeKey.release(); - } - } finally { - firstKey.release(); + ).receive()) { + assert keyPrefix.isAccessible(); + assert keyPrefixLength == 0 || !LLUtils.equals(firstKey, nextRangeKey); + this.range = keyPrefixLength == 0 ? LLRange.all() : LLRange.of(firstKey.send(), nextRangeKey.send()); + this.rangeMono = LLUtils.lazyRetainRange(this.range); + assert subStageKeysConsistency(keyPrefixLength + keySuffixLength + keyExtLength); } - } finally { - prefixKey.release(); } } @@ -302,49 +266,31 @@ public class DatabaseMapDictionaryDeep> implem /** * Keep only suffix and ext */ - protected ByteBuf stripPrefix(ByteBuf key, boolean slice) { - try { - if (slice) { - return key.retainedSlice(this.keyPrefixLength, key.readableBytes() - this.keyPrefixLength); - } else { - return key.retain().readerIndex(key.readerIndex() + keyPrefixLength); - } - } finally { - key.release(); + protected Send stripPrefix(Send keyToReceive) { + try (var key = keyToReceive.receive()) { + return key.copy(this.keyPrefixLength, key.readableBytes() - this.keyPrefixLength).send(); } } /** * Remove ext from full key */ - protected ByteBuf removeExtFromFullKey(ByteBuf key, boolean slice) { - try { - if (slice) { - return key.retainedSlice(key.readerIndex(), keyPrefixLength + keySuffixLength); - } else { - return key.retain().writerIndex(key.writerIndex() - (keyPrefixLength + keySuffixLength)); - } - } finally { - key.release(); + protected Send removeExtFromFullKey(Send keyToReceive) { + try (var key = keyToReceive.receive()) { + return key.copy(key.readerOffset(), keyPrefixLength + keySuffixLength).send(); } } /** * Add prefix to suffix */ - protected ByteBuf toKeyWithoutExt(ByteBuf suffixKey) { - try { + protected Send toKeyWithoutExt(Send suffixKeyToReceive) { + try (var suffixKey = suffixKeyToReceive.receive()) { assert suffixKey.readableBytes() == keySuffixLength; - ByteBuf result = LLUtils.compositeBuffer(alloc, keyPrefix.retain(), suffixKey.retain()); - assert keyPrefix.refCnt() > 0; - try { + try (Buffer result = LLUtils.compositeBuffer(alloc, keyPrefix.copy().send(), suffixKey.send()).receive()) { assert result.readableBytes() == keyPrefixLength + keySuffixLength; - return result.retain(); - } finally { - 
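
The constructor above derives the map's LLRange from firstRangeKey/nextRangeKey, whose core is incrementPrefix: scan the prefix from the right, zero each 0xFF byte, and bump the first byte that can absorb the carry, yielding the smallest key strictly greater than every key sharing the prefix. A standalone sketch on plain arrays; unlike the patch, which extends the key when every prefix byte is 0xFF, this version simply throws in that case:

import java.util.Arrays;

final class PrefixIncrementSketch {

	static byte[] incrementPrefix(byte[] key, int prefixLength) {
		byte[] out = Arrays.copyOf(key, key.length);
		for (int i = prefixLength - 1; i >= 0; i--) {
			if (out[i] != (byte) 0xFF) {
				out[i]++;       // rightmost byte that can absorb the carry
				return out;
			}
			out[i] = 0x00;      // 0xFF rolls over; the carry continues leftwards
		}
		// Every prefix byte was 0xFF: no larger prefix of the same length exists.
		throw new IllegalStateException("prefix overflow");
	}

	public static void main(String[] args) {
		// {0x00, 0xFF} -> {0x01, 0x00}: the exclusive end key of a prefix range scan.
		System.out.println(Arrays.toString(incrementPrefix(new byte[] {0x00, (byte) 0xFF}, 2)));
	}
}
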
result.release(); + return result.send(); } - } finally { - suffixKey.release(); } } @@ -356,26 +302,23 @@ public class DatabaseMapDictionaryDeep> implem } } - protected LLRange toExtRange(ByteBuf keySuffix) { - try { - ByteBuf first = firstRangeKey(alloc, - keyPrefix.retain(), - keySuffix.retain(), + protected Send toExtRange(Buffer keySuffix) { + try (Buffer first = firstRangeKey(alloc, + keyPrefix.copy().send(), + keySuffix.copy().send(), + keyPrefixLength, + keySuffixLength, + keyExtLength + ).receive()) { + try (Buffer end = nextRangeKey(alloc, + keyPrefix.copy().send(), + keySuffix.copy().send(), keyPrefixLength, keySuffixLength, keyExtLength - ); - ByteBuf end = nextRangeKey(alloc, - keyPrefix.retain(), - keySuffix.retain(), - keyPrefixLength, - keySuffixLength, - keyExtLength - ); - assert keyPrefix.refCnt() > 0; - return LLRange.of(first, end); - } finally { - keySuffix.release(); + ).receive()) { + return LLRange.of(first.send(), end.send()).send(); + } } } @@ -392,16 +335,14 @@ public class DatabaseMapDictionaryDeep> implem @Override public Mono at(@Nullable CompositeSnapshot snapshot, T keySuffix) { return Mono.using( - () -> serializeSuffix(keySuffix), - keySuffixData -> { - return Mono.using( - () -> toKeyWithoutExt(keySuffixData.retain()), - keyWithoutExt -> this.subStageGetter - .subStage(dictionary, snapshot, LLUtils.lazyRetain(keyWithoutExt)), - ReferenceCounted::release - ); - }, - ReferenceCounted::release + () -> serializeSuffix(keySuffix).receive(), + keySuffixData -> Mono.using( + () -> toKeyWithoutExt(keySuffixData.send()).receive(), + keyWithoutExt -> this.subStageGetter + .subStage(dictionary, snapshot, LLUtils.lazyRetain(keyWithoutExt)), + Resource::close + ), + Resource::close ).transform(LLUtils::handleDiscard).doOnDiscard(DatabaseStage.class, DatabaseStage::release); } @@ -415,26 +356,21 @@ public class DatabaseMapDictionaryDeep> implem return dictionary.badBlocks(rangeMono); } - private static record GroupBuffers(ByteBuf groupKeyWithExt, ByteBuf groupKeyWithoutExt, ByteBuf groupSuffix) {} + private static record GroupBuffers(Buffer groupKeyWithExt, Buffer groupKeyWithoutExt, Buffer groupSuffix) {} @Override public Flux> getAllStages(@Nullable CompositeSnapshot snapshot) { return Flux .defer(() -> dictionary.getRangeKeyPrefixes(resolveSnapshot(snapshot), rangeMono, keyPrefixLength + keySuffixLength)) - .flatMapSequential(groupKeyWithoutExt -> Mono + .flatMapSequential(groupKeyWithoutExtSend -> Mono .using( () -> { - try { - var groupSuffix = this.stripPrefix(groupKeyWithoutExt.retain(), true); - try { + try (var groupKeyWithoutExt = groupKeyWithoutExtSend.receive()) { + try (var groupSuffix = this.stripPrefix(groupKeyWithoutExt.copy().send()).receive()) { assert subStageKeysConsistency(groupKeyWithoutExt.readableBytes() + keyExtLength); - return Tuples.of(groupKeyWithoutExt.retain(), groupSuffix.retain()); - } finally { - groupSuffix.release(); + return Tuples.of(groupKeyWithoutExt, groupSuffix); } - } finally { - groupKeyWithoutExt.release(); } }, groupKeyWithoutExtAndGroupSuffix -> this.subStageGetter @@ -444,14 +380,15 @@ public class DatabaseMapDictionaryDeep> implem ) .>handle((us, sink) -> { try { - sink.next(Map.entry(this.deserializeSuffix(groupKeyWithoutExtAndGroupSuffix.getT2().retain()), us)); + sink.next(Map.entry(this.deserializeSuffix(groupKeyWithoutExtAndGroupSuffix.getT2().send()), + us)); } catch (SerializationException ex) { sink.error(ex); } }), entry -> { - entry.getT1().release(); - entry.getT2().release(); + entry.getT1().close(); + 
entry.getT2().close(); } ) ) @@ -489,8 +426,8 @@ public class DatabaseMapDictionaryDeep> implem return dictionary.clear(); } else if (range.isSingle()) { return dictionary - .remove(LLUtils.lazyRetain(range.getSingle()), LLDictionaryResultType.VOID) - .doOnNext(ReferenceCounted::release) + .remove(LLUtils.lazyRetain(range::getSingle), LLDictionaryResultType.VOID) + .doOnNext(Send::close) .then(); } else { return dictionary.setRange(LLUtils.lazyRetainRange(range), Flux.empty()); @@ -499,31 +436,30 @@ public class DatabaseMapDictionaryDeep> implem } //todo: temporary wrapper. convert the whole class to buffers - protected T deserializeSuffix(ByteBuf keySuffix) throws SerializationException { - try { + protected T deserializeSuffix(Send keySuffixToReceive) throws SerializationException { + try (var keySuffix = keySuffixToReceive.receive()) { assert suffixKeyConsistency(keySuffix.readableBytes()); - var result = keySuffixSerializer.deserialize(keySuffix.retain()); - assert keyPrefix.refCnt() > 0; + var result = keySuffixSerializer.deserialize(keySuffix.send()); + assert keyPrefix.isAccessible(); return result; - } finally { - keySuffix.release(); } } //todo: temporary wrapper. convert the whole class to buffers - protected ByteBuf serializeSuffix(T keySuffix) throws SerializationException { - ByteBuf suffixData = keySuffixSerializer.serialize(keySuffix); - assert suffixKeyConsistency(suffixData.readableBytes()); - assert keyPrefix.refCnt() > 0; - return suffixData; + protected Send serializeSuffix(T keySuffix) throws SerializationException { + try (Buffer suffixData = keySuffixSerializer.serialize(keySuffix).receive()) { + assert suffixKeyConsistency(suffixData.readableBytes()); + assert keyPrefix.isAccessible(); + return suffixData.send(); + } } @Override public void release() { if (!released) { released = true; - this.range.release(); - this.keyPrefix.release(); + this.range.close(); + this.keyPrefix.close(); } else { throw new IllegalReferenceCountException(0, -1); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java index 806af29..6808e1f 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java @@ -1,7 +1,8 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.Delta; @@ -33,41 +34,37 @@ import reactor.util.function.Tuples; @SuppressWarnings("unused") public class DatabaseMapDictionaryHashed implements DatabaseStageMap> { - private final ByteBufAllocator alloc; + private final BufferAllocator alloc; private final DatabaseMapDictionary>> subDictionary; private final Function keySuffixHashFunction; protected DatabaseMapDictionaryHashed(LLDictionary dictionary, - ByteBuf prefixKey, - Serializer keySuffixSerializer, - Serializer valueSerializer, + Send prefixKey, + Serializer> keySuffixSerializer, + Serializer> valueSerializer, Function keySuffixHashFunction, - SerializerFixedBinaryLength keySuffixHashSerializer) { - try { - if (dictionary.getUpdateMode().block() != UpdateMode.ALLOW) { - 
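
DatabaseMapDictionaryHashed, whose constructor this hunk rewrites, indexes entries by a fixed-width hash of the key and keeps the full (key, value) pairs inside each hash bucket, so collisions stay correct; that bucket read-modify-write is also why it requires UpdateMode.ALLOW. An in-memory analogue (illustrative only):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

final class HashedMapSketch<K, V, H> {

	private final Map<H, Set<Map.Entry<K, V>>> buckets = new HashMap<>();
	private final Function<K, H> hash;

	HashedMapSketch(Function<K, H> hash) {
		this.hash = hash;
	}

	// Read-modify-write of the whole bucket: replace any previous pair for this key.
	void put(K key, V value) {
		Set<Map.Entry<K, V>> bucket = buckets.computeIfAbsent(hash.apply(key), h -> new HashSet<>());
		bucket.removeIf(e -> e.getKey().equals(key));
		bucket.add(Map.entry(key, value));
	}

	// The hash narrows the search to one bucket; the stored key disambiguates collisions.
	V get(K key) {
		Set<Map.Entry<K, V>> bucket = buckets.getOrDefault(hash.apply(key), Set.of());
		return bucket.stream()
				.filter(e -> e.getKey().equals(key))
				.map(Map.Entry::getValue)
				.findFirst()
				.orElse(null);
	}
}
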
throw new IllegalArgumentException("Hashed maps only works when UpdateMode is ALLOW"); - } - this.alloc = dictionary.getAllocator(); - ValueWithHashSerializer valueWithHashSerializer - = new ValueWithHashSerializer<>(alloc, keySuffixSerializer, valueSerializer); - ValuesSetSerializer> valuesSetSerializer - = new ValuesSetSerializer<>(alloc, valueWithHashSerializer); - this.subDictionary = DatabaseMapDictionary.tail(dictionary, - prefixKey.retain(), - keySuffixHashSerializer, - valuesSetSerializer - ); - this.keySuffixHashFunction = keySuffixHashFunction; - } finally { - prefixKey.release(); + SerializerFixedBinaryLength keySuffixHashSerializer) { + if (dictionary.getUpdateMode().block() != UpdateMode.ALLOW) { + throw new IllegalArgumentException("Hashed maps only works when UpdateMode is ALLOW"); } + this.alloc = dictionary.getAllocator(); + ValueWithHashSerializer valueWithHashSerializer + = new ValueWithHashSerializer<>(alloc, keySuffixSerializer, valueSerializer); + ValuesSetSerializer> valuesSetSerializer + = new ValuesSetSerializer<>(alloc, valueWithHashSerializer); + this.subDictionary = DatabaseMapDictionary.tail(dictionary, + prefixKey, + keySuffixHashSerializer, + valuesSetSerializer + ); + this.keySuffixHashFunction = keySuffixHashFunction; } public static DatabaseMapDictionaryHashed simple(LLDictionary dictionary, - Serializer keySerializer, - Serializer valueSerializer, + Serializer keySerializer, + Serializer valueSerializer, Function keyHashFunction, - SerializerFixedBinaryLength keyHashSerializer) { + SerializerFixedBinaryLength keyHashSerializer) { return new DatabaseMapDictionaryHashed<>( dictionary, dictionary.getAllocator().buffer(0), @@ -79,11 +76,11 @@ public class DatabaseMapDictionaryHashed implements DatabaseStageMap DatabaseMapDictionaryHashed tail(LLDictionary dictionary, - ByteBuf prefixKey, - Serializer keySuffixSerializer, - Serializer valueSerializer, + Buffer prefixKey, + Serializer keySuffixSerializer, + Serializer valueSerializer, Function keySuffixHashFunction, - SerializerFixedBinaryLength keySuffixHashSerializer) { + SerializerFixedBinaryLength keySuffixHashSerializer) { return new DatabaseMapDictionaryHashed<>(dictionary, prefixKey, keySuffixSerializer, diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java index 8f6e742..063099c 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; @@ -15,13 +15,13 @@ import reactor.core.publisher.Mono; public class DatabaseSetDictionary extends DatabaseMapDictionary { protected DatabaseSetDictionary(LLDictionary dictionary, - ByteBuf prefixKey, - SerializerFixedBinaryLength keySuffixSerializer) { + Buffer prefixKey, + SerializerFixedBinaryLength keySuffixSerializer) { super(dictionary, prefixKey, keySuffixSerializer, DatabaseEmpty.NOTHING_SERIALIZER); } public static DatabaseSetDictionary simple(LLDictionary dictionary, - SerializerFixedBinaryLength keySerializer) { + SerializerFixedBinaryLength keySerializer) { var buf = 
dictionary.getAllocator().buffer(0); try { return new DatabaseSetDictionary<>(dictionary, buf, keySerializer); @@ -31,8 +31,8 @@ public class DatabaseSetDictionary extends DatabaseMapDictionary } public static DatabaseSetDictionary tail(LLDictionary dictionary, - ByteBuf prefixKey, - SerializerFixedBinaryLength keySuffixSerializer) { + Buffer prefixKey, + SerializerFixedBinaryLength keySuffixSerializer) { return new DatabaseSetDictionary<>(dictionary, prefixKey, keySuffixSerializer); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java index 45310e0..1862b97 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; @@ -17,10 +17,10 @@ import reactor.core.publisher.Mono; public class DatabaseSetDictionaryHashed extends DatabaseMapDictionaryHashed { protected DatabaseSetDictionaryHashed(LLDictionary dictionary, - ByteBuf prefixKey, - Serializer keySuffixSerializer, + Buffer prefixKey, + Serializer keySuffixSerializer, Function keySuffixHashFunction, - SerializerFixedBinaryLength keySuffixHashSerializer) { + SerializerFixedBinaryLength keySuffixHashSerializer) { super(dictionary, prefixKey, keySuffixSerializer, @@ -31,9 +31,9 @@ public class DatabaseSetDictionaryHashed extends DatabaseMapDictionaryHas } public static DatabaseSetDictionaryHashed simple(LLDictionary dictionary, - Serializer keySerializer, + Serializer keySerializer, Function keyHashFunction, - SerializerFixedBinaryLength keyHashSerializer) { + SerializerFixedBinaryLength keyHashSerializer) { return new DatabaseSetDictionaryHashed<>(dictionary, dictionary.getAllocator().buffer(0), keySerializer, @@ -43,10 +43,10 @@ public class DatabaseSetDictionaryHashed extends DatabaseMapDictionaryHas } public static DatabaseSetDictionaryHashed tail(LLDictionary dictionary, - ByteBuf prefixKey, - Serializer keySuffixSerializer, + Buffer prefixKey, + Serializer keySuffixSerializer, Function keyHashFunction, - SerializerFixedBinaryLength keyHashSerializer) { + SerializerFixedBinaryLength keyHashSerializer) { return new DatabaseSetDictionaryHashed<>(dictionary, prefixKey, keySuffixSerializer, diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingle.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingle.java index f0e56f5..7b03570 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingle.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingle.java @@ -1,6 +1,8 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Send; +import io.netty.buffer.api.internal.ResourceSupport; import io.netty.util.ReferenceCounted; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.client.CompositeSnapshot; @@ -23,18 +25,16 @@ import reactor.core.publisher.SynchronousSink; public class DatabaseSingle implements DatabaseStageEntry { private final 
LLDictionary dictionary; - private final ByteBuf key; - private final Mono keyMono; - private final Serializer serializer; + private final Buffer key; + private final Mono> keyMono; + private final Serializer> serializer; - public DatabaseSingle(LLDictionary dictionary, ByteBuf key, Serializer serializer) { - try { + public DatabaseSingle(LLDictionary dictionary, Send key, Serializer> serializer) { + try (key) { this.dictionary = dictionary; - this.key = key.retain(); + this.key = key.receive(); this.keyMono = LLUtils.lazyRetain(this.key); this.serializer = serializer; - } finally { - key.release(); } } @@ -46,7 +46,7 @@ public class DatabaseSingle implements DatabaseStageEntry { } } - private void deserializeValue(ByteBuf value, SynchronousSink sink) { + private void deserializeValue(Send value, SynchronousSink sink) { try { sink.next(serializer.deserialize(value)); } catch (SerializationException ex) { @@ -63,13 +63,9 @@ public class DatabaseSingle implements DatabaseStageEntry { @Override public Mono setAndGetPrevious(U value) { - return Mono - .using(() -> serializer.serialize(value), - valueByteBuf -> dictionary - .put(keyMono, LLUtils.lazyRetain(valueByteBuf), LLDictionaryResultType.PREVIOUS_VALUE) - .handle(this::deserializeValue), - ReferenceCounted::release - ); + return dictionary + .put(keyMono, Mono.fromCallable(() -> serializer.serialize(value)), LLDictionaryResultType.PREVIOUS_VALUE) + .handle(this::deserializeValue); } @Override @@ -99,7 +95,7 @@ public class DatabaseSingle implements DatabaseStageEntry { } else { return serializer.serialize(result); } - }, existsAlmostCertainly).transform(mono -> LLUtils.mapDelta(mono, serializer::deserialize)); + }, existsAlmostCertainly).transform(mono -> LLUtils.mapLLDelta(mono, serializer::deserialize)); } @Override @@ -112,23 +108,23 @@ public class DatabaseSingle implements DatabaseStageEntry { @Override public Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { return dictionary - .isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(LLRange::single)) + .isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(LLRange::single).map(ResourceSupport::send)) .map(empty -> empty ? 
0L : 1L); } @Override public Mono isEmpty(@Nullable CompositeSnapshot snapshot) { return dictionary - .isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(LLRange::single)); + .isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(LLRange::single).map(ResourceSupport::send)); } @Override public void release() { - key.release(); + key.close(); } @Override public Flux badBlocks() { - return dictionary.badBlocks(keyMono.map(LLRange::single)); + return dictionary.badBlocks(keyMono.map(LLRange::single).map(ResourceSupport::send)); } } \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java index fb38a13..4def30c 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.Delta; diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java index b4609ad..1e15434 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java @@ -1,6 +1,7 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import java.util.Collection; @@ -13,7 +14,7 @@ public interface SubStageGetter> { Mono subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKey); + Mono> prefixKey); boolean isMultiKey(); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java index 025d4e2..6d763b2 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.serialization.Serializer; @@ -16,15 +16,15 @@ import reactor.core.publisher.Mono; public class SubStageGetterHashMap implements SubStageGetter, DatabaseMapDictionaryHashed> { - private final Serializer keySerializer; - private final Serializer valueSerializer; + private final Serializer keySerializer; + private final Serializer valueSerializer; private final Function keyHashFunction; - private final SerializerFixedBinaryLength keyHashSerializer; + private final SerializerFixedBinaryLength keyHashSerializer; - public SubStageGetterHashMap(Serializer keySerializer, - Serializer valueSerializer, + public SubStageGetterHashMap(Serializer keySerializer, + Serializer valueSerializer, Function keyHashFunction, - SerializerFixedBinaryLength keyHashSerializer) { + 
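
DatabaseSingle above threads keyMono through usingWhen-style pipelines so the key buffer's lifetime is tied to each subscription. The underlying Reactor pattern, sketched with a per-subscription buffer that is closed on every termination path (success, error, or cancellation):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import reactor.core.publisher.Mono;

final class UsingWhenSketch {

	// The resource is created on subscription, used inside the closure, and the
	// async cleanup closes it no matter how the pipeline terminates.
	static Mono<Integer> readableBytesOf(BufferAllocator alloc) {
		return Mono.usingWhen(
				Mono.fromCallable(() -> alloc.allocate(8)),
				buffer -> Mono.fromCallable(buffer::readableBytes),
				buffer -> Mono.fromRunnable(buffer::close)
		);
	}
}
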
SerializerFixedBinaryLength keyHashSerializer) { this.keySerializer = keySerializer; this.valueSerializer = valueSerializer; this.keyHashFunction = keyHashFunction; @@ -34,7 +34,7 @@ public class SubStageGetterHashMap implements @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKeyMono) { + Mono prefixKeyMono) { return Mono.usingWhen( prefixKeyMono, prefixKey -> Mono diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java index a228cc5..3e4326f 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; @@ -16,13 +16,13 @@ import reactor.core.publisher.Mono; public class SubStageGetterHashSet implements SubStageGetter, DatabaseSetDictionaryHashed> { - private final Serializer keySerializer; + private final Serializer keySerializer; private final Function keyHashFunction; - private final SerializerFixedBinaryLength keyHashSerializer; + private final SerializerFixedBinaryLength keyHashSerializer; - public SubStageGetterHashSet(Serializer keySerializer, + public SubStageGetterHashSet(Serializer keySerializer, Function keyHashFunction, - SerializerFixedBinaryLength keyHashSerializer) { + SerializerFixedBinaryLength keyHashSerializer) { this.keySerializer = keySerializer; this.keyHashFunction = keyHashFunction; this.keyHashSerializer = keyHashSerializer; @@ -31,7 +31,7 @@ public class SubStageGetterHashSet implements @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKeyMono) { + Mono prefixKeyMono) { return Mono.usingWhen(prefixKeyMono, prefixKey -> Mono .fromSupplier(() -> DatabaseSetDictionaryHashed diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java index f137329..3787c16 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import io.netty.util.ReferenceCounted; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; @@ -14,11 +14,11 @@ import reactor.core.publisher.Mono; public class SubStageGetterMap implements SubStageGetter, DatabaseMapDictionary> { - private final SerializerFixedBinaryLength keySerializer; - private final Serializer valueSerializer; + private final SerializerFixedBinaryLength keySerializer; + private final Serializer valueSerializer; - public SubStageGetterMap(SerializerFixedBinaryLength keySerializer, - Serializer valueSerializer) { + public SubStageGetterMap(SerializerFixedBinaryLength keySerializer, + Serializer valueSerializer) { this.keySerializer = keySerializer; this.valueSerializer = valueSerializer; } @@ -26,7 +26,7 @@ public class SubStageGetterMap 
implements SubStageGetter, Databa @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKeyMono) { + Mono prefixKeyMono) { return Mono.usingWhen(prefixKeyMono, prefixKey -> Mono .fromSupplier(() -> DatabaseMapDictionary diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java index 913ce3b..0148829 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import io.netty.util.ReferenceCounted; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; @@ -15,11 +15,11 @@ public class SubStageGetterMapDeep> implements SubStageGetter, DatabaseMapDictionaryDeep> { private final SubStageGetter subStageGetter; - private final SerializerFixedBinaryLength keySerializer; + private final SerializerFixedBinaryLength keySerializer; private final int keyExtLength; public SubStageGetterMapDeep(SubStageGetter subStageGetter, - SerializerFixedBinaryLength keySerializer, + SerializerFixedBinaryLength keySerializer, int keyExtLength) { this.subStageGetter = subStageGetter; this.keySerializer = keySerializer; @@ -41,7 +41,7 @@ public class SubStageGetterMapDeep> implements @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKeyMono) { + Mono prefixKeyMono) { return Mono.usingWhen(prefixKeyMono, prefixKey -> Mono .fromSupplier(() -> DatabaseMapDictionaryDeep @@ -61,16 +61,16 @@ public class SubStageGetterMapDeep> implements return true; } - private Mono checkKeyFluxConsistency(ByteBuf prefixKey, List keys) { + private Mono checkKeyFluxConsistency(Buffer prefixKey, List keys) { return Mono .fromCallable(() -> { try { - for (ByteBuf key : keys) { + for (Buffer key : keys) { assert key.readableBytes() == prefixKey.readableBytes() + getKeyBinaryLength(); } } finally { prefixKey.release(); - for (ByteBuf key : keys) { + for (Buffer key : keys) { key.release(); } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java index 9f92697..863547f 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import io.netty.util.ReferenceCounted; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; @@ -14,16 +14,16 @@ import reactor.core.publisher.Mono; public class SubStageGetterSet implements SubStageGetter, DatabaseSetDictionary> { - private final SerializerFixedBinaryLength keySerializer; + private final SerializerFixedBinaryLength keySerializer; - public SubStageGetterSet(SerializerFixedBinaryLength keySerializer) { + public SubStageGetterSet(SerializerFixedBinaryLength keySerializer) { this.keySerializer = keySerializer; } @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKeyMono) { + Mono 
prefixKeyMono) { return Mono.usingWhen(prefixKeyMono, prefixKey -> Mono .fromSupplier(() -> DatabaseSetDictionary.tail(dictionary, prefixKey.retain(), keySerializer)), diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java index ea11a28..1f07a31 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java @@ -1,6 +1,7 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.LLUtils; @@ -13,21 +14,21 @@ import reactor.core.publisher.Mono; public class SubStageGetterSingle implements SubStageGetter> { - private final Serializer serializer; + private final Serializer> serializer; - public SubStageGetterSingle(Serializer serializer) { + public SubStageGetterSingle(Serializer> serializer) { this.serializer = serializer; } @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono keyPrefixMono) { + Mono> keyPrefixMono) { return Mono.usingWhen( keyPrefixMono, keyPrefix -> Mono - .>fromSupplier(() -> new DatabaseSingle<>(dictionary, keyPrefix.retain(), serializer)), - keyPrefix -> Mono.fromRunnable(keyPrefix::release) + .>fromSupplier(() -> new DatabaseSingle<>(dictionary, keyPrefix, serializer)), + keyPrefix -> Mono.fromRunnable(keyPrefix::close) ); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java index fe340d7..1f04d8c 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java @@ -1,9 +1,9 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import it.cavallium.dbengine.database.serialization.Serializer; -public class SubStageGetterSingleBytes extends SubStageGetterSingle { +public class SubStageGetterSingleBytes extends SubStageGetterSingle { public SubStageGetterSingleBytes() { super(Serializer.noop()); diff --git a/src/main/java/it/cavallium/dbengine/database/collections/ValueWithHashSerializer.java b/src/main/java/it/cavallium/dbengine/database/collections/ValueWithHashSerializer.java index 5d29c8c..bc4176d 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/ValueWithHashSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/ValueWithHashSerializer.java @@ -1,7 +1,8 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.Serializer; @@ -9,43 +10,35 @@ import java.util.Map; import java.util.Map.Entry; import org.jetbrains.annotations.NotNull; -class ValueWithHashSerializer implements Serializer, ByteBuf> { +class 
ValueWithHashSerializer implements Serializer, Send> { - private final ByteBufAllocator allocator; - private final Serializer keySuffixSerializer; - private final Serializer valueSerializer; + private final BufferAllocator allocator; + private final Serializer> keySuffixSerializer; + private final Serializer> valueSerializer; - ValueWithHashSerializer(ByteBufAllocator allocator, - Serializer keySuffixSerializer, - Serializer valueSerializer) { + ValueWithHashSerializer(BufferAllocator allocator, + Serializer> keySuffixSerializer, + Serializer> valueSerializer) { this.allocator = allocator; this.keySuffixSerializer = keySuffixSerializer; this.valueSerializer = valueSerializer; } @Override - public @NotNull Entry deserialize(@NotNull ByteBuf serialized) throws SerializationException { - try { - X deserializedKey = keySuffixSerializer.deserialize(serialized.retain()); - Y deserializedValue = valueSerializer.deserialize(serialized.retain()); + public @NotNull Entry deserialize(@NotNull Send serializedToReceive) throws SerializationException { + try (var serialized = serializedToReceive.receive()) { + X deserializedKey = keySuffixSerializer.deserialize(serialized.copy().send()); + Y deserializedValue = valueSerializer.deserialize(serialized.send()); return Map.entry(deserializedKey, deserializedValue); - } finally { - serialized.release(); } } @Override - public @NotNull ByteBuf serialize(@NotNull Entry deserialized) throws SerializationException { - ByteBuf keySuffix = keySuffixSerializer.serialize(deserialized.getKey()); - try { - ByteBuf value = valueSerializer.serialize(deserialized.getValue()); - try { - return LLUtils.compositeBuffer(allocator, keySuffix.retain(), value.retain()); - } finally { - value.release(); + public @NotNull Send serialize(@NotNull Entry deserialized) throws SerializationException { + try (Buffer keySuffix = keySuffixSerializer.serialize(deserialized.getKey()).receive()) { + try (Buffer value = valueSerializer.serialize(deserialized.getValue()).receive()) { + return LLUtils.compositeBuffer(allocator, keySuffix.send(), value.send()); } - } finally { - keySuffix.release(); } } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/ValuesSetSerializer.java b/src/main/java/it/cavallium/dbengine/database/collections/ValuesSetSerializer.java index df71bc5..78d2f1f 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/ValuesSetSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/ValuesSetSerializer.java @@ -1,7 +1,8 @@ package it.cavallium.dbengine.database.collections; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.Serializer; import it.unimi.dsi.fastutil.objects.ObjectArraySet; @@ -13,47 +14,39 @@ import java.util.HashSet; import java.util.Set; import org.jetbrains.annotations.NotNull; -class ValuesSetSerializer implements Serializer, ByteBuf> { +class ValuesSetSerializer implements Serializer, Send> { - private final ByteBufAllocator allocator; - private final Serializer entrySerializer; + private final BufferAllocator allocator; + private final Serializer> entrySerializer; - ValuesSetSerializer(ByteBufAllocator allocator, Serializer entrySerializer) { + ValuesSetSerializer(BufferAllocator allocator, Serializer> entrySerializer) { this.allocator 
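
ValueWithHashSerializer above concatenates the serialized key suffix and value with no delimiter, so decoding depends on each inner serializer consuming exactly its own bytes; note that the copy()/send() split in this hunk no longer advances a shared reader offset the way the old retain()-based code did. For contrast, an explicitly framed layout (hypothetical, not the project's on-disk format) makes the boundary self-describing:

import java.nio.ByteBuffer;

final class FramedEntrySketch {

	static byte[] frame(byte[] key, byte[] value) {
		return ByteBuffer.allocate(Integer.BYTES + key.length + value.length)
				.putInt(key.length)   // frame header: how many bytes belong to the key
				.put(key)
				.put(value)
				.array();
	}

	static byte[][] unframe(byte[] framed) {
		ByteBuffer buf = ByteBuffer.wrap(framed);
		byte[] key = new byte[buf.getInt()];
		buf.get(key);
		byte[] value = new byte[buf.remaining()]; // everything after the key is the value
		buf.get(value);
		return new byte[][] {key, value};
	}
}
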
= allocator; this.entrySerializer = entrySerializer; } @Override - public @NotNull ObjectArraySet deserialize(@NotNull ByteBuf serialized) throws SerializationException { - try { + public @NotNull ObjectArraySet deserialize(@NotNull Send serializedToReceive) throws SerializationException { + try (var serialized = serializedToReceive.receive()) { int entriesLength = serialized.readInt(); ArrayList deserializedElements = new ArrayList<>(entriesLength); for (int i = 0; i < entriesLength; i++) { - X entry = entrySerializer.deserialize(serialized.retain()); + X entry = entrySerializer.deserialize(serialized.send()); deserializedElements.add(entry); } return new ObjectArraySet<>(deserializedElements); - } finally { - serialized.release(); } } @Override - public @NotNull ByteBuf serialize(@NotNull ObjectArraySet deserialized) throws SerializationException { - ByteBuf output = allocator.buffer(); - try { + public @NotNull Send serialize(@NotNull ObjectArraySet deserialized) throws SerializationException { + try (Buffer output = allocator.allocate(64)) { output.writeInt(deserialized.size()); for (X entry : deserialized) { - ByteBuf serialized = entrySerializer.serialize(entry); - try { + try (Buffer serialized = entrySerializer.serialize(entry).receive()) { output.writeBytes(serialized); - } finally { - serialized.release(); } } - return output.retain(); - } finally { - output.release(); + return output.send(); } } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java index 2c52977..9d6ecf4 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.disk; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.BufferAllocator; import it.cavallium.dbengine.client.IndicizerAnalyzers; import it.cavallium.dbengine.client.IndicizerSimilarities; import it.cavallium.dbengine.client.LuceneOptions; @@ -23,16 +23,16 @@ public class LLLocalDatabaseConnection implements LLDatabaseConnection { JMXNettyMonitoringManager.initialize(); } - private final ByteBufAllocator allocator; + private final BufferAllocator allocator; private final Path basePath; - public LLLocalDatabaseConnection(ByteBufAllocator allocator, Path basePath) { + public LLLocalDatabaseConnection(BufferAllocator allocator, Path basePath) { this.allocator = allocator; this.basePath = basePath; } @Override - public ByteBufAllocator getAllocator() { + public BufferAllocator getAllocator() { return allocator; } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java index 22effb6..90d5acb 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java @@ -1,15 +1,23 @@ package it.cavallium.dbengine.database.disk; import static io.netty.buffer.Unpooled.wrappedBuffer; +import static it.cavallium.dbengine.database.LLUtils.fromByteArray; +import static it.cavallium.dbengine.database.LLUtils.isDirect; +import static it.cavallium.dbengine.database.LLUtils.toDirect; +import static java.util.Objects.requireNonNull; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import 
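
LLLocalDictionary, whose diff starts here, serializes access to individual keys with a striped lock: each key hashes to one of a fixed number of StampedLock stripes (see getLockIndex below), bounding memory while still letting unrelated keys proceed in parallel. A JDK-only sketch of the same idea:

import java.util.Arrays;
import java.util.concurrent.locks.StampedLock;
import java.util.function.IntSupplier;

final class StripedLockSketch {

	private final StampedLock[] stripes;

	StripedLockSketch(int stripeCount) {
		stripes = new StampedLock[stripeCount];
		Arrays.setAll(stripes, i -> new StampedLock());
	}

	// Same selection rule as getLockIndex below: hash the key, clamp it to a stripe.
	StampedLock stripeFor(byte[] key) {
		return stripes[Math.abs(Arrays.hashCode(key) % stripes.length)];
	}

	int readWith(byte[] key, IntSupplier criticalSection) {
		StampedLock lock = stripeFor(key);
		long stamp = lock.readLock();
		try {
			return criticalSection.getAsInt();
		} finally {
			lock.unlockRead(stamp);
		}
	}
}
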
io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Resource; +import io.netty.buffer.api.Send; +import io.netty.buffer.api.internal.ResourceSupport; import io.netty.util.ReferenceCounted; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.client.DatabaseOptions; import it.cavallium.dbengine.database.Column; import it.cavallium.dbengine.database.Delta; import it.cavallium.dbengine.database.ExtraKeyOperationResult; +import it.cavallium.dbengine.database.LLDelta; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.LLDictionaryResultType; import it.cavallium.dbengine.database.LLEntry; @@ -143,13 +151,13 @@ public class LLLocalDictionary implements LLDictionary { private final Function snapshotResolver; private final Striped itemsLock = Striped.readWriteStampedLock(STRIPES); private final UpdateMode updateMode; - private final ByteBufAllocator alloc; + private final BufferAllocator alloc; private final String getRangeMultiDebugName; private final String getRangeKeysMultiDebugName; private final DatabaseOptions databaseOptions; public LLLocalDictionary( - ByteBufAllocator allocator, + BufferAllocator allocator, @NotNull RocksDB db, @NotNull ColumnFamilyHandle columnFamilyHandle, String databaseName, @@ -158,9 +166,9 @@ public class LLLocalDictionary implements LLDictionary { Function snapshotResolver, UpdateMode updateMode, DatabaseOptions databaseOptions) { - Objects.requireNonNull(db); + requireNonNull(db); this.db = db; - Objects.requireNonNull(columnFamilyHandle); + requireNonNull(columnFamilyHandle); this.cfh = columnFamilyHandle; this.databaseName = databaseName; this.columnName = columnName; @@ -206,13 +214,13 @@ public class LLLocalDictionary implements LLDictionary { } } - private int getLockIndex(ByteBuf key) { + private int getLockIndex(Buffer key) { return Math.abs(LLUtils.hashCode(key) % STRIPES); } - private IntArrayList getLockIndices(List keys) { + private IntArrayList getLockIndices(List keys) { var list = new IntArrayList(keys.size()); - for (ByteBuf key : keys) { + for (Buffer key : keys) { list.add(getLockIndex(key)); } return list; @@ -221,85 +229,90 @@ public class LLLocalDictionary implements LLDictionary { private IntArrayList getLockIndicesEntries(List keys) { var list = new IntArrayList(keys.size()); for (LLEntry key : keys) { - list.add(getLockIndex(key.getKey())); + list.add(getLockIndex(key.getKeyUnsafe())); } return list; } - private IntArrayList getLockIndicesWithExtra(List> entries) { + private IntArrayList getLockIndicesWithExtra(List> entries) { var list = new IntArrayList(entries.size()); - for (Tuple2 key : entries) { + for (Tuple2 key : entries) { list.add(getLockIndex(key.getT1())); } return list; } @Override - public ByteBufAllocator getAllocator() { + public BufferAllocator getAllocator() { return alloc; } - private Mono runOnDb(Callable<@Nullable T> callable) { + private @NotNull Mono runOnDb(Callable<@Nullable T> callable) { return Mono.fromCallable(callable).subscribeOn(dbScheduler); } @Override - public Mono get(@Nullable LLSnapshot snapshot, - Mono keyMono, + public Mono> get(@Nullable LLSnapshot snapshot, + Mono> keyMono, boolean existsAlmostCertainly) { return Mono.usingWhen(keyMono, - key -> runOnDb(() -> { - StampedLock lock; - long stamp; - if (updateMode == UpdateMode.ALLOW) { - lock = itemsLock.getAt(getLockIndex(key)); + keySend -> runOnDb(() -> { + try (var key = keySend.receive()) { + try { + StampedLock lock; + long stamp; + if (updateMode == UpdateMode.ALLOW) { + 
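get() and the other single-key operations guard each key with one of STRIPES stamped locks, selected by hashing the key buffer (getLockIndex above). A sketch of that striping with plain JDK locks; the project uses a Striped helper, so the class below is only an illustrative stand-in:

```java
import java.util.concurrent.locks.StampedLock;

// Illustrative stand-in for the striped lock used by LLLocalDictionary.
final class LockStripes {

	private final StampedLock[] stripes;

	LockStripes(int count) {
		stripes = new StampedLock[count];
		for (int i = 0; i < count; i++) {
			stripes[i] = new StampedLock();
		}
	}

	// Same selection rule as getLockIndex(): hash modulo stripe count.
	StampedLock getAt(int keyHash) {
		return stripes[Math.abs(keyHash % stripes.length)];
	}
}
```

Hashing to a stripe keeps the lock table bounded while still letting unrelated keys proceed in parallel; two keys that collide on a stripe merely contend, they do not conflict logically.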
lock = itemsLock.getAt(getLockIndex(key)); - stamp = lock.readLock(); - } else { - lock = null; - stamp = 0; - } - try { - if (logger.isTraceEnabled()) { - logger.trace("Reading {}", LLUtils.toStringSafe(key)); - } - return dbGet(cfh, resolveSnapshot(snapshot), key.retain(), existsAlmostCertainly); - } finally { - if (updateMode == UpdateMode.ALLOW) { - lock.unlockRead(stamp); + stamp = lock.readLock(); + } else { + lock = null; + stamp = 0; + } + try { + if (logger.isTraceEnabled()) { + logger.trace("Reading {}", LLUtils.toStringSafe(key)); + } + return dbGet(cfh, resolveSnapshot(snapshot), key.send(), existsAlmostCertainly); + } finally { + if (updateMode == UpdateMode.ALLOW) { + lock.unlockRead(stamp); + } + } + } catch (Exception ex) { + throw new IOException("Failed to read " + LLUtils.toStringSafe(key), ex); } } - }).onErrorMap(cause -> new IOException("Failed to read " - + LLUtils.toStringSafe(key), cause)), - key -> Mono.fromRunnable(key::release) + }).onErrorMap(cause -> new IOException("Failed to read", cause)), + keySend -> Mono.fromRunnable(keySend::close) ); } - private ByteBuf dbGet(ColumnFamilyHandle cfh, + @Nullable + private Send dbGet(ColumnFamilyHandle cfh, @Nullable ReadOptions readOptions, - ByteBuf key, + Send keySend, boolean existsAlmostCertainly) throws RocksDBException { - try { - if (databaseOptions.allowNettyDirect() && key.isDirect()) { + try (var key = keySend.receive()) { + if (databaseOptions.allowNettyDirect() && isDirect(key)) { //todo: implement keyMayExist if existsAlmostCertainly is false. // Unfortunately it's not feasible until RocksDB implements keyMayExist with buffers // Create the key nio buffer to pass to RocksDB - if (!key.isDirect()) { + if (!isDirect(key)) { throw new RocksDBException("Key buffer must be direct"); } - ByteBuffer keyNioBuffer = LLUtils.toDirect(key); + ByteBuffer keyNioBuffer = toDirect(key); assert keyNioBuffer.isDirect(); // Create a direct result buffer because RocksDB works only with direct buffers - ByteBuf resultBuf = alloc.directBuffer(INITIAL_DIRECT_READ_BYTE_BUF_SIZE_BYTES); - try { + try (Buffer resultBuf = alloc.allocate(INITIAL_DIRECT_READ_BYTE_BUF_SIZE_BYTES)) { int valueSize; int assertionReadData = -1; ByteBuffer resultNioBuf; do { // Create the result nio buffer to pass to RocksDB - resultNioBuf = resultBuf.nioBuffer(0, resultBuf.capacity()); + resultNioBuf = toDirect(resultBuf); assert keyNioBuffer.isDirect(); assert resultNioBuf.isDirect(); valueSize = db.get(cfh, @@ -336,13 +349,13 @@ public class LLLocalDictionary implements LLDictionary { if (valueSize <= resultNioBuf.limit()) { // Return the result ready to be read - return resultBuf.setIndex(0, valueSize).retain(); + return resultBuf.readerOffset(0).writerOffset(valueSize).send(); } else { // If the locking is enabled the data is safe, so we can append the next read data. // Otherwise we need to re-read everything. 
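The surrounding loop exists because RocksDB's ByteBuffer-based get() fills the result buffer only up to its limit while reporting the full value size, so a short buffer means "grow and retry". Stripped of the Netty wrapping, the control flow is roughly as follows; the method name and initial size are illustrative, and the limit semantics are those the patch itself relies on:

```java
import java.nio.ByteBuffer;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

final class DirectReadSketch {

	// Roughly the grow-and-retry read from dbGet(), with plain NIO buffers.
	static ByteBuffer readFully(RocksDB db, ColumnFamilyHandle cfh, ReadOptions opts, ByteBuffer key)
			throws RocksDBException {
		ByteBuffer value = ByteBuffer.allocateDirect(4096);
		while (true) {
			key.rewind(); // the key buffer is consumed on every attempt
			int valueSize = db.get(cfh, opts, key, value);
			if (valueSize == RocksDB.NOT_FOUND) {
				return null;
			}
			if (valueSize <= value.limit()) {
				return value; // RocksDB set the limit to the bytes actually written
			}
			value = ByteBuffer.allocateDirect(valueSize); // too small: grow and retry
		}
	}
}
```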
if (updateMode == UpdateMode.ALLOW) { // Update the resultBuf writerIndex with the new position - resultBuf.writerIndex(resultNioBuf.limit()); + resultBuf.writerOffset(resultNioBuf.limit()); } //noinspection UnusedAssignment resultNioBuf = null; @@ -352,209 +365,225 @@ public class LLLocalDictionary implements LLDictionary { if (resultBuf.capacity() < valueSize) { // Expand the resultBuf size if the result is bigger than the current result // buffer size - resultBuf.capacity(valueSize); + resultBuf.ensureWritable(valueSize); } } // Repeat if the result has been found but it's still not finished } while (valueSize != RocksDB.NOT_FOUND); // If the value is not found return null return null; - } finally { - resultBuf.release(); } } else { - ReadOptions validReadOptions = Objects.requireNonNullElse(readOptions, EMPTY_READ_OPTIONS); - byte[] keyArray = LLUtils.toArray(key); - Objects.requireNonNull(keyArray); - Holder data = existsAlmostCertainly ? null : new Holder<>(); - if (existsAlmostCertainly || db.keyMayExist(cfh, - validReadOptions, - keyArray, - data - )) { - if (!existsAlmostCertainly && data.getValue() != null) { - return wrappedBuffer(data.getValue()); - } else { - byte[] result = db.get(cfh, validReadOptions, keyArray); - if (result == null) { - return null; + try (ReadOptions validReadOptions = Objects.requireNonNullElse(readOptions, EMPTY_READ_OPTIONS)) { + byte[] keyArray = LLUtils.toArray(key); + requireNonNull(keyArray); + Holder data = existsAlmostCertainly ? null : new Holder<>(); + if (existsAlmostCertainly || db.keyMayExist(cfh, validReadOptions, keyArray, data)) { + if (!existsAlmostCertainly && data.getValue() != null) { + return LLUtils.fromByteArray(alloc, data.getValue()).send(); } else { - return wrappedBuffer(result); + byte[] result = db.get(cfh, validReadOptions, keyArray); + if (result == null) { + return null; + } else { + return LLUtils.fromByteArray(alloc, result).send(); + } } + } else { + return null; } - } else { - return null; } } - } finally { - key.release(); } } @SuppressWarnings("SameParameterValue") private void dbPut(ColumnFamilyHandle cfh, @Nullable WriteOptions writeOptions, - ByteBuf key, - ByteBuf value) throws RocksDBException { - try { - WriteOptions validWriteOptions = Objects.requireNonNullElse(writeOptions, EMPTY_WRITE_OPTIONS); - if (databaseOptions.allowNettyDirect() && key.isDirect() && value.isDirect()) { - if (!key.isDirect()) { - throw new RocksDBException("Key buffer must be direct"); - } - if (!value.isDirect()) { - throw new RocksDBException("Value buffer must be direct"); - } - var keyNioBuffer = LLUtils.toDirect(key); - assert keyNioBuffer.isDirect(); + Send keyToReceive, + Send valueToReceive) throws RocksDBException { + try (WriteOptions validWriteOptions = Objects.requireNonNullElse(writeOptions, EMPTY_WRITE_OPTIONS)) { + try (var key = keyToReceive.receive()) { + try (var value = valueToReceive.receive()) { + if (databaseOptions.allowNettyDirect()) { + if (!isDirect(key)) { + throw new RocksDBException("Key buffer must be direct"); + } + if (!isDirect(value)) { + throw new RocksDBException("Value buffer must be direct"); + } + var keyNioBuffer = toDirect(key); + assert keyNioBuffer.isDirect(); - var valueNioBuffer = LLUtils.toDirect(value); - assert valueNioBuffer.isDirect(); - db.put(cfh, validWriteOptions, keyNioBuffer, valueNioBuffer); - } else { - db.put(cfh, validWriteOptions, LLUtils.toArray(key), LLUtils.toArray(value)); + var valueNioBuffer = toDirect(value); + assert valueNioBuffer.isDirect(); + db.put(cfh, 
validWriteOptions, keyNioBuffer, valueNioBuffer); + } else { + db.put(cfh, validWriteOptions, LLUtils.toArray(key), LLUtils.toArray(value)); + } + } } - } finally { - key.release(); - value.release(); } } @Override - public Mono isRangeEmpty(@Nullable LLSnapshot snapshot, Mono rangeMono) { + public Mono isRangeEmpty(@Nullable LLSnapshot snapshot, Mono> rangeMono) { return Mono.usingWhen(rangeMono, - range -> { - if (range.isSingle()) { - return this.containsKey(snapshot, Mono.just(range.getSingle()).map(ByteBuf::retain)); - } else { - return this.containsRange(snapshot, Mono.just(range).map(LLRange::retain)); + rangeSend -> { + try (var range = rangeSend.receive()) { + if (range.isSingle()) { + return this.containsKey(snapshot, LLUtils.lazyRetain((range.getSingle().receive()))); + } else { + return this.containsRange(snapshot, LLUtils.lazyRetainRange(range)); + } } }, - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ).map(isContained -> !isContained); } - public Mono containsRange(@Nullable LLSnapshot snapshot, Mono rangeMono) { + public Mono containsRange(@Nullable LLSnapshot snapshot, Mono> rangeMono) { return Mono.usingWhen(rangeMono, - range -> runOnDb(() -> { - try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) { - readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); - readOpts.setFillCache(false); - if (range.hasMin()) { - if (databaseOptions.allowNettyDirect() && range.getMin().isDirect()) { - readOpts.setIterateLowerBound(new DirectSlice(Objects - .requireNonNull(LLUtils.toDirect(range.getMin()), - "This range must use direct buffers"))); - } else { - readOpts.setIterateLowerBound(new Slice(LLUtils.toArray(range.getMin()))); - } - } - if (range.hasMax()) { - if (databaseOptions.allowNettyDirect() && range.getMax().isDirect()) { - readOpts.setIterateUpperBound(new DirectSlice(Objects - .requireNonNull(LLUtils.toDirect(range.getMax()), - "This range must use direct buffers" - ))); - } else { - readOpts.setIterateUpperBound(new Slice(LLUtils.toArray(range.getMax()))); - } - } - try (RocksIterator rocksIterator = db.newIterator(cfh, readOpts)) { - if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - if (databaseOptions.allowNettyDirect() && range.getMin().isDirect()) { - rocksIterator.seek(Objects.requireNonNull(LLUtils.toDirect(range.getMin()), - "This range must use direct buffers" - )); - } else { - rocksIterator.seek(LLUtils.toArray(range.getMin())); + rangeSend -> runOnDb(() -> { + // Temporary resources to release after finished + Buffer cloned1 = null; + Buffer cloned2 = null; + Buffer cloned3 = null; + try { + try (var range = rangeSend.receive()) { + try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) { + readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); + readOpts.setFillCache(false); + if (range.hasMin()) { + try (var rangeMin = range.getMin().receive()) { + if (databaseOptions.allowNettyDirect()) { + ByteBuffer directBuf = toDirect(cloned1 = rangeMin.copy()); + requireNonNull(directBuf, "This range must use direct buffers"); + readOpts.setIterateLowerBound(new DirectSlice(directBuf)); + } else { + readOpts.setIterateLowerBound(new Slice(LLUtils.toArray(rangeMin))); + } + } + } + if (range.hasMax()) { + try (var rangeMax = range.getMax().receive()) { + if (databaseOptions.allowNettyDirect()) { + var directBuf = toDirect(cloned2 = rangeMax.copy()); + requireNonNull(directBuf, "This range must use direct buffers"); + readOpts.setIterateUpperBound(new 
DirectSlice(directBuf)); + } else { + readOpts.setIterateUpperBound(new Slice(LLUtils.toArray(rangeMax))); + } + } + } + try (RocksIterator rocksIterator = db.newIterator(cfh, readOpts)) { + if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { + try (var rangeMin = range.getMin().receive()) { + if (databaseOptions.allowNettyDirect()) { + var directBuf = toDirect(cloned3 = rangeMin.copy()); + requireNonNull(directBuf, "This range must use direct buffers"); + rocksIterator.seek(directBuf); + } else { + rocksIterator.seek(LLUtils.toArray(rangeMin)); + } + } + } else { + rocksIterator.seekToFirst(); + } + rocksIterator.status(); + return rocksIterator.isValid(); } - } else { - rocksIterator.seekToFirst(); } - rocksIterator.status(); - return rocksIterator.isValid(); } + } finally { + if (cloned1 != null) cloned1.close(); + if (cloned2 != null) cloned2.close(); + if (cloned3 != null) cloned3.close(); } - }).onErrorMap(cause -> new IOException("Failed to read range " + range.toString(), cause)), - range -> Mono.fromRunnable(range::release)); + }).onErrorMap(cause -> new IOException("Failed to read range", cause)), + rangeSend -> Mono.fromRunnable(rangeSend::close)); } - private Mono containsKey(@Nullable LLSnapshot snapshot, Mono keyMono) { + private Mono containsKey(@Nullable LLSnapshot snapshot, Mono> keyMono) { return Mono.usingWhen(keyMono, - key -> runOnDb(() -> { + keySend -> runOnDb(() -> { + try (var key = keySend.receive()) { + StampedLock lock; + long stamp; + if (updateMode == UpdateMode.ALLOW) { + lock = itemsLock.getAt(getLockIndex(key)); - StampedLock lock; - long stamp; - if (updateMode == UpdateMode.ALLOW) { - lock = itemsLock.getAt(getLockIndex(key)); - - stamp = lock.readLock(); - } else { - lock = null; - stamp = 0; - } - try { - int size = RocksDB.NOT_FOUND; - byte[] keyBytes = LLUtils.toArray(key); - Holder data = new Holder<>(); - var unmodifiableReadOpts = resolveSnapshot(snapshot); - if (db.keyMayExist(cfh, unmodifiableReadOpts, keyBytes, data)) { - if (data.getValue() != null) { - size = data.getValue().length; - } else { - size = db.get(cfh, unmodifiableReadOpts, keyBytes, NO_DATA); + stamp = lock.readLock(); + } else { + lock = null; + stamp = 0; + } + try { + int size = RocksDB.NOT_FOUND; + byte[] keyBytes = LLUtils.toArray(key); + Holder data = new Holder<>(); + try (var unmodifiableReadOpts = resolveSnapshot(snapshot)) { + if (db.keyMayExist(cfh, unmodifiableReadOpts, keyBytes, data)) { + if (data.getValue() != null) { + size = data.getValue().length; + } else { + size = db.get(cfh, unmodifiableReadOpts, keyBytes, NO_DATA); + } + } + } + return size != RocksDB.NOT_FOUND; + } finally { + if (updateMode == UpdateMode.ALLOW) { + lock.unlockRead(stamp); } } - return size != RocksDB.NOT_FOUND; - } finally { - if (updateMode == UpdateMode.ALLOW) { - lock.unlockRead(stamp); - } } - }).onErrorMap(cause -> new IOException("Failed to read " - + LLUtils.toStringSafe(key), cause)), - key -> Mono.fromRunnable(key::release) + }).onErrorMap(cause -> new IOException("Failed to read", cause)), + keySend -> Mono.fromRunnable(keySend::close) ); } @Override - public Mono put(Mono keyMono, - Mono valueMono, + public Mono> put(Mono> keyMono, + Mono> valueMono, LLDictionaryResultType resultType) { return Mono.usingWhen(keyMono, - key -> this - .getPreviousData(Mono.just(key).map(ByteBuf::retain), resultType) + keySend -> this + .getPreviousData(keyMono, resultType) .concatWith(Mono.usingWhen(valueMono, - value -> this.runOnDb(() -> { - StampedLock lock; - long stamp; - if 
(updateMode == UpdateMode.ALLOW) { - lock = itemsLock.getAt(getLockIndex(key)); - - stamp = lock.writeLock(); - } else { - lock = null; - stamp = 0; - } - try { - if (logger.isTraceEnabled()) { - logger.trace("Writing {}: {}", - LLUtils.toStringSafe(key), LLUtils.toStringSafe(value)); - } - dbPut(cfh, null, key.retain(), value.retain()); - return null; - } finally { - if (updateMode == UpdateMode.ALLOW) { - lock.unlockWrite(stamp); + valueSend -> this.>runOnDb(() -> { + try (var key = keySend.receive()) { + try (var value = valueSend.receive()) { + StampedLock lock; + long stamp; + if (updateMode == UpdateMode.ALLOW) { + lock = itemsLock.getAt(getLockIndex(key)); + + stamp = lock.writeLock(); + } else { + lock = null; + stamp = 0; + } + try { + if (logger.isTraceEnabled()) { + logger.trace("Writing {}: {}", + LLUtils.toStringSafe(key), LLUtils.toStringSafe(value)); + } + dbPut(cfh, null, key.send(), value.send()); + return null; + } finally { + if (updateMode == UpdateMode.ALLOW) { + lock.unlockWrite(stamp); + } + } } } }), - value -> Mono.fromRunnable(value::release) - ).onErrorMap(cause -> new IOException("Failed to write " - + LLUtils.toStringSafe(key), cause))) + value -> Mono.fromRunnable(value::close) + ).onErrorMap(cause -> new IOException("Failed to write", cause))) .singleOrEmpty(), - key -> Mono.fromRunnable(key::release) + keySend -> Mono.fromRunnable(keySend::close) ); } @@ -566,325 +595,332 @@ public class LLLocalDictionary implements LLDictionary { // Remember to change also updateAndGetDelta() if you are modifying this function @SuppressWarnings("DuplicatedCode") @Override - public Mono update(Mono keyMono, - SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater, + public Mono> update(Mono> keyMono, + SerializationFunction<@Nullable Send, @Nullable Send> updater, UpdateReturnMode updateReturnMode, boolean existsAlmostCertainly) { return Mono.usingWhen(keyMono, - key -> runOnDb(() -> { - if (updateMode == UpdateMode.DISALLOW) { - throw new UnsupportedOperationException("update() is disallowed"); - } - StampedLock lock; - long stamp; - if (updateMode == UpdateMode.ALLOW) { - lock = itemsLock.getAt(getLockIndex(key)); - - stamp = lock.readLock(); - } else { - lock = null; - stamp = 0; - } - try { - if (logger.isTraceEnabled()) { - logger.trace("Reading {}", LLUtils.toStringSafe(key)); + keySend -> runOnDb(() -> { + try (var key = keySend.receive()) { + if (updateMode == UpdateMode.DISALLOW) { + throw new UnsupportedOperationException("update() is disallowed"); } - while (true) { - @Nullable ByteBuf prevData; - var prevDataHolder = existsAlmostCertainly ? null : new Holder(); - if (existsAlmostCertainly - || db.keyMayExist(cfh, LLUtils.toArray(key), prevDataHolder)) { - if (!existsAlmostCertainly && prevDataHolder.getValue() != null) { - byte @Nullable [] prevDataBytes = prevDataHolder.getValue(); - if (prevDataBytes != null) { - prevData = wrappedBuffer(prevDataBytes); + StampedLock lock; + long stamp; + if (updateMode == UpdateMode.ALLOW) { + lock = itemsLock.getAt(getLockIndex(key)); + + stamp = lock.readLock(); + } else { + lock = null; + stamp = 0; + } + try { + if (logger.isTraceEnabled()) { + logger.trace("Reading {}", LLUtils.toStringSafe(key)); + } + while (true) { + @Nullable Buffer prevData; + var prevDataHolder = existsAlmostCertainly ? 
null : new Holder(); + if (existsAlmostCertainly + || db.keyMayExist(cfh, LLUtils.toArray(key), prevDataHolder)) { + if (!existsAlmostCertainly && prevDataHolder.getValue() != null) { + byte @Nullable [] prevDataBytes = prevDataHolder.getValue(); + if (prevDataBytes != null) { + prevData = LLUtils.fromByteArray(alloc, prevDataBytes); + } else { + prevData = null; + } } else { - prevData = null; + var obtainedPrevData = dbGet(cfh, null, key.send(), existsAlmostCertainly); + if (obtainedPrevData == null) { + prevData = null; + } else { + prevData = obtainedPrevData.receive(); + } } } else { - prevData = dbGet(cfh, null, key.retain(), existsAlmostCertainly); - } - } else { - prevData = null; - } - try { - @Nullable ByteBuf newData; - ByteBuf prevDataToSendToUpdater = prevData == null - ? null - : prevData.retainedSlice(); - try { - newData = updater.apply(prevDataToSendToUpdater == null - ? null - : prevDataToSendToUpdater.retain()); - if (!(prevDataToSendToUpdater == null - || prevDataToSendToUpdater.readerIndex() == 0 - || !prevDataToSendToUpdater.isReadable())) { - throw new IllegalStateException("The updater has read the previous data partially" - + " (read bytes: " + prevDataToSendToUpdater.readerIndex() - + " unread bytes: " + prevDataToSendToUpdater.readableBytes() + ")." - + " The only allowed options are reading the data fully or not reading it at all"); - } - } finally { - if (prevDataToSendToUpdater != null) { - prevDataToSendToUpdater.release(); - } + prevData = null; } try { - if (prevData != null && newData == null) { - //noinspection DuplicatedCode - if (updateMode == UpdateMode.ALLOW) { - var ws = lock.tryConvertToWriteLock(stamp); - if (ws != 0) { - stamp = ws; + @Nullable Buffer newData; + try (Buffer prevDataToSendToUpdater = prevData == null ? null : prevData.copy()) { + try (var newDataToReceive = updater.apply( + prevDataToSendToUpdater == null ? null : prevDataToSendToUpdater.send())) { + if (newDataToReceive != null) { + newData = newDataToReceive.receive(); } else { - lock.unlockRead(stamp); - - stamp = lock.writeLock(); - continue; + newData = null; } } - if (logger.isTraceEnabled()) { - logger.trace("Deleting {}", LLUtils.toStringSafe(key)); - } - dbDelete(cfh, null, key.retain()); - } else if (newData != null - && (prevData == null || !LLUtils.equals(prevData, newData))) { - //noinspection DuplicatedCode - if (updateMode == UpdateMode.ALLOW) { - var ws = lock.tryConvertToWriteLock(stamp); - if (ws != 0) { - stamp = ws; - } else { - lock.unlockRead(stamp); - - stamp = lock.writeLock(); - continue; - } - } - if (logger.isTraceEnabled()) { - logger.trace("Writing {}: {}", - LLUtils.toStringSafe(key), LLUtils.toStringSafe(newData)); - } - dbPut(cfh, null, key.retain(), newData.retain()); } - return switch (updateReturnMode) { - case GET_NEW_VALUE -> newData != null ? newData.retain() : null; - case GET_OLD_VALUE -> prevData != null ? 
prevData.retain() : null; - case NOTHING -> null; - //noinspection UnnecessaryDefault - default -> throw new IllegalArgumentException(); - }; + try { + if (prevData != null && newData == null) { + //noinspection DuplicatedCode + if (updateMode == UpdateMode.ALLOW) { + var ws = lock.tryConvertToWriteLock(stamp); + if (ws != 0) { + stamp = ws; + } else { + lock.unlockRead(stamp); + + stamp = lock.writeLock(); + continue; + } + } + if (logger.isTraceEnabled()) { + logger.trace("Deleting {}", LLUtils.toStringSafe(key)); + } + dbDelete(cfh, null, key.send()); + } else if (newData != null + && (prevData == null || !LLUtils.equals(prevData, newData))) { + //noinspection DuplicatedCode + if (updateMode == UpdateMode.ALLOW) { + var ws = lock.tryConvertToWriteLock(stamp); + if (ws != 0) { + stamp = ws; + } else { + lock.unlockRead(stamp); + + stamp = lock.writeLock(); + continue; + } + } + if (logger.isTraceEnabled()) { + logger.trace("Writing {}: {}", LLUtils.toStringSafe(key), LLUtils.toStringSafe(newData)); + } + Buffer dataToPut; + if (updateReturnMode == UpdateReturnMode.GET_NEW_VALUE) { + dataToPut = newData.copy(); + } else { + dataToPut = newData; + } + try { + dbPut(cfh, null, key.send(), dataToPut.send()); + } finally { + if (dataToPut != newData) { + dataToPut.close(); + } + } + } + return switch (updateReturnMode) { + case GET_NEW_VALUE -> newData != null ? newData.send() : null; + case GET_OLD_VALUE -> prevData != null ? prevData.send() : null; + case NOTHING -> null; + //noinspection UnnecessaryDefault + default -> throw new IllegalArgumentException(); + }; + } finally { + if (newData != null) { + newData.close(); + } + } } finally { - if (newData != null) { - newData.release(); + if (prevData != null) { + prevData.close(); } } - } finally { - if (prevData != null) { - prevData.release(); - } } - } - } finally { - if (updateMode == UpdateMode.ALLOW) { - lock.unlock(stamp); + } finally { + if (updateMode == UpdateMode.ALLOW) { + lock.unlock(stamp); + } } } - }).onErrorMap(cause -> new IOException("Failed to read or write " - + LLUtils.toStringSafe(key), cause)), - key -> Mono.fromRunnable(key::release) + }).onErrorMap(cause -> new IOException("Failed to read or write", cause)), + keySend -> Mono.fromRunnable(keySend::close) ); } // Remember to change also update() if you are modifying this function @SuppressWarnings("DuplicatedCode") @Override - public Mono> updateAndGetDelta(Mono keyMono, - SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater, + public Mono updateAndGetDelta(Mono> keyMono, + SerializationFunction<@Nullable Send, @Nullable Send> updater, boolean existsAlmostCertainly) { return Mono.usingWhen(keyMono, - key -> this.runOnDb(() -> { - if (updateMode == UpdateMode.DISALLOW) { - throw new UnsupportedOperationException("update() is disallowed"); - } - StampedLock lock; - long stamp; - if (updateMode == UpdateMode.ALLOW) { - lock = itemsLock.getAt(getLockIndex(key)); - - stamp = lock.readLock(); - } else { - lock = null; - stamp = 0; - } - try { - if (logger.isTraceEnabled()) { - logger.trace("Reading {}", LLUtils.toStringSafe(key)); + keySend -> this.runOnDb(() -> { + try (var key = keySend.receive()) { + if (updateMode == UpdateMode.DISALLOW) { + throw new UnsupportedOperationException("update() is disallowed"); } - while (true) { - @Nullable ByteBuf prevData; - var prevDataHolder = existsAlmostCertainly ? 
null : new Holder(); - if (existsAlmostCertainly - || db.keyMayExist(cfh, LLUtils.toArray(key), prevDataHolder)) { - if (!existsAlmostCertainly && prevDataHolder.getValue() != null) { - byte @Nullable [] prevDataBytes = prevDataHolder.getValue(); - if (prevDataBytes != null) { - prevData = wrappedBuffer(prevDataBytes); + StampedLock lock; + long stamp; + if (updateMode == UpdateMode.ALLOW) { + lock = itemsLock.getAt(getLockIndex(key)); + + stamp = lock.readLock(); + } else { + lock = null; + stamp = 0; + } + try { + if (logger.isTraceEnabled()) { + logger.trace("Reading {}", LLUtils.toStringSafe(key)); + } + while (true) { + @Nullable Buffer prevData; + var prevDataHolder = existsAlmostCertainly ? null : new Holder(); + if (existsAlmostCertainly + || db.keyMayExist(cfh, LLUtils.toArray(key), prevDataHolder)) { + if (!existsAlmostCertainly && prevDataHolder.getValue() != null) { + byte @Nullable [] prevDataBytes = prevDataHolder.getValue(); + if (prevDataBytes != null) { + prevData = LLUtils.fromByteArray(alloc, prevDataBytes); + } else { + prevData = null; + } } else { - prevData = null; + var obtainedPrevData = dbGet(cfh, null, key.send(), existsAlmostCertainly); + if (obtainedPrevData == null) { + prevData = null; + } else { + prevData = obtainedPrevData.receive(); + } } } else { - prevData = dbGet(cfh, null, key.retain(), existsAlmostCertainly); - } - } else { - prevData = null; - } - try { - @Nullable ByteBuf newData; - ByteBuf prevDataToSendToUpdater = prevData == null - ? null - : prevData.retainedSlice(); - try { - newData = updater.apply(prevDataToSendToUpdater == null - ? null - : prevDataToSendToUpdater.retain()); - assert prevDataToSendToUpdater == null - || prevDataToSendToUpdater.readerIndex() == 0 - || !prevDataToSendToUpdater.isReadable(); - } finally { - if (prevDataToSendToUpdater != null) { - prevDataToSendToUpdater.release(); - } + prevData = null; } try { - if (prevData != null && newData == null) { - //noinspection DuplicatedCode - if (updateMode == UpdateMode.ALLOW) { - var ws = lock.tryConvertToWriteLock(stamp); - if (ws != 0) { - stamp = ws; + @Nullable Buffer newData; + try (Buffer prevDataToSendToUpdater = prevData == null ? null : prevData.copy()) { + try (var newDataToReceive = updater.apply( + prevDataToSendToUpdater == null ? null : prevDataToSendToUpdater.send())) { + if (newDataToReceive != null) { + newData = newDataToReceive.receive(); } else { - lock.unlockRead(stamp); - - stamp = lock.writeLock(); - continue; + newData = null; } } - if (logger.isTraceEnabled()) { - logger.trace("Deleting {}", LLUtils.toStringSafe(key)); - } - dbDelete(cfh, null, key.retain()); - } else if (newData != null - && (prevData == null || !LLUtils.equals(prevData, newData))) { - //noinspection DuplicatedCode - if (updateMode == UpdateMode.ALLOW) { - var ws = lock.tryConvertToWriteLock(stamp); - if (ws != 0) { - stamp = ws; - } else { - lock.unlockRead(stamp); - - stamp = lock.writeLock(); - continue; - } - } - if (logger.isTraceEnabled()) { - logger.trace("Writing {}: {}", - LLUtils.toStringSafe(key), LLUtils.toStringSafe(newData)); - } - dbPut(cfh, null, key.retain(), newData.retain()); } - return new Delta<>( - prevData != null ? prevData.retain() : null, - newData != null ? 
newData.retain() : null - ); + try { + if (prevData != null && newData == null) { + //noinspection DuplicatedCode + if (updateMode == UpdateMode.ALLOW) { + var ws = lock.tryConvertToWriteLock(stamp); + if (ws != 0) { + stamp = ws; + } else { + lock.unlockRead(stamp); + + stamp = lock.writeLock(); + continue; + } + } + if (logger.isTraceEnabled()) { + logger.trace("Deleting {}", LLUtils.toStringSafe(key)); + } + dbDelete(cfh, null, key.send()); + } else if (newData != null + && (prevData == null || !LLUtils.equals(prevData, newData))) { + //noinspection DuplicatedCode + if (updateMode == UpdateMode.ALLOW) { + var ws = lock.tryConvertToWriteLock(stamp); + if (ws != 0) { + stamp = ws; + } else { + lock.unlockRead(stamp); + + stamp = lock.writeLock(); + continue; + } + } + if (logger.isTraceEnabled()) { + logger.trace("Writing {}: {}", + LLUtils.toStringSafe(key), LLUtils.toStringSafe(newData)); + } + dbPut(cfh, null, key.send(), newData.copy().send()); + } + return LLDelta.of( + prevData != null ? prevData.send() : null, + newData != null ? newData.send() : null + ); + } finally { + if (newData != null) { + newData.close(); + } + } } finally { - if (newData != null) { - newData.release(); + if (prevData != null) { + prevData.close(); } } - } finally { - if (prevData != null) { - prevData.release(); - } } - } - } finally { - if (updateMode == UpdateMode.ALLOW) { - lock.unlock(stamp); + } finally { + if (updateMode == UpdateMode.ALLOW) { + lock.unlock(stamp); + } } } - }).onErrorMap(cause -> new IOException("Failed to read or write " - + LLUtils.toStringSafe(key), cause)), - key -> Mono.fromRunnable(key::release) + }).onErrorMap(cause -> new IOException("Failed to read or write", cause)), + keySend -> Mono.fromRunnable(keySend::close) ); } - private void dbDelete(ColumnFamilyHandle cfh, @Nullable WriteOptions writeOptions, ByteBuf key) + private void dbDelete(ColumnFamilyHandle cfh, @Nullable WriteOptions writeOptions, Send keyToReceive) throws RocksDBException { - try { + try (var key = keyToReceive.receive()) { var validWriteOptions = Objects.requireNonNullElse(writeOptions, EMPTY_WRITE_OPTIONS); - if (databaseOptions.allowNettyDirect() && key.isDirect()) { - if (!key.isDirect()) { + if (databaseOptions.allowNettyDirect()) { + if (!isDirect(key)) { throw new IllegalArgumentException("Key must be a direct buffer"); } - var keyNioBuffer = LLUtils.toDirect(key); + var keyNioBuffer = toDirect(key); db.delete(cfh, validWriteOptions, keyNioBuffer); } else { db.delete(cfh, validWriteOptions, LLUtils.toArray(key)); } - } finally { - key.release(); } } @Override - public Mono remove(Mono keyMono, LLDictionaryResultType resultType) { + public Mono> remove(Mono> keyMono, LLDictionaryResultType resultType) { return Mono.usingWhen(keyMono, - key -> this - .getPreviousData(Mono.just(key).map(ByteBuf::retain), resultType) + keySend -> this + .getPreviousData(keyMono, resultType) .concatWith(this - .runOnDb(() -> { - StampedLock lock; - long stamp; - if (updateMode == UpdateMode.ALLOW) { - lock = itemsLock.getAt(getLockIndex(key)); - - stamp = lock.writeLock(); - } else { - lock = null; - stamp = 0; - } - try { - if (logger.isTraceEnabled()) { - logger.trace("Deleting {}", LLUtils.toStringSafe(key)); - } - dbDelete(cfh, null, key.retain()); - return null; - } finally { + .>runOnDb(() -> { + try (var key = keySend.receive()) { + StampedLock lock; + long stamp; if (updateMode == UpdateMode.ALLOW) { - lock.unlockWrite(stamp); + lock = itemsLock.getAt(getLockIndex(key)); + + stamp = lock.writeLock(); + } else { 
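Both update() and updateAndGetDelta() above take a read stamp first and upgrade it only when the computed value actually differs; when tryConvertToWriteLock() fails they fall back to a full write lock and re-run the read-compute step (the `continue`). The skeleton of that dance, with the key handling and RocksDB calls elided, is a standard StampedLock pattern:

```java
import java.util.Objects;
import java.util.concurrent.locks.StampedLock;
import java.util.function.UnaryOperator;

final class UpgradeSketch<T> {

	private final StampedLock lock = new StampedLock();
	private T value; // stands in for the stored RocksDB value

	// Skeleton of the read-then-upgrade retry loop in update()/updateAndGetDelta().
	void update(UnaryOperator<T> updater) {
		long stamp = lock.readLock();
		try {
			while (true) {
				T prev = value;
				T next = updater.apply(prev);
				if (Objects.equals(prev, next)) {
					return; // no change: the read stamp was enough
				}
				long ws = lock.tryConvertToWriteLock(stamp);
				if (ws != 0L) {
					stamp = ws; // upgraded in place, still holding exactly one stamp
					value = next;
					return;
				}
				// Upgrade failed: release the read stamp, take a write stamp,
				// and redo the read, since the value may have changed in between.
				lock.unlockRead(stamp);
				stamp = lock.writeLock();
			}
		} finally {
			lock.unlock(stamp); // unlocks either mode
		}
	}
}
```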
+ lock = null; + stamp = 0; + } + try { + if (logger.isTraceEnabled()) { + logger.trace("Deleting {}", LLUtils.toStringSafe(key)); + } + dbDelete(cfh, null, key.send()); + return null; + } finally { + if (updateMode == UpdateMode.ALLOW) { + lock.unlockWrite(stamp); + } } } }) - .onErrorMap(cause -> new IOException("Failed to delete " - + LLUtils.toStringSafe(key), cause)) + .onErrorMap(cause -> new IOException("Failed to delete", cause)) ) .singleOrEmpty(), - key -> Mono.fromCallable(key::release)); + keySend -> Mono.fromRunnable(keySend::close) + ); } - private Mono getPreviousData(Mono keyMono, LLDictionaryResultType resultType) { + private Mono> getPreviousData(Mono> keyMono, LLDictionaryResultType resultType) { return Mono .usingWhen(keyMono, - key -> switch (resultType) { + keySend -> { + try (var key = keySend.receive()) { + return switch (resultType) { case PREVIOUS_VALUE_EXISTENCE -> this - .containsKey(null, Mono.just(key).map(ByteBuf::retain)) + .containsKey(null, keyMono) .single() - .map(LLUtils::booleanToResponseByteBuffer) - .doAfterTerminate(() -> { - assert key.refCnt() > 0; - }); + .map((Boolean bool) -> LLUtils.booleanToResponseByteBuffer(alloc, bool).send()); case PREVIOUS_VALUE -> Mono .fromCallable(() -> { StampedLock lock; @@ -904,13 +940,9 @@ public class LLLocalDictionary implements LLDictionary { var data = new Holder(); if (db.keyMayExist(cfh, LLUtils.toArray(key), data)) { if (data.getValue() != null) { - return wrappedBuffer(data.getValue()); + return LLUtils.fromByteArray(alloc, data.getValue()).send(); } else { - try { - return dbGet(cfh, null, key.retain(), true); - } finally { - assert key.refCnt() > 0; - } + return dbGet(cfh, null, key.send(), true); } } else { return null; @@ -924,430 +956,452 @@ public class LLLocalDictionary implements LLDictionary { .onErrorMap(cause -> new IOException("Failed to read " + LLUtils.toStringSafe(key), cause)) .subscribeOn(dbScheduler); case VOID -> Mono.empty(); + }; + } }, - key -> Mono.fromRunnable(key::release) + keySend -> Mono.fromRunnable(keySend::close) ); } @Override - public Flux>> getMulti(@Nullable LLSnapshot snapshot, - Flux> keys, + public Flux, Optional>>> getMulti(@Nullable LLSnapshot snapshot, + Flux>> keys, boolean existsAlmostCertainly) { return keys .transform(normal -> new BufferTimeOutPublisher<>(normal, MULTI_GET_WINDOW, MULTI_GET_WINDOW_TIMEOUT)) .doOnDiscard(Tuple2.class, discardedEntry -> { //noinspection unchecked - var entry = (Tuple2) discardedEntry; - entry.getT2().release(); + var entry = (Tuple2) discardedEntry; + entry.getT2().close(); }) .doOnDiscard(Tuple3.class, discardedEntry -> { //noinspection unchecked - var entry = (Tuple3) discardedEntry; - entry.getT2().release(); - entry.getT3().release(); + var entry = (Tuple3) discardedEntry; + entry.getT2().close(); + entry.getT3().close(); }) .flatMapSequential(keysWindow -> { - List keyBufsWindow = new ArrayList<>(keysWindow.size()); - for (Tuple2 objects : keysWindow) { - keyBufsWindow.add(objects.getT2()); + List> keyBufsWindowSend = new ArrayList<>(keysWindow.size()); + for (Tuple2> objects : keysWindow) { + keyBufsWindowSend.add(objects.getT2()); } - return Mono - .fromCallable(() -> { + return runOnDb(() -> { + List keyBufsWindow = new ArrayList<>(keyBufsWindowSend.size()); + for (Send bufferSend : keyBufsWindowSend) { + keyBufsWindow.add(bufferSend.receive()); + } + try { + Iterable locks; + ArrayList stamps; + if (updateMode == UpdateMode.ALLOW) { + locks = itemsLock.bulkGetAt(getLockIndices(keyBufsWindow)); + stamps = new 
ArrayList<>(); + for (var lock : locks) { + + stamps.add(lock.readLock()); + } + } else { + locks = null; + stamps = null; + } + try { + var columnFamilyHandles = new RepeatedElementList<>(cfh, keysWindow.size()); + List results = db.multiGetAsList(resolveSnapshot(snapshot), + columnFamilyHandles, LLUtils.toArray(keyBufsWindow)); + var mappedResults = new ArrayList, Optional>>>(results.size()); + for (int i = 0; i < results.size(); i++) { + byte[] val = results.get(i); + Optional valueOpt; + if (val != null) { + results.set(i, null); + valueOpt = Optional.of(LLUtils.fromByteArray(alloc, val)); + } else { + valueOpt = Optional.empty(); + } + mappedResults.add(Tuples.of(keysWindow.get(i).getT1(), + keyBufsWindow.get(i).send(), + valueOpt.map(Resource::send) + )); + } + return mappedResults; + } finally { + if (updateMode == UpdateMode.ALLOW) { + int index = 0; + for (var lock : locks) { + lock.unlockRead(stamps.get(index)); + index++; + } + } + } + } finally { + for (Buffer buffer : keyBufsWindow) { + buffer.close(); + } + } + }) + .flatMapIterable(list -> list) + .onErrorMap(cause -> new IOException("Failed to read keys", cause)) + .doAfterTerminate(() -> keyBufsWindowSend.forEach(Send::close)); + }, 2) // Max concurrency is 2 to read data while preparing the next segment + .doOnDiscard(LLEntry.class, ResourceSupport::close) + .doOnDiscard(Tuple3.class, discardedEntry -> { + if (discardedEntry.getT2() instanceof Buffer bb) { + bb.close(); + } + if (discardedEntry.getT2() instanceof Optional opt) { + if (opt.isPresent() && opt.get() instanceof Buffer bb) { + bb.close(); + } + } + }); + } + + @Override + public Flux> putMulti(Flux> entries, boolean getOldValues) { + return entries + .buffer(Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP)) + .flatMapSequential(ew -> this + .>>runOnDb(() -> { + var entriesWindow = new ArrayList(ew.size()); + for (Send entrySend : ew) { + entriesWindow.add(entrySend.receive()); + } + try { Iterable locks; ArrayList stamps; if (updateMode == UpdateMode.ALLOW) { - locks = itemsLock.bulkGetAt(getLockIndices(keyBufsWindow)); + locks = itemsLock.bulkGetAt(getLockIndicesEntries(entriesWindow)); stamps = new ArrayList<>(); for (var lock : locks) { - - stamps.add(lock.readLock()); + stamps.add(lock.writeLock()); } } else { locks = null; stamps = null; } try { - var columnFamilyHandles = new RepeatedElementList<>(cfh, keysWindow.size()); - var results = db.multiGetAsList(resolveSnapshot(snapshot), columnFamilyHandles, LLUtils.toArray(keyBufsWindow)); - var mappedResults = new ArrayList>>(results.size()); - for (int i = 0; i < results.size(); i++) { - byte[] val = results.get(i); - Optional valueOpt; - if (val != null) { - results.set(i, null); - valueOpt = Optional.of(wrappedBuffer(val)); - } else { - valueOpt = Optional.empty(); + ArrayList> oldValues; + if (getOldValues) { + oldValues = new ArrayList<>(entriesWindow.size()); + try (var readOptions = resolveSnapshot(null)) { + for (LLEntry entry : entriesWindow) { + try (var key = entry.getKey().receive()) { + Send oldValue = dbGet(cfh, readOptions, key.copy().send(), false); + if (oldValue != null) { + oldValues.add(LLEntry.of(key.send(), oldValue).send()); + } + } + } } - mappedResults.add(Tuples.of(keysWindow.get(i).getT1(), - keyBufsWindow.get(i).retain(), - valueOpt - )); + } else { + oldValues = null; } - return mappedResults; + if (USE_WRITE_BATCHES_IN_PUT_MULTI) { + var batch = new CappedWriteBatch(db, + CAPPED_WRITE_BATCH_CAP, + RESERVED_WRITE_BATCH_SIZE, + MAX_WRITE_BATCH_SIZE, + BATCH_WRITE_OPTIONS + ); + 
for (LLEntry entry : entriesWindow) { + var k = entry.getKey(); + var v = entry.getValue(); + batch.put(cfh, k, v); + } + batch.writeToDbAndClose(); + batch.close(); + } else { + for (LLEntry entry : entriesWindow) { + try (var k = entry.getKey().receive()) { + try (var v = entry.getValue().receive()) { + db.put(cfh, EMPTY_WRITE_OPTIONS, toDirect(k), toDirect(v)); + } + } + } + } + return oldValues; } finally { if (updateMode == UpdateMode.ALLOW) { int index = 0; for (var lock : locks) { - lock.unlockRead(stamps.get(index)); + lock.unlockWrite(stamps.get(index)); index++; } } } - }) - .subscribeOn(dbScheduler) - .flatMapIterable(list -> list) - .onErrorMap(cause -> new IOException("Failed to read keys " - + Arrays.deepToString(keyBufsWindow.toArray(ByteBuf[]::new)), cause)) - .doAfterTerminate(() -> keyBufsWindow.forEach(ReferenceCounted::release)); - }, 2) // Max concurrency is 2 to read data while preparing the next segment - .doOnDiscard(Entry.class, discardedEntry -> { - var entry = (LLEntry) discardedEntry; - entry.getKey().release(); - entry.getValue().release(); - }) - .doOnDiscard(Tuple3.class, discardedEntry -> { - //noinspection unchecked - var entry = (Tuple3>) discardedEntry; - entry.getT2().release(); - entry.getT3().ifPresent(ReferenceCounted::release); - }); - } - - @Override - public Flux putMulti(Flux entries, boolean getOldValues) { - return entries - .buffer(Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP)) - .flatMapSequential(ew -> Mono - .using( - () -> ew, - entriesWindow -> Mono - .fromCallable(() -> { - Iterable locks; - ArrayList stamps; - if (updateMode == UpdateMode.ALLOW) { - locks = itemsLock.bulkGetAt(getLockIndicesEntries(entriesWindow)); - stamps = new ArrayList<>(); - for (var lock : locks) { - stamps.add(lock.writeLock()); - } - } else { - locks = null; - stamps = null; - } - try { - if (USE_WRITE_BATCHES_IN_PUT_MULTI) { - var batch = new CappedWriteBatch(db, - CAPPED_WRITE_BATCH_CAP, - RESERVED_WRITE_BATCH_SIZE, - MAX_WRITE_BATCH_SIZE, - BATCH_WRITE_OPTIONS - ); - for (LLEntry entry : entriesWindow) { - var k = entry.getKey().retain(); - var v = entry.getValue().retain(); - batch.put(cfh, k, v); - } - batch.writeToDbAndClose(); - batch.close(); - } else { - for (LLEntry entry : entriesWindow) { - db.put(cfh, EMPTY_WRITE_OPTIONS, entry.getKey().nioBuffer(), entry.getValue().nioBuffer()); - } - } - return null; - } finally { - if (updateMode == UpdateMode.ALLOW) { - int index = 0; - for (var lock : locks) { - lock.unlockWrite(stamps.get(index)); - index++; - } - } - } - }) - .subscribeOn(dbScheduler) - - // Prepend everything to get previous elements - .transform(transformer -> { - var obj = new Object(); - if (getOldValues) { - return this - .getMulti(null, Flux - .fromIterable(entriesWindow) - .map(entry -> entry.getKey().retain()) - .map(buf -> Tuples.of(obj, buf)), false) - .publishOn(dbScheduler) - .then(transformer); - } else { - return transformer; - } - }), - entriesWindow -> { - for (LLEntry entry : entriesWindow) { - entry.release(); - } + } finally { + for (LLEntry llEntry : entriesWindow) { + llEntry.close(); } - ), 2) // Max concurrency is 2 to read data while preparing the next segment + } + }), 2) // Max concurrency is 2 to read data while preparing the next segment + .flatMapIterable(oldValuesList -> oldValuesList) .transform(LLUtils::handleDiscard); } @Override - public Flux> updateMulti(Flux> entries, - BiSerializationFunction updateFunction) { + public Flux, X>> updateMulti(Flux, X>> entries, + BiSerializationFunction, X, Send> 
updateFunction) { return entries .buffer(Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP)) - .flatMapSequential(ew -> Flux - .using( - () -> ew, - entriesWindow -> { - List keyBufsWindow = new ArrayList<>(entriesWindow.size()); - for (Tuple2 objects : entriesWindow) { - keyBufsWindow.add(objects.getT1()); - } - return Mono - .>>fromCallable(() -> { - Iterable locks; - ArrayList stamps; - if (updateMode == UpdateMode.ALLOW) { - locks = itemsLock.bulkGetAt(getLockIndicesWithExtra(entriesWindow)); - stamps = new ArrayList<>(); - for (var lock : locks) { - stamps.add(lock.writeLock()); - } - } else { - locks = null; - stamps = null; - } - try { - var columnFamilyHandles = new RepeatedElementList<>(cfh, entriesWindow.size()); - ArrayList>> mappedInputs; - { - var inputs = db.multiGetAsList(resolveSnapshot(null), columnFamilyHandles, LLUtils.toArray(keyBufsWindow)); - mappedInputs = new ArrayList<>(inputs.size()); - for (int i = 0; i < inputs.size(); i++) { - var val = inputs.get(i); - if (val != null) { - inputs.set(i, null); - mappedInputs.add(Tuples.of( - keyBufsWindow.get(i).retain(), - entriesWindow.get(i).getT2(), - Optional.of(wrappedBuffer(val)) - )); - } else { - mappedInputs.add(Tuples.of( - keyBufsWindow.get(i).retain(), - entriesWindow.get(i).getT2(), - Optional.empty() - )); - } - } - } - var updatedValuesToWrite = new ArrayList(mappedInputs.size()); - var valueChangedResult = new ArrayList>(mappedInputs.size()); - try { - for (var mappedInput : mappedInputs) { - //noinspection BlockingMethodInNonBlockingContext - var updatedValue = updateFunction.apply(mappedInput.getT1().retain(), mappedInput.getT2()); - valueChangedResult.add(new ExtraKeyOperationResult<>(mappedInput.getT1(), - mappedInput.getT2(), - !Objects.equals(mappedInput.getT3().orElse(null), updatedValue.retain()) - )); - updatedValuesToWrite.add(updatedValue); - } - } finally { - for (var mappedInput : mappedInputs) { - mappedInput.getT3().ifPresent(ReferenceCounted::release); - } - } + .flatMapSequential(ew -> this., X>>>runOnDb(() -> { + List> entriesWindow = new ArrayList<>(ew.size()); + for (Tuple2, X> tuple : ew) { + entriesWindow.add(tuple.mapT1(Send::receive)); + } + try { + List keyBufsWindow = new ArrayList<>(entriesWindow.size()); + for (Tuple2 objects : entriesWindow) { + keyBufsWindow.add(objects.getT1()); + } - if (USE_WRITE_BATCHES_IN_PUT_MULTI) { - var batch = new CappedWriteBatch(db, - CAPPED_WRITE_BATCH_CAP, - RESERVED_WRITE_BATCH_SIZE, - MAX_WRITE_BATCH_SIZE, - BATCH_WRITE_OPTIONS - ); - int i = 0; - for (Tuple2 entry : entriesWindow) { - var valueToWrite = updatedValuesToWrite.get(i); - if (valueToWrite == null) { - batch.delete(cfh, entry.getT1().retain()); - } else { - batch.put(cfh, entry.getT1().retain(), valueToWrite.retain()); - } - i++; - } - batch.writeToDbAndClose(); - batch.close(); - } else { - int i = 0; - for (Tuple2 entry : entriesWindow) { - var valueToWrite = updatedValuesToWrite.get(i); - db.put(cfh, EMPTY_WRITE_OPTIONS, entry.getT1().nioBuffer(), valueToWrite.nioBuffer()); - i++; - } - } - return valueChangedResult; - } finally { - if (updateMode == UpdateMode.ALLOW) { - int index = 0; - for (var lock : locks) { - lock.unlockWrite(stamps.get(index)); - index++; - } - } - } - }) - .subscribeOn(dbScheduler) - .flatMapIterable(list -> list); - }, - entriesWindow -> { - for (Tuple2 entry : entriesWindow) { - entry.getT1().release(); + Iterable locks; + ArrayList stamps; + if (updateMode == UpdateMode.ALLOW) { + locks = itemsLock.bulkGetAt(getLockIndicesWithExtra(entriesWindow)); + 
stamps = new ArrayList<>(); + for (var lock : locks) { + stamps.add(lock.writeLock()); + } + } else { + locks = null; + stamps = null; + } + try { + var columnFamilyHandles = new RepeatedElementList<>(cfh, entriesWindow.size()); + ArrayList, X, Optional>>> mappedInputs; + { + var inputs = db.multiGetAsList(resolveSnapshot(null), columnFamilyHandles, + LLUtils.toArray(keyBufsWindow)); + mappedInputs = new ArrayList<>(inputs.size()); + for (int i = 0; i < inputs.size(); i++) { + var val = inputs.get(i); + if (val != null) { + inputs.set(i, null); + mappedInputs.add(Tuples.of( + keyBufsWindow.get(i).send(), + entriesWindow.get(i).getT2(), + Optional.of(fromByteArray(alloc, val).send()) + )); + } else { + mappedInputs.add(Tuples.of( + keyBufsWindow.get(i).send(), + entriesWindow.get(i).getT2(), + Optional.empty() + )); } } - ), 2 // Max concurrency is 2 to update data while preparing the next segment - ) - .doOnDiscard(Tuple2.class, entry -> { - if (entry.getT1() instanceof ByteBuf bb) { - bb.release(); + } + var updatedValuesToWrite = new ArrayList>(mappedInputs.size()); + var valueChangedResult = new ArrayList, X>>(mappedInputs.size()); + try { + for (var mappedInput : mappedInputs) { + try (var updatedValue = updateFunction + .apply(mappedInput.getT1(), mappedInput.getT2()).receive()) { + try (var t3 = mappedInput.getT3().map(Send::receive).orElse(null)) { + valueChangedResult.add(new ExtraKeyOperationResult<>(mappedInput.getT1(), + mappedInput.getT2(), !LLUtils.equals(t3, updatedValue))); + } + updatedValuesToWrite.add(updatedValue.send()); + } + } + } finally { + for (var mappedInput : mappedInputs) { + mappedInput.getT3().ifPresent(Send::close); + } + } + + if (USE_WRITE_BATCHES_IN_PUT_MULTI) { + var batch = new CappedWriteBatch(db, + CAPPED_WRITE_BATCH_CAP, + RESERVED_WRITE_BATCH_SIZE, + MAX_WRITE_BATCH_SIZE, + BATCH_WRITE_OPTIONS + ); + int i = 0; + for (Tuple2 entry : entriesWindow) { + var valueToWrite = updatedValuesToWrite.get(i); + if (valueToWrite == null) { + batch.delete(cfh, entry.getT1().send()); + } else { + batch.put(cfh, entry.getT1().send(), valueToWrite); + } + i++; + } + batch.writeToDbAndClose(); + batch.close(); + } else { + int i = 0; + for (Tuple2 entry : entriesWindow) { + try (var valueToWrite = updatedValuesToWrite.get(i).receive()) { + db.put(cfh, EMPTY_WRITE_OPTIONS, toDirect(entry.getT1()), toDirect(valueToWrite)); + } + i++; + } + } + return valueChangedResult; + } finally { + if (updateMode == UpdateMode.ALLOW) { + int index = 0; + for (var lock : locks) { + lock.unlockWrite(stamps.get(index)); + index++; + } + } + } + } finally { + for (Tuple2 tuple : entriesWindow) { + tuple.getT1().close(); + } } - if (entry.getT2() instanceof ByteBuf bb) { - bb.release(); + }).flatMapIterable(list -> list), /* Max concurrency is 2 to update data while preparing the next segment */ 2) + .doOnDiscard(Tuple2.class, entry -> { + if (entry.getT1() instanceof Buffer bb) { + bb.close(); + } + if (entry.getT2() instanceof Buffer bb) { + bb.close(); } }) .doOnDiscard(ExtraKeyOperationResult.class, entry -> { - if (entry.key() instanceof ByteBuf bb) { - bb.release(); + if (entry.key() instanceof Buffer bb) { + bb.close(); } - if (entry.extra() instanceof ByteBuf bb) { - bb.release(); + if (entry.extra() instanceof Buffer bb) { + bb.close(); } }) .doOnDiscard(Collection.class, obj -> { //noinspection unchecked var castedEntries = (Collection>) obj; for (var entry : castedEntries) { - if (entry.key() instanceof ByteBuf bb) { - bb.release(); + if (entry.key() instanceof Buffer bb) { 
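All of these pipelines end in doOnDiscard hooks because Reactor operators may drop elements they have already materialized (prefetch queues, windows cut short by cancellation or error), and a dropped Buffer that nobody closes is a leak under the new API. The minimal form of the hook, as a sketch:

```java
import io.netty.buffer.api.Buffer;
import reactor.core.publisher.Flux;

final class DiscardSketch {

	// take(10) cancels upstream; any prefetched buffers are routed to the
	// discard hook instead of being emitted, so they still get closed.
	static Flux<Buffer> firstTen(Flux<Buffer> source) {
		return source
				.take(10)
				.doOnDiscard(Buffer.class, Buffer::close);
	}
}
```

Tuples and collections have to be unpacked by hand, as the hooks in this file do, because doOnDiscard matches only on the runtime class of the discarded element itself.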
+ bb.close(); } - if (entry.extra() instanceof ByteBuf bb) { - bb.release(); + if (entry.extra() instanceof Buffer bb) { + bb.close(); } } }); } @Override - public Flux getRange(@Nullable LLSnapshot snapshot, - Mono rangeMono, + public Flux> getRange(@Nullable LLSnapshot snapshot, + Mono> rangeMono, boolean existsAlmostCertainly) { return Flux.usingWhen(rangeMono, - range -> { - if (range.isSingle()) { - return getRangeSingle(snapshot, Mono.just(range.getMin()).map(ByteBuf::retain), existsAlmostCertainly); - } else { - return getRangeMulti(snapshot, Mono.just(range).map(LLRange::retain)); + rangeSend -> { + try (var range = rangeSend.receive()) { + if (range.isSingle()) { + var rangeSingleMono = rangeMono.map(r -> r.receive().getSingle()); + return getRangeSingle(snapshot, rangeSingleMono, existsAlmostCertainly); + } else { + return getRangeMulti(snapshot, rangeMono); + } } }, - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } @Override - public Flux> getRangeGrouped(@Nullable LLSnapshot snapshot, - Mono rangeMono, + public Flux>> getRangeGrouped(@Nullable LLSnapshot snapshot, + Mono> rangeMono, int prefixLength, boolean existsAlmostCertainly) { return Flux.usingWhen(rangeMono, - range -> { - if (range.isSingle()) { - var rangeSingleMono = Mono.just(range.getMin()).map(ByteBuf::retain); - return getRangeSingle(snapshot, rangeSingleMono, existsAlmostCertainly).map(List::of); - } else { - return getRangeMultiGrouped(snapshot, Mono.just(range).map(LLRange::retain), prefixLength); + rangeSend -> { + try (var range = rangeSend.receive()) { + if (range.isSingle()) { + var rangeSingleMono = rangeMono.map(r -> r.receive().getSingle()); + return getRangeSingle(snapshot, rangeSingleMono, existsAlmostCertainly).map(List::of); + } else { + return getRangeMultiGrouped(snapshot, rangeMono, prefixLength); + } } }, - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } - private Flux getRangeSingle(LLSnapshot snapshot, - Mono keyMono, + private Flux> getRangeSingle(LLSnapshot snapshot, + Mono> keyMono, boolean existsAlmostCertainly) { - return Flux.usingWhen(keyMono, - key -> this - .get(snapshot, Mono.just(key).map(ByteBuf::retain), existsAlmostCertainly) - .map(value -> new LLEntry(key.retain(), value)), - key -> Mono.fromRunnable(key::release) - ).transform(LLUtils::handleDiscard); + return Mono + .zip(keyMono, this.get(snapshot, keyMono, existsAlmostCertainly)) + .map(result -> LLEntry.of(result.getT1(), result.getT2()).send()) + .flux() + .transform(LLUtils::handleDiscard); } - private Flux getRangeMulti(LLSnapshot snapshot, Mono rangeMono) { + private Flux> getRangeMulti(LLSnapshot snapshot, Mono> rangeMono) { return Flux.usingWhen(rangeMono, - range -> Flux.using( - () -> new LLLocalEntryReactiveRocksIterator(db, alloc, cfh, range.retain(), + rangeSend -> Flux.using( + () -> new LLLocalEntryReactiveRocksIterator(db, alloc, cfh, rangeSend, databaseOptions.allowNettyDirect(), resolveSnapshot(snapshot), getRangeMultiDebugName), llLocalEntryReactiveRocksIterator -> llLocalEntryReactiveRocksIterator.flux().subscribeOn(dbScheduler), LLLocalReactiveRocksIterator::release ).transform(LLUtils::handleDiscard), - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } - private Flux> getRangeMultiGrouped(LLSnapshot snapshot, Mono rangeMono, int prefixLength) { + private Flux>> getRangeMultiGrouped(LLSnapshot snapshot, Mono> rangeMono, int prefixLength) { return 
Flux.usingWhen(rangeMono, - range -> Flux.using( - () -> new LLLocalGroupedEntryReactiveRocksIterator(db, alloc, cfh, prefixLength, range.retain(), + rangeSend -> Flux.using( + () -> new LLLocalGroupedEntryReactiveRocksIterator(db, alloc, cfh, prefixLength, rangeSend, databaseOptions.allowNettyDirect(), resolveSnapshot(snapshot), "getRangeMultiGrouped"), reactiveRocksIterator -> reactiveRocksIterator.flux().subscribeOn(dbScheduler), LLLocalGroupedReactiveRocksIterator::release ).transform(LLUtils::handleDiscard), - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } @Override - public Flux getRangeKeys(@Nullable LLSnapshot snapshot, Mono rangeMono) { + public Flux> getRangeKeys(@Nullable LLSnapshot snapshot, Mono> rangeMono) { return Flux.usingWhen(rangeMono, - range -> { - if (range.isSingle()) { - return this.getRangeKeysSingle(snapshot, Mono.just(range.getMin()).map(ByteBuf::retain)); - } else { - return this.getRangeKeysMulti(snapshot, Mono.just(range).map(LLRange::retain)); + rangeSend -> { + try (var range = rangeSend.receive()) { + if (range.isSingle()) { + return this.getRangeKeysSingle(snapshot, rangeMono.map(r -> r.receive().getSingle())); + } else { + return this.getRangeKeysMulti(snapshot, rangeMono); + } } }, - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } @Override - public Flux> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, - Mono rangeMono, + public Flux>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, + Mono> rangeMono, int prefixLength) { return Flux.usingWhen(rangeMono, - range -> Flux.using( - () -> new LLLocalGroupedKeyReactiveRocksIterator(db, alloc, cfh, prefixLength, range.retain(), + rangeSend -> Flux.using( + () -> new LLLocalGroupedKeyReactiveRocksIterator(db, alloc, cfh, prefixLength, rangeSend, databaseOptions.allowNettyDirect(), resolveSnapshot(snapshot), "getRangeKeysGrouped"), reactiveRocksIterator -> reactiveRocksIterator.flux().subscribeOn(dbScheduler), LLLocalGroupedReactiveRocksIterator::release ).transform(LLUtils::handleDiscard), - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } @Override - public Flux badBlocks(Mono rangeMono) { + public Flux badBlocks(Mono> rangeMono) { return Flux.usingWhen(rangeMono, - range -> Flux + rangeSend -> Flux .create(sink -> { + var range = rangeSend.receive(); + sink.onDispose(range::close); try (var ro = new ReadOptions(getReadOptions(null))) { ro.setFillCache(false); if (!range.isSingle()) { ro.setReadaheadSize(32 * 1024); } ro.setVerifyChecksums(true); - var rocksIteratorTuple = getRocksIterator(databaseOptions.allowNettyDirect(), ro, range.retain(), db, cfh); + var rocksIteratorTuple = getRocksIterator(databaseOptions.allowNettyDirect(), ro, range.send(), db, cfh); try { try (var rocksIterator = rocksIteratorTuple.getT1()) { rocksIterator.seekToFirst(); @@ -1366,8 +1420,8 @@ public class LLLocalDictionary implements LLDictionary { } } } finally { - rocksIteratorTuple.getT2().release(); - rocksIteratorTuple.getT3().release(); + rocksIteratorTuple.getT2().close(); + rocksIteratorTuple.getT3().close(); } sink.complete(); } catch (Throwable ex) { @@ -1375,20 +1429,20 @@ public class LLLocalDictionary implements LLDictionary { } }) .subscribeOn(dbScheduler), - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } @Override - public Flux getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono rangeMono, int prefixLength) 
{ + public Flux> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono> rangeMono, int prefixLength) { return Flux.usingWhen(rangeMono, - range -> Flux + rangeSend -> Flux .using( () -> new LLLocalKeyPrefixReactiveRocksIterator(db, alloc, cfh, prefixLength, - range.retain(), + rangeSend, databaseOptions.allowNettyDirect(), resolveSnapshot(snapshot), true, @@ -1398,131 +1452,138 @@ public class LLLocalDictionary implements LLDictionary { LLLocalKeyPrefixReactiveRocksIterator::release ) .subscribeOn(dbScheduler), - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } - private Flux getRangeKeysSingle(LLSnapshot snapshot, Mono keyMono) { + private Flux> getRangeKeysSingle(LLSnapshot snapshot, Mono> keyMono) { return Flux.usingWhen(keyMono, - key -> this - .containsKey(snapshot, Mono.just(key).map(ByteBuf::retain)) - .flux() - .handle((contains, sink) -> { + keySend -> this + .containsKey(snapshot, keyMono) + .>handle((contains, sink) -> { if (contains) { - sink.next(key.retain()); + sink.next(keySend); } else { sink.complete(); } }) - .doOnDiscard(ByteBuf.class, ReferenceCounted::release), - key -> Mono.fromRunnable(key::release) + .flux() + .doOnDiscard(Buffer.class, Buffer::close), + keySend -> Mono.fromRunnable(keySend::close) ); } - private Flux getRangeKeysMulti(LLSnapshot snapshot, Mono rangeMono) { + private Flux> getRangeKeysMulti(LLSnapshot snapshot, Mono> rangeMono) { return Flux.usingWhen(rangeMono, - range -> Flux.using( - () -> new LLLocalKeyReactiveRocksIterator(db, alloc, cfh, range.retain(), + rangeSend -> Flux.using( + () -> new LLLocalKeyReactiveRocksIterator(db, alloc, cfh, rangeSend, databaseOptions.allowNettyDirect(), resolveSnapshot(snapshot), getRangeKeysMultiDebugName), llLocalKeyReactiveRocksIterator -> llLocalKeyReactiveRocksIterator.flux().subscribeOn(dbScheduler), LLLocalReactiveRocksIterator::release ).transform(LLUtils::handleDiscard), - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } @Override - public Mono setRange(Mono rangeMono, Flux entries) { + public Mono setRange(Mono> rangeMono, Flux> entries) { return Mono.usingWhen(rangeMono, - range -> { + rangeSend -> { if (USE_WINDOW_IN_SET_RANGE) { - return Mono - .fromCallable(() -> { - if (!USE_WRITE_BATCH_IN_SET_RANGE_DELETE || !USE_WRITE_BATCHES_IN_SET_RANGE) { - assert EMPTY_READ_OPTIONS.isOwningHandle(); - try (var opts = new ReadOptions(EMPTY_READ_OPTIONS)) { - ReleasableSlice minBound; - if (range.hasMin()) { - minBound = setIterateBound(databaseOptions.allowNettyDirect(), - opts, - IterateBound.LOWER, - range.getMin().retain() - ); - } else { - minBound = emptyReleasableSlice(); - } - try { - ReleasableSlice maxBound; - if (range.hasMax()) { - maxBound = setIterateBound(databaseOptions.allowNettyDirect(), + return this + .runOnDb(() -> { + try (var range = rangeSend.receive()) { + if (!USE_WRITE_BATCH_IN_SET_RANGE_DELETE || !USE_WRITE_BATCHES_IN_SET_RANGE) { + assert EMPTY_READ_OPTIONS.isOwningHandle(); + try (var opts = new ReadOptions(EMPTY_READ_OPTIONS)) { + ReleasableSlice minBound; + if (range.hasMin()) { + minBound = setIterateBound(databaseOptions.allowNettyDirect(), opts, - IterateBound.UPPER, - range.getMax().retain() + IterateBound.LOWER, + range.getMin() ); } else { - maxBound = emptyReleasableSlice(); + minBound = emptyReleasableSlice(); } - assert cfh.isOwningHandle(); - assert opts.isOwningHandle(); - try (RocksIterator it = db.newIterator(cfh, opts)) { - if (!PREFER_SEEK_TO_FIRST && 
range.hasMin()) { - rocksIterSeekTo(databaseOptions.allowNettyDirect(), it, range.getMin().retain()); + try { + ReleasableSlice maxBound; + if (range.hasMax()) { + maxBound = setIterateBound(databaseOptions.allowNettyDirect(), + opts, + IterateBound.UPPER, + range.getMax() + ); } else { - it.seekToFirst(); + maxBound = emptyReleasableSlice(); } - it.status(); - while (it.isValid()) { - db.delete(cfh, it.key()); - it.next(); + assert cfh.isOwningHandle(); + assert opts.isOwningHandle(); + try (RocksIterator it = db.newIterator(cfh, opts)) { + if (!PREFER_SEEK_TO_FIRST && range.hasMin()) { + rocksIterSeekTo(databaseOptions.allowNettyDirect(), it, range.getMin()); + } else { + it.seekToFirst(); + } it.status(); + while (it.isValid()) { + db.delete(cfh, it.key()); + it.next(); + it.status(); + } + } finally { + maxBound.close(); } } finally { - maxBound.release(); + minBound.close(); } - } finally { - minBound.release(); + } + } else if (USE_CAPPED_WRITE_BATCH_IN_SET_RANGE) { + try (var batch = new CappedWriteBatch(db, + CAPPED_WRITE_BATCH_CAP, + RESERVED_WRITE_BATCH_SIZE, + MAX_WRITE_BATCH_SIZE, + BATCH_WRITE_OPTIONS + )) { + if (range.isSingle()) { + batch.delete(cfh, range.getSingle()); + } else { + deleteSmallRangeWriteBatch(batch, range.copy().send()); + } + batch.writeToDbAndClose(); + } + } else { + try (var batch = new WriteBatch(RESERVED_WRITE_BATCH_SIZE)) { + if (range.isSingle()) { + batch.delete(cfh, LLUtils.toArray(range.getSingleUnsafe())); + } else { + deleteSmallRangeWriteBatch(batch, range.copy().send()); + } + db.write(EMPTY_WRITE_OPTIONS, batch); + batch.clear(); } } - } else if (USE_CAPPED_WRITE_BATCH_IN_SET_RANGE) { - try (var batch = new CappedWriteBatch(db, - CAPPED_WRITE_BATCH_CAP, - RESERVED_WRITE_BATCH_SIZE, - MAX_WRITE_BATCH_SIZE, - BATCH_WRITE_OPTIONS - )) { - if (range.isSingle()) { - batch.delete(cfh, range.getSingle().retain()); - } else { - deleteSmallRangeWriteBatch(batch, range.retain()); - } - batch.writeToDbAndClose(); - } - } else { - try (var batch = new WriteBatch(RESERVED_WRITE_BATCH_SIZE)) { - if (range.isSingle()) { - batch.delete(cfh, LLUtils.toArray(range.getSingle())); - } else { - deleteSmallRangeWriteBatch(batch, range.retain()); - } - db.write(EMPTY_WRITE_OPTIONS, batch); - batch.clear(); - } + return null; } - return null; }) - .subscribeOn(dbScheduler) .thenMany(entries.window(MULTI_GET_WINDOW)) .flatMap(keysWindowFlux -> keysWindowFlux .collectList() - .flatMap(entriesList -> Mono - .fromCallable(() -> { + .flatMap(entriesListSend -> this + .runOnDb(() -> { + List entriesList = new ArrayList<>(entriesListSend.size()); + for (Send entrySend : entriesListSend) { + entriesList.add(entrySend.receive()); + } try { if (!USE_WRITE_BATCHES_IN_SET_RANGE) { for (LLEntry entry : entriesList) { - assert !entry.isReleased(); - assert entry.getKey().refCnt() > 0; - assert entry.getValue().refCnt() > 0; - db.put(cfh, EMPTY_WRITE_OPTIONS, entry.getKey().nioBuffer(), entry.getValue().nioBuffer()); + assert entry.isAccessible(); + try (var k = entry.getKey().receive()) { + try (var v = entry.getValue().receive()) { + db.put(cfh, EMPTY_WRITE_OPTIONS, toDirect(k), toDirect(v)); + } + } } } else if (USE_CAPPED_WRITE_BATCH_IN_SET_RANGE) { try (var batch = new CappedWriteBatch(db, @@ -1532,20 +1593,17 @@ public class LLLocalDictionary implements LLDictionary { BATCH_WRITE_OPTIONS )) { for (LLEntry entry : entriesList) { - assert !entry.isReleased(); - assert entry.getKey().refCnt() > 0; - assert entry.getValue().refCnt() > 0; - batch.put(cfh, 
entry.getKey().retain(), entry.getValue().retain()); + assert entry.isAccessible(); + batch.put(cfh, entry.getKey(), entry.getValue()); } batch.writeToDbAndClose(); } } else { try (var batch = new WriteBatch(RESERVED_WRITE_BATCH_SIZE)) { for (LLEntry entry : entriesList) { - assert !entry.isReleased(); - assert entry.getKey().refCnt() > 0; - assert entry.getValue().refCnt() > 0; - batch.put(cfh, LLUtils.toArray(entry.getKey()), LLUtils.toArray(entry.getValue())); + assert entry.isAccessible(); + batch.put(cfh, LLUtils.toArray(entry.getKeyUnsafe()), + LLUtils.toArray(entry.getValueUnsafe())); } db.write(EMPTY_WRITE_OPTIONS, batch); batch.clear(); @@ -1554,12 +1612,11 @@ public class LLLocalDictionary implements LLDictionary { return null; } finally { for (LLEntry entry : entriesList) { - assert !entry.isReleased(); - entry.release(); + assert entry.isAccessible(); + entry.close(); } } }) - .subscribeOn(dbScheduler) ) ) .then() @@ -1571,161 +1628,136 @@ public class LLLocalDictionary implements LLDictionary { }); } return this - .getRange(null, Mono.just(range).map(LLRange::retain), false) - .flatMap(oldValue -> Mono - .fromCallable(() -> { - try { - dbDelete(cfh, EMPTY_WRITE_OPTIONS, oldValue.getKey().retain()); - return null; - } finally { - oldValue.getKey().release(); - oldValue.getValue().release(); - } - }) - .subscribeOn(dbScheduler) - ) + .getRange(null, rangeMono, false) + .flatMap(oldValueSend -> this.runOnDb(() -> { + try (var oldValue = oldValueSend.receive()) { + dbDelete(cfh, EMPTY_WRITE_OPTIONS, oldValue.getKey()); + return null; + } + })) .then(entries - .flatMap(entry -> Mono.using( - () -> entry, - releasableEntry -> this - .put(Mono.just(entry.getKey()).map(ByteBuf::retain), - Mono.just(entry.getValue()).map(ByteBuf::retain), - LLDictionaryResultType.VOID - ) - .doOnNext(ReferenceCounted::release), - releasableEntry -> { - releasableEntry.getKey().release(); - releasableEntry.getValue().release(); - }) - ) + .flatMap(entrySend -> Mono.using( + entrySend::receive, + entry -> this + .put(LLUtils.lazyRetain(entry::getKey), LLUtils.lazyRetain(entry::getValue), + LLDictionaryResultType.VOID) + .doOnNext(Send::close), + ResourceSupport::close + )) .then(Mono.empty()) ) .onErrorMap(cause -> new IOException("Failed to write range", cause)); } }, - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } //todo: this is broken, check why. (is this still true?) 
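The shape repeated through the hunks above is the heart of this refactor: a ByteBuf with manual retain()/release() pairs becomes a detached Send<Buffer> (or Send<LLRange>) that travels through the reactive pipeline, is received exactly once inside try-with-resources, and is closed by the usingWhen cleanup if the resource closure never ran. One caveat worth flagging: the rewritten getRangeKeys earlier in this file calls rangeSend.receive() and then subscribes rangeMono a second time, so the same Send would be consumed twice; spots like this are presumably what the "(unfinished)" in the subject line refers to. A minimal sketch of the single-receive shape, built only from Buffer, Send, and the Reactor operators already used in this patch (readKeyLength is a hypothetical example method, not project API; it also assumes, as these hunks do, that closing an already-received Send is harmless):

    import io.netty.buffer.api.Buffer;
    import io.netty.buffer.api.Send;
    import reactor.core.publisher.Mono;

    final class SendOwnershipSketch {

        // receive() transfers ownership of the Buffer to this thread, exactly once;
        // try-with-resources then guarantees the Buffer is closed after use.
        static Mono<Integer> readKeyLength(Mono<Send<Buffer>> keyMono) {
            return Mono.usingWhen(keyMono,
                    keySend -> Mono.fromCallable(() -> {
                        try (Buffer key = keySend.receive()) {
                            return key.readableBytes();
                        }
                    }),
                    // Mirrors the cleanup lambdas above: close the Send so the buffer
                    // cannot leak when the resource closure is never subscribed.
                    keySend -> Mono.fromRunnable(keySend::close));
        }

        private SendOwnershipSketch() {
        }
    }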
- private void deleteSmallRangeWriteBatch(CappedWriteBatch writeBatch, LLRange range) + private void deleteSmallRangeWriteBatch(CappedWriteBatch writeBatch, Send rangeToReceive) throws RocksDBException { + var range = rangeToReceive.receive(); try (var readOpts = new ReadOptions(getReadOptions(null))) { readOpts.setFillCache(false); ReleasableSlice minBound; if (range.hasMin()) { - minBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.LOWER, - range.getMin().retain() - ); + minBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.LOWER, range.getMin()); } else { minBound = emptyReleasableSlice(); } try { ReleasableSlice maxBound; if (range.hasMax()) { - maxBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.UPPER, - range.getMax().retain() - ); + maxBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.UPPER, range.getMax()); } else { maxBound = emptyReleasableSlice(); } try (var rocksIterator = db.newIterator(cfh, readOpts)) { if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, range.getMin().retain()); + rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, range.getMin()); } else { rocksIterator.seekToFirst(); } rocksIterator.status(); while (rocksIterator.isValid()) { - writeBatch.delete(cfh, LLUtils.readDirectNioBuffer(alloc, rocksIterator::key)); + writeBatch.delete(cfh, LLUtils.readDirectNioBuffer(alloc, rocksIterator::key).send()); rocksIterator.next(); rocksIterator.status(); } } finally { - maxBound.release(); + maxBound.close(); } } finally { - minBound.release(); + minBound.close(); } - } finally { - range.release(); + } catch (Throwable e) { + range.close(); + throw e; } } - private void deleteSmallRangeWriteBatch(WriteBatch writeBatch, LLRange range) + private void deleteSmallRangeWriteBatch(WriteBatch writeBatch, Send rangeToReceive) throws RocksDBException { - try (var readOpts = new ReadOptions(getReadOptions(null))) { - readOpts.setFillCache(false); - ReleasableSlice minBound; - if (range.hasMin()) { - minBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.LOWER, - range.getMin().retain() - ); - } else { - minBound = emptyReleasableSlice(); - } - try { - ReleasableSlice maxBound; - if (range.hasMax()) { - maxBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.UPPER, - range.getMax().retain() - ); + try (var range = rangeToReceive.receive()) { + try (var readOpts = new ReadOptions(getReadOptions(null))) { + readOpts.setFillCache(false); + ReleasableSlice minBound; + if (range.hasMin()) { + minBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.LOWER, range.getMin()); } else { - maxBound = emptyReleasableSlice(); + minBound = emptyReleasableSlice(); } - try (var rocksIterator = db.newIterator(cfh, readOpts)) { - if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, range.getMin().retain()); + try { + ReleasableSlice maxBound; + if (range.hasMax()) { + maxBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.UPPER, + range.getMax()); } else { - rocksIterator.seekToFirst(); + maxBound = emptyReleasableSlice(); } - rocksIterator.status(); - while (rocksIterator.isValid()) { - writeBatch.delete(cfh, rocksIterator.key()); - rocksIterator.next(); + try 
(var rocksIterator = db.newIterator(cfh, readOpts)) { + if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { + rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, range.getMin()); + } else { + rocksIterator.seekToFirst(); + } rocksIterator.status(); + while (rocksIterator.isValid()) { + writeBatch.delete(cfh, rocksIterator.key()); + rocksIterator.next(); + rocksIterator.status(); + } + } finally { + maxBound.close(); } } finally { - maxBound.release(); + minBound.close(); } - } finally { - minBound.release(); } - } finally { - range.release(); } } - private static void rocksIterSeekTo(boolean allowNettyDirect, RocksIterator rocksIterator, ByteBuf buffer) { - try { - if (allowNettyDirect && buffer.isDirect()) { - ByteBuffer nioBuffer = LLUtils.toDirect(buffer); + private static void rocksIterSeekTo(boolean allowNettyDirect, RocksIterator rocksIterator, + Send bufferToReceive) { + try (var buffer = bufferToReceive.receive()) { + if (allowNettyDirect) { + ByteBuffer nioBuffer = toDirect(buffer); assert nioBuffer.isDirect(); rocksIterator.seek(nioBuffer); - } else if (buffer.hasArray() && buffer.array().length == buffer.readableBytes()) { - rocksIterator.seek(buffer.array()); } else { rocksIterator.seek(LLUtils.toArray(buffer)); } - } finally { - buffer.release(); } } - private static ReleasableSlice setIterateBound(boolean allowNettyDirect, ReadOptions readOpts, IterateBound boundType, ByteBuf buffer) { + private static ReleasableSlice setIterateBound(boolean allowNettyDirect, ReadOptions readOpts, + IterateBound boundType, Send bufferToReceive) { + var buffer = bufferToReceive.receive(); try { - Objects.requireNonNull(buffer); + requireNonNull(buffer); AbstractSlice slice; - if (allowNettyDirect && LLLocalDictionary.USE_DIRECT_BUFFER_BOUNDS && buffer.isDirect()) { - ByteBuffer nioBuffer = LLUtils.toDirect(buffer); + if (allowNettyDirect && LLLocalDictionary.USE_DIRECT_BUFFER_BOUNDS) { + ByteBuffer nioBuffer = toDirect(buffer); assert nioBuffer.isDirect(); slice = new DirectSlice(nioBuffer, buffer.readableBytes()); assert slice.size() == buffer.readableBytes(); @@ -1735,9 +1767,11 @@ public class LLLocalDictionary implements LLDictionary { } else { readOpts.setIterateUpperBound(slice); } - return new ReleasableSliceImpl(slice, buffer.retain(), nioBuffer); + return new ReleasableSliceImpl(slice, buffer, nioBuffer); } else { - slice = new Slice(Objects.requireNonNull(LLUtils.toArray(buffer))); + try (buffer) { + slice = new Slice(requireNonNull(LLUtils.toArray(buffer))); + } if (boundType == IterateBound.LOWER) { readOpts.setIterateLowerBound(slice); } else { @@ -1745,8 +1779,9 @@ public class LLLocalDictionary implements LLDictionary { } return new ReleasableSliceImpl(slice, null, null); } - } finally { - buffer.release(); + } catch (Throwable e) { + buffer.close(); + throw e; } } @@ -1756,17 +1791,18 @@ public class LLLocalDictionary implements LLDictionary { return new SimpleSliceWithoutRelease(new Slice(arr), null, arr); } - public static record SimpleSliceWithoutRelease(AbstractSlice slice, @Nullable ByteBuf byteBuf, + public static record SimpleSliceWithoutRelease(AbstractSlice slice, @Nullable Buffer byteBuf, @Nullable Object additionalData) implements ReleasableSlice {} - public static record ReleasableSliceImpl(AbstractSlice slice, @Nullable ByteBuf byteBuf, + public static record ReleasableSliceImpl(AbstractSlice slice, @Nullable Buffer byteBuf, @Nullable Object additionalData) implements ReleasableSlice { @Override - public void release() { + public void 
close() { slice.clear(); + slice.close(); if (byteBuf != null) { - byteBuf.release(); + byteBuf.close(); } } } @@ -1829,185 +1865,163 @@ public class LLLocalDictionary implements LLDictionary { } @Override - public Mono sizeRange(@Nullable LLSnapshot snapshot, Mono rangeMono, boolean fast) { + public Mono sizeRange(@Nullable LLSnapshot snapshot, Mono> rangeMono, boolean fast) { return Mono.usingWhen(rangeMono, - range -> { - if (range.isAll()) { - return this - .runOnDb(() -> fast ? fastSizeAll(snapshot) : exactSizeAll(snapshot)) - .onErrorMap(IOException::new); - } else { - return runOnDb(() -> { - try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) { - readOpts.setFillCache(false); - readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); - ReleasableSlice minBound; - if (range.hasMin()) { - minBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.LOWER, - range.getMin().retain() - ); - } else { - minBound = emptyReleasableSlice(); - } - try { - ReleasableSlice maxBound; - if (range.hasMax()) { - maxBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.UPPER, - range.getMax().retain() - ); + rangeSend -> { + try (var range = rangeSend.receive()) { + if (range.isAll()) { + return this + .runOnDb(() -> fast ? fastSizeAll(snapshot) : exactSizeAll(snapshot)) + .onErrorMap(IOException::new); + } else { + return runOnDb(() -> { + try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) { + readOpts.setFillCache(false); + readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); + ReleasableSlice minBound; + if (range.hasMin()) { + minBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.LOWER, + range.getMin()); } else { - maxBound = emptyReleasableSlice(); + minBound = emptyReleasableSlice(); } try { - if (fast) { - readOpts.setIgnoreRangeDeletions(true); - + ReleasableSlice maxBound; + if (range.hasMax()) { + maxBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.UPPER, + range.getMax()); + } else { + maxBound = emptyReleasableSlice(); } - try (var rocksIterator = db.newIterator(cfh, readOpts)) { - if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterSeekTo(databaseOptions.allowNettyDirect(), - rocksIterator, - range.getMin().retain() - ); - } else { - rocksIterator.seekToFirst(); + try { + if (fast) { + readOpts.setIgnoreRangeDeletions(true); + } - long i = 0; - rocksIterator.status(); - while (rocksIterator.isValid()) { - rocksIterator.next(); + try (var rocksIterator = db.newIterator(cfh, readOpts)) { + if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { + rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, + range.getMin()); + } else { + rocksIterator.seekToFirst(); + } + long i = 0; rocksIterator.status(); - i++; + while (rocksIterator.isValid()) { + rocksIterator.next(); + rocksIterator.status(); + i++; + } + return i; } - return i; + } finally { + maxBound.close(); } } finally { - maxBound.release(); + minBound.close(); } - } finally { - minBound.release(); } - } - }).onErrorMap(cause -> new IOException("Failed to get size of range " + range, cause)); + }).onErrorMap(cause -> new IOException("Failed to get size of range " + range, cause)); + } } }, - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } @Override - public Mono getOne(@Nullable LLSnapshot snapshot, Mono rangeMono) { + public Mono> getOne(@Nullable LLSnapshot snapshot, 
Mono> rangeMono) { return Mono.usingWhen(rangeMono, - range -> runOnDb(() -> { - try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) { - ReleasableSlice minBound; - if (range.hasMin()) { - minBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.LOWER, - range.getMin().retain() - ); - } else { - minBound = emptyReleasableSlice(); - } - try { - ReleasableSlice maxBound; - if (range.hasMax()) { - maxBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.UPPER, - range.getMax().retain() - ); + rangeSend -> runOnDb(() -> { + try (var range = rangeSend.receive()) { + try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) { + ReleasableSlice minBound; + if (range.hasMin()) { + minBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.LOWER, + range.getMin()); } else { - maxBound = emptyReleasableSlice(); + minBound = emptyReleasableSlice(); } - try (var rocksIterator = db.newIterator(cfh, readOpts)) { - if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, range.getMin().retain()); + try { + ReleasableSlice maxBound; + if (range.hasMax()) { + maxBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.UPPER, + range.getMax()); } else { - rocksIterator.seekToFirst(); + maxBound = emptyReleasableSlice(); } - rocksIterator.status(); - if (rocksIterator.isValid()) { - ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); - try { - ByteBuf value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value); - try { - return new LLEntry(key, value); - } finally { - value.release(); - } - } finally { - key.release(); + try (var rocksIterator = db.newIterator(cfh, readOpts)) { + if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { + rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, range.getMin()); + } else { + rocksIterator.seekToFirst(); } - } else { - return null; + rocksIterator.status(); + if (rocksIterator.isValid()) { + try (Buffer key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key)) { + try (Buffer value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value)) { + return LLEntry.of(key.send(), value.send()).send(); + } + } + } else { + return null; + } + } finally { + maxBound.close(); } } finally { - maxBound.release(); + minBound.close(); } - } finally { - minBound.release(); } } }), - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } @Override - public Mono getOneKey(@Nullable LLSnapshot snapshot, Mono rangeMono) { + public Mono> getOneKey(@Nullable LLSnapshot snapshot, Mono> rangeMono) { return Mono.usingWhen(rangeMono, - range -> runOnDb(() -> { - try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) { - ReleasableSlice minBound; - if (range.hasMin()) { - minBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.LOWER, - range.getMin().retain() - ); - } else { - minBound = emptyReleasableSlice(); - } - try { - ReleasableSlice maxBound; - if (range.hasMax()) { - maxBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.UPPER, - range.getMax().retain() - ); + rangeSend -> runOnDb(() -> { + try (var range = rangeSend.receive()) { + try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) { + ReleasableSlice minBound; + if (range.hasMin()) { + minBound = 
setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.LOWER, + range.getMin()); } else { - maxBound = emptyReleasableSlice(); + minBound = emptyReleasableSlice(); } - try (var rocksIterator = db.newIterator(cfh, readOpts)) { - if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, range.getMin().retain()); + try { + ReleasableSlice maxBound; + if (range.hasMax()) { + maxBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.UPPER, + range.getMax()); } else { - rocksIterator.seekToFirst(); + maxBound = emptyReleasableSlice(); } - ByteBuf key; - rocksIterator.status(); - if (rocksIterator.isValid()) { - key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); - return key; - } else { - return null; + try (var rocksIterator = db.newIterator(cfh, readOpts)) { + if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { + rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, range.getMin()); + } else { + rocksIterator.seekToFirst(); + } + rocksIterator.status(); + if (rocksIterator.isValid()) { + return LLUtils.readDirectNioBuffer(alloc, rocksIterator::key).send(); + } else { + return null; + } + } finally { + maxBound.close(); } } finally { - maxBound.release(); + minBound.close(); } - } finally { - minBound.release(); } } }), - range -> Mono.fromRunnable(range::release) + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } @@ -2120,85 +2134,81 @@ public class LLLocalDictionary implements LLDictionary { } @Override - public Mono removeOne(Mono rangeMono) { + public Mono> removeOne(Mono> rangeMono) { return Mono.usingWhen(rangeMono, - range -> runOnDb(() -> { - try (var readOpts = new ReadOptions(getReadOptions(null))) { - ReleasableSlice minBound; - if (range.hasMin()) { - minBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.LOWER, - range.getMin().retain() - ); - } else { - minBound = emptyReleasableSlice(); - } - try { - ReleasableSlice maxBound; - if (range.hasMax()) { - maxBound = setIterateBound(databaseOptions.allowNettyDirect(), - readOpts, - IterateBound.UPPER, - range.getMax().retain() - ); + rangeSend -> runOnDb(() -> { + try (var range = rangeSend.receive()) { + try (var readOpts = new ReadOptions(getReadOptions(null))) { + ReleasableSlice minBound; + if (range.hasMin()) { + minBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.LOWER, + range.getMin()); } else { - maxBound = emptyReleasableSlice(); + minBound = emptyReleasableSlice(); } - try (RocksIterator rocksIterator = db.newIterator(cfh, readOpts)) { - if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, range.getMin().retain()); + try { + ReleasableSlice maxBound; + if (range.hasMax()) { + maxBound = setIterateBound(databaseOptions.allowNettyDirect(), readOpts, IterateBound.UPPER, + range.getMax()); } else { - rocksIterator.seekToFirst(); + maxBound = emptyReleasableSlice(); } - rocksIterator.status(); - if (!rocksIterator.isValid()) { - return null; + try (RocksIterator rocksIterator = db.newIterator(cfh, readOpts)) { + if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { + rocksIterSeekTo(databaseOptions.allowNettyDirect(), rocksIterator, range.getMin()); + } else { + rocksIterator.seekToFirst(); + } + rocksIterator.status(); + if (!rocksIterator.isValid()) { + return null; + } + try (Buffer key = 
LLUtils.readDirectNioBuffer(alloc, rocksIterator::key)) { + try (Buffer value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value)) { + dbDelete(cfh, null, key.copy().send()); + return LLEntry.of(key.send(), value.send()).send(); + } + } + } finally { + maxBound.close(); } - ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); - ByteBuf value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value); - dbDelete(cfh, null, key); - return new LLEntry(key, value); } finally { - maxBound.release(); + minBound.close(); } - } finally { - minBound.release(); } } - }).onErrorMap(cause -> new IOException("Failed to delete " + range.toString(), cause)), - range -> Mono.fromRunnable(range::release) + }).onErrorMap(cause -> new IOException("Failed to delete", cause)), + rangeSend -> Mono.fromRunnable(rangeSend::close) ); } @NotNull public static Tuple3 getRocksIterator(boolean allowNettyDirect, ReadOptions readOptions, - LLRange range, + Send rangeToReceive, RocksDB db, ColumnFamilyHandle cfh) { - try { + try (var range = rangeToReceive.receive()) { ReleasableSlice sliceMin; ReleasableSlice sliceMax; if (range.hasMin()) { - sliceMin = setIterateBound(allowNettyDirect, readOptions, IterateBound.LOWER, range.getMin().retain()); + sliceMin = setIterateBound(allowNettyDirect, readOptions, IterateBound.LOWER, range.getMin()); } else { sliceMin = emptyReleasableSlice(); } if (range.hasMax()) { - sliceMax = setIterateBound(allowNettyDirect, readOptions, IterateBound.UPPER, range.getMax().retain()); + sliceMax = setIterateBound(allowNettyDirect, readOptions, IterateBound.UPPER, range.getMax()); } else { sliceMax = emptyReleasableSlice(); } var rocksIterator = db.newIterator(cfh, readOptions); if (!PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterSeekTo(allowNettyDirect, rocksIterator, range.getMin().retain()); + rocksIterSeekTo(allowNettyDirect, rocksIterator, range.getMin()); } else { rocksIterator.seekToFirst(); } return Tuples.of(rocksIterator, sliceMin, sliceMax); - } finally { - range.release(); } } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java index 62eacca..ee7416a 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java @@ -1,7 +1,8 @@ package it.cavallium.dbengine.database.disk; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.database.LLEntry; import it.cavallium.dbengine.database.LLRange; import java.util.Map; @@ -10,12 +11,12 @@ import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; -public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksIterator { +public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksIterator> { public LLLocalEntryReactiveRocksIterator(RocksDB db, - ByteBufAllocator alloc, + BufferAllocator alloc, ColumnFamilyHandle cfh, - LLRange range, + Send range, boolean allowNettyDirect, ReadOptions readOptions, String debugName) { @@ -23,7 +24,7 @@ public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksItera } @Override - public LLEntry getEntry(ByteBuf key, ByteBuf value) { - return new LLEntry(key, value); 
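This getEntry hunk swaps the Netty 4 contract, returning a retained ByteBuf pair that the caller must release, for the Netty 5 one: consume two Send handles and hand back a Send. Isolated as a sketch, with LLEntry.of(...) and send() exactly as this patch uses them:

    import io.netty.buffer.api.Buffer;
    import io.netty.buffer.api.Send;
    import it.cavallium.dbengine.database.LLEntry;

    final class EntryFactorySketch {

        // Both Sends are consumed by the factory, and the entry is immediately
        // re-detached with send(): ownership passes straight through, so the
        // caller of getEntry has nothing left to release.
        static Send<LLEntry> entryOf(Send<Buffer> key, Send<Buffer> value) {
            return LLEntry.of(key, value).send();
        }

        private EntryFactorySketch() {
        }
    }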
+ public Send getEntry(Send key, Send value) { + return LLEntry.of(key, value).send(); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java index c86a92c..1b597f5 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java @@ -1,7 +1,8 @@ package it.cavallium.dbengine.database.disk; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.database.LLEntry; import it.cavallium.dbengine.database.LLRange; import java.util.Map; @@ -11,11 +12,11 @@ import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; public class LLLocalGroupedEntryReactiveRocksIterator extends - LLLocalGroupedReactiveRocksIterator { + LLLocalGroupedReactiveRocksIterator> { - public LLLocalGroupedEntryReactiveRocksIterator(RocksDB db, ByteBufAllocator alloc, ColumnFamilyHandle cfh, + public LLLocalGroupedEntryReactiveRocksIterator(RocksDB db, BufferAllocator alloc, ColumnFamilyHandle cfh, int prefixLength, - LLRange range, + Send range, boolean allowNettyDirect, ReadOptions readOptions, String debugName) { @@ -23,7 +24,7 @@ public class LLLocalGroupedEntryReactiveRocksIterator extends } @Override - public LLEntry getEntry(ByteBuf key, ByteBuf value) { - return new LLEntry(key, value); + public Send getEntry(Send key, Send value) { + return LLEntry.of(key, value).send(); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java index 17111d9..d04d9e0 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java @@ -1,19 +1,20 @@ package it.cavallium.dbengine.database.disk; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.database.LLRange; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; -public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator { +public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator> { public LLLocalGroupedKeyReactiveRocksIterator(RocksDB db, - ByteBufAllocator alloc, + BufferAllocator alloc, ColumnFamilyHandle cfh, int prefixLength, - LLRange range, + Send range, boolean allowNettyDirect, ReadOptions readOptions, String debugName) { @@ -21,9 +22,9 @@ public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReacti } @Override - public ByteBuf getEntry(ByteBuf key, ByteBuf value) { + public Send getEntry(Send key, Send value) { if (value != null) { - value.release(); + value.close(); } return key; } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java index 4d5da06..cf76373 100644 --- 
a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java @@ -1,8 +1,9 @@ package it.cavallium.dbengine.database.disk; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferUtil; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.database.LLRange; import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.collections.DatabaseMapDictionaryDeep; @@ -20,7 +21,7 @@ import static io.netty.buffer.Unpooled.*; public abstract class LLLocalGroupedReactiveRocksIterator { private final RocksDB db; - private final ByteBufAllocator alloc; + private final BufferAllocator alloc; private final ColumnFamilyHandle cfh; private final int prefixLength; private final LLRange range; @@ -29,9 +30,9 @@ public abstract class LLLocalGroupedReactiveRocksIterator { private final boolean canFillCache; private final boolean readValues; - public LLLocalGroupedReactiveRocksIterator(RocksDB db, ByteBufAllocator alloc, ColumnFamilyHandle cfh, + public LLLocalGroupedReactiveRocksIterator(RocksDB db, BufferAllocator alloc, ColumnFamilyHandle cfh, int prefixLength, - LLRange range, + Send range, boolean allowNettyDirect, ReadOptions readOptions, boolean canFillCache, @@ -59,18 +60,18 @@ public abstract class LLLocalGroupedReactiveRocksIterator { try { var rocksIterator = tuple.getT1(); ObjectArrayList values = new ObjectArrayList<>(); - ByteBuf firstGroupKey = null; + Buffer firstGroupKey = null; try { rocksIterator.status(); while (rocksIterator.isValid()) { - ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); + Buffer key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); try { if (firstGroupKey == null) { firstGroupKey = key.retain(); } else if (!ByteBufUtil.equals(firstGroupKey, firstGroupKey.readerIndex(), key, key.readerIndex(), prefixLength)) { break; } - ByteBuf value; + Buffer value; if (readValues) { value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value); } else { @@ -112,7 +113,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator { }); } - public abstract T getEntry(ByteBuf key, ByteBuf value); + public abstract T getEntry(Send key, Send value); public void release() { range.release(); diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java index 9e667c7..5b7c2e4 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java @@ -1,8 +1,9 @@ package it.cavallium.dbengine.database.disk; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferUtil; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.database.LLRange; import it.cavallium.dbengine.database.LLUtils; import java.util.Arrays; @@ -17,7 +18,7 @@ import static io.netty.buffer.Unpooled.*; public class LLLocalKeyPrefixReactiveRocksIterator { private final RocksDB db; - private final ByteBufAllocator alloc; + private 
final BufferAllocator alloc; private final ColumnFamilyHandle cfh; private final int prefixLength; private final LLRange range; @@ -26,9 +27,9 @@ public class LLLocalKeyPrefixReactiveRocksIterator { private final boolean canFillCache; private final String debugName; - public LLLocalKeyPrefixReactiveRocksIterator(RocksDB db, ByteBufAllocator alloc, ColumnFamilyHandle cfh, + public LLLocalKeyPrefixReactiveRocksIterator(RocksDB db, BufferAllocator alloc, ColumnFamilyHandle cfh, int prefixLength, - LLRange range, + Send range, boolean allowNettyDirect, ReadOptions readOptions, boolean canFillCache, @@ -45,7 +46,7 @@ public class LLLocalKeyPrefixReactiveRocksIterator { } - public Flux flux() { + public Flux> flux() { return Flux .generate(() -> { var readOptions = new ReadOptions(this.readOptions); @@ -59,10 +60,10 @@ public class LLLocalKeyPrefixReactiveRocksIterator { try { var rocksIterator = tuple.getT1(); rocksIterator.status(); - ByteBuf firstGroupKey = null; + Buffer firstGroupKey = null; try { while (rocksIterator.isValid()) { - ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); + Buffer key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); try { if (firstGroupKey == null) { firstGroupKey = key.retain(); diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java index 8a6fac4..96b2c15 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java @@ -1,18 +1,19 @@ package it.cavallium.dbengine.database.disk; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Send; import it.cavallium.dbengine.database.LLRange; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; -public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterator { +public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterator> { public LLLocalKeyReactiveRocksIterator(RocksDB db, - ByteBufAllocator alloc, + BufferAllocator alloc, ColumnFamilyHandle cfh, - LLRange range, + Send range, boolean allowNettyDirect, ReadOptions readOptions, String debugName) { @@ -20,9 +21,9 @@ public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterato } @Override - public ByteBuf getEntry(ByteBuf key, ByteBuf value) { + public Send getEntry(Send key, Send value) { if (value != null) { - value.release(); + value.close(); } return key; } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java index c4619b3..0745e79 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.disk; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.BufferAllocator; import it.cavallium.dbengine.database.Column; import it.cavallium.dbengine.client.DatabaseOptions; import it.cavallium.dbengine.database.LLKeyValueDatabase; @@ -65,7 +65,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase { private static final ColumnFamilyDescriptor 
DEFAULT_COLUMN_FAMILY = new ColumnFamilyDescriptor( RocksDB.DEFAULT_COLUMN_FAMILY); - private final ByteBufAllocator allocator; + private final BufferAllocator allocator; private final Scheduler dbScheduler; // Configurations @@ -81,7 +81,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase { private final AtomicLong nextSnapshotNumbers = new AtomicLong(1); @SuppressWarnings("SwitchStatementWithTooFewBranches") - public LLLocalKeyValueDatabase(ByteBufAllocator allocator, + public LLLocalKeyValueDatabase(BufferAllocator allocator, String name, @Nullable Path path, List columns, @@ -497,7 +497,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase { } @Override - public ByteBufAllocator getAllocator() { + public BufferAllocator getAllocator() { return allocator; } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java index e0a7529..b3dd45d 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java @@ -2,8 +2,9 @@ package it.cavallium.dbengine.database.disk; import static it.cavallium.dbengine.database.disk.LLLocalDictionary.getRocksIterator; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.Send; import io.netty.util.IllegalReferenceCountException; import it.cavallium.dbengine.database.LLRange; import it.cavallium.dbengine.database.LLUtils; @@ -27,7 +28,7 @@ public abstract class LLLocalReactiveRocksIterator { private final AtomicBoolean released = new AtomicBoolean(false); private final RocksDB db; - private final ByteBufAllocator alloc; + private final BufferAllocator alloc; private final ColumnFamilyHandle cfh; private final LLRange range; private final boolean allowNettyDirect; @@ -36,9 +37,9 @@ public abstract class LLLocalReactiveRocksIterator { private final String debugName; public LLLocalReactiveRocksIterator(RocksDB db, - ByteBufAllocator alloc, + BufferAllocator alloc, ColumnFamilyHandle cfh, - LLRange range, + Send range, boolean allowNettyDirect, ReadOptions readOptions, boolean readValues, @@ -46,7 +47,7 @@ public abstract class LLLocalReactiveRocksIterator { this.db = db; this.alloc = alloc; this.cfh = cfh; - this.range = range; + this.range = range.receive(); this.allowNettyDirect = allowNettyDirect; this.readOptions = readOptions; this.readValues = readValues; @@ -55,59 +56,53 @@ public abstract class LLLocalReactiveRocksIterator { public Flux flux() { return Flux - .>generate(() -> { + .generate(() -> { var readOptions = new ReadOptions(this.readOptions); if (!range.hasMin() || !range.hasMax()) { readOptions.setReadaheadSize(32 * 1024); // 32KiB readOptions.setFillCache(false); } - return getRocksIterator(allowNettyDirect, readOptions, range.retain(), db, cfh); + return getRocksIterator(allowNettyDirect, readOptions, range.copy().send(), db, cfh); }, (tuple, sink) -> { - range.retain(); try { var rocksIterator = tuple.getT1(); rocksIterator.status(); if (rocksIterator.isValid()) { - ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); - try { - ByteBuf value; + try (Buffer key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key)) { + Buffer value; if (readValues) { value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value); } 
else { - value = alloc.buffer(0); + value = alloc.allocate(0); } try { rocksIterator.next(); rocksIterator.status(); - sink.next(getEntry(key.retain(), value.retain())); + sink.next(getEntry(key.send(), value.send())); } finally { - value.release(); + value.close(); } - } finally { - key.release(); } } else { sink.complete(); } } catch (RocksDBException ex) { sink.error(ex); - } finally { - range.release(); } return tuple; }, tuple -> { var rocksIterator = tuple.getT1(); rocksIterator.close(); - tuple.getT2().release(); - tuple.getT3().release(); + tuple.getT2().close(); + tuple.getT3().close(); }); } - public abstract T getEntry(ByteBuf key, ByteBuf value); + public abstract T getEntry(Send key, Send value); public void release() { if (released.compareAndSet(false, true)) { - range.release(); + range.close(); } else { throw new IllegalReferenceCountException(0, -1); } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/ReleasableSlice.java b/src/main/java/it/cavallium/dbengine/database/disk/ReleasableSlice.java index 17975f0..f4fad27 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/ReleasableSlice.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/ReleasableSlice.java @@ -1,17 +1,20 @@ package it.cavallium.dbengine.database.disk; -import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Resource; +import it.cavallium.dbengine.database.SafeCloseable; import org.rocksdb.AbstractSlice; -public interface ReleasableSlice { +public interface ReleasableSlice extends SafeCloseable { - default void release() { + @Override + default void close() { } AbstractSlice slice(); - ByteBuf byteBuf(); + Buffer byteBuf(); Object additionalData(); } diff --git a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDatabaseConnection.java b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDatabaseConnection.java index 049e83d..8c799f8 100644 --- a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDatabaseConnection.java +++ b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDatabaseConnection.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.memory; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.BufferAllocator; import it.cavallium.dbengine.client.DatabaseOptions; import it.cavallium.dbengine.client.IndicizerAnalyzers; import it.cavallium.dbengine.client.IndicizerSimilarities; diff --git a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDictionary.java b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDictionary.java index 8614e36..705e53f 100644 --- a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDictionary.java @@ -1,7 +1,7 @@ package it.cavallium.dbengine.database.memory; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.database.Delta; import it.cavallium.dbengine.database.ExtraKeyOperationResult; @@ -81,7 +81,7 @@ public class LLMemoryDictionary implements LLDictionary { } } - private Mono transformResult(Mono result, LLDictionaryResultType resultType) { + private Mono transformResult(Mono result, LLDictionaryResultType resultType) { if (resultType == LLDictionaryResultType.PREVIOUS_VALUE) { // Don't retain the result because it has been removed from the skip list 
return result.map(this::kk); @@ -95,11 +95,11 @@ public class LLMemoryDictionary implements LLDictionary { } } - private ByteList k(ByteBuf buf) { + private ByteList k(Buffer buf) { return new BinaryLexicographicList(LLUtils.toArray(buf)); } - private ByteBuf kk(ByteList bytesList) { + private Buffer kk(ByteList bytesList) { var buffer = getAllocator().buffer(bytesList.size()); buffer.writeBytes(bytesList.toByteArray()); return buffer; @@ -139,7 +139,7 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Mono get(@Nullable LLSnapshot snapshot, Mono keyMono, boolean existsAlmostCertainly) { + public Mono get(@Nullable LLSnapshot snapshot, Mono keyMono, boolean existsAlmostCertainly) { return Mono.usingWhen(keyMono, key -> Mono .fromCallable(() -> snapshots.get(resolveSnapshot(snapshot)).get(k(key))) @@ -150,7 +150,7 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Mono put(Mono keyMono, Mono valueMono, LLDictionaryResultType resultType) { + public Mono put(Mono keyMono, Mono valueMono, LLDictionaryResultType resultType) { return Mono.usingWhen(keyMono, key -> Mono.usingWhen(valueMono, value -> Mono @@ -169,17 +169,17 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Mono> updateAndGetDelta(Mono keyMono, - SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater, + public Mono> updateAndGetDelta(Mono keyMono, + SerializationFunction<@Nullable Buffer, @Nullable Buffer> updater, boolean existsAlmostCertainly) { return Mono.usingWhen(keyMono, key -> Mono.fromCallable(() -> { - AtomicReference oldRef = new AtomicReference<>(null); + AtomicReference oldRef = new AtomicReference<>(null); var newValue = mainDb.compute(k(key), (_unused, old) -> { if (old != null) { oldRef.set(kk(old)); } - ByteBuf v = null; + Buffer v = null; try { v = updater.apply(old != null ? 
kk(old) : null); } catch (SerializationException e) { @@ -205,7 +205,7 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Mono remove(Mono keyMono, LLDictionaryResultType resultType) { + public Mono remove(Mono keyMono, LLDictionaryResultType resultType) { return Mono.usingWhen(keyMono, key -> Mono .fromCallable(() -> mainDb.remove(k(key))) @@ -228,8 +228,8 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Flux>> getMulti(@Nullable LLSnapshot snapshot, - Flux> keys, + public Flux>> getMulti(@Nullable LLSnapshot snapshot, + Flux> keys, boolean existsAlmostCertainly) { return keys .flatMapSequential(key -> { @@ -267,8 +267,8 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Flux> updateMulti(Flux> entries, - BiSerializationFunction updateFunction) { + public Flux> updateMulti(Flux> entries, + BiSerializationFunction updateFunction) { return Flux.error(new UnsupportedOperationException("Not implemented")); } @@ -304,7 +304,7 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Flux getRangeKeys(@Nullable LLSnapshot snapshot, Mono rangeMono) { + public Flux getRangeKeys(@Nullable LLSnapshot snapshot, Mono rangeMono) { return Flux.usingWhen(rangeMono, range -> { if (range.isSingle()) { @@ -325,7 +325,7 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Flux> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, + public Flux> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, Mono rangeMono, int prefixLength) { return getRangeKeys(snapshot, rangeMono) @@ -333,7 +333,7 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Flux getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono rangeMono, int prefixLength) { + public Flux getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono rangeMono, int prefixLength) { return getRangeKeys(snapshot, rangeMono) .distinctUntilChanged(k -> k.slice(k.readerIndex(), prefixLength), (a, b) -> { if (LLUtils.equals(a, b)) { @@ -376,7 +376,7 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Mono getOneKey(@Nullable LLSnapshot snapshot, Mono rangeMono) { + public Mono getOneKey(@Nullable LLSnapshot snapshot, Mono rangeMono) { return Mono.error(new UnsupportedOperationException("Not implemented")); } diff --git a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryKeyValueDatabase.java b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryKeyValueDatabase.java index 7c02f0f..783ebdf 100644 --- a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryKeyValueDatabase.java +++ b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryKeyValueDatabase.java @@ -1,7 +1,7 @@ package it.cavallium.dbengine.database.memory; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; import it.cavallium.dbengine.client.DatabaseOptions; import it.cavallium.dbengine.database.Column; import it.cavallium.dbengine.database.LLDictionary; diff --git a/src/main/java/it/cavallium/dbengine/database/memory/LLMemorySingleton.java b/src/main/java/it/cavallium/dbengine/database/memory/LLMemorySingleton.java index 462a3bb..60fd208 100644 --- a/src/main/java/it/cavallium/dbengine/database/memory/LLMemorySingleton.java +++ b/src/main/java/it/cavallium/dbengine/database/memory/LLMemorySingleton.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database.memory; 
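The LLMemorySingleton hunk below changes the declared types to Buffer but still builds the value with Unpooled.wrappedBuffer and maps it with Buffer::retain; Unpooled belongs to the old ByteBuf API, and the new io.netty.buffer.api.Buffer has no retain(), so this is another of the spots the unfinished refactoring has not reached yet. A sketch of what the Netty-5-shaped equivalent could look like, assuming a BufferAllocator is reachable (LLMemoryDictionary exposes getAllocator()) and using only allocate(), writeBytes() and send(), all of which appear elsewhere in this patch:

    import io.netty.buffer.api.Buffer;
    import io.netty.buffer.api.BufferAllocator;
    import io.netty.buffer.api.Send;
    import reactor.core.publisher.Mono;

    final class SingletonNameMonoSketch {

        // Allocate and fill a fresh Buffer per subscription instead of retaining a
        // shared wrapped buffer; every subscriber receives its own Send to consume.
        static Mono<Send<Buffer>> singletonNameMono(BufferAllocator alloc, byte[] singletonName) {
            return Mono.fromCallable(() -> {
                Buffer buf = alloc.allocate(singletonName.length);
                buf.writeBytes(singletonName);
                return buf.send();
            });
        }

        private SingletonNameMonoSketch() {
        }
    }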
-import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.Buffer; import io.netty.buffer.Unpooled; import it.cavallium.dbengine.database.LLDictionaryResultType; import it.cavallium.dbengine.database.LLSingleton; @@ -13,13 +13,13 @@ public class LLMemorySingleton implements LLSingleton { private final LLMemoryDictionary dict; private final byte[] singletonName; - private final Mono singletonNameBufMono; + private final Mono singletonNameBufMono; public LLMemorySingleton(LLMemoryDictionary dict, byte[] singletonName) { this.dict = dict; this.singletonName = singletonName; - ByteBuf singletonNameBuf = Unpooled.wrappedBuffer(singletonName); - this.singletonNameBufMono = Mono.just(singletonNameBuf).map(ByteBuf::retain); + Buffer singletonNameBuf = Unpooled.wrappedBuffer(singletonName); + this.singletonNameBufMono = Mono.just(singletonNameBuf).map(Buffer::retain); } @Override @@ -42,8 +42,8 @@ public class LLMemorySingleton implements LLSingleton { @Override public Mono set(byte[] value) { - var bbKey = Mono.just(Unpooled.wrappedBuffer(singletonName)).map(ByteBuf::retain); - var bbVal = Mono.just(Unpooled.wrappedBuffer(value)).map(ByteBuf::retain); + var bbKey = Mono.just(Unpooled.wrappedBuffer(singletonName)).map(Buffer::retain); + var bbVal = Mono.just(Unpooled.wrappedBuffer(value)).map(Buffer::retain); return dict .put(bbKey, bbVal, LLDictionaryResultType.VOID) .then(); diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/Codec.java b/src/main/java/it/cavallium/dbengine/database/serialization/Codec.java index 46119e8..cd6ab98 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/Codec.java +++ b/src/main/java/it/cavallium/dbengine/database/serialization/Codec.java @@ -1,7 +1,7 @@ package it.cavallium.dbengine.database.serialization; -import io.netty.buffer.ByteBufInputStream; -import io.netty.buffer.ByteBufOutputStream; +import io.netty.buffer.api.BufferInputStream; +import io.netty.buffer.api.BufferOutputStream; import java.io.IOException; import org.jetbrains.annotations.NotNull; diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java b/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java index db5476d..e123761 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java @@ -1,9 +1,9 @@ package it.cavallium.dbengine.database.serialization; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.ByteBufInputStream; -import io.netty.buffer.ByteBufOutputStream; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferInputStream; +import io.netty.buffer.api.BufferOutputStream; import io.netty.buffer.PooledByteBufAllocator; import io.netty.buffer.Unpooled; import java.io.IOError; @@ -11,7 +11,7 @@ import java.io.IOException; import org.jetbrains.annotations.NotNull; import org.warp.commonutils.error.IndexOutOfBoundsException; -public class CodecSerializer implements Serializer { +public class CodecSerializer implements Serializer { private final ByteBufAllocator allocator; private final Codecs deserializationCodecs; @@ -40,7 +40,7 @@ public class CodecSerializer implements Serializer { } @Override - public @NotNull A deserialize(@NotNull ByteBuf serialized) { + public @NotNull A deserialize(@NotNull Buffer serialized) { try (var is = new ByteBufInputStream(serialized)) { int 
diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java b/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java
index db5476d..e123761 100644
--- a/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java
+++ b/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java
@@ -1,9 +1,9 @@
 package it.cavallium.dbengine.database.serialization;
 
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufAllocator;
-import io.netty.buffer.ByteBufInputStream;
-import io.netty.buffer.ByteBufOutputStream;
+import io.netty.buffer.api.Buffer;
+import io.netty.buffer.api.BufferAllocator;
+import io.netty.buffer.api.BufferInputStream;
+import io.netty.buffer.api.BufferOutputStream;
 import io.netty.buffer.PooledByteBufAllocator;
 import io.netty.buffer.Unpooled;
 import java.io.IOError;
@@ -11,7 +11,7 @@ import java.io.IOException;
 import org.jetbrains.annotations.NotNull;
 import org.warp.commonutils.error.IndexOutOfBoundsException;
 
-public class CodecSerializer<A> implements Serializer<A, ByteBuf> {
+public class CodecSerializer<A> implements Serializer<A, Buffer> {
 
 	private final ByteBufAllocator allocator;
 	private final Codecs<A> deserializationCodecs;
@@ -40,7 +40,7 @@ public class CodecSerializer<A> implements Serializer<A, ByteBuf> {
 	}
 
 	@Override
-	public @NotNull A deserialize(@NotNull ByteBuf serialized) {
+	public @NotNull A deserialize(@NotNull Buffer serialized) {
 		try (var is = new ByteBufInputStream(serialized)) {
 			int codecId;
 			if (microCodecs) {
@@ -59,8 +59,8 @@ public class CodecSerializer<A> implements Serializer<A, ByteBuf> {
 	}
 
 	@Override
-	public @NotNull ByteBuf serialize(@NotNull A deserialized) {
-		ByteBuf buf = allocator.buffer();
+	public @NotNull Buffer serialize(@NotNull A deserialized) {
+		Buffer buf = allocator.buffer();
 		try (var os = new ByteBufOutputStream(buf)) {
 			if (microCodecs) {
 				os.writeByte(serializationCodecId);
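The Serializer rewrite that follows swaps ByteBuf for Send<Buffer> on both sides of the interface. A Send is a one-shot ownership handle: receive() may be called exactly once and yields a Buffer the receiver must close, which is what the try-with-resources blocks in the new code do. A usage sketch against the utf8 serializer defined below (the wrapper class is hypothetical):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;

final class SendRoundTrip {

	// Serializes a String and immediately deserializes it again. Each
	// Send<Buffer> is consumed exactly once; the deserializer's
	// try-with-resources closes the received Buffer.
	static String roundTrip(BufferAllocator allocator, String value) throws SerializationException {
		Serializer<String, Send<Buffer>> utf8 = Serializer.utf8(allocator);
		Send<Buffer> wire = utf8.serialize(value); // buffer ownership moves into `wire`
		return utf8.deserialize(wire);             // `wire` is consumed here
	}
}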
diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java b/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java
index c6323a4..ef9aeb0 100644
--- a/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java
+++ b/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java
@@ -1,9 +1,9 @@
 package it.cavallium.dbengine.database.serialization;
 
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufAllocator;
-import io.netty.buffer.ByteBufUtil;
-import io.netty.buffer.PooledByteBufAllocator;
+import io.netty.buffer.api.Buffer;
+import io.netty.buffer.api.BufferAllocator;
+import io.netty.buffer.api.Send;
+import it.cavallium.dbengine.database.LLUtils;
 import java.nio.charset.StandardCharsets;
 import org.jetbrains.annotations.NotNull;
 
@@ -13,52 +13,41 @@ public interface Serializer<A, B> {
 
 	@NotNull B serialize(@NotNull A deserialized) throws SerializationException;
 
-	Serializer<ByteBuf, ByteBuf> NOOP_SERIALIZER = new Serializer<>() {
+	Serializer<Send<Buffer>, Send<Buffer>> NOOP_SERIALIZER = new Serializer<>() {
 
 		@Override
-		public @NotNull ByteBuf deserialize(@NotNull ByteBuf serialized) {
-			try {
-				return serialized.retainedSlice();
-			} finally {
-				serialized.release();
-			}
+		public @NotNull Send<Buffer> deserialize(@NotNull Send<Buffer> serialized) {
+			return serialized;
 		}
 
 		@Override
-		public @NotNull ByteBuf serialize(@NotNull ByteBuf deserialized) {
-			try {
-				return deserialized.retainedSlice();
-			} finally {
-				deserialized.release();
-			}
+		public @NotNull Send<Buffer> serialize(@NotNull Send<Buffer> deserialized) {
+			return deserialized;
 		}
 	};
 
-	static Serializer<ByteBuf, ByteBuf> noop() {
+	static Serializer<Send<Buffer>, Send<Buffer>> noop() {
 		return NOOP_SERIALIZER;
 	}
 
-	static Serializer<String, ByteBuf> utf8(ByteBufAllocator allocator) {
+	static Serializer<String, Send<Buffer>> utf8(BufferAllocator allocator) {
 		return new Serializer<>() {
 
 			@Override
-			public @NotNull String deserialize(@NotNull ByteBuf serialized) {
-				try {
-					var length = serialized.readInt();
-					var result = serialized.toString(serialized.readerIndex(), length, StandardCharsets.UTF_8);
-					serialized.readerIndex(serialized.readerIndex() + length);
-					return result;
-				} finally {
-					serialized.release();
+			public @NotNull String deserialize(@NotNull Send<Buffer> serializedToReceive) {
+				try (Buffer serialized = serializedToReceive.receive()) {
+					int length = serialized.readInt();
+					return LLUtils.deserializeString(serialized.send(), serialized.readerOffset(), length, StandardCharsets.UTF_8);
 				}
 			}
 
 			@Override
-			public @NotNull ByteBuf serialize(@NotNull String deserialized) {
+			public @NotNull Send<Buffer> serialize(@NotNull String deserialized) {
 				// UTF-8 uses max. 3 bytes per char, so calculate the worst case.
-				int length = ByteBufUtil.utf8Bytes(deserialized);
-				ByteBuf buf = allocator.buffer(Integer.BYTES + length);
-				buf.writeInt(length);
-				ByteBufUtil.writeUtf8(buf, deserialized);
-				return buf;
+				int length = LLUtils.utf8MaxBytes(deserialized);
+				try (Buffer buf = allocator.allocate(Integer.BYTES + length)) {
+					buf.writeInt(length);
+					LLUtils.writeString(buf, deserialized, StandardCharsets.UTF_8);
+					return buf.send();
+				}
 			}
 		};
 	}
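One behavioural change worth flagging in the utf8 serializer above: the old code prefixed the exact encoded size (ByteBufUtil.utf8Bytes), while the new code prefixes the worst-case estimate (LLUtils.utf8MaxBytes) even though LLUtils.writeString presumably writes only the actual bytes. Unless LLUtils.deserializeString tolerates a header larger than the payload, the prefix and payload can disagree for most strings. A sketch of a variant that records the exact count (the helper class is hypothetical):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import java.nio.charset.StandardCharsets;

final class Utf8Exact {

	// Prefixes the exact encoded byte count, so the length header always
	// matches the payload, including for multi-byte characters.
	static Send<Buffer> serialize(BufferAllocator allocator, String s) {
		byte[] bytes = s.getBytes(StandardCharsets.UTF_8);
		try (Buffer buf = allocator.allocate(Integer.BYTES + bytes.length)) {
			buf.writeInt(bytes.length);
			buf.writeBytes(bytes); // writeBytes(byte[]) assumed available on the snapshot API
			return buf.send();     // same send-out-of-try pattern the patch uses
		}
	}
}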
diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java b/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java
index f1db643..14b56cb 100644
--- a/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java
+++ b/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java
@@ -1,12 +1,9 @@
 package it.cavallium.dbengine.database.serialization;
 
-import com.google.common.primitives.Ints;
-import com.google.common.primitives.Longs;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufAllocator;
-import io.netty.buffer.ByteBufUtil;
-import io.netty.buffer.PooledByteBufAllocator;
-import java.io.NotSerializableException;
+import io.netty.buffer.api.Buffer;
+import io.netty.buffer.api.BufferAllocator;
+import io.netty.buffer.api.Send;
+import it.cavallium.dbengine.database.LLUtils;
 import java.nio.charset.StandardCharsets;
 import org.jetbrains.annotations.NotNull;
 
@@ -15,31 +12,30 @@ public interface SerializerFixedBinaryLength<A, B> extends Serializer<A, B> {
 	int getSerializedBinaryLength();
 
-	static SerializerFixedBinaryLength<ByteBuf, ByteBuf> noop(int length) {
+	static SerializerFixedBinaryLength<Send<Buffer>, Send<Buffer>> noop(int length) {
 		return new SerializerFixedBinaryLength<>() {
 
 			@Override
-			public @NotNull ByteBuf deserialize(@NotNull ByteBuf serialized) {
-				try {
-					if (serialized.readableBytes() != getSerializedBinaryLength()) {
+			public @NotNull Send<Buffer> deserialize(@NotNull Send<Buffer> serialized) {
+				try (var buf = serialized.receive()) {
+					if (buf.readableBytes() != getSerializedBinaryLength()) {
 						throw new IllegalArgumentException(
 								"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with "
-										+ serialized.readableBytes() + " bytes instead");
+										+ buf.readableBytes() + " bytes instead");
 					}
-					return serialized.retain();
-				} finally {
-					serialized.release();
+					return buf.send();
 				}
 			}
 
 			@Override
-			public @NotNull ByteBuf serialize(@NotNull ByteBuf deserialized) {
-				ByteBuf buf = deserialized.retain();
-				if (buf.readableBytes() != getSerializedBinaryLength()) {
-					throw new IllegalArgumentException(
-							"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to serialize an element with "
-									+ buf.readableBytes() + " bytes instead");
+			public @NotNull Send<Buffer> serialize(@NotNull Send<Buffer> deserialized) {
+				try (Buffer buf = deserialized.receive()) {
+					if (buf.readableBytes() != getSerializedBinaryLength()) {
+						throw new IllegalArgumentException(
+								"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to serialize an element with "
+										+ buf.readableBytes() + " bytes instead");
+					}
+					return buf.send();
 				}
-				return buf;
 			}
 
 			@Override
@@ -49,38 +45,32 @@ public interface SerializerFixedBinaryLength<A, B> extends Serializer<A, B> {
 		};
 	}
 
-	static SerializerFixedBinaryLength<String, ByteBuf> utf8(ByteBufAllocator allocator, int length) {
+	static SerializerFixedBinaryLength<String, Send<Buffer>> utf8(BufferAllocator allocator, int length) {
 		return new SerializerFixedBinaryLength<>() {
 
 			@Override
-			public @NotNull String deserialize(@NotNull ByteBuf serialized) throws SerializationException {
-				try {
+			public @NotNull String deserialize(@NotNull Send<Buffer> serializedToReceive) throws SerializationException {
+				try (var serialized = serializedToReceive.receive()) {
 					if (serialized.readableBytes() != getSerializedBinaryLength()) {
 						throw new SerializationException(
 								"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with "
 										+ serialized.readableBytes() + " bytes instead");
 					}
-					var result = serialized.toString(StandardCharsets.UTF_8);
-					serialized.readerIndex(serialized.writerIndex());
-					return result;
-				} finally {
-					serialized.release();
+					var readerOffset = serialized.readerOffset();
+					return LLUtils.deserializeString(serialized.send(), readerOffset, length, StandardCharsets.UTF_8);
 				}
 			}
 
 			@Override
-			public @NotNull ByteBuf serialize(@NotNull String deserialized) throws SerializationException {
+			public @NotNull Send<Buffer> serialize(@NotNull String deserialized) throws SerializationException {
 				// UTF-8 uses max. 3 bytes per char, so calculate the worst case.
-				ByteBuf buf = allocator.buffer(ByteBufUtil.utf8MaxBytes(deserialized));
-				try {
-					ByteBufUtil.writeUtf8(buf, deserialized);
+				try (Buffer buf = allocator.allocate(LLUtils.utf8MaxBytes(deserialized))) {
+					LLUtils.writeString(buf, deserialized, StandardCharsets.UTF_8);
 					if (buf.readableBytes() != getSerializedBinaryLength()) {
 						throw new SerializationException("Fixed serializer with " + getSerializedBinaryLength()
 								+ " bytes has tried to serialize an element with " + buf.readableBytes() + " bytes instead");
 					}
-					return buf.retain();
-				} finally {
-					buf.release();
+					return buf.send();
 				}
 			}
 
@@ -91,26 +81,25 @@ public interface SerializerFixedBinaryLength<A, B> extends Serializer<A, B> {
 		};
 	}
 
-	static SerializerFixedBinaryLength<Integer, ByteBuf> intSerializer(ByteBufAllocator allocator) {
+	static SerializerFixedBinaryLength<Integer, Send<Buffer>> intSerializer(BufferAllocator allocator) {
 		return new SerializerFixedBinaryLength<>() {
 
 			@Override
-			public @NotNull Integer deserialize(@NotNull ByteBuf serialized) {
-				try {
+			public @NotNull Integer deserialize(@NotNull Send<Buffer> serializedToReceive) {
+				try (var serialized = serializedToReceive.receive()) {
 					if (serialized.readableBytes() != getSerializedBinaryLength()) {
 						throw new IllegalArgumentException(
 								"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with "
 										+ serialized.readableBytes() + " bytes instead");
 					}
 					return serialized.readInt();
-				} finally {
-					serialized.release();
 				}
 			}
 
 			@Override
-			public @NotNull ByteBuf serialize(@NotNull Integer deserialized) {
-				ByteBuf buf = allocator.buffer(Integer.BYTES);
-				return buf.writeInt(deserialized);
+			public @NotNull Send<Buffer> serialize(@NotNull Integer deserialized) {
+				try (Buffer buf = allocator.allocate(Integer.BYTES)) {
+					return buf.writeInt(deserialized).send();
+				}
 			}
 
 			@Override
@@ -120,26 +109,25 @@ public interface SerializerFixedBinaryLength<A, B> extends Serializer<A, B> {
 		};
 	}
 
-	static SerializerFixedBinaryLength<Long, ByteBuf> longSerializer(ByteBufAllocator allocator) {
+	static SerializerFixedBinaryLength<Long, Send<Buffer>> longSerializer(BufferAllocator allocator) {
 		return new SerializerFixedBinaryLength<>() {
 
 			@Override
-			public @NotNull Long deserialize(@NotNull ByteBuf serialized) {
-				try {
+			public @NotNull Long deserialize(@NotNull Send<Buffer> serializedToReceive) {
+				try (var serialized = serializedToReceive.receive()) {
 					if (serialized.readableBytes() != getSerializedBinaryLength()) {
 						throw new IllegalArgumentException(
 								"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with "
 										+ serialized.readableBytes() + " bytes instead");
 					}
 					return serialized.readLong();
-				} finally {
-					serialized.release();
 				}
 			}
 
 			@Override
-			public @NotNull ByteBuf serialize(@NotNull Long deserialized) {
-				ByteBuf buf = allocator.buffer(Long.BYTES);
-				return buf.writeLong(deserialized);
+			public @NotNull Send<Buffer> serialize(@NotNull Long deserialized) {
+				try (Buffer buf = allocator.allocate(Long.BYTES)) {
+					return buf.writeLong(deserialized).send();
+				}
 			}
 
 			@Override
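Usage sketch for the fixed-length serializers above: they refuse any input whose readable size differs from getSerializedBinaryLength(), so an int always occupies exactly Integer.BYTES on the wire (the wrapper class is hypothetical):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;

final class FixedLengthRoundTrip {

	// 4 bytes in, 4 bytes out; deserialize() throws IllegalArgumentException
	// for anything that is not exactly Integer.BYTES long.
	static int roundTrip(BufferAllocator allocator, int value) {
		var ints = SerializerFixedBinaryLength.intSerializer(allocator);
		Send<Buffer> four = ints.serialize(value);
		return ints.deserialize(four);
	}
}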
diff --git a/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparator.java b/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparator.java
index b66b607..3a5ff48 100644
--- a/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparator.java
+++ b/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparator.java
@@ -75,7 +75,7 @@ public class RandomFieldComparator extends FieldComparator<Float> implements Lea
 			}
 		};
 		if (!(scorer instanceof ScoreCachingWrappingScorer)) {
-			this.scorer = new ScoreCachingWrappingScorer(randomizedScorer);
+			this.scorer = ScoreCachingWrappingScorer.wrap(randomizedScorer);
 		} else {
 			this.scorer = randomizedScorer;
 		}
diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/TopDocsSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/TopDocsSearcher.java
index 0d00f40..8901011 100644
--- a/src/main/java/it/cavallium/dbengine/lucene/searcher/TopDocsSearcher.java
+++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/TopDocsSearcher.java
@@ -13,7 +13,6 @@ import org.apache.lucene.search.FieldDoc;
 import org.apache.lucene.search.HitQueue;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.LeafCollector;
-import org.apache.lucene.search.MultiCollectorManager.Collectors;
 import org.apache.lucene.search.Query;
 import org.apache.lucene.search.Scorable;
 import org.apache.lucene.search.ScoreDoc;
diff --git a/src/main/java/it/cavallium/dbengine/netty/JMXNettyMonitoring.java b/src/main/java/it/cavallium/dbengine/netty/JMXNettyMonitoring.java
index 5100361..bb75b3b 100644
--- a/src/main/java/it/cavallium/dbengine/netty/JMXNettyMonitoring.java
+++ b/src/main/java/it/cavallium/dbengine/netty/JMXNettyMonitoring.java
@@ -1,13 +1,13 @@
 package it.cavallium.dbengine.netty;
 
-import io.netty.buffer.ByteBufAllocatorMetric;
+import io.netty.buffer.api.BufferAllocatorMetric;
 
 public class JMXNettyMonitoring implements JMXNettyMonitoringMBean {
 
 	private final String name;
 	private final ByteBufAllocatorMetric metric;
 
-	public JMXNettyMonitoring(String name, io.netty.buffer.ByteBufAllocatorMetric metric) {
+	public JMXNettyMonitoring(String name, io.netty.buffer.api.BufferAllocatorMetric metric) {
 		this.name = name;
 		this.metric = metric;
 	}
diff --git a/src/main/java/it/cavallium/dbengine/netty/JMXNettyMonitoringManager.java b/src/main/java/it/cavallium/dbengine/netty/JMXNettyMonitoringManager.java
index c2c4e70..a974c82 100644
--- a/src/main/java/it/cavallium/dbengine/netty/JMXNettyMonitoringManager.java
+++ b/src/main/java/it/cavallium/dbengine/netty/JMXNettyMonitoringManager.java
@@ -1,6 +1,6 @@
 package it.cavallium.dbengine.netty;
 
-import io.netty.buffer.ByteBufAllocatorMetric;
+import io.netty.buffer.api.BufferAllocatorMetric;
 import io.netty.buffer.PoolArenaMetric;
 import io.netty.buffer.PooledByteBufAllocator;
 import io.netty.buffer.PooledByteBufAllocatorMetric;
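On the RandomFieldComparator hunk above: recent Lucene releases hide the ScoreCachingWrappingScorer constructor behind a static factory. A minimal sketch of the new call shape, assuming Lucene's Scorable hierarchy:

import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.ScoreCachingWrappingScorer;

final class ScorerWrapping {

	// wrap() returns a caching view of the scorer, so repeated score() calls
	// for the same document do not recompute the underlying score.
	static Scorable cachingView(Scorable scorer) {
		return ScoreCachingWrappingScorer.wrap(scorer);
	}
}

Incidentally, the surrounding condition wraps only when the incoming scorer is not already a ScoreCachingWrappingScorer yet assigns randomizedScorer unwrapped in the else branch; that inversion predates this patch but may be worth a second look.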
diff --git a/src/main/java/it/cavallium/dbengine/netty/JMXPooledNettyMonitoring.java b/src/main/java/it/cavallium/dbengine/netty/JMXPooledNettyMonitoring.java
index 9cb1557..19a76db 100644
--- a/src/main/java/it/cavallium/dbengine/netty/JMXPooledNettyMonitoring.java
+++ b/src/main/java/it/cavallium/dbengine/netty/JMXPooledNettyMonitoring.java
@@ -1,6 +1,6 @@
 package it.cavallium.dbengine.netty;
 
-import io.netty.buffer.ByteBufAllocatorMetric;
+import io.netty.buffer.api.BufferAllocatorMetric;
 import io.netty.buffer.PooledByteBufAllocatorMetric;
 
 public class JMXPooledNettyMonitoring extends JMXNettyMonitoring implements JMXNettyMonitoringMBean {
diff --git a/src/main/java/org/rocksdb/CappedWriteBatch.java b/src/main/java/org/rocksdb/CappedWriteBatch.java
index d59978c..d862b30 100644
--- a/src/main/java/org/rocksdb/CappedWriteBatch.java
+++ b/src/main/java/org/rocksdb/CappedWriteBatch.java
@@ -1,6 +1,9 @@
 package org.rocksdb;
 
-import io.netty.buffer.ByteBuf;
+import static it.cavallium.dbengine.database.LLUtils.isDirect;
+
+import io.netty.buffer.api.Buffer;
+import io.netty.buffer.api.Send;
 import it.cavallium.dbengine.database.LLUtils;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
@@ -25,7 +28,7 @@ public class CappedWriteBatch extends WriteBatch {
 	private final int cap;
 	private final WriteOptions writeOptions;
 
-	private final List<ByteBuf> buffersToRelease;
+	private final List<Buffer> buffersToRelease;
 
 	/**
 	 * @param cap The limit of operations
@@ -53,9 +56,8 @@ public class CappedWriteBatch extends WriteBatch {
 
 	private synchronized void releaseAllBuffers() {
 		if (!buffersToRelease.isEmpty()) {
-			for (ByteBuf byteBuffer : buffersToRelease) {
-				assert byteBuffer.refCnt() > 0;
-				byteBuffer.release();
+			for (Buffer byteBuffer : buffersToRelease) {
+				byteBuffer.close();
 			}
 			buffersToRelease.clear();
 		}
@@ -90,8 +92,12 @@ public class CappedWriteBatch extends WriteBatch {
 		flushIfNeeded(false);
 	}
 
-	public synchronized void put(ColumnFamilyHandle columnFamilyHandle, ByteBuf key, ByteBuf value) throws RocksDBException {
-		if (USE_FAST_DIRECT_BUFFERS && key.isDirect() && value.isDirect()) {
+	public synchronized void put(ColumnFamilyHandle columnFamilyHandle,
+			Send<Buffer> keyToReceive,
+			Send<Buffer> valueToReceive) throws RocksDBException {
+		var key = keyToReceive.receive();
+		var value = valueToReceive.receive();
+		if (USE_FAST_DIRECT_BUFFERS && isDirect(key) && isDirect(value)) {
 			buffersToRelease.add(key);
 			buffersToRelease.add(value);
 			ByteBuffer keyNioBuffer = LLUtils.toDirect(key);
@@ -106,8 +112,8 @@ public class CappedWriteBatch extends WriteBatch {
 				byte[] valueArray = LLUtils.toArray(value);
 				super.put(columnFamilyHandle, keyArray, valueArray);
 			} finally {
-				key.release();
-				value.release();
+				key.close();
+				value.close();
 			}
 		}
 		flushIfNeeded(false);
@@ -151,7 +157,8 @@ public class CappedWriteBatch extends WriteBatch {
 		flushIfNeeded(false);
 	}
 
-	public synchronized void delete(ColumnFamilyHandle columnFamilyHandle, ByteBuf key) throws RocksDBException {
+	public synchronized void delete(ColumnFamilyHandle columnFamilyHandle, Send<Buffer> keyToReceive) throws RocksDBException {
+		var key = keyToReceive.receive();
 		if (USE_FAST_DIRECT_BUFFERS) {
 			buffersToRelease.add(key);
 			ByteBuffer keyNioBuffer = LLUtils.toDirect(key);
@@ -167,7 +174,7 @@ public class CappedWriteBatch extends WriteBatch {
 			try {
 				super.delete(columnFamilyHandle, LLUtils.toArray(key));
 			} finally {
-				key.release();
+				key.close();
 			}
 		}
 		flushIfNeeded(false);
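Caller-side sketch for the new CappedWriteBatch API above: ownership of key and value moves into the batch via send(), so the caller must not touch the original buffers afterwards. The batch closes the received buffers itself, immediately on the byte-array path or via releaseAllBuffers() after a flush on the fast direct-buffer path (the wrapper class below is hypothetical):

import io.netty.buffer.api.Buffer;
import org.rocksdb.CappedWriteBatch;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;

final class CappedWriteBatchUsage {

	// Transfers both buffers into the batch. After send() the local `key` and
	// `value` handles are invalid; the batch is responsible for closing them.
	static void putEntry(CappedWriteBatch batch, ColumnFamilyHandle cfh, Buffer key, Buffer value)
			throws RocksDBException {
		batch.put(cfh, key.send(), value.send());
	}
}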