From 2e6aceafe6c580d1ef2d84a1f39e728e1de14063 Mon Sep 17 00:00:00 2001 From: Andrea Cavalli Date: Fri, 30 Apr 2021 19:15:04 +0200 Subject: [PATCH] Converted everything to netty direct buffers --- pom.xml | 16 +- .../database/LLDatabaseConnection.java | 5 +- .../dbengine/database/LLDictionary.java | 49 +- .../database/LLDictionaryResultType.java | 2 +- .../cavallium/dbengine/database/LLRange.java | 78 +- .../cavallium/dbengine/database/LLUtils.java | 252 +++++ .../database/collections/DatabaseEmpty.java | 13 +- .../collections/DatabaseMapDictionary.java | 196 +++- .../DatabaseMapDictionaryDeep.java | 390 +++++-- .../DatabaseMapDictionaryHashed.java | 132 ++- .../collections/DatabaseSetDictionary.java | 11 +- .../DatabaseSetDictionaryHashed.java | 17 +- .../database/collections/DatabaseSingle.java | 48 +- .../collections/DatabaseSingleMapped.java | 22 +- .../database/collections/DatabaseStage.java | 13 +- .../collections/DatabaseStageMap.java | 43 +- .../database/collections/SubStageGetter.java | 5 +- .../collections/SubStageGetterHashMap.java | 21 +- .../collections/SubStageGetterHashSet.java | 17 +- .../collections/SubStageGetterMap.java | 17 +- .../collections/SubStageGetterMapDeep.java | 13 +- .../collections/SubStageGetterSet.java | 30 +- .../collections/SubStageGetterSingle.java | 12 +- .../SubStageGetterSingleBytes.java | 3 +- .../dbengine/database/disk/IterateBound.java | 6 + .../disk/LLLocalDatabaseConnection.java | 15 +- .../database/disk/LLLocalDictionary.java | 1006 ++++++++++++----- .../LLLocalEntryReactiveRocksIterator.java | 9 +- ...ocalGroupedEntryReactiveRocksIterator.java | 11 +- ...LLocalGroupedKeyReactiveRocksIterator.java | 9 +- .../LLLocalGroupedReactiveRocksIterator.java | 55 +- ...LLLocalKeyPrefixReactiveRocksIterator.java | 60 +- .../disk/LLLocalKeyReactiveRocksIterator.java | 9 +- .../disk/LLLocalKeyValueDatabase.java | 56 +- .../database/disk/LLLocalLuceneIndex.java | 8 +- .../disk/LLLocalMultiLuceneIndex.java | 5 +- .../disk/LLLocalReactiveRocksIterator.java | 33 +- .../serialization/CodecSerializer.java | 19 +- .../database/serialization/Serializer.java | 47 +- .../SerializerFixedBinaryLength.java | 114 +- .../rocksdb}/CappedWriteBatch.java | 140 ++- .../dbengine/client/DbTestUtils.java | 106 ++ .../{Database.java => OldDatabaseTests.java} | 91 +- .../dbengine/client/TestDictionaryMap.java | 622 ++++++++++ .../dbengine/client/TestSingletons.java | 120 ++ .../database/collections/TestRanges.java | 45 +- 46 files changed, 3154 insertions(+), 837 deletions(-) create mode 100644 src/main/java/it/cavallium/dbengine/database/disk/IterateBound.java rename src/main/java/{it/cavallium/dbengine/database/disk => org/rocksdb}/CappedWriteBatch.java (53%) create mode 100644 src/test/java/it/cavallium/dbengine/client/DbTestUtils.java rename src/test/java/it/cavallium/dbengine/client/{Database.java => OldDatabaseTests.java} (66%) create mode 100644 src/test/java/it/cavallium/dbengine/client/TestDictionaryMap.java create mode 100644 src/test/java/it/cavallium/dbengine/client/TestSingletons.java diff --git a/pom.xml b/pom.xml index 8da0641..a806724 100644 --- a/pom.xml +++ b/pom.xml @@ -107,7 +107,7 @@ <groupId>org.junit.jupiter</groupId> <artifactId>junit-jupiter-api</artifactId> - <version>5.7.0</version> + <version>5.8.0-M1</version> <scope>test</scope> @@ -116,6 +116,18 @@ + <dependency> + <groupId>org.junit.jupiter</groupId> + <artifactId>junit-jupiter-engine</artifactId> + <version>5.8.0-M1</version> + <scope>test</scope> + </dependency> + <dependency> + <groupId>org.junit.jupiter</groupId> + <artifactId>junit-jupiter-params</artifactId> + <version>5.8.0-M1</version> + <scope>test</scope> + </dependency> <groupId>org.hamcrest</groupId> @@ -138,7 +150,7 @@ <groupId>org.rocksdb</groupId> <artifactId>rocksdbjni</artifactId> - <version>6.16.4</version> + <version>6.19.3</version> <groupId>org.apache.lucene</groupId> diff --git
a/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java b/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java index c123f04..e11218f 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java +++ b/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java @@ -11,7 +11,7 @@ public interface LLDatabaseConnection { Mono<? extends LLDatabaseConnection> connect(); - Mono<? extends LLKeyValueDatabase> getDatabase(String name, List<Column> columns, boolean lowMemory); + Mono<? extends LLKeyValueDatabase> getDatabase(String name, List<Column> columns, boolean lowMemory, boolean inMemory); Mono<? extends LLLuceneIndex> getLuceneIndex(String name, int instancesCount, @@ -19,7 +19,8 @@ public interface LLDatabaseConnection { TextFieldsSimilarity textFieldsSimilarity, Duration queryRefreshDebounceTime, Duration commitDebounceTime, - boolean lowMemory); + boolean lowMemory, + boolean inMemory); Mono<Void> disconnect(); } diff --git a/src/main/java/it/cavallium/dbengine/database/LLDictionary.java b/src/main/java/it/cavallium/dbengine/database/LLDictionary.java index 4b8d4a7..2586a3a 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/LLDictionary.java @@ -1,8 +1,9 @@ package it.cavallium.dbengine.database; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import java.util.List; import java.util.Map.Entry; -import java.util.Optional; import java.util.function.Function; import org.jetbrains.annotations.Nullable; import org.warp.commonutils.concurrency.atomicity.NotAtomic; @@ -13,60 +14,62 @@ import reactor.core.publisher.Mono; @NotAtomic public interface LLDictionary extends LLKeyValueDatabaseStructure { - Mono<byte[]> get(@Nullable LLSnapshot snapshot, byte[] key, boolean existsAlmostCertainly); + ByteBufAllocator getAllocator(); - default Mono<byte[]> get(@Nullable LLSnapshot snapshot, byte[] key) { + Mono<ByteBuf> get(@Nullable LLSnapshot snapshot, ByteBuf key, boolean existsAlmostCertainly); + + default Mono<ByteBuf> get(@Nullable LLSnapshot snapshot, ByteBuf key) { return get(snapshot, key, false); } - Mono<byte[]> put(byte[] key, byte[] value, LLDictionaryResultType resultType); + Mono<ByteBuf> put(ByteBuf key, ByteBuf value, LLDictionaryResultType resultType); - Mono<Boolean> update(byte[] key, Function<Optional<byte[]>, Optional<byte[]>> updater, boolean existsAlmostCertainly); + Mono<Boolean> update(ByteBuf key, Function<@Nullable ByteBuf, @Nullable ByteBuf> updater, boolean existsAlmostCertainly); - default Mono<Boolean> update(byte[] key, Function<Optional<byte[]>, Optional<byte[]>> updater) { + default Mono<Boolean> update(ByteBuf key, Function<@Nullable ByteBuf, @Nullable ByteBuf> updater) { return update(key, updater, false); } Mono<Void> clear(); - Mono<byte[]> remove(byte[] key, LLDictionaryResultType resultType); + Mono<ByteBuf> remove(ByteBuf key, LLDictionaryResultType resultType); - Flux<Entry<byte[], byte[]>> getMulti(@Nullable LLSnapshot snapshot, Flux<byte[]> keys, boolean existsAlmostCertainly); + Flux<Entry<ByteBuf, ByteBuf>> getMulti(@Nullable LLSnapshot snapshot, Flux<ByteBuf> keys, boolean existsAlmostCertainly); - default Flux<Entry<byte[], byte[]>> getMulti(@Nullable LLSnapshot snapshot, Flux<byte[]> keys) { + default Flux<Entry<ByteBuf, ByteBuf>> getMulti(@Nullable LLSnapshot snapshot, Flux<ByteBuf> keys) { return getMulti(snapshot, keys, false); } - Flux<Entry<byte[], byte[]>> putMulti(Flux<Entry<byte[], byte[]>> entries, boolean getOldValues); + Flux<Entry<ByteBuf, ByteBuf>> putMulti(Flux<Entry<ByteBuf, ByteBuf>> entries, boolean getOldValues); - Flux<Entry<byte[], byte[]>> getRange(@Nullable LLSnapshot snapshot, LLRange range, boolean existsAlmostCertainly); + Flux<Entry<ByteBuf, ByteBuf>> getRange(@Nullable LLSnapshot snapshot, LLRange range, boolean existsAlmostCertainly); - default Flux<Entry<byte[], byte[]>> getRange(@Nullable LLSnapshot snapshot, LLRange range) { + default Flux<Entry<ByteBuf, ByteBuf>> getRange(@Nullable LLSnapshot snapshot, LLRange range) { return getRange(snapshot, range,
false); } - Flux<List<Entry<byte[], byte[]>>> getRangeGrouped(@Nullable LLSnapshot snapshot, + Flux<List<Entry<ByteBuf, ByteBuf>>> getRangeGrouped(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength, boolean existsAlmostCertainly); - default Flux<List<Entry<byte[], byte[]>>> getRangeGrouped(@Nullable LLSnapshot snapshot, + default Flux<List<Entry<ByteBuf, ByteBuf>>> getRangeGrouped(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength) { return getRangeGrouped(snapshot, range, prefixLength, false); } - Flux<byte[]> getRangeKeys(@Nullable LLSnapshot snapshot, LLRange range); + Flux<ByteBuf> getRangeKeys(@Nullable LLSnapshot snapshot, LLRange range); - Flux<List<byte[]>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength); + Flux<List<ByteBuf>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength); - Flux<byte[]> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength); + Flux<ByteBuf> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength); - Flux<Entry<byte[], byte[]>> setRange(LLRange range, Flux<Entry<byte[], byte[]>> entries, boolean getOldValues); + Flux<Entry<ByteBuf, ByteBuf>> setRange(LLRange range, Flux<Entry<ByteBuf, ByteBuf>> entries, boolean getOldValues); default Mono<Void> replaceRange(LLRange range, boolean canKeysChange, - Function<Entry<byte[], byte[]>, Mono<Entry<byte[], byte[]>>> entriesReplacer, + Function<Entry<ByteBuf, ByteBuf>, Mono<Entry<ByteBuf, ByteBuf>>> entriesReplacer, boolean existsAlmostCertainly) { return Mono.defer(() -> { if (canKeysChange) { @@ -87,7 +90,7 @@ public interface LLDictionary extends LLKeyValueDatabaseStructure { default Mono<Void> replaceRange(LLRange range, boolean canKeysChange, - Function<Entry<byte[], byte[]>, Mono<Entry<byte[], byte[]>>> entriesReplacer) { + Function<Entry<ByteBuf, ByteBuf>, Mono<Entry<ByteBuf, ByteBuf>>> entriesReplacer) { return replaceRange(range, canKeysChange, entriesReplacer, false); } @@ -95,9 +98,9 @@ Mono<Long> sizeRange(@Nullable LLSnapshot snapshot, LLRange range, boolean fast); - Mono<Entry<byte[], byte[]>> getOne(@Nullable LLSnapshot snapshot, LLRange range); + Mono<Entry<ByteBuf, ByteBuf>> getOne(@Nullable LLSnapshot snapshot, LLRange range); - Mono<byte[]> getOneKey(@Nullable LLSnapshot snapshot, LLRange range); + Mono<ByteBuf> getOneKey(@Nullable LLSnapshot snapshot, LLRange range); - Mono<Entry<byte[], byte[]>> removeOne(LLRange range); + Mono<Entry<ByteBuf, ByteBuf>> removeOne(LLRange range); } diff --git a/src/main/java/it/cavallium/dbengine/database/LLDictionaryResultType.java b/src/main/java/it/cavallium/dbengine/database/LLDictionaryResultType.java index d6b5d04..938af1e 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLDictionaryResultType.java +++ b/src/main/java/it/cavallium/dbengine/database/LLDictionaryResultType.java @@ -1,5 +1,5 @@ package it.cavallium.dbengine.database; public enum LLDictionaryResultType { - VOID, VALUE_CHANGED, PREVIOUS_VALUE + VOID, PREVIOUS_VALUE_EXISTENCE, PREVIOUS_VALUE } diff --git a/src/main/java/it/cavallium/dbengine/database/LLRange.java b/src/main/java/it/cavallium/dbengine/database/LLRange.java index 0996c2a..a5e17ee 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLRange.java +++ b/src/main/java/it/cavallium/dbengine/database/LLRange.java @@ -1,5 +1,10 @@ package it.cavallium.dbengine.database; +import static io.netty.buffer.Unpooled.wrappedBuffer; +import static io.netty.buffer.Unpooled.wrappedUnmodifiableBuffer; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; import java.util.Arrays; import java.util.StringJoiner; @@ -9,10 +14,18 @@ public class LLRange { private static final LLRange RANGE_ALL = new LLRange(null, null); - private final byte[] min; - private final byte[] max; + private final ByteBuf min; + private final ByteBuf max; - private LLRange(byte[] min, byte[] max) { + private LLRange(ByteBuf min, ByteBuf max) { + assert min == null ||
min.refCnt() > 0; + assert max == null || max.refCnt() > 0; + if (min != null && !min.isDirect()) { + throw new IllegalArgumentException("Min buffer must be direct"); + } + if (max != null && !max.isDirect()) { + throw new IllegalArgumentException("Max buffer must be direct"); + } this.min = min; this.max = max; } @@ -21,50 +34,64 @@ public class LLRange { return RANGE_ALL; } - public static LLRange from(byte[] min) { + public static LLRange from(ByteBuf min) { return new LLRange(min, null); } - public static LLRange to(byte[] max) { + public static LLRange to(ByteBuf max) { return new LLRange(null, max); } - public static LLRange single(byte[] single) { + public static LLRange single(ByteBuf single) { return new LLRange(single, single); } - public static LLRange of(byte[] min, byte[] max) { + public static LLRange of(ByteBuf min, ByteBuf max) { return new LLRange(min, max); } public boolean isAll() { + assert min == null || min.refCnt() > 0; + assert max == null || max.refCnt() > 0; return min == null && max == null; } public boolean isSingle() { + assert min == null || min.refCnt() > 0; + assert max == null || max.refCnt() > 0; if (min == null || max == null) return false; - return Arrays.equals(min, max); + return LLUtils.equals(min, max); } public boolean hasMin() { + assert min == null || min.refCnt() > 0; + assert max == null || max.refCnt() > 0; return min != null; } - public byte[] getMin() { + public ByteBuf getMin() { + assert min == null || min.refCnt() > 0; + assert max == null || max.refCnt() > 0; assert min != null; return min; } public boolean hasMax() { + assert min == null || min.refCnt() > 0; + assert max == null || max.refCnt() > 0; return max != null; } - public byte[] getMax() { + public ByteBuf getMax() { + assert min == null || min.refCnt() > 0; + assert max == null || max.refCnt() > 0; assert max != null; return max; } - public byte[] getSingle() { + public ByteBuf getSingle() { + assert min == null || min.refCnt() > 0; + assert max == null || max.refCnt() > 0; assert isSingle(); return min; } @@ -78,21 +105,40 @@ public class LLRange { return false; } LLRange llRange = (LLRange) o; - return Arrays.equals(min, llRange.min) && Arrays.equals(max, llRange.max); + return LLUtils.equals(min, llRange.min) && LLUtils.equals(max, llRange.max); } @Override public int hashCode() { - int result = Arrays.hashCode(min); - result = 31 * result + Arrays.hashCode(max); + int result = LLUtils.hashCode(min); + result = 31 * result + LLUtils.hashCode(max); return result; } @Override public String toString() { return new StringJoiner(", ", LLRange.class.getSimpleName() + "[", "]") - .add("min=" + Arrays.toString(min)) - .add("max=" + Arrays.toString(max)) + .add("min=" + LLUtils.toString(min)) + .add("max=" + LLUtils.toString(max)) .toString(); } + + public LLRange retain() { + if (min != null) { + min.retain(); + } + if (max != null) { + max.retain(); + } + return this; + } + + public void release() { + if (min != null) { + min.release(); + } + if (max != null) { + max.release(); + } + } } diff --git a/src/main/java/it/cavallium/dbengine/database/LLUtils.java b/src/main/java/it/cavallium/dbengine/database/LLUtils.java index a6122d8..3f595de 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLUtils.java +++ b/src/main/java/it/cavallium/dbengine/database/LLUtils.java @@ -2,10 +2,18 @@ package it.cavallium.dbengine.database; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; +import io.netty.buffer.AbstractByteBufAllocator; +import
io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.PooledByteBufAllocator; import it.cavallium.dbengine.lucene.RandomSortField; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.LinkedList; import java.util.List; +import java.util.function.ToIntFunction; import org.apache.lucene.document.Document; import org.apache.lucene.document.Field; import org.apache.lucene.document.FloatPoint; @@ -20,13 +28,20 @@ import org.apache.lucene.search.ScoreMode; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.SortedNumericSortField; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import org.rocksdb.RocksDB; + +import static io.netty.buffer.Unpooled.EMPTY_BUFFER; +import static io.netty.buffer.Unpooled.wrappedBuffer; @SuppressWarnings("unused") public class LLUtils { private static final byte[] RESPONSE_TRUE = new byte[]{1}; private static final byte[] RESPONSE_FALSE = new byte[]{0}; + private static final byte[] RESPONSE_TRUE_BUF = new byte[]{1}; + private static final byte[] RESPONSE_FALSE_BUF = new byte[]{0}; public static final byte[][] LEXICONOGRAPHIC_ITERATION_SEEKS = new byte[256][1]; static { @@ -40,10 +55,23 @@ public class LLUtils { return response[0] == 1; } + public static boolean responseToBoolean(ByteBuf response) { + try { + assert response.readableBytes() == 1; + return response.getByte(response.readerIndex()) == 1; + } finally { + response.release(); + } + } + public static byte[] booleanToResponse(boolean bool) { return bool ? RESPONSE_TRUE : RESPONSE_FALSE; } + public static ByteBuf booleanToResponseByteBuffer(boolean bool) { + return wrappedBuffer(booleanToResponse(bool)); + } + @Nullable public static Sort toSort(@Nullable LLSort sort) { if (sort == null) { @@ -127,4 +155,228 @@ public class LLUtils { public static it.cavallium.dbengine.database.LLKeyScore toKeyScore(LLKeyScore hit) { return new it.cavallium.dbengine.database.LLKeyScore(hit.getKey(), hit.getScore()); } + + public static String toString(ByteBuf key) { + if (key == null) { + return "null"; + } else { + int startIndex = key.readerIndex(); + int iMax = key.readableBytes() - 1; + int iLimit = 128; + if (iMax <= -1) { + return "[]"; + } else { + StringBuilder b = new StringBuilder(); + b.append('['); + int i = 0; + + while(true) { + b.append(key.getByte(startIndex + i)); + if (i == iLimit) { + b.append("…"); + } + if (i == iMax || i == iLimit) { + return b.append(']').toString(); + } + + b.append(", "); + ++i; + } + } + } + } + + public static boolean equals(ByteBuf a, ByteBuf b) { + if (a == null && b == null) { + return true; + } else if (a != null && b != null) { + return ByteBufUtil.equals(a, b); + } else { + return false; + } + } + + public static byte[] toArray(ByteBuf key) { + byte[] keyBytes = new byte[key.readableBytes()]; + key.getBytes(key.readerIndex(), keyBytes, 0, key.readableBytes()); + return keyBytes; + } + + public static List<byte[]> toArray(List<ByteBuf> input) { + List<byte[]> result = new ArrayList<>(input.size()); + for (ByteBuf byteBuf : input) { + result.add(toArray(byteBuf)); + } + return result; + } + + public static int hashCode(ByteBuf buf) { + return buf == null ? 0 : buf.hashCode(); + } + + @Nullable + public static ByteBuf readNullableDirectNioBuffer(ByteBufAllocator alloc, ToIntFunction<ByteBuffer> reader) { + ByteBuf buffer = alloc.directBuffer(); + try { + ByteBuf directBuffer = null; + ByteBuffer nioBuffer; + int size; + Boolean mustBeCopied = null; + do { + if (mustBeCopied == null || !mustBeCopied) { + nioBuffer = LLUtils.toDirectFast(buffer.retain()); + if (nioBuffer != null) { + nioBuffer.limit(nioBuffer.capacity()); + } + } else { + nioBuffer = null; + } + if ((mustBeCopied != null && mustBeCopied) || nioBuffer == null) { + directBuffer = LLUtils.toDirectCopy(buffer.retain()); + nioBuffer = directBuffer.nioBuffer(0, directBuffer.capacity()); + mustBeCopied = true; + } else { + mustBeCopied = false; + } + try { + assert nioBuffer.isDirect(); + size = reader.applyAsInt(nioBuffer); + if (size != RocksDB.NOT_FOUND) { + if (mustBeCopied) { + buffer.writerIndex(0).writeBytes(nioBuffer); + } + if (size == nioBuffer.limit()) { + buffer.setIndex(0, size); + return buffer; + } else { + assert size > nioBuffer.limit(); + assert nioBuffer.limit() > 0; + buffer.capacity(size); + } + } + } finally { + if (nioBuffer != null) { + nioBuffer = null; + } + if(directBuffer != null) { + directBuffer.release(); + directBuffer = null; + } + } + } while (size != RocksDB.NOT_FOUND); + } catch (Throwable t) { + buffer.release(); + throw t; + } + return null; + } + + @Nullable + public static ByteBuffer toDirectFast(ByteBuf buffer) { + try { + ByteBuffer result = buffer.nioBuffer(0, buffer.capacity()); + if (result.isDirect()) { + result.limit(buffer.writerIndex()); + + assert result.isDirect(); + assert result.capacity() == buffer.capacity(); + assert buffer.readerIndex() == result.position(); + assert result.limit() - result.position() == buffer.readableBytes(); + + return result; + } else { + return null; + } + } finally { + buffer.release(); + } + } + + public static ByteBuf toDirectCopy(ByteBuf buffer) { + try { + ByteBuf directCopyBuf = buffer.alloc().directBuffer(buffer.capacity(), buffer.maxCapacity()); + directCopyBuf.writeBytes(buffer, 0, buffer.writerIndex()); + return directCopyBuf; + } finally { + buffer.release(); + } + } + + public static ByteBuf convertToDirectByteBuf(AbstractByteBufAllocator alloc, ByteBuf buffer) { + ByteBuf result; + ByteBuf directCopyBuf = alloc.directBuffer(buffer.capacity(), buffer.maxCapacity()); + directCopyBuf.writeBytes(buffer, 0, buffer.writerIndex()); + directCopyBuf.readerIndex(buffer.readerIndex()); + result = directCopyBuf; + assert result.isDirect(); + assert result.capacity() == buffer.capacity(); + assert buffer.readerIndex() == result.readerIndex(); + return result; + } + + @NotNull + public static ByteBuf readDirectNioBuffer(ByteBufAllocator alloc, ToIntFunction<ByteBuffer> reader) { + var buffer = readNullableDirectNioBuffer(alloc, reader); + if (buffer == null) { + throw new IllegalStateException("A non-nullable buffer read operation tried to return a \"not found\" element"); + } + return buffer; + } + + public static ByteBuf directCompositeBuffer(ByteBufAllocator alloc, ByteBuf buffer) { + return wrappedBuffer(buffer); + } + + public static ByteBuf directCompositeBuffer(ByteBufAllocator alloc, ByteBuf buffer1, ByteBuf buffer2) { + assert buffer1.isDirect(); + assert buffer1.nioBuffer().isDirect(); + assert buffer2.isDirect(); + assert buffer2.nioBuffer().isDirect(); + if (buffer1.readableBytes() == 0) { + return wrappedBuffer(buffer2); + } else if (buffer2.readableBytes() == 0) { + return wrappedBuffer(buffer1); + } +
CompositeByteBuf compositeBuffer = alloc.compositeDirectBuffer(2); + compositeBuffer.addComponent(true, buffer1); + compositeBuffer.addComponent(true, buffer2); + compositeBuffer.consolidate(); + assert compositeBuffer.isDirect(); + assert compositeBuffer.nioBuffer().isDirect(); + return compositeBuffer; + } + + public static ByteBuf directCompositeBuffer(ByteBufAllocator alloc, ByteBuf buffer1, ByteBuf buffer2, ByteBuf buffer3) { + if (buffer1.readableBytes() == 0) { + return directCompositeBuffer(alloc, buffer2, buffer3); + } else if (buffer2.readableBytes() == 0) { + return directCompositeBuffer(alloc, buffer1, buffer3); + } else if (buffer3.readableBytes() == 0) { + return directCompositeBuffer(alloc, buffer1, buffer2); + } + CompositeByteBuf compositeBuffer = alloc.compositeDirectBuffer(3); + compositeBuffer.addComponent(true, buffer1); + compositeBuffer.addComponent(true, buffer2); + compositeBuffer.addComponent(true, buffer3); + compositeBuffer.consolidate(); + return compositeBuffer; + } + + public static ByteBuf directCompositeBuffer(ByteBufAllocator alloc, ByteBuf... buffers) { + switch (buffers.length) { + case 0: + return EMPTY_BUFFER; + case 1: + return directCompositeBuffer(alloc, buffers[0]); + case 2: + return directCompositeBuffer(alloc, buffers[0], buffers[1]); + case 3: + return directCompositeBuffer(alloc, buffers[0], buffers[1], buffers[2]); + default: + CompositeByteBuf compositeBuffer = alloc.compositeDirectBuffer(buffers.length); + compositeBuffer.addComponents(true, buffers); + compositeBuffer.consolidate(); + return compositeBuffer; + } + } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java index f6f684f..41c439e 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java @@ -1,24 +1,25 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.serialization.Serializer; import java.util.function.Function; import org.jetbrains.annotations.NotNull; +import static io.netty.buffer.Unpooled.*; public class DatabaseEmpty { @SuppressWarnings({"unused", "InstantiationOfUtilityClass"}) public static final Nothing NOTHING = new Nothing(); - private static final byte[] NOTHING_BYTES = new byte[0]; - public static final Serializer<Nothing, byte[]> NOTHING_SERIALIZER = new Serializer<>() { + public static final Serializer<Nothing, ByteBuf> NOTHING_SERIALIZER = new Serializer<>() { @Override - public @NotNull Nothing deserialize(byte @NotNull [] serialized) { + public @NotNull Nothing deserialize(@NotNull ByteBuf serialized) { return NOTHING; } @Override - public byte @NotNull [] serialize(@NotNull Nothing deserialized) { - return NOTHING_BYTES; + public @NotNull ByteBuf serialize(@NotNull Nothing deserialized) { + return EMPTY_BUFFER; } }; public static final Function<Nothing, Nothing> NOTHING_HASH_FUNCTION = nothing -> nothing; @@ -28,7 +29,7 @@ public class DatabaseEmpty { private DatabaseEmpty() { } - public static DatabaseStageEntry<Nothing> create(LLDictionary dictionary, byte[] key) { + public static DatabaseStageEntry<Nothing> create(LLDictionary dictionary, ByteBuf key) { return new DatabaseSingle<>(dictionary, key, NOTHING_SERIALIZER); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java
b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java index d04b015..b76638a 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java @@ -1,18 +1,19 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.LLDictionaryResultType; import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.serialization.Serializer; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; -import java.util.Arrays; +import java.io.IOException; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; -import java.util.Optional; import java.util.function.Function; import org.jetbrains.annotations.Nullable; +import org.rocksdb.RocksDBException; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; @@ -21,40 +22,44 @@ import reactor.core.publisher.Mono; */ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep> { - private final Serializer valueSerializer; + private final Serializer valueSerializer; protected DatabaseMapDictionary(LLDictionary dictionary, - byte[] prefixKey, - SerializerFixedBinaryLength keySuffixSerializer, - Serializer valueSerializer) { + ByteBuf prefixKey, + SerializerFixedBinaryLength keySuffixSerializer, + Serializer valueSerializer) { + // Do not retain or release or use the prefixKey here super(dictionary, prefixKey, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0); + prefixKey = null; this.valueSerializer = valueSerializer; } public static DatabaseMapDictionary simple(LLDictionary dictionary, - SerializerFixedBinaryLength keySerializer, - Serializer valueSerializer) { + SerializerFixedBinaryLength keySerializer, + Serializer valueSerializer) { return new DatabaseMapDictionary<>(dictionary, EMPTY_BYTES, keySerializer, valueSerializer); } public static DatabaseMapDictionary tail(LLDictionary dictionary, - byte[] prefixKey, - SerializerFixedBinaryLength keySuffixSerializer, - Serializer valueSerializer) { + ByteBuf prefixKey, + SerializerFixedBinaryLength keySuffixSerializer, + Serializer valueSerializer) { return new DatabaseMapDictionary<>(dictionary, prefixKey, keySuffixSerializer, valueSerializer); } - private byte[] toKey(byte[] suffixKey) { - assert suffixKeyConsistency(suffixKey.length); - byte[] key = Arrays.copyOf(keyPrefix, keyPrefix.length + suffixKey.length); - System.arraycopy(suffixKey, 0, key, keyPrefix.length, suffixKey.length); - return key; + private ByteBuf toKey(ByteBuf suffixKey) { + assert suffixKeyConsistency(suffixKey.readableBytes()); + try { + return LLUtils.directCompositeBuffer(dictionary.getAllocator(), keyPrefix.retain(), suffixKey.retain()); + } finally { + suffixKey.release(); + } } @Override public Mono> get(@Nullable CompositeSnapshot snapshot, boolean existsAlmostCertainly) { return dictionary - .getRange(resolveSnapshot(snapshot), range, existsAlmostCertainly) + .getRange(resolveSnapshot(snapshot), range.retain(), existsAlmostCertainly) .collectMap( entry -> deserializeSuffix(stripPrefix(entry.getKey())), entry -> deserialize(entry.getValue()), @@ -64,7 +69,7 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep> setAndGetPrevious(Map value) { return dictionary - .setRange(range, + 
.setRange(range.retain(), Flux .fromIterable(value.entrySet()) .map(entry -> Map.entry(serializeSuffix(entry.getKey()), serialize(entry.getValue()))), @@ -79,7 +84,7 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep> clearAndGetPrevious() { return dictionary - .setRange(range, Flux.empty(), true) + .setRange(range.retain(), Flux.empty(), true) .collectMap( entry -> deserializeSuffix(stripPrefix(entry.getKey())), entry -> deserialize(entry.getValue()), @@ -88,96 +93,170 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { - return dictionary.sizeRange(resolveSnapshot(snapshot), range, fast); + return dictionary.sizeRange(resolveSnapshot(snapshot), range.retain(), fast); } @Override public Mono isEmpty(@Nullable CompositeSnapshot snapshot) { - return dictionary.isRangeEmpty(resolveSnapshot(snapshot), range); + return dictionary.isRangeEmpty(resolveSnapshot(snapshot), range.retain()); } @Override public Mono> at(@Nullable CompositeSnapshot snapshot, T keySuffix) { + ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + ByteBuf keyBuf = toKey(keySuffixBuf.retain()); return Mono - .just(new DatabaseSingle<>(dictionary, toKey(serializeSuffix(keySuffix)), Serializer.noop())) - .map(entry -> new DatabaseSingleMapped<>(entry, valueSerializer)); + .fromSupplier(() -> new DatabaseSingle<>(dictionary, keyBuf.retain(), Serializer.noop())) + .>map(entry -> new DatabaseSingleMapped<>(entry, valueSerializer)) + .doFinally(s -> { + keyBuf.release(); + keySuffixBuf.release(); + }); } @Override public Mono getValue(@Nullable CompositeSnapshot snapshot, T keySuffix, boolean existsAlmostCertainly) { + ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + ByteBuf keyBuf = toKey(keySuffixBuf.retain()); return dictionary - .get(resolveSnapshot(snapshot), toKey(serializeSuffix(keySuffix)), existsAlmostCertainly) - .map(this::deserialize); + .get(resolveSnapshot(snapshot), keyBuf.retain(), existsAlmostCertainly) + .map(this::deserialize) + .doFinally(s -> { + keyBuf.release(); + keySuffixBuf.release(); + }); } @Override public Mono putValue(T keySuffix, U value) { - return dictionary.put(toKey(serializeSuffix(keySuffix)), serialize(value), LLDictionaryResultType.VOID).then(); + ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + ByteBuf keyBuf = toKey(keySuffixBuf.retain()); + ByteBuf valueBuf = serialize(value); + return dictionary.put(keyBuf.retain(), valueBuf.retain(), LLDictionaryResultType.VOID).doFinally(s -> { + keyBuf.release(); + keySuffixBuf.release(); + valueBuf.release(); + }).then(); } @Override public Mono updateValue(T keySuffix, boolean existsAlmostCertainly, - Function, Optional> updater) { - return dictionary.update(toKey(serializeSuffix(keySuffix)), - oldSerialized -> updater.apply(oldSerialized.map(this::deserialize)).map(this::serialize), - existsAlmostCertainly - ); + Function<@Nullable U, @Nullable U> updater) { + ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + ByteBuf keyBuf = toKey(keySuffixBuf.retain()); + return dictionary.update(keyBuf.retain(), oldSerialized -> { + try { + var result = updater.apply(oldSerialized == null ? 
null : this.deserialize(oldSerialized.retain())); + if (result == null) { + return null; + } else { + return this.serialize(result); + } + } finally { + if (oldSerialized != null) { + oldSerialized.release(); + } + } + }, existsAlmostCertainly).doFinally(s -> { + keyBuf.release(); + keySuffixBuf.release(); + }); } @Override public Mono putValueAndGetPrevious(T keySuffix, U value) { + ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + ByteBuf keyBuf = toKey(keySuffixBuf.retain()); + ByteBuf valueBuf = serialize(value); return dictionary - .put(toKey(serializeSuffix(keySuffix)), serialize(value), LLDictionaryResultType.PREVIOUS_VALUE) - .map(this::deserialize); + .put(keyBuf.retain(), valueBuf.retain(), LLDictionaryResultType.PREVIOUS_VALUE) + .map(this::deserialize) + .doFinally(s -> { + keyBuf.release(); + keySuffixBuf.release(); + valueBuf.release(); + }); } @Override public Mono putValueAndGetStatus(T keySuffix, U value) { + ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + ByteBuf keyBuf = toKey(keySuffixBuf.retain()); + ByteBuf valueBuf = serialize(value); return dictionary - .put(toKey(serializeSuffix(keySuffix)), serialize(value), LLDictionaryResultType.VALUE_CHANGED) - .map(LLUtils::responseToBoolean); + .put(keyBuf.retain(), valueBuf.retain(), LLDictionaryResultType.PREVIOUS_VALUE_EXISTENCE) + .map(LLUtils::responseToBoolean) + .doFinally(s -> { + keyBuf.release(); + keySuffixBuf.release(); + valueBuf.release(); + }); } @Override public Mono remove(T keySuffix) { - return dictionary.remove(toKey(serializeSuffix(keySuffix)), LLDictionaryResultType.VOID).then(); + ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + ByteBuf keyBuf = toKey(keySuffixBuf.retain()); + return dictionary.remove(keyBuf.retain(), LLDictionaryResultType.VOID).doFinally(s -> { + keyBuf.release(); + keySuffixBuf.release(); + }).then(); } @Override public Mono removeAndGetPrevious(T keySuffix) { + ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + ByteBuf keyBuf = toKey(keySuffixBuf.retain()); return dictionary - .remove(toKey(serializeSuffix(keySuffix)), LLDictionaryResultType.PREVIOUS_VALUE) - .map(this::deserialize); + .remove(keyBuf.retain(), LLDictionaryResultType.PREVIOUS_VALUE) + .map(this::deserialize) + .doFinally(s -> { + keyBuf.release(); + keySuffixBuf.release(); + }); } @Override public Mono removeAndGetStatus(T keySuffix) { + ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + ByteBuf keyBuf = toKey(keySuffixBuf.retain()); return dictionary - .remove(toKey(serializeSuffix(keySuffix)), LLDictionaryResultType.VALUE_CHANGED) - .map(LLUtils::responseToBoolean); + .remove(keyBuf.retain(), LLDictionaryResultType.PREVIOUS_VALUE_EXISTENCE) + .map(LLUtils::responseToBoolean) + .doFinally(s -> { + keyBuf.release(); + keySuffixBuf.release(); + }); } @Override public Flux> getMulti(@Nullable CompositeSnapshot snapshot, Flux keys, boolean existsAlmostCertainly) { return dictionary - .getMulti(resolveSnapshot(snapshot), keys.map(keySuffix -> toKey(serializeSuffix(keySuffix))), existsAlmostCertainly) - .map(entry -> Map.entry(deserializeSuffix(stripPrefix(entry.getKey())), deserialize(entry.getValue()))); + .getMulti(resolveSnapshot(snapshot), keys.flatMap(keySuffix -> Mono.fromCallable(() -> { + ByteBuf keySuffixBuf = serializeSuffix(keySuffix); + try { + return toKey(keySuffixBuf.retain()); + } finally { + keySuffixBuf.release(); + } + })), existsAlmostCertainly) + .flatMap(entry -> Mono.fromCallable(() -> Map.entry(deserializeSuffix(stripPrefix(entry.getKey())), deserialize(entry.getValue())))); } 
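The retain()/release() calls threaded through the methods above all follow one ownership convention: a method that owns a ByteBuf hands a retained reference to whatever it calls (the callee releases that reference when it is done) and drops its own reference in doFinally, so the buffer is freed on success, error, and cancellation alike. Below is a minimal, self-contained sketch of that convention, assuming Netty 4.1 and Reactor Core; RetainReleaseSketch, storePut, and putValue are hypothetical stand-ins for LLDictionary.put and DatabaseMapDictionary.putValue, not code from this patch:

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocator;
import reactor.core.publisher.Mono;

public class RetainReleaseSketch {

	// Stand-in for LLDictionary.put(): the callee takes ownership of the reference
	// it receives and must release it exactly once, even if the operation fails.
	static Mono<Void> storePut(ByteBuf key) {
		return Mono.fromRunnable(() -> {
			try {
				// ... hand the key off to the storage engine here ...
			} finally {
				key.release(); // drop the reference that was transferred to us
			}
		});
	}

	// Shape of DatabaseMapDictionary.putValue(): retain once for the callee, keep
	// one local reference, and release the local reference only when the reactive
	// pipeline terminates (success, error, or cancellation after subscription).
	static Mono<Void> putValue(ByteBufAllocator alloc, byte[] serializedKey) {
		ByteBuf keyBuf = alloc.directBuffer(serializedKey.length).writeBytes(serializedKey);
		return storePut(keyBuf.retain())            // refCnt = 2: one reference now belongs to the callee
				.doFinally(signal -> keyBuf.release()); // drop the local reference when the pipeline ends
	}

	public static void main(String[] args) {
		putValue(PooledByteBufAllocator.DEFAULT, new byte[] {1, 2, 3}).block();
	}
}

Passing the retain() result inline at each call site keeps every reference's lifetime visible where it is created, which is what makes the refCnt() > 0 leak-detection assertions in LLRange and elsewhere in this patch meaningful.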
@Override public Mono putMulti(Flux> entries) { return dictionary - .putMulti(entries - .map(entry -> Map - .entry(toKey(serializeSuffix(entry.getKey())), serialize(entry.getValue()))), false) + .putMulti(entries.flatMap(entry -> Mono.fromCallable(() -> Map.entry(toKey(serializeSuffix(entry.getKey())), + serialize(entry.getValue()) + ))), false) .then(); } @Override public Flux>> getAllStages(@Nullable CompositeSnapshot snapshot) { return dictionary - .getRangeKeys(resolveSnapshot(snapshot), range) + .getRangeKeys(resolveSnapshot(snapshot), range.retain()) .map(key -> Map.entry(deserializeSuffix(stripPrefix(key)), new DatabaseSingleMapped<>( new DatabaseSingle<>(dictionary, @@ -191,7 +270,7 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep> getAllValues(@Nullable CompositeSnapshot snapshot) { return dictionary - .getRange(resolveSnapshot(snapshot), range) + .getRange(resolveSnapshot(snapshot), range.retain()) .map(serializedEntry -> Map.entry( deserializeSuffix(stripPrefix(serializedEntry.getKey())), valueSerializer.deserialize(serializedEntry.getValue()) @@ -201,7 +280,7 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep> setAllValuesAndGetPrevious(Flux> entries) { return dictionary - .setRange(range, + .setRange(range.retain(), entries.map(entry -> Map.entry(toKey(serializeSuffix(entry.getKey())), serialize(entry.getValue()))), true) .map(entry -> Map.entry(deserializeSuffix(stripPrefix(entry.getKey())), deserialize(entry.getValue()))); @@ -214,22 +293,31 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep> implements DatabaseStageMap { - public static final byte[] EMPTY_BYTES = new byte[0]; + public static final ByteBuf EMPTY_BYTES = unreleasableBuffer(directBuffer(0, 0)); protected final LLDictionary dictionary; + private final ByteBufAllocator alloc; protected final SubStageGetter subStageGetter; - protected final SerializerFixedBinaryLength keySuffixSerializer; - protected final byte[] keyPrefix; + protected final SerializerFixedBinaryLength keySuffixSerializer; + protected final ByteBuf keyPrefix; + protected final int keyPrefixLength; protected final int keySuffixLength; protected final int keyExtLength; protected final LLRange range; - private static byte[] incrementPrefix(byte[] key, int prefixLength) { - boolean remainder = true; - final byte ff = (byte) 0xFF; - for (int i = prefixLength - 1; i >= 0; i--) { - if (key[i] != ff) { - key[i]++; - remainder = false; - break; - } else { - key[i] = 0x00; - remainder = true; + private static ByteBuf incrementPrefix(ByteBufAllocator alloc, ByteBuf originalKey, int prefixLength) { + try { + assert originalKey.readableBytes() >= prefixLength; + ByteBuf copiedBuf = alloc.directBuffer(originalKey.writerIndex(), originalKey.writerIndex() + 1); + try { + boolean overflowed = true; + final int ff = 0xFF; + int writtenBytes = 0; + copiedBuf.writerIndex(prefixLength); + for (int i = prefixLength - 1; i >= 0; i--) { + int iByte = originalKey.getUnsignedByte(i); + if (iByte != ff) { + copiedBuf.setByte(i, iByte + 1); + writtenBytes++; + overflowed = false; + break; + } else { + copiedBuf.setByte(i, 0x00); + writtenBytes++; + overflowed = true; + } + } + assert prefixLength - writtenBytes >= 0; + if (prefixLength - writtenBytes > 0) { + copiedBuf.setBytes(0, originalKey, 0, (prefixLength - writtenBytes)); + } + + copiedBuf.writerIndex(copiedBuf.capacity()); + + if (originalKey.writerIndex() - prefixLength > 0) { + copiedBuf.setBytes(prefixLength, originalKey, prefixLength, 
originalKey.writerIndex() - prefixLength); + } + + if (overflowed) { + for (int i = 0; i < copiedBuf.writerIndex(); i++) { + copiedBuf.setByte(i, 0xFF); + } + copiedBuf.writeZero(1); + } + return copiedBuf.retain(); + } finally { + copiedBuf.release(); } - } - - if (remainder) { - Arrays.fill(key, 0, prefixLength, (byte) 0xFF); - return Arrays.copyOf(key, key.length + 1); - } else { - return key; + } finally { + originalKey.release(); } } - static byte[] firstRangeKey(byte[] prefixKey, int prefixLength, int suffixLength, int extLength) { - return zeroFillKeySuffixAndExt(prefixKey, prefixLength, suffixLength, extLength); + static ByteBuf firstRangeKey(ByteBufAllocator alloc, ByteBuf prefixKey, int prefixLength, int suffixLength, int extLength) { + return zeroFillKeySuffixAndExt(alloc, prefixKey, prefixLength, suffixLength, extLength); } - static byte[] nextRangeKey(byte[] prefixKey, int prefixLength, int suffixLength, int extLength) { - byte[] nonIncremented = zeroFillKeySuffixAndExt(prefixKey, prefixLength, suffixLength, extLength); - return incrementPrefix(nonIncremented, prefixLength); + static ByteBuf nextRangeKey(ByteBufAllocator alloc, ByteBuf prefixKey, int prefixLength, int suffixLength, int extLength) { + try { + ByteBuf nonIncremented = zeroFillKeySuffixAndExt(alloc, prefixKey.retain(), prefixLength, suffixLength, extLength); + try { + return incrementPrefix(alloc, nonIncremented.retain(), prefixLength); + } finally { + nonIncremented.release(); + } + } finally { + prefixKey.release(); + } } - protected static byte[] zeroFillKeySuffixAndExt(byte[] prefixKey, int prefixLength, int suffixLength, int extLength) { - assert prefixKey.length == prefixLength; + protected static ByteBuf zeroFillKeySuffixAndExt(ByteBufAllocator alloc, ByteBuf prefixKey, int prefixLength, int suffixLength, int extLength) { + try { + assert prefixKey.readableBytes() == prefixLength; + assert suffixLength > 0; + assert extLength >= 0; + if (!prefixKey.isDirect()) { + throw new IllegalArgumentException("Prefix key must be a direct buffer"); + } + assert prefixKey.nioBuffer().isDirect(); + ByteBuf zeroSuffixAndExt = alloc.directBuffer(suffixLength + extLength, suffixLength + extLength); + try { + assert zeroSuffixAndExt.isDirect(); + assert zeroSuffixAndExt.nioBuffer().isDirect(); + zeroSuffixAndExt.writeZero(suffixLength + extLength); + ByteBuf result = LLUtils.directCompositeBuffer(alloc, prefixKey.retain(), zeroSuffixAndExt.retain()); + assert result.isDirect(); + assert result.nioBuffer().isDirect(); + return result; + } finally { + zeroSuffixAndExt.release(); + } + } finally { + prefixKey.release(); + } + } + + static ByteBuf firstRangeKey( + ByteBufAllocator alloc, + ByteBuf prefixKey, + ByteBuf suffixKey, + int prefixLength, + int suffixLength, + int extLength) { + return zeroFillKeyExt(alloc, prefixKey, suffixKey, prefixLength, suffixLength, extLength); + } + + static ByteBuf nextRangeKey( + ByteBufAllocator alloc, + ByteBuf prefixKey, + ByteBuf suffixKey, + int prefixLength, + int suffixLength, + int extLength) { + try { + ByteBuf nonIncremented = zeroFillKeyExt(alloc, + prefixKey.retain(), + suffixKey.retain(), + prefixLength, + suffixLength, + extLength + ); + try { + return incrementPrefix(alloc, nonIncremented.retain(), prefixLength + suffixLength); + } finally { + nonIncremented.release(); + } + } finally { + prefixKey.release(); + suffixKey.release(); + } + } + + protected static ByteBuf zeroFillKeyExt( + ByteBufAllocator alloc, + ByteBuf prefixKey, + ByteBuf suffixKey, + int prefixLength, + int 
suffixLength, + int extLength) { + assert prefixKey.readableBytes() == prefixLength; + assert suffixKey.readableBytes() == suffixLength; assert suffixLength > 0; assert extLength >= 0; - byte[] result = Arrays.copyOf(prefixKey, prefixLength + suffixLength + extLength); - Arrays.fill(result, prefixLength, result.length, (byte) 0); - return result; - } - - static byte[] firstRangeKey(byte[] prefixKey, - byte[] suffixKey, - int prefixLength, - int suffixLength, - int extLength) { - return zeroFillKeyExt(prefixKey, suffixKey, prefixLength, suffixLength, extLength); - } - - static byte[] nextRangeKey(byte[] prefixKey, - byte[] suffixKey, - int prefixLength, - int suffixLength, - int extLength) { - byte[] nonIncremented = zeroFillKeyExt(prefixKey, suffixKey, prefixLength, suffixLength, extLength); - return incrementPrefix(nonIncremented, prefixLength + suffixLength); - } - - protected static byte[] zeroFillKeyExt(byte[] prefixKey, - byte[] suffixKey, - int prefixLength, - int suffixLength, - int extLength) { - assert prefixKey.length == prefixLength; - assert suffixKey.length == suffixLength; - assert suffixLength > 0; - assert extLength >= 0; - byte[] result = Arrays.copyOf(prefixKey, prefixLength + suffixLength + extLength); - System.arraycopy(suffixKey, 0, result, prefixLength, suffixLength); - Arrays.fill(result, prefixLength + suffixLength, result.length, (byte) 0); + var result = LLUtils.directCompositeBuffer(alloc, prefixKey, suffixKey, alloc.buffer(extLength, extLength).writeZero(extLength)); + assert result.readableBytes() == prefixLength + suffixLength + extLength; return result; } @@ -104,41 +183,73 @@ public class DatabaseMapDictionaryDeep> implem */ @Deprecated public static DatabaseMapDictionaryDeep> simple(LLDictionary dictionary, - SerializerFixedBinaryLength keySerializer, + SerializerFixedBinaryLength keySerializer, SubStageGetterSingle subStageGetter) { return new DatabaseMapDictionaryDeep<>(dictionary, EMPTY_BYTES, keySerializer, subStageGetter, 0); } public static > DatabaseMapDictionaryDeep deepTail(LLDictionary dictionary, - SerializerFixedBinaryLength keySerializer, + SerializerFixedBinaryLength keySerializer, int keyExtLength, SubStageGetter subStageGetter) { return new DatabaseMapDictionaryDeep<>(dictionary, EMPTY_BYTES, keySerializer, subStageGetter, keyExtLength); } public static > DatabaseMapDictionaryDeep deepIntermediate(LLDictionary dictionary, - byte[] prefixKey, - SerializerFixedBinaryLength keySuffixSerializer, + ByteBuf prefixKey, + SerializerFixedBinaryLength keySuffixSerializer, SubStageGetter subStageGetter, int keyExtLength) { return new DatabaseMapDictionaryDeep<>(dictionary, prefixKey, keySuffixSerializer, subStageGetter, keyExtLength); } protected DatabaseMapDictionaryDeep(LLDictionary dictionary, - byte[] prefixKey, - SerializerFixedBinaryLength keySuffixSerializer, + ByteBuf prefixKey, + SerializerFixedBinaryLength keySuffixSerializer, SubStageGetter subStageGetter, int keyExtLength) { - this.dictionary = dictionary; - this.subStageGetter = subStageGetter; - this.keySuffixSerializer = keySuffixSerializer; - this.keyPrefix = prefixKey; - this.keySuffixLength = keySuffixSerializer.getSerializedBinaryLength(); - this.keyExtLength = keyExtLength; - byte[] firstKey = firstRangeKey(keyPrefix, keyPrefix.length, keySuffixLength, keyExtLength); - byte[] nextRangeKey = nextRangeKey(keyPrefix, keyPrefix.length, keySuffixLength, keyExtLength); - this.range = keyPrefix.length == 0 ? 
LLRange.all() : LLRange.of(firstKey, nextRangeKey); - assert subStageKeysConsistency(keyPrefix.length + keySuffixLength + keyExtLength); + try { + this.dictionary = dictionary; + this.alloc = dictionary.getAllocator(); + this.subStageGetter = subStageGetter; + this.keySuffixSerializer = keySuffixSerializer; + this.keyPrefix = wrappedUnmodifiableBuffer(prefixKey).retain(); + this.keyPrefixLength = keyPrefix.readableBytes(); + this.keySuffixLength = keySuffixSerializer.getSerializedBinaryLength(); + this.keyExtLength = keyExtLength; + if (!keyPrefix.isDirect()) { + throw new IllegalArgumentException("KeyPrefix must be a direct buffer"); + } + assert keyPrefix.isDirect(); + ByteBuf firstKey = wrappedUnmodifiableBuffer(firstRangeKey(alloc, + keyPrefix.retain(), + keyPrefixLength, + keySuffixLength, + keyExtLength + )); + ByteBuf nextRangeKey = wrappedUnmodifiableBuffer(nextRangeKey(alloc, + keyPrefix.retain(), + keyPrefixLength, + keySuffixLength, + keyExtLength + )); + try { + assert keyPrefixLength == 0 || !LLUtils.equals(firstKey, nextRangeKey); + assert firstKey.isDirect(); + assert nextRangeKey.isDirect(); + assert firstKey.nioBuffer().isDirect(); + assert nextRangeKey.nioBuffer().isDirect(); + this.range = keyPrefixLength == 0 ? LLRange.all() : LLRange.of(firstKey.retain(), nextRangeKey.retain()); + assert range == null || !range.hasMin() || range.getMin().isDirect(); + assert range == null || !range.hasMax() || range.getMax().isDirect(); + assert subStageKeysConsistency(keyPrefixLength + keySuffixLength + keyExtLength); + } finally { + firstKey.release(); + nextRangeKey.release(); + } + } finally { + prefixKey.release(); + } } @SuppressWarnings("unused") @@ -159,26 +270,33 @@ public class DatabaseMapDictionaryDeep> implem /** * Keep only suffix and ext */ - protected byte[] stripPrefix(byte[] key) { - return Arrays.copyOfRange(key, this.keyPrefix.length, key.length); + protected ByteBuf stripPrefix(ByteBuf key) { + return key.slice(this.keyPrefixLength, key.readableBytes() - this.keyPrefixLength); } /** * Remove ext from full key */ - protected byte[] removeExtFromFullKey(byte[] key) { - return Arrays.copyOf(key, keyPrefix.length + keySuffixLength); + protected ByteBuf removeExtFromFullKey(ByteBuf key) { + try { + return key.slice(key.readerIndex(), keyPrefixLength + keySuffixLength).retain(); + } finally { + key.release(); + } } /** * Add prefix to suffix */ - protected byte[] toKeyWithoutExt(byte[] suffixKey) { - assert suffixKey.length == keySuffixLength; - byte[] result = Arrays.copyOf(keyPrefix, keyPrefix.length + keySuffixLength); - System.arraycopy(suffixKey, 0, result, keyPrefix.length, keySuffixLength); - assert result.length == keyPrefix.length + keySuffixLength; - return result; + protected ByteBuf toKeyWithoutExt(ByteBuf suffixKey) { + try { + assert suffixKey.readableBytes() == keySuffixLength; + ByteBuf result = LLUtils.directCompositeBuffer(alloc, keyPrefix.retain(), suffixKey.retain()); + assert result.readableBytes() == keyPrefixLength + keySuffixLength; + return result; + } finally { + suffixKey.release(); + } } protected LLSnapshot resolveSnapshot(@Nullable CompositeSnapshot snapshot) { @@ -189,71 +307,89 @@ public class DatabaseMapDictionaryDeep> implem } } - protected LLRange toExtRange(byte[] keySuffix) { - byte[] first = firstRangeKey(keyPrefix, keySuffix, keyPrefix.length, keySuffixLength, keyExtLength); - byte[] end = nextRangeKey(keyPrefix, keySuffix, keyPrefix.length, keySuffixLength, keyExtLength); - return LLRange.of(first, end); + protected LLRange 
toExtRange(ByteBuf keySuffix) { + try { + ByteBuf first = firstRangeKey(alloc, + keyPrefix.retain(), + keySuffix.retain(), + keyPrefixLength, + keySuffixLength, + keyExtLength + ); + ByteBuf end = nextRangeKey(alloc, + keyPrefix.retain(), + keySuffix.retain(), + keyPrefixLength, + keySuffixLength, + keyExtLength + ); + return LLRange.of(first, end); + } finally { + keySuffix.release(); + } } @Override public Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { - return dictionary.sizeRange(resolveSnapshot(snapshot), range, fast); + return dictionary.sizeRange(resolveSnapshot(snapshot), range.retain(), fast); } @Override public Mono isEmpty(@Nullable CompositeSnapshot snapshot) { - return dictionary.isRangeEmpty(resolveSnapshot(snapshot), range); + return dictionary.isRangeEmpty(resolveSnapshot(snapshot), range.retain()); } @Override public Mono at(@Nullable CompositeSnapshot snapshot, T keySuffix) { - byte[] keySuffixData = serializeSuffix(keySuffix); - Flux keyFlux; + ByteBuf keySuffixData = serializeSuffix(keySuffix); + Flux keyFlux; if (LLLocalDictionary.DEBUG_PREFIXES_WHEN_ASSERTIONS_ARE_ENABLED && this.subStageGetter.needsDebuggingKeyFlux()) { - keyFlux = this.dictionary.getRangeKeys(resolveSnapshot(snapshot), toExtRange(keySuffixData)); + keyFlux = this.dictionary.getRangeKeys(resolveSnapshot(snapshot), toExtRange(keySuffixData.retain())); } else { keyFlux = Flux.empty(); } return this.subStageGetter - .subStage(dictionary, - snapshot, - toKeyWithoutExt(keySuffixData), - keyFlux - ); + .subStage(dictionary, snapshot, toKeyWithoutExt(keySuffixData.retain()), keyFlux) + .doFinally(s -> keySuffixData.release()); } @Override public Flux> getAllStages(@Nullable CompositeSnapshot snapshot) { if (LLLocalDictionary.DEBUG_PREFIXES_WHEN_ASSERTIONS_ARE_ENABLED && this.subStageGetter.needsDebuggingKeyFlux()) { return dictionary - .getRangeKeysGrouped(resolveSnapshot(snapshot), range, keyPrefix.length + keySuffixLength) + .getRangeKeysGrouped(resolveSnapshot(snapshot), range.retain(), keyPrefixLength + keySuffixLength) .flatMapSequential(rangeKeys -> { assert this.subStageGetter.isMultiKey() || rangeKeys.size() == 1; - byte[] groupKeyWithExt = rangeKeys.get(0); - byte[] groupKeyWithoutExt = removeExtFromFullKey(groupKeyWithExt); - byte[] groupSuffix = this.stripPrefix(groupKeyWithoutExt); - assert subStageKeysConsistency(groupKeyWithExt.length); + ByteBuf groupKeyWithExt = rangeKeys.get(0).retain(); + ByteBuf groupKeyWithoutExt = removeExtFromFullKey(groupKeyWithExt.retain()); + ByteBuf groupSuffix = this.stripPrefix(groupKeyWithoutExt.retain()); + assert subStageKeysConsistency(groupKeyWithExt.readableBytes()); return this.subStageGetter .subStage(dictionary, snapshot, groupKeyWithoutExt, Flux.fromIterable(rangeKeys) ) - .map(us -> Map.entry(this.deserializeSuffix(groupSuffix), us)); + .map(us -> Map.entry(this.deserializeSuffix(wrappedUnmodifiableBuffer(groupSuffix.retain())), us)) + .doFinally(s -> { + groupSuffix.release(); + groupKeyWithoutExt.release(); + groupKeyWithExt.release(); + }); }); } else { return dictionary - .getRangeKeyPrefixes(resolveSnapshot(snapshot), range, keyPrefix.length + keySuffixLength) + .getRangeKeyPrefixes(resolveSnapshot(snapshot), range, keyPrefixLength + keySuffixLength) .flatMapSequential(groupKeyWithoutExt -> { - byte[] groupSuffix = this.stripPrefix(groupKeyWithoutExt); - assert subStageKeysConsistency(groupKeyWithoutExt.length + keyExtLength); + ByteBuf groupSuffix = this.stripPrefix(groupKeyWithoutExt); + assert 
subStageKeysConsistency(groupKeyWithoutExt.readableBytes() + keyExtLength); return this.subStageGetter .subStage(dictionary, snapshot, groupKeyWithoutExt, Flux.empty() ) - .map(us -> Map.entry(this.deserializeSuffix(groupSuffix), us)); + .map(us -> Map.entry(this.deserializeSuffix(wrappedUnmodifiableBuffer(groupSuffix)), us)); }); } } @@ -261,10 +397,10 @@ public class DatabaseMapDictionaryDeep> implem private boolean subStageKeysConsistency(int totalKeyLength) { if (subStageGetter instanceof SubStageGetterMapDeep) { return totalKeyLength - == keyPrefix.length + keySuffixLength + ((SubStageGetterMapDeep) subStageGetter).getKeyBinaryLength(); + == keyPrefixLength + keySuffixLength + ((SubStageGetterMapDeep) subStageGetter).getKeyBinaryLength(); } else if (subStageGetter instanceof SubStageGetterMap) { return totalKeyLength - == keyPrefix.length + keySuffixLength + ((SubStageGetterMap) subStageGetter).getKeyBinaryLength(); + == keyPrefixLength + keySuffixLength + ((SubStageGetterMap) subStageGetter).getKeyBinaryLength(); } else { return true; } @@ -287,25 +423,37 @@ public class DatabaseMapDictionaryDeep> implem .clear(); } else if (range.isSingle()) { return dictionary - .remove(range.getSingle(), LLDictionaryResultType.VOID) + .remove(range.getSingle().retain(), LLDictionaryResultType.VOID) .then(); } else { return dictionary - .setRange(range, Flux.empty(), false) + .setRange(range.retain(), Flux.empty(), false) .then(); } } //todo: temporary wrapper. convert the whole class to buffers - protected T deserializeSuffix(byte[] keySuffix) { - assert suffixKeyConsistency(keySuffix.length); + protected T deserializeSuffix(ByteBuf keySuffix) { + assert suffixKeyConsistency(keySuffix.readableBytes()); return keySuffixSerializer.deserialize(keySuffix); } //todo: temporary wrapper. 
convert the whole class to buffers - protected byte[] serializeSuffix(T keySuffix) { - byte[] suffixData = keySuffixSerializer.serialize(keySuffix); - assert suffixKeyConsistency(suffixData.length); + protected ByteBuf serializeSuffix(T keySuffix) { + ByteBuf suffixData = keySuffixSerializer.serialize(keySuffix); + assert suffixKeyConsistency(suffixData.readableBytes()); return suffixData; } + + @Override + protected void finalize() throws Throwable { + super.finalize(); + range.release(); + } + + @Override + public void release() { + this.range.release(); + this.keyPrefix.release(); + } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java index 7968dc7..51b2d6a 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java @@ -2,18 +2,18 @@ package it.cavallium.dbengine.database.collections; import static it.cavallium.dbengine.database.collections.DatabaseMapDictionaryDeep.EMPTY_BYTES; -import com.google.common.primitives.Ints; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; +import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.collections.Joiner.ValueGetter; import it.cavallium.dbengine.database.collections.JoinerBlocking.ValueGetterBlocking; import it.cavallium.dbengine.database.serialization.Serializer; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; -import java.util.Arrays; import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; -import java.util.Optional; import java.util.function.Function; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -23,19 +23,21 @@ import reactor.core.publisher.Mono; @SuppressWarnings("unused") public class DatabaseMapDictionaryHashed implements DatabaseStageMap> { + private final ByteBufAllocator alloc; private final DatabaseMapDictionary> subDictionary; private final Function keySuffixHashFunction; private final Function> valueMapper; protected DatabaseMapDictionaryHashed(LLDictionary dictionary, - byte[] prefixKey, - Serializer keySuffixSerializer, - Serializer valueSerializer, + ByteBuf prefixKey, + Serializer keySuffixSerializer, + Serializer valueSerializer, Function keySuffixHashFunction, - SerializerFixedBinaryLength keySuffixHashSerializer) { + SerializerFixedBinaryLength keySuffixHashSerializer) { ValueWithHashSerializer valueWithHashSerializer = new ValueWithHashSerializer<>(keySuffixSerializer, valueSerializer ); + this.alloc = dictionary.getAllocator(); this.valueMapper = ValueMapper::new; this.subDictionary = DatabaseMapDictionary.tail(dictionary, prefixKey, @@ -44,40 +46,35 @@ public class DatabaseMapDictionaryHashed implements DatabaseStageMap implements Serializer, byte[]> { + private class ValueWithHashSerializer implements Serializer, ByteBuf> { - private final Serializer keySuffixSerializer; - private final Serializer valueSerializer; + private final Serializer keySuffixSerializer; + private final Serializer valueSerializer; - private ValueWithHashSerializer(Serializer keySuffixSerializer, Serializer valueSerializer) { + private ValueWithHashSerializer(Serializer keySuffixSerializer, Serializer valueSerializer) 
{ this.keySuffixSerializer = keySuffixSerializer; this.valueSerializer = valueSerializer; } @Override - public @NotNull Entry deserialize(byte @NotNull [] serialized) { - int keySuffixLength = Ints.fromBytes(serialized[0], serialized[1], serialized[2], serialized[3]); - T keySuffix = keySuffixSerializer.deserialize(Arrays.copyOfRange(serialized, - Integer.BYTES, - Integer.BYTES + keySuffixLength - )); - U value = valueSerializer.deserialize(Arrays.copyOfRange(serialized, - Integer.BYTES + keySuffixLength, - serialized.length - )); - return Map.entry(keySuffix, value); + public @NotNull Entry deserialize(@NotNull ByteBuf serialized) { + try { + int keySuffixLength = serialized.readInt(); + T keySuffix = keySuffixSerializer.deserialize(serialized.retainedSlice(serialized.readerIndex(), keySuffixLength)); + // retainedSlice() does not advance the reader index, so skip the key suffix before reading the value + serialized.skipBytes(keySuffixLength); + U value = valueSerializer.deserialize(serialized.retain()); + return Map.entry(keySuffix, value); + } finally { + serialized.release(); + } } @Override - public byte @NotNull [] serialize(@NotNull Entry deserialized) { - byte[] keySuffix = keySuffixSerializer.serialize(deserialized.getKey()); - byte[] value = valueSerializer.serialize(deserialized.getValue()); - byte[] result = new byte[Integer.BYTES + keySuffix.length + value.length]; - byte[] keySuffixLen = Ints.toByteArray(keySuffix.length); - System.arraycopy(keySuffixLen, 0, result, 0, Integer.BYTES); - System.arraycopy(keySuffix, 0, result, Integer.BYTES, keySuffix.length); - System.arraycopy(value, 0, result, Integer.BYTES + keySuffix.length, value.length); - return result; + public @NotNull ByteBuf serialize(@NotNull Entry deserialized) { + ByteBuf keySuffix = keySuffixSerializer.serialize(deserialized.getKey()); + ByteBuf value = valueSerializer.serialize(deserialized.getValue()); + ByteBuf keySuffixLen = alloc.buffer(Integer.BYTES, Integer.BYTES); + keySuffixLen.writeInt(keySuffix.readableBytes()); + return LLUtils.directCompositeBuffer(alloc, keySuffixLen, keySuffix, value); } } @@ -101,10 +98,10 @@ public class DatabaseMapDictionaryHashed implements DatabaseStageMap DatabaseMapDictionaryHashed simple(LLDictionary dictionary, - Serializer keySerializer, - Serializer valueSerializer, + Serializer keySerializer, + Serializer valueSerializer, Function keyHashFunction, - SerializerFixedBinaryLength keyHashSerializer) { + SerializerFixedBinaryLength keyHashSerializer) { return new DatabaseMapDictionaryHashed<>(dictionary, EMPTY_BYTES, keySerializer, @@ -115,11 +112,11 @@ public class DatabaseMapDictionaryHashed implements DatabaseStageMap DatabaseMapDictionaryHashed tail(LLDictionary dictionary, - byte[] prefixKey, - Serializer keySuffixSerializer, - Serializer valueSerializer, + ByteBuf prefixKey, + Serializer keySuffixSerializer, + Serializer valueSerializer, Function keySuffixHashFunction, - SerializerFixedBinaryLength keySuffixHashSerializer) { + SerializerFixedBinaryLength keySuffixHashSerializer) { return new DatabaseMapDictionaryHashed<>(dictionary, prefixKey, keySuffixSerializer, @@ -157,13 +154,20 @@ public class DatabaseMapDictionaryHashed implements DatabaseStageMap setAndGetStatus(Map map) { - return Mono.fromSupplier(() -> this.serializeMap(map)).flatMap(subDictionary::setAndGetStatus); + public Mono setAndGetChanged(Map map) { + return Mono.fromSupplier(() -> this.serializeMap(map)).flatMap(subDictionary::setAndGetChanged).single(); } @Override - public Mono update(Function>, Optional>> updater) { - return subDictionary.update(old -> updater.apply(old.map(this::deserializeMap)).map(this::serializeMap)); + public Mono
update(Function<@Nullable Map, @Nullable Map> updater) { + return subDictionary.update(old -> { + var result = updater.apply(old == null ? null : this.deserializeMap(old)); + if (result == null) { + return null; + } else { + return this.serializeMap(result); + } + }); } @Override @@ -186,6 +190,11 @@ public class DatabaseMapDictionaryHashed implements DatabaseStageMap> at(@Nullable CompositeSnapshot snapshot, T key) { return subDictionary @@ -218,18 +227,27 @@ public class DatabaseMapDictionaryHashed implements DatabaseStageMap updateValue(T key, boolean existsAlmostCertainly, Function, Optional> updater) { - return subDictionary.updateValue(keySuffixHashFunction.apply(key), - existsAlmostCertainly, - old -> updater.apply(old.map(Entry::getValue)).map(newV -> Map.entry(key, newV)) - ); + public Mono updateValue(T key, boolean existsAlmostCertainly, Function<@Nullable U, @Nullable U> updater) { + return subDictionary.updateValue(keySuffixHashFunction.apply(key), existsAlmostCertainly, old -> { + var result = updater.apply(old == null ? null : old.getValue()); + if (result == null) { + return null; + } else { + return Map.entry(key, result); + } + }); } @Override - public Mono updateValue(T key, Function, Optional> updater) { - return subDictionary.updateValue(keySuffixHashFunction.apply(key), - old -> updater.apply(old.map(Entry::getValue)).map(newV -> Map.entry(key, newV)) - ); + public Mono updateValue(T key, Function<@Nullable U, @Nullable U> updater) { + return subDictionary.updateValue(keySuffixHashFunction.apply(key), old -> { + var result = updater.apply(old == null ? null : old.getValue()); + if (result == null) { + return null; + } else { + return Map.entry(key, result); + } + }); } @Override @@ -346,10 +364,16 @@ public class DatabaseMapDictionaryHashed implements DatabaseStageMap update(Function>, Optional>> updater, + public Mono update(Function<@Nullable Map, @Nullable Map> updater, boolean existsAlmostCertainly) { - return subDictionary - .update(item -> updater.apply(item.map(this::deserializeMap)).map(this::serializeMap), existsAlmostCertainly); + return subDictionary.update(item -> { + var result = updater.apply(item == null ? 
null : this.deserializeMap(item)); + if (result == null) { + return null; + } else { + return this.serializeMap(result); + } + }, existsAlmostCertainly); } @Override diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java index c86d55d..d4ea412 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java @@ -1,5 +1,6 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; @@ -14,19 +15,19 @@ import reactor.core.publisher.Mono; public class DatabaseSetDictionary extends DatabaseMapDictionaryDeep> { protected DatabaseSetDictionary(LLDictionary dictionary, - byte[] prefixKey, - SerializerFixedBinaryLength keySuffixSerializer) { + ByteBuf prefixKey, + SerializerFixedBinaryLength keySuffixSerializer) { super(dictionary, prefixKey, keySuffixSerializer, DatabaseEmpty.createSubStageGetter(), 0); } public static DatabaseSetDictionary simple(LLDictionary dictionary, - SerializerFixedBinaryLength keySerializer) { + SerializerFixedBinaryLength keySerializer) { return new DatabaseSetDictionary<>(dictionary, EMPTY_BYTES, keySerializer); } public static DatabaseSetDictionary tail(LLDictionary dictionary, - byte[] prefixKey, - SerializerFixedBinaryLength keySuffixSerializer) { + ByteBuf prefixKey, + SerializerFixedBinaryLength keySuffixSerializer) { return new DatabaseSetDictionary<>(dictionary, prefixKey, keySuffixSerializer); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java index 71f4c05..98a8ed6 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java @@ -2,6 +2,7 @@ package it.cavallium.dbengine.database.collections; import static it.cavallium.dbengine.database.collections.DatabaseMapDictionaryDeep.EMPTY_BYTES; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; @@ -18,10 +19,10 @@ import reactor.core.publisher.Mono; public class DatabaseSetDictionaryHashed extends DatabaseMapDictionaryHashed { protected DatabaseSetDictionaryHashed(LLDictionary dictionary, - byte[] prefixKey, - Serializer keySuffixSerializer, + ByteBuf prefixKey, + Serializer keySuffixSerializer, Function keySuffixHashFunction, - SerializerFixedBinaryLength keySuffixHashSerializer) { + SerializerFixedBinaryLength keySuffixHashSerializer) { super(dictionary, prefixKey, keySuffixSerializer, @@ -32,9 +33,9 @@ public class DatabaseSetDictionaryHashed extends DatabaseMapDictionaryHas } public static DatabaseSetDictionaryHashed simple(LLDictionary dictionary, - Serializer keySerializer, + Serializer keySerializer, Function keyHashFunction, - SerializerFixedBinaryLength keyHashSerializer) { + SerializerFixedBinaryLength keyHashSerializer) { return new DatabaseSetDictionaryHashed<>(dictionary, EMPTY_BYTES, keySerializer, @@ -44,10 +45,10 @@ 
public class DatabaseSetDictionaryHashed extends DatabaseMapDictionaryHas } public static DatabaseSetDictionaryHashed tail(LLDictionary dictionary, - byte[] prefixKey, - Serializer keySuffixSerializer, + ByteBuf prefixKey, + Serializer keySuffixSerializer, Function keyHashFunction, - SerializerFixedBinaryLength keyHashSerializer) { + SerializerFixedBinaryLength keyHashSerializer) { return new DatabaseSetDictionaryHashed<>(dictionary, prefixKey, keySuffixSerializer, diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingle.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingle.java index 1c7c12a..b220fd6 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingle.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingle.java @@ -1,5 +1,6 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.LLDictionaryResultType; @@ -10,15 +11,19 @@ import java.util.Optional; import java.util.function.Function; import org.jetbrains.annotations.Nullable; import reactor.core.publisher.Mono; +import static io.netty.buffer.Unpooled.*; public class DatabaseSingle implements DatabaseStageEntry { private final LLDictionary dictionary; - private final byte[] key; - private final Serializer serializer; + private final ByteBuf key; + private final Serializer serializer; - public DatabaseSingle(LLDictionary dictionary, byte[] key, Serializer serializer) { + public DatabaseSingle(LLDictionary dictionary, ByteBuf key, Serializer serializer) { this.dictionary = dictionary; + if (!key.isDirect()) { + throw new IllegalArgumentException("Key must be direct"); + } this.key = key; this.serializer = serializer; } @@ -33,47 +38,60 @@ public class DatabaseSingle implements DatabaseStageEntry { @Override public Mono get(@Nullable CompositeSnapshot snapshot, boolean existsAlmostCertainly) { - return dictionary.get(resolveSnapshot(snapshot), key, existsAlmostCertainly).map(this::deserialize); + return dictionary.get(resolveSnapshot(snapshot), key.retain(), existsAlmostCertainly).map(this::deserialize); } @Override public Mono setAndGetPrevious(U value) { - return dictionary.put(key, serialize(value), LLDictionaryResultType.PREVIOUS_VALUE).map(this::deserialize); + ByteBuf valueByteBuf = serialize(value); + return dictionary + .put(key.retain(), valueByteBuf.retain(), LLDictionaryResultType.PREVIOUS_VALUE) + .map(this::deserialize) + .doFinally(s -> valueByteBuf.release()); } @Override - public Mono update(Function, Optional> updater, boolean existsAlmostCertainly) { - return dictionary.update(key, - (oldValueSer) -> updater.apply(oldValueSer.map(this::deserialize)).map(this::serialize), - existsAlmostCertainly - ); + public Mono update(Function<@Nullable U, @Nullable U> updater, boolean existsAlmostCertainly) { + return dictionary.update(key.retain(), (oldValueSer) -> { + var result = updater.apply(oldValueSer == null ? 
null : this.deserialize(oldValueSer)); + if (result == null) { + return null; + } else { + return this.serialize(result); + } + }, existsAlmostCertainly); } @Override public Mono clearAndGetPrevious() { - return dictionary.remove(key, LLDictionaryResultType.PREVIOUS_VALUE).map(this::deserialize); + return dictionary.remove(key.retain(), LLDictionaryResultType.PREVIOUS_VALUE).map(this::deserialize); } @Override public Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { return dictionary - .isRangeEmpty(resolveSnapshot(snapshot), LLRange.single(key)) + .isRangeEmpty(resolveSnapshot(snapshot), LLRange.single(key.retain())) .map(empty -> empty ? 0L : 1L); } @Override public Mono isEmpty(@Nullable CompositeSnapshot snapshot) { return dictionary - .isRangeEmpty(resolveSnapshot(snapshot), LLRange.single(key)); + .isRangeEmpty(resolveSnapshot(snapshot), LLRange.single(key.retain())); } //todo: temporary wrapper. convert the whole class to buffers - private U deserialize(byte[] bytes) { + private U deserialize(ByteBuf bytes) { return serializer.deserialize(bytes); } //todo: temporary wrapper. convert the whole class to buffers - private byte[] serialize(U bytes) { + private ByteBuf serialize(U bytes) { return serializer.serialize(bytes); } + + @Override + public void release() { + key.release(); + } } \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java index e2d88f1..b58f1fc 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java @@ -2,7 +2,6 @@ package it.cavallium.dbengine.database.collections; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.serialization.Serializer; -import java.util.Optional; import java.util.function.Function; import org.jetbrains.annotations.Nullable; import reactor.core.publisher.Mono; @@ -39,14 +38,20 @@ public class DatabaseSingleMapped implements DatabaseStageEntry { } @Override - public Mono setAndGetStatus(A value) { - return serializedSingle.setAndGetStatus(serialize(value)); + public Mono setAndGetChanged(A value) { + return serializedSingle.setAndGetChanged(serialize(value)).single(); } @Override - public Mono update(Function, Optional> updater, boolean existsAlmostCertainly) { - return serializedSingle - .update(oldValue -> updater.apply(oldValue.map(this::deserialize)).map(this::serialize), existsAlmostCertainly); + public Mono update(Function<@Nullable A, @Nullable A> updater, boolean existsAlmostCertainly) { + return serializedSingle.update(oldValue -> { + var result = updater.apply(oldValue == null ? null : this.deserialize(oldValue)); + if (result == null) { + return null; + } else { + return this.serialize(result); + } + }, existsAlmostCertainly); } @Override @@ -84,6 +89,11 @@ public class DatabaseSingleMapped implements DatabaseStageEntry { return this; } + @Override + public void release() { + serializedSingle.release(); + } + //todo: temporary wrapper. 
convert the whole class to buffers private A deserialize(B bytes) { return serializer.deserialize(bytes); diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStage.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStage.java index 5c69eb2..b2cb35f 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStage.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStage.java @@ -2,7 +2,6 @@ package it.cavallium.dbengine.database.collections; import it.cavallium.dbengine.client.CompositeSnapshot; import java.util.Objects; -import java.util.Optional; import java.util.function.Function; import org.jetbrains.annotations.Nullable; import reactor.core.publisher.Mono; @@ -26,18 +25,18 @@ public interface DatabaseStage extends DatabaseStageWithEntry { } default Mono set(T value) { - return setAndGetStatus(value).then(); + return setAndGetChanged(value).then(); } Mono setAndGetPrevious(T value); - default Mono setAndGetStatus(T value) { - return setAndGetPrevious(value).map(oldValue -> !Objects.equals(oldValue, value)).defaultIfEmpty(false); + default Mono setAndGetChanged(T value) { + return setAndGetPrevious(value).map(oldValue -> !Objects.equals(oldValue, value)).defaultIfEmpty(value != null); } - Mono update(Function, Optional> updater, boolean existsAlmostCertainly); + Mono update(Function<@Nullable T, @Nullable T> updater, boolean existsAlmostCertainly); - default Mono update(Function, Optional> updater) { + default Mono update(Function<@Nullable T, @Nullable T> updater) { return update(updater, false); } @@ -51,6 +50,8 @@ public interface DatabaseStage extends DatabaseStageWithEntry { return clearAndGetPrevious().map(Objects::nonNull).defaultIfEmpty(false); } + void release(); + default Mono close() { return Mono.empty(); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageMap.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageMap.java index c324f33..e196240 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageMap.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageMap.java @@ -6,7 +6,6 @@ import it.cavallium.dbengine.database.collections.JoinerBlocking.ValueGetterBloc import java.util.HashMap; import java.util.Map; import java.util.Map.Entry; -import java.util.Optional; import java.util.function.Function; import org.jetbrains.annotations.Nullable; import reactor.core.publisher.Flux; @@ -18,7 +17,7 @@ public interface DatabaseStageMap> extends Dat Mono at(@Nullable CompositeSnapshot snapshot, T key); default Mono getValue(@Nullable CompositeSnapshot snapshot, T key, boolean existsAlmostCertainly) { - return this.at(snapshot, key).flatMap(v -> v.get(snapshot, existsAlmostCertainly)); + return this.at(snapshot, key).flatMap(v -> v.get(snapshot, existsAlmostCertainly).doFinally(s -> v.release())); } default Mono getValue(@Nullable CompositeSnapshot snapshot, T key) { @@ -30,23 +29,29 @@ public interface DatabaseStageMap> extends Dat } default Mono putValue(T key, U value) { - return at(null, key).single().flatMap(v -> v.set(value)); + return at(null, key).single().flatMap(v -> v.set(value).doFinally(s -> v.release())); } - default Mono updateValue(T key, boolean existsAlmostCertainly, Function, Optional> updater) { - return at(null, key).single().flatMap(v -> v.update(updater, existsAlmostCertainly)); + default Mono updateValue(T key, boolean existsAlmostCertainly, Function<@Nullable U, 
@Nullable U> updater) { + return at(null, key).single().flatMap(v -> v.update(updater, existsAlmostCertainly).doFinally(s -> v.release())); } - default Mono updateValue(T key, Function, Optional> updater) { + default Mono updateValue(T key, Function<@Nullable U, @Nullable U> updater) { return updateValue(key, false, updater); } default Mono putValueAndGetPrevious(T key, U value) { - return at(null, key).single().flatMap(v -> v.setAndGetPrevious(value)); + return at(null, key).single().flatMap(v -> v.setAndGetPrevious(value).doFinally(s -> v.release())); } + /** + * Associates {@code value} with {@code key}. + * + * @param key the entry key + * @param value the value to store + * @return true if the stored value changed (a previously missing or different value), false otherwise. + */ default Mono putValueAndGetStatus(T key, U value) { - return at(null, key).single().flatMap(v -> v.setAndGetStatus(value)); + return at(null, key).single().flatMap(v -> v.setAndGetChanged(value).doFinally(s -> v.release())).single(); } default Mono remove(T key) { @@ -54,7 +59,7 @@ public interface DatabaseStageMap> extends Dat } default Mono removeAndGetPrevious(T key) { - return at(null, key).flatMap(DatabaseStage::clearAndGetPrevious); + return at(null, key).flatMap(v -> v.clearAndGetPrevious().doFinally(s -> v.release())); } default Mono removeAndGetStatus(T key) { @@ -106,7 +111,7 @@ public interface DatabaseStageMap> extends Dat .flatMap(entriesReplacer) .flatMap(replacedEntry -> this .at(null, replacedEntry.getKey()) - .map(entry -> entry.set(replacedEntry.getValue()))) + .flatMap(v -> v.set(replacedEntry.getValue()).doFinally(s -> v.release()))) .then(); } } @@ -126,15 +131,23 @@ public interface DatabaseStageMap> extends Dat } @Override - default Mono update(Function>, Optional>> updater, boolean existsAlmostCertainly) { + default Mono update(Function<@Nullable Map, @Nullable Map> updater, boolean existsAlmostCertainly) { return this .getAllValues(null) .collectMap(Entry::getKey, Entry::getValue, HashMap::new) .single() - .map(v -> v.isEmpty() ? Optional.<Map<T, U>>empty() : Optional.of(v)) - .map(updater) - .filter(Optional::isPresent) - .map(Optional::get) + .<Map<T, U>>handle((v, sink) -> { + if (v == null || v.isEmpty()) { + sink.complete(); + } else { + var result = updater.apply(v); + if (result == null) { + sink.complete(); + } else { + sink.next(result); + } + } + }) .flatMap(values -> this.setAllValues(Flux.fromIterable(values.entrySet()))) //todo: can be optimized by calculating the correct return value .thenReturn(true); diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java index 18471d0..d5eb95f 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java @@ -1,5 +1,6 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import org.jetbrains.annotations.Nullable; @@ -10,8 +11,8 @@ public interface SubStageGetter> { Mono subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - byte[] prefixKey, - Flux debuggingKeyFlux); + ByteBuf prefixKey, + Flux debuggingKeyFlux); boolean isMultiKey(); diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java index c261674..8813753 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java @@ -1,5 +1,6 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.serialization.Serializer; @@ -23,15 +24,15 @@ public class SubStageGetterHashMap implements assertsEnabled = assertsEnabledTmp; } - private final Serializer keySerializer; - private final Serializer valueSerializer; + private final Serializer keySerializer; + private final Serializer valueSerializer; private final Function keyHashFunction; - private final SerializerFixedBinaryLength keyHashSerializer; + private final SerializerFixedBinaryLength keyHashSerializer; - public SubStageGetterHashMap(Serializer keySerializer, - Serializer valueSerializer, + public SubStageGetterHashMap(Serializer keySerializer, + Serializer valueSerializer, Function keyHashFunction, - SerializerFixedBinaryLength keyHashSerializer) { + SerializerFixedBinaryLength keyHashSerializer) { this.keySerializer = keySerializer; this.valueSerializer = valueSerializer; this.keyHashFunction = keyHashFunction; @@ -41,8 +42,8 @@ public class SubStageGetterHashMap implements @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - byte[] prefixKey, - Flux debuggingKeyFlux) { + ByteBuf prefixKey, + Flux debuggingKeyFlux) { Mono> result = Mono.just(DatabaseMapDictionaryHashed.tail(dictionary, prefixKey, keySerializer, @@ -67,9 +68,9 @@ public class SubStageGetterHashMap implements return assertsEnabled; } - private Mono checkKeyFluxConsistency(byte[] prefixKey, Flux keyFlux) { + private Mono checkKeyFluxConsistency(ByteBuf prefixKey, Flux keyFlux) { return keyFlux.doOnNext(key -> { - assert key.length == prefixKey.length + getKeyHashBinaryLength(); +
assert key.readableBytes() == prefixKey.readableBytes() + getKeyHashBinaryLength(); }).then(); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java index f3fbd5b..9558aeb 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java @@ -1,5 +1,6 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; @@ -24,13 +25,13 @@ public class SubStageGetterHashSet implements assertsEnabled = assertsEnabledTmp; } - private final Serializer keySerializer; + private final Serializer keySerializer; private final Function keyHashFunction; - private final SerializerFixedBinaryLength keyHashSerializer; + private final SerializerFixedBinaryLength keyHashSerializer; - public SubStageGetterHashSet(Serializer keySerializer, + public SubStageGetterHashSet(Serializer keySerializer, Function keyHashFunction, - SerializerFixedBinaryLength keyHashSerializer) { + SerializerFixedBinaryLength keyHashSerializer) { this.keySerializer = keySerializer; this.keyHashFunction = keyHashFunction; this.keyHashSerializer = keyHashSerializer; @@ -39,8 +40,8 @@ public class SubStageGetterHashSet implements @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - byte[] prefixKey, - Flux debuggingKeyFlux) { + ByteBuf prefixKey, + Flux debuggingKeyFlux) { Mono> result = Mono.just(DatabaseSetDictionaryHashed.tail(dictionary, prefixKey, keySerializer, @@ -64,9 +65,9 @@ public class SubStageGetterHashSet implements return assertsEnabled; } - private Mono checkKeyFluxConsistency(byte[] prefixKey, Flux keyFlux) { + private Mono checkKeyFluxConsistency(ByteBuf prefixKey, Flux keyFlux) { return keyFlux.doOnNext(key -> { - assert key.length == prefixKey.length + getKeyHashBinaryLength(); + assert key.readableBytes() == prefixKey.readableBytes() + getKeyHashBinaryLength(); }).then(); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java index f8458e8..07e19af 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java @@ -1,5 +1,6 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.serialization.Serializer; @@ -20,11 +21,11 @@ public class SubStageGetterMap implements SubStageGetter, Databa assertsEnabled = assertsEnabledTmp; } - private final SerializerFixedBinaryLength keySerializer; - private final Serializer valueSerializer; + private final SerializerFixedBinaryLength keySerializer; + private final Serializer valueSerializer; - public SubStageGetterMap(SerializerFixedBinaryLength keySerializer, - Serializer valueSerializer) { + public SubStageGetterMap(SerializerFixedBinaryLength keySerializer, + Serializer valueSerializer) { this.keySerializer = keySerializer; this.valueSerializer = valueSerializer; } @@ -32,8 +33,8 @@ 
public class SubStageGetterMap implements SubStageGetter, Databa @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - byte[] prefixKey, - Flux debuggingKeyFlux) { + ByteBuf prefixKey, + Flux debuggingKeyFlux) { Mono> result = Mono.just(DatabaseMapDictionary.tail(dictionary, prefixKey, keySerializer, valueSerializer )); @@ -54,9 +55,9 @@ public class SubStageGetterMap implements SubStageGetter, Databa return assertsEnabled; } - private Mono checkKeyFluxConsistency(byte[] prefixKey, Flux keyFlux) { + private Mono checkKeyFluxConsistency(ByteBuf prefixKey, Flux keyFlux) { return keyFlux.doOnNext(key -> { - assert key.length == prefixKey.length + getKeyBinaryLength(); + assert key.readableBytes() == prefixKey.readableBytes() + getKeyBinaryLength(); }).then(); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java index 0c161f1..4811fd1 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java @@ -1,5 +1,6 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; @@ -20,11 +21,11 @@ public class SubStageGetterMapDeep> implements } private final SubStageGetter subStageGetter; - private final SerializerFixedBinaryLength keySerializer; + private final SerializerFixedBinaryLength keySerializer; private final int keyExtLength; public SubStageGetterMapDeep(SubStageGetter subStageGetter, - SerializerFixedBinaryLength keySerializer, + SerializerFixedBinaryLength keySerializer, int keyExtLength) { this.subStageGetter = subStageGetter; this.keySerializer = keySerializer; @@ -46,8 +47,8 @@ public class SubStageGetterMapDeep> implements @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - byte[] prefixKey, - Flux debuggingKeyFlux) { + ByteBuf prefixKey, + Flux debuggingKeyFlux) { Mono> result = Mono.just(DatabaseMapDictionaryDeep.deepIntermediate(dictionary, prefixKey, keySerializer, @@ -71,9 +72,9 @@ public class SubStageGetterMapDeep> implements return assertsEnabled; } - private Mono checkKeyFluxConsistency(byte[] prefixKey, Flux keyFlux) { + private Mono checkKeyFluxConsistency(ByteBuf prefixKey, Flux keyFlux) { return keyFlux.doOnNext(key -> { - assert key.length == prefixKey.length + getKeyBinaryLength(); + assert key.readableBytes() == prefixKey.readableBytes() + getKeyBinaryLength(); }).then(); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java index 0c2bd0f..c3168c6 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java @@ -1,5 +1,6 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; @@ -20,22 +21,27 @@ public class SubStageGetterSet implements SubStageGetter, Dat assertsEnabled = 
assertsEnabledTmp; } - private final SerializerFixedBinaryLength keySerializer; + private final SerializerFixedBinaryLength keySerializer; - public SubStageGetterSet(SerializerFixedBinaryLength keySerializer) { + public SubStageGetterSet(SerializerFixedBinaryLength keySerializer) { this.keySerializer = keySerializer; } @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - byte[] prefixKey, - Flux debuggingKeyFlux) { - Mono> result = Mono.just(DatabaseSetDictionary.tail(dictionary, prefixKey, keySerializer)); - if (assertsEnabled) { - return checkKeyFluxConsistency(prefixKey, debuggingKeyFlux).then(result); - } else { - return result; + ByteBuf prefixKey, + Flux debuggingKeyFlux) { + try { + Mono> result = Mono + .fromSupplier(() -> DatabaseSetDictionary.tail(dictionary, prefixKey.retain(), keySerializer)); + if (assertsEnabled) { + return checkKeyFluxConsistency(prefixKey.retain(), debuggingKeyFlux).then(result); + } else { + return result; + } + } finally { + prefixKey.release(); } } @@ -49,10 +55,10 @@ public class SubStageGetterSet implements SubStageGetter, Dat return assertsEnabled; } - private Mono checkKeyFluxConsistency(byte[] prefixKey, Flux keyFlux) { + private Mono checkKeyFluxConsistency(ByteBuf prefixKey, Flux keyFlux) { return keyFlux.doOnNext(key -> { - assert key.length == prefixKey.length + getKeyBinaryLength(); - }).then(); + assert key.readableBytes() == prefixKey.readableBytes() + getKeyBinaryLength(); + }).doFinally(s -> prefixKey.release()).then(); } public int getKeyBinaryLength() { diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java index e199f40..116c688 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java @@ -1,7 +1,9 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBuf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; +import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.serialization.Serializer; import java.util.Arrays; import org.jetbrains.annotations.Nullable; @@ -19,22 +21,22 @@ public class SubStageGetterSingle implements SubStageGetter serializer; + private final Serializer serializer; - public SubStageGetterSingle(Serializer serializer) { + public SubStageGetterSingle(Serializer serializer) { this.serializer = serializer; } @Override public Mono> subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - byte[] keyPrefix, - Flux debuggingKeyFlux) { + ByteBuf keyPrefix, + Flux debuggingKeyFlux) { return debuggingKeyFlux .singleOrEmpty() .flatMap(key -> Mono .>fromCallable(() -> { - if (!Arrays.equals(keyPrefix, key)) { + if (!LLUtils.equals(keyPrefix, key)) { throw new IndexOutOfBoundsException("Found more than one element!"); } return null; diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java index 6fcc4a3..fe340d7 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java @@ -1,8 +1,9 @@ package it.cavallium.dbengine.database.collections; +import 
io.netty.buffer.ByteBuf; import it.cavallium.dbengine.database.serialization.Serializer; -public class SubStageGetterSingleBytes extends SubStageGetterSingle { +public class SubStageGetterSingleBytes extends SubStageGetterSingle { public SubStageGetterSingleBytes() { super(Serializer.noop()); diff --git a/src/main/java/it/cavallium/dbengine/database/disk/IterateBound.java b/src/main/java/it/cavallium/dbengine/database/disk/IterateBound.java new file mode 100644 index 0000000..5daac4a --- /dev/null +++ b/src/main/java/it/cavallium/dbengine/database/disk/IterateBound.java @@ -0,0 +1,6 @@ +package it.cavallium.dbengine.database.disk; + +public enum IterateBound { + LOWER, + UPPER +} diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java index 6bd38ee..33edde4 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java @@ -36,14 +36,18 @@ public class LLLocalDatabaseConnection implements LLDatabaseConnection { } @Override - public Mono getDatabase(String name, List columns, boolean lowMemory) { + public Mono getDatabase(String name, + List columns, + boolean lowMemory, + boolean inMemory) { return Mono .fromCallable(() -> new LLLocalKeyValueDatabase(name, basePath.resolve("database_" + name), columns, new LinkedList<>(), crashIfWalError, - lowMemory + lowMemory, + inMemory )) .subscribeOn(Schedulers.boundedElastic()); } @@ -55,7 +59,8 @@ public class LLLocalDatabaseConnection implements LLDatabaseConnection { TextFieldsSimilarity textFieldsSimilarity, Duration queryRefreshDebounceTime, Duration commitDebounceTime, - boolean lowMemory) { + boolean lowMemory, + boolean inMemory) { return Mono .fromCallable(() -> { if (instancesCount != 1) { @@ -66,7 +71,8 @@ public class LLLocalDatabaseConnection implements LLDatabaseConnection { textFieldsSimilarity, queryRefreshDebounceTime, commitDebounceTime, - lowMemory + lowMemory, + inMemory ); } else { return new LLLocalLuceneIndex(basePath.resolve("lucene"), @@ -76,6 +82,7 @@ public class LLLocalDatabaseConnection implements LLDatabaseConnection { queryRefreshDebounceTime, commitDebounceTime, lowMemory, + inMemory, null ); } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java index 61a3292..e43ff25 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java @@ -1,5 +1,9 @@ package it.cavallium.dbengine.database.disk; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.util.ReferenceCounted; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.LLDictionaryResultType; import it.cavallium.dbengine.database.LLRange; @@ -9,13 +13,13 @@ import it.cavallium.dbengine.database.UpdateMode; import it.unimi.dsi.fastutil.ints.IntArrayList; import it.unimi.dsi.fastutil.objects.ObjectArrayList; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; -import java.util.Optional; import java.util.concurrent.Callable; import 
java.util.concurrent.ForkJoinPool; import java.util.concurrent.ForkJoinTask; @@ -23,11 +27,16 @@ import java.util.concurrent.locks.StampedLock; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; +import lombok.AllArgsConstructor; +import lombok.Data; import org.apache.commons.lang3.tuple.Pair; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; +import org.rocksdb.AbstractSlice; +import org.rocksdb.CappedWriteBatch; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.CompactRangeOptions; +import org.rocksdb.DirectSlice; import org.rocksdb.FlushOptions; import org.rocksdb.Holder; import org.rocksdb.ReadOptions; @@ -46,6 +55,7 @@ import reactor.core.publisher.Mono; import reactor.core.scheduler.Scheduler; import reactor.util.function.Tuple3; import reactor.util.function.Tuples; +import static io.netty.buffer.Unpooled.*; @NotAtomic public class LLLocalDictionary implements LLDictionary { @@ -56,6 +66,8 @@ public class LLLocalDictionary implements LLDictionary { static final long MAX_WRITE_BATCH_SIZE = 1024L * 1024L * 1024L; // 1GiB static final int CAPPED_WRITE_BATCH_CAP = 50000; // 50K operations static final int MULTI_GET_WINDOW = 500; + static final ReadOptions EMPTY_READ_OPTIONS = new ReadOptions(); + static final WriteOptions EMPTY_WRITE_OPTIONS = new WriteOptions(); static final WriteOptions BATCH_WRITE_OPTIONS = new WriteOptions().setLowPri(true); static final boolean PREFER_SEEK_TO_FIRST = false; static final boolean VERIFY_CHECKSUMS_WHEN_NOT_NEEDED = false; @@ -65,7 +77,17 @@ public class LLLocalDictionary implements LLDictionary { private static final int STRIPES = 512; private static final byte[] FIRST_KEY = new byte[]{}; private static final byte[] NO_DATA = new byte[0]; - private static final ReadOptions EMPTY_READ_OPTIONS = new ReadOptions(); + + private static final boolean ASSERTIONS_ENABLED; + + static { + boolean assertionsEnabled = false; + //noinspection AssertWithSideEffects + assert (assertionsEnabled = true); + //noinspection ConstantConditions + ASSERTIONS_ENABLED = assertionsEnabled; + } + private final RocksDB db; private final ColumnFamilyHandle cfh; private final String databaseName; @@ -73,6 +95,7 @@ public class LLLocalDictionary implements LLDictionary { private final Function snapshotResolver; private final Striped itemsLock = Striped.readWriteStampedLock(STRIPES); private final UpdateMode updateMode; + private final ByteBufAllocator alloc; public LLLocalDictionary(@NotNull RocksDB db, @NotNull ColumnFamilyHandle columnFamilyHandle, @@ -88,6 +111,7 @@ public class LLLocalDictionary implements LLDictionary { this.dbScheduler = dbScheduler; this.snapshotResolver = snapshotResolver; this.updateMode = updateMode; + alloc = PooledByteBufAllocator.DEFAULT; } @Override @@ -111,28 +135,33 @@ public class LLLocalDictionary implements LLDictionary { } } - private int getLockIndex(byte[] key) { - return Math.abs(Arrays.hashCode(key) % STRIPES); + private int getLockIndex(ByteBuf key) { + return Math.abs(key.hashCode() % STRIPES); } - private IntArrayList getLockIndices(List keys) { + private IntArrayList getLockIndices(List keys) { var list = new IntArrayList(keys.size()); - for (byte[] key : keys) { + for (ByteBuf key : keys) { list.add(getLockIndex(key)); } return list; } - private IntArrayList getLockIndicesEntries(List> keys) { + private IntArrayList getLockIndicesEntries(List> keys) { var list = new IntArrayList(keys.size()); - for (Entry key : keys) { + for (Entry key : 
keys) { list.add(getLockIndex(key.getKey())); } return list; } @Override - public Mono get(@Nullable LLSnapshot snapshot, byte[] key, boolean existsAlmostCertainly) { + public ByteBufAllocator getAllocator() { + return alloc; + } + + @Override + public Mono get(@Nullable LLSnapshot snapshot, ByteBuf key, boolean existsAlmostCertainly) { return Mono .fromCallable(() -> { StampedLock lock; @@ -146,34 +175,160 @@ stamp = 0; } try { - logger.trace("Reading {}", key); - Holder data = existsAlmostCertainly ? null : new Holder<>(); - if (existsAlmostCertainly || db.keyMayExist(cfh, resolveSnapshot(snapshot), key, data)) { - if (!existsAlmostCertainly && data.getValue() != null) { - return data.getValue(); - } else { - return db.get(cfh, resolveSnapshot(snapshot), key); - } - } else { - return null; + if (logger.isTraceEnabled()) { + logger.trace("Reading {}", LLUtils.toString(key)); } + return dbGet(cfh, resolveSnapshot(snapshot), key.retain()); } finally { if (updateMode == UpdateMode.ALLOW) { lock.unlockRead(stamp); } } }) - .onErrorMap(cause -> new IOException("Failed to read " + Arrays.toString(key), cause)) - .subscribeOn(dbScheduler); + .onErrorMap(cause -> new IOException("Failed to read " + LLUtils.toString(key), cause)) + .subscribeOn(dbScheduler) + .doFinally(s -> key.release()); + } + + private ByteBuf dbGet(ColumnFamilyHandle cfh, @Nullable ReadOptions readOptions, ByteBuf key) throws RocksDBException { + //todo: implement keyMayExist if existsAlmostCertainly is false. + // Unfortunately it's not feasible until RocksDB implements keyMayExist with buffers + + // Create the key nio buffer to pass to RocksDB + if (!key.isDirect()) { + throw new RocksDBException("Key buffer must be direct"); + } + try { + ByteBuf keyDirectBuf = key.retain(); + ByteBuffer keyNioBuffer = LLUtils.toDirectFast(keyDirectBuf.retain()); + if (keyNioBuffer == null) { + keyDirectBuf.release(); + keyDirectBuf = LLUtils.toDirectCopy(key.retain()); + keyNioBuffer = keyDirectBuf.nioBuffer(); + } + try { + assert keyNioBuffer.isDirect(); + // Create a direct result buffer because RocksDB works only with direct buffers + ByteBuf resultBuf = alloc.directBuffer(); + try { + int valueSize; + int assertionReadData = -1; + ByteBuffer resultNioBuf; + do { + // Create the result nio buffer to pass to RocksDB + resultNioBuf = resultBuf.nioBuffer(0, resultBuf.capacity()); + assert keyNioBuffer.isDirect(); + assert resultNioBuf.isDirect(); + valueSize = db.get(cfh, Objects.requireNonNullElse(readOptions, EMPTY_READ_OPTIONS), keyNioBuffer, resultNioBuf); + if (valueSize != RocksDB.NOT_FOUND) { + // todo: check if position is equal to data that have been read + // todo: check if limit is equal to value size or data that have been read + assert valueSize <= 0 || resultNioBuf.limit() > 0; + + // If the locking is enabled the data is safe, so, since we are appending data to the end, + // we need to check if it has been appended correctly or if it has been overwritten. + // We must not do this check otherwise, because without locking the data can be + // overwritten with a smaller value the next time. + if (updateMode == UpdateMode.ALLOW) { + // Check if read data is larger than previously read data. + // If it's smaller or equal, it means that RocksDB is overwriting the beginning of the result buffer.
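+ // Note: assertionReadData only advances when assertions are enabled (ASSERTIONS_ENABLED is computed + // in the static initializer above), so this append-consistency check is a debug-time safety net + // and has no effect in normal runs.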
+ assert resultNioBuf.limit() > assertionReadData; + if (ASSERTIONS_ENABLED) { + assertionReadData = resultNioBuf.limit(); + } + } + + // Check if read data is not bigger than the total value size. + // If it's bigger, it means that RocksDB is writing the start of the result into the result + // buffer more than once. + assert resultNioBuf.limit() <= valueSize; + + if (valueSize <= resultNioBuf.limit()) { + // Return the result ready to be read + return resultBuf.setIndex(0, valueSize).retain(); + } else { + // If the locking is enabled the data is safe, so we can append the next read data. + // Otherwise, we need to re-read everything. + if (updateMode == UpdateMode.ALLOW) { + // Update the resultBuf writerIndex with the new position + resultBuf.writerIndex(resultNioBuf.limit()); + } + //noinspection UnusedAssignment + resultNioBuf = null; + } + // Rewind the keyNioBuffer position, making it readable again for the next loop iteration + keyNioBuffer.rewind(); + if (resultBuf.capacity() < valueSize) { + // Expand the resultBuf size if the result is bigger than the current result buffer size + resultBuf.capacity(valueSize); + } + } + // Repeat if the result has been found but it's still not finished + } while (valueSize != RocksDB.NOT_FOUND); + // If the value is not found, return null + return null; + } finally { + resultBuf.release(); + } + } finally { + keyDirectBuf.release(); + } + } finally { + key.release(); + } + } + + private void dbPut(ColumnFamilyHandle cfh, @Nullable WriteOptions writeOptions, ByteBuf key, ByteBuf value) + throws RocksDBException { + if (!key.isDirect()) { + throw new RocksDBException("Key buffer must be direct"); + } + if (!value.isDirect()) { + throw new RocksDBException("Value buffer must be direct"); + } + try { + ByteBuf keyDirectBuffer = key.retain(); + var keyNioBuffer = LLUtils.toDirectFast(keyDirectBuffer.retain()); + if (keyNioBuffer == null) { + keyDirectBuffer.release(); + keyDirectBuffer = LLUtils.toDirectCopy(key.retain()); + keyNioBuffer = keyDirectBuffer.nioBuffer(); + } + try { + assert keyNioBuffer.isDirect(); + + + ByteBuf valueDirectBuffer = value.retain(); + var valueNioBuffer = LLUtils.toDirectFast(valueDirectBuffer.retain()); + if (valueNioBuffer == null) { + valueDirectBuffer.release(); + valueDirectBuffer = LLUtils.toDirectCopy(value.retain()); + valueNioBuffer = valueDirectBuffer.nioBuffer(); + } + try { + assert valueNioBuffer.isDirect(); + db.put(cfh, Objects.requireNonNullElse(writeOptions, EMPTY_WRITE_OPTIONS), keyNioBuffer, valueNioBuffer); + } finally { + valueDirectBuffer.release(); + } + } finally { + keyDirectBuffer.release(); + } + } finally { + key.release(); + value.release(); + } } @Override public Mono isRangeEmpty(@Nullable LLSnapshot snapshot, LLRange range) { + Mono contains; if (range.isSingle()) { - return containsKey(snapshot, range.getSingle()).map(contains -> !contains); + contains = containsKey(snapshot, range.getSingle().retain()); } else { - return containsRange(snapshot, range).map(contains -> !contains); + contains = containsRange(snapshot, range.retain()); } + return contains.map(isContained -> !isContained).doFinally(s -> range.release()); } public Mono containsRange(@Nullable LLSnapshot snapshot, LLRange range) { @@ -183,14 +338,20 @@ public class LLLocalDictionary implements LLDictionary { readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); readOpts.setFillCache(false); if (range.hasMin()) { - readOpts.setIterateLowerBound(new Slice(range.getMin())); + readOpts.setIterateLowerBound(new
DirectSlice(Objects.requireNonNull(LLUtils.toDirectFast(range.getMin().retain()), + "This range must use direct buffers" + ))); } if (range.hasMax()) { - readOpts.setIterateUpperBound(new Slice(range.getMax())); + readOpts.setIterateUpperBound(new DirectSlice(Objects.requireNonNull(LLUtils.toDirectFast(range.getMax().retain()), + "This range must use direct buffers" + ))); } try (RocksIterator rocksIterator = db.newIterator(cfh, readOpts)) { if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterator.seek(range.getMin()); + rocksIterator.seek(Objects.requireNonNull(LLUtils.toDirectFast(range.getMin().retain()), + "This range must use direct buffers" + )); } else { rocksIterator.seekToFirst(); } @@ -198,10 +359,11 @@ } }) .onErrorMap(cause -> new IOException("Failed to read range " + range.toString(), cause)) - .subscribeOn(dbScheduler); + .subscribeOn(dbScheduler) + .doFinally(s -> range.release()); } - private Mono containsKey(@Nullable LLSnapshot snapshot, byte[] key) { + private Mono containsKey(@Nullable LLSnapshot snapshot, ByteBuf key) { return Mono .fromCallable(() -> { StampedLock lock; @@ -216,12 +378,13 @@ } try { int size = RocksDB.NOT_FOUND; + byte[] keyBytes = LLUtils.toArray(key); Holder data = new Holder<>(); - if (db.keyMayExist(cfh, resolveSnapshot(snapshot), key, data)) { + if (db.keyMayExist(cfh, resolveSnapshot(snapshot), keyBytes, data)) { if (data.getValue() != null) { size = data.getValue().length; } else { - size = db.get(cfh, resolveSnapshot(snapshot), key, NO_DATA); + size = db.get(cfh, resolveSnapshot(snapshot), keyBytes, NO_DATA); } } return size != RocksDB.NOT_FOUND; @@ -231,13 +394,24 @@ } } }) - .onErrorMap(cause -> new IOException("Failed to read " + Arrays.toString(key), cause)) - .subscribeOn(dbScheduler); + .onErrorMap(cause -> new IOException("Failed to read " + LLUtils.toString(key), cause)) + .subscribeOn(dbScheduler) + .doFinally(s -> key.release()); } @Override - public Mono put(byte[] key, byte[] value, LLDictionaryResultType resultType) { - return getPrevValue(key, resultType) + public Mono put(ByteBuf key, ByteBuf value, LLDictionaryResultType resultType) { + if (!key.isDirect()) { + return Mono.fromCallable(() -> { + throw new IllegalArgumentException("Key must be direct"); + }); + } + if (!value.isDirect()) { + return Mono.fromCallable(() -> { + throw new IllegalArgumentException("Value must be direct"); + }); + } + return getPreviousData(key.retain(), resultType) .concatWith(Mono .fromCallable(() -> { StampedLock lock; @@ -251,8 +425,17 @@ stamp = 0; } try { - logger.trace("Writing {}: {}", key, value); - db.put(cfh, key, value); + if (logger.isTraceEnabled()) { + logger.trace("Writing {}: {}", LLUtils.toString(key), LLUtils.toString(value)); + } + if (!key.isDirect()) { + throw new IllegalArgumentException("Key must be direct"); + } + if (!value.isDirect()) { + throw new IllegalArgumentException("Value must be direct"); + } + dbPut(cfh, null, key.retain(), value.retain()); + assert value.refCnt() > 0; return null; } finally { if (updateMode == UpdateMode.ALLOW) { @@ -260,47 +443,71 @@ } } }) - .onErrorMap(cause -> new IOException("Failed
to write " + LLUtils.toString(key), cause)) .subscribeOn(dbScheduler) .then(Mono.empty()) - ).singleOrEmpty(); + ) + .singleOrEmpty() + .doFinally(s -> { + key.release(); + value.release(); + }); } @Override - public Mono update(byte[] key, - Function, Optional> value, + public Mono update(ByteBuf key, + Function<@Nullable ByteBuf, @Nullable ByteBuf> updater, boolean existsAlmostCertainly) { return Mono - .fromCallable(() -> { - if (updateMode == UpdateMode.DISALLOW) throw new UnsupportedOperationException("update() is disallowed"); - StampedLock lock; - long stamp; - if (updateMode == UpdateMode.ALLOW) { - lock = itemsLock.getAt(getLockIndex(key)); - - stamp = lock.readLock(); + .fromCallable(() -> { + if (updateMode == UpdateMode.DISALLOW) throw new UnsupportedOperationException("update() is disallowed"); + StampedLock lock; + long stamp; + if (updateMode == UpdateMode.ALLOW) { + lock = itemsLock.getAt(getLockIndex(key)); + + stamp = lock.readLock(); + } else { + lock = null; + stamp = 0; + } + try { + if (logger.isTraceEnabled()) { + logger.trace("Reading {}", LLUtils.toString(key)); + } + while (true) { + boolean changed = false; + @Nullable ByteBuf prevData; + var prevDataHolder = existsAlmostCertainly ? null : new Holder(); + if (existsAlmostCertainly || db.keyMayExist(cfh, LLUtils.toArray(key), prevDataHolder)) { + if (!existsAlmostCertainly && prevDataHolder.getValue() != null) { + byte @Nullable [] prevDataBytes = prevDataHolder.getValue(); + if (prevDataBytes != null) { + prevData = wrappedBuffer(prevDataBytes); + } else { + prevData = null; + } + } else { + prevData = dbGet(cfh, null, key.retain()); + } } else { - lock = null; - stamp = 0; + prevData = null; } try { - logger.trace("Reading {}", key); - while (true) { - boolean changed = false; - Optional prevData; - var prevDataHolder = existsAlmostCertainly ? null : new Holder(); - if (existsAlmostCertainly || db.keyMayExist(cfh, key, prevDataHolder)) { - if (!existsAlmostCertainly && prevDataHolder.getValue() != null) { - prevData = Optional.ofNullable(prevDataHolder.getValue()); - } else { - prevData = Optional.ofNullable(db.get(cfh, key)); - } - } else { - prevData = Optional.empty(); + @Nullable ByteBuf newData; + ByteBuf prevDataToSendToUpdater = prevData == null ? null : prevData.retainedSlice(); + try { + newData = updater.apply( + prevDataToSendToUpdater == null ? 
null : prevDataToSendToUpdater.retain()); + assert prevDataToSendToUpdater == null || prevDataToSendToUpdater.readerIndex() == 0 + || !prevDataToSendToUpdater.isReadable(); + } finally { + if (prevDataToSendToUpdater != null) { + prevDataToSendToUpdater.release(); } - - Optional newData = value.apply(prevData); - if (prevData.isPresent() && newData.isEmpty()) { + } + try { + if (prevData != null && newData == null) { //noinspection DuplicatedCode if (updateMode == UpdateMode.ALLOW) { var ws = lock.tryConvertToWriteLock(stamp); @@ -313,11 +520,13 @@ public class LLLocalDictionary implements LLDictionary { continue; } } - logger.trace("Deleting {}", key); + if (logger.isTraceEnabled()) { + logger.trace("Deleting {}", LLUtils.toString(key)); + } changed = true; - db.delete(cfh, key); - } else if (newData.isPresent() - && (prevData.isEmpty() || !Arrays.equals(prevData.get(), newData.get()))) { + dbDelete(cfh, null, key.retain()); + } else if (newData != null + && (prevData == null || !LLUtils.equals(prevData, newData))) { //noinspection DuplicatedCode if (updateMode == UpdateMode.ALLOW) { var ws = lock.tryConvertToWriteLock(stamp); @@ -330,25 +539,61 @@ public class LLLocalDictionary implements LLDictionary { continue; } } - logger.trace("Writing {}: {}", key, newData.get()); + if (logger.isTraceEnabled()) { + logger.trace("Writing {}: {}", LLUtils.toString(key), LLUtils.toString(newData)); + } changed = true; - db.put(cfh, key, newData.get()); + dbPut(cfh, null, key.retain(), newData.retain()); } return changed; + } finally { + if (newData != null) { + newData.release(); + } } } finally { - if (updateMode == UpdateMode.ALLOW) { - lock.unlock(stamp); + if (prevData != null) { + prevData.release(); } } - }) - .onErrorMap(cause -> new IOException("Failed to read or write " + Arrays.toString(key), cause)) - .subscribeOn(dbScheduler); + } + } finally { + if (updateMode == UpdateMode.ALLOW) { + lock.unlock(stamp); + } + } + }) + .onErrorMap(cause -> new IOException("Failed to read or write " + LLUtils.toString(key), cause)) + .subscribeOn(dbScheduler) + .doFinally(s -> key.release()); + } + + private void dbDelete(ColumnFamilyHandle cfh, @Nullable WriteOptions writeOptions, ByteBuf key) + throws RocksDBException { + try { + if (!key.isDirect()) { + throw new IllegalArgumentException("Key must be a direct buffer"); + } + ByteBuf keyDirectBuffer = key.retain(); + var keyNioBuffer = LLUtils.toDirectFast(keyDirectBuffer.retain()); + if (keyNioBuffer == null) { + keyDirectBuffer.release(); + keyDirectBuffer = LLUtils.toDirectCopy(key.retain()); + keyNioBuffer = keyDirectBuffer.nioBuffer(); + } + try { + db.delete(cfh, Objects.requireNonNullElse(writeOptions, EMPTY_WRITE_OPTIONS), keyNioBuffer); + } finally { + keyDirectBuffer.release(); + } + } finally { + key.release(); + } } @Override - public Mono remove(byte[] key, LLDictionaryResultType resultType) { - return getPrevValue(key, resultType) + public Mono remove(ByteBuf key, LLDictionaryResultType resultType) { + return getPreviousData(key.retain(), resultType) .concatWith(Mono .fromCallable(() -> { StampedLock lock; @@ -362,7 +607,10 @@ public class LLLocalDictionary implements LLDictionary { stamp = 0; } try { - db.delete(cfh, key); + if (logger.isTraceEnabled()) { + logger.trace("Deleting {}", LLUtils.toString(key)); + } + dbDelete(cfh, null, key.retain()); return null; } finally { if (updateMode == UpdateMode.ALLOW) { @@ -370,18 +618,27 @@ public class LLLocalDictionary implements LLDictionary { } } }) - .onErrorMap(cause -> new 
IOException("Failed to delete " + Arrays.toString(key), cause)) + .onErrorMap(cause -> new IOException("Failed to delete " + LLUtils.toString(key), cause)) .subscribeOn(dbScheduler) .then(Mono.empty()) - ).singleOrEmpty(); + ).singleOrEmpty() + .doFinally(s -> key.release()); } - private Mono getPrevValue(byte[] key, LLDictionaryResultType resultType) { + private Mono getPreviousData(ByteBuf key, LLDictionaryResultType resultType) { + Mono prevValue; switch (resultType) { - case VALUE_CHANGED: - return containsKey(null, key).single().map(LLUtils::booleanToResponse); + case PREVIOUS_VALUE_EXISTENCE: + prevValue = this + .containsKey(null, key.retain()) + .single() + .map(LLUtils::booleanToResponseByteBuffer) + .doFinally(s -> { + assert key.refCnt() > 0; + }); + break; case PREVIOUS_VALUE: - return Mono + prevValue = Mono .fromCallable(() -> { StampedLock lock; long stamp; @@ -394,13 +651,19 @@ public class LLLocalDictionary implements LLDictionary { stamp = 0; } try { - logger.trace("Reading {}", key); + if (logger.isTraceEnabled()) { + logger.trace("Reading {}", LLUtils.toArray(key)); + } var data = new Holder(); - if (db.keyMayExist(cfh, key, data)) { + if (db.keyMayExist(cfh, LLUtils.toArray(key), data)) { if (data.getValue() != null) { - return data.getValue(); + return wrappedBuffer(data.getValue()); } else { - return db.get(cfh, key); + try { + return dbGet(cfh, null, key.retain()); + } finally { + assert key.refCnt() > 0; + } } } else { return null; @@ -411,18 +674,22 @@ public class LLLocalDictionary implements LLDictionary { } } }) - .onErrorMap(cause -> new IOException("Failed to read " + Arrays.toString(key), cause)) + .onErrorMap(cause -> new IOException("Failed to read " + LLUtils.toString(key), cause)) .subscribeOn(dbScheduler); + break; case VOID: - return Mono.empty(); + prevValue = Mono.empty(); + break; default: - return Mono.error(new IllegalStateException("Unexpected value: " + resultType)); + prevValue = Mono.error(new IllegalStateException("Unexpected value: " + resultType)); + break; } + return prevValue.doFinally(s -> key.release()); } @Override - public Flux> getMulti(@Nullable LLSnapshot snapshot, - Flux keys, + public Flux> getMulti(@Nullable LLSnapshot snapshot, + Flux keys, boolean existsAlmostCertainly) { return keys .window(MULTI_GET_WINDOW) @@ -446,13 +713,13 @@ public class LLLocalDictionary implements LLDictionary { var handlesArray = new ColumnFamilyHandle[keysWindow.size()]; Arrays.fill(handlesArray, cfh); var handles = ObjectArrayList.wrap(handlesArray, handlesArray.length); - var results = db.multiGetAsList(resolveSnapshot(snapshot), handles, keysWindow); - var mappedResults = new ArrayList>(results.size()); + var results = db.multiGetAsList(resolveSnapshot(snapshot), handles, LLUtils.toArray(keysWindow)); + var mappedResults = new ArrayList>(results.size()); for (int i = 0; i < results.size(); i++) { var val = results.get(i); if (val != null) { results.set(i, null); - mappedResults.add(Map.entry(keysWindow.get(i), val)); + mappedResults.add(Map.entry(keysWindow.get(i).retain(), wrappedBuffer(val))); } } return mappedResults; @@ -469,229 +736,357 @@ public class LLLocalDictionary implements LLDictionary { .subscribeOn(dbScheduler) .flatMapMany(Flux::fromIterable) .onErrorMap(cause -> new IOException("Failed to read keys " - + Arrays.deepToString(keysWindow.toArray(byte[][]::new)), cause)) + + Arrays.deepToString(keysWindow.toArray(ByteBuf[]::new)), cause)) + .doFinally(s -> keysWindow.forEach(ReferenceCounted::release)) ) ); } @Override - public Flux> 
putMulti(Flux> entries, boolean getOldValues) { + public Flux> putMulti(Flux> entries, boolean getOldValues) { return entries .window(Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP)) .flatMap(Flux::collectList) - .flatMap(entriesWindow -> this - .getMulti(null, Flux.fromIterable(entriesWindow).map(Entry::getKey), false) - .publishOn(dbScheduler) - .concatWith(Mono.fromCallable(() -> { - Iterable locks; - ArrayList stamps; - if (updateMode == UpdateMode.ALLOW) { - locks = itemsLock.bulkGetAt(getLockIndicesEntries(entriesWindow)); - stamps = new ArrayList<>(); - for (var lock : locks) { - stamps.add(lock.writeLock()); - } - } else { - locks = null; - stamps = null; - } - try { - var batch = new CappedWriteBatch(db, - CAPPED_WRITE_BATCH_CAP, - RESERVED_WRITE_BATCH_SIZE, - MAX_WRITE_BATCH_SIZE, - BATCH_WRITE_OPTIONS - ); - for (Entry entry : entriesWindow) { - batch.put(entry.getKey(), entry.getValue()); - } - batch.writeToDbAndClose(); - batch.close(); - return null; - } finally { - if (updateMode == UpdateMode.ALLOW) { - int index = 0; - for (var lock : locks) { - lock.unlockWrite(stamps.get(index)); - index++; - } - } - } - }))); + .flatMap(entriesWindow -> { + Flux> oldValues; + if (getOldValues) { + oldValues = this + .getMulti(null, Flux + .fromIterable(entriesWindow) + .map(Entry::getKey) + .map(ByteBuf::retain), false) + .publishOn(dbScheduler); + } else { + oldValues = Flux.empty(); + } + return oldValues + .concatWith(Mono.fromCallable(() -> { + Iterable locks; + ArrayList stamps; + if (updateMode == UpdateMode.ALLOW) { + locks = itemsLock.bulkGetAt(getLockIndicesEntries(entriesWindow)); + stamps = new ArrayList<>(); + for (var lock : locks) { + stamps.add(lock.writeLock()); + } + } else { + locks = null; + stamps = null; + } + try { + var batch = new CappedWriteBatch(db, + CAPPED_WRITE_BATCH_CAP, + RESERVED_WRITE_BATCH_SIZE, + MAX_WRITE_BATCH_SIZE, + BATCH_WRITE_OPTIONS + ); + for (Entry entry : entriesWindow) { + batch.put(cfh, entry.getKey().retain(), entry.getValue().retain()); + } + batch.writeToDbAndClose(); + batch.close(); + return null; + } finally { + if (updateMode == UpdateMode.ALLOW) { + int index = 0; + for (var lock : locks) { + lock.unlockWrite(stamps.get(index)); + index++; + } + } + } + })) + .doFinally(s -> entriesWindow.forEach(entry -> { + entry.getKey().release(); + entry.getValue().release(); + })); + } + ); } @NotNull - private Mono> putEntryToWriteBatch(Entry newEntry, + private Mono> putEntryToWriteBatch(Entry newEntry, boolean getOldValues, CappedWriteBatch writeBatch) { - Mono getOldValueMono; + Mono getOldValueMono; if (getOldValues) { - getOldValueMono = get(null, newEntry.getKey(), false); + getOldValueMono = get(null, newEntry.getKey().retain(), false); } else { getOldValueMono = Mono.empty(); } return getOldValueMono .concatWith(Mono - .fromCallable(() -> { - writeBatch.put(cfh, newEntry.getKey(), newEntry.getValue()); + .fromCallable(() -> { + writeBatch.put(cfh, newEntry.getKey().retain(), newEntry.getValue().retain()); return null; }) .subscribeOn(dbScheduler) ) .singleOrEmpty() - .map(oldValue -> Map.entry(newEntry.getKey(), oldValue)); + .map(oldValue -> Map.entry(newEntry.getKey().retain(), oldValue)) + .doFinally(s -> { + newEntry.getKey().release(); + newEntry.getValue().release(); + }); } @Override - public Flux> getRange(@Nullable LLSnapshot snapshot, + public Flux> getRange(@Nullable LLSnapshot snapshot, LLRange range, boolean existsAlmostCertainly) { + Flux> result; if (range.isSingle()) { - return getRangeSingle(snapshot, 
range.getMin(), existsAlmostCertainly); + result = getRangeSingle(snapshot, range.getMin().retain(), existsAlmostCertainly); } else { - return getRangeMulti(snapshot, range); + result = getRangeMulti(snapshot, range.retain()); } + return result.doFinally(s -> range.release()); } @Override - public Flux>> getRangeGrouped(@Nullable LLSnapshot snapshot, + public Flux>> getRangeGrouped(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength, boolean existsAlmostCertainly) { + Flux>> result; if (range.isSingle()) { - return getRangeSingle(snapshot, range.getMin(), existsAlmostCertainly).map(List::of); + result = getRangeSingle(snapshot, range.getMin().retain(), existsAlmostCertainly).map(List::of); } else { - return getRangeMultiGrouped(snapshot, range, prefixLength); + result = getRangeMultiGrouped(snapshot, range.retain(), prefixLength); } + return result.doFinally(s -> range.release()); } - private Flux> getRangeSingle(LLSnapshot snapshot, byte[] key, boolean existsAlmostCertainly) { + private Flux> getRangeSingle(LLSnapshot snapshot, ByteBuf key, boolean existsAlmostCertainly) { return this - .get(snapshot, key, existsAlmostCertainly) - .map(value -> Map.entry(key, value)) - .flux(); - } - - private Flux> getRangeMulti(LLSnapshot snapshot, LLRange range) { - return new LLLocalEntryReactiveRocksIterator(db, cfh, range, resolveSnapshot(snapshot)) + .get(snapshot, key.retain(), existsAlmostCertainly) + .map(value -> Map.entry(key.retain(), value)) .flux() - .subscribeOn(dbScheduler); + .doFinally(s -> key.release()); } - private Flux>> getRangeMultiGrouped(LLSnapshot snapshot, LLRange range, int prefixLength) { + private Flux> getRangeMulti(LLSnapshot snapshot, LLRange range) { + return new LLLocalEntryReactiveRocksIterator(db, alloc, cfh, range.retain(), resolveSnapshot(snapshot)) + .flux() + .subscribeOn(dbScheduler) + .doFinally(s -> range.release()); + } + + private Flux>> getRangeMultiGrouped(LLSnapshot snapshot, LLRange range, int prefixLength) { return new LLLocalGroupedEntryReactiveRocksIterator(db, + alloc, cfh, prefixLength, - range, + range.retain(), resolveSnapshot(snapshot), "getRangeMultiGrouped" ) .flux() - .subscribeOn(dbScheduler); + .subscribeOn(dbScheduler) + .doFinally(s -> range.release()); } @Override - public Flux getRangeKeys(@Nullable LLSnapshot snapshot, LLRange range) { + public Flux getRangeKeys(@Nullable LLSnapshot snapshot, LLRange range) { + Flux result; if (range.isSingle()) { - return getRangeKeysSingle(snapshot, range.getMin()); + result = getRangeKeysSingle(snapshot, range.getMin().retain()); } else { - return getRangeKeysMulti(snapshot, range); + result = getRangeKeysMulti(snapshot, range.retain()); } + return result.doFinally(s -> range.release()); } @Override - public Flux> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength) { + public Flux> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength) { return new LLLocalGroupedKeyReactiveRocksIterator(db, + alloc, cfh, prefixLength, - range, + range.retain(), resolveSnapshot(snapshot), "getRangeKeysGrouped" - ).flux().subscribeOn(dbScheduler); + ).flux().subscribeOn(dbScheduler).doFinally(s -> range.release()); } @Override - public Flux getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength) { + public Flux getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, LLRange range, int prefixLength) { return new LLLocalKeyPrefixReactiveRocksIterator(db, + alloc, cfh, prefixLength, - range, + range.retain(), 
resolveSnapshot(snapshot), true, "getRangeKeysGrouped" - ).flux().subscribeOn(dbScheduler); + ).flux().subscribeOn(dbScheduler).doFinally(s -> range.release()); } - private Flux getRangeKeysSingle(LLSnapshot snapshot, byte[] key) { + private Flux getRangeKeysSingle(LLSnapshot snapshot, ByteBuf key) { return this - .containsKey(snapshot, key) + .containsKey(snapshot, key.retain()) .filter(contains -> contains) - .map(contains -> key) - .flux(); + .map(contains -> key.retain()) + .flux() + .doFinally(s -> key.release()); } - private Flux getRangeKeysMulti(LLSnapshot snapshot, LLRange range) { - return new LLLocalKeyReactiveRocksIterator(db, cfh, range, resolveSnapshot(snapshot)).flux().subscribeOn(dbScheduler); + private Flux getRangeKeysMulti(LLSnapshot snapshot, LLRange range) { + return new LLLocalKeyReactiveRocksIterator(db, alloc, cfh, range.retain(), resolveSnapshot(snapshot)) + .flux() + .subscribeOn(dbScheduler) + .doFinally(s -> range.release()); } @Override - public Flux> setRange(LLRange range, - Flux> entries, + public Flux> setRange(LLRange range, + Flux> entries, boolean getOldValues) { - return Flux - .usingWhen( - Mono - .fromCallable(() -> new CappedWriteBatch(db, - CAPPED_WRITE_BATCH_CAP, - RESERVED_WRITE_BATCH_SIZE, - MAX_WRITE_BATCH_SIZE, - BATCH_WRITE_OPTIONS) - ) - .subscribeOn(dbScheduler), - writeBatch -> Mono - .fromCallable(() -> { - if (range.isSingle()) { - writeBatch.delete(cfh, range.getSingle()); - } else { - deleteSmallRangeWriteBatch(writeBatch, range); - } - return null; - }) - .subscribeOn(dbScheduler) - .thenMany(entries) - .flatMapSequential(newEntry -> putEntryToWriteBatch(newEntry, getOldValues, writeBatch)), - writeBatch -> Mono - .fromCallable(() -> { - try (writeBatch) { - writeBatch.writeToDbAndClose(); - } - return null; - }) - .subscribeOn(dbScheduler) - ) - .subscribeOn(dbScheduler) - .onErrorMap(cause -> new IOException("Failed to write range", cause)); + Flux> oldValues; + if (getOldValues) { + oldValues = getRange(null, range); + } else { + oldValues = Flux.empty(); + } + return oldValues + .concatWith(Flux + .usingWhen( + Mono + .fromCallable(() -> new CappedWriteBatch(db, + CAPPED_WRITE_BATCH_CAP, + RESERVED_WRITE_BATCH_SIZE, + MAX_WRITE_BATCH_SIZE, + BATCH_WRITE_OPTIONS) + ) + .subscribeOn(dbScheduler), + writeBatch -> Mono + .fromCallable(() -> { + if (range.isSingle()) { + writeBatch.delete(cfh, LLUtils.toArray(range.getSingle().retain())); + } else { + deleteSmallRangeWriteBatch(writeBatch, range.retain()); + } + return null; + }) + .subscribeOn(dbScheduler) + .thenMany(entries) + .>concatMap(newEntry -> this + .putEntryToWriteBatch(newEntry, false, writeBatch) + .then(Mono.empty()) + ), + writeBatch -> Mono + .fromCallable(() -> { + try (writeBatch) { + writeBatch.writeToDbAndClose(); + } + return null; + }) + .subscribeOn(dbScheduler) + ) + .subscribeOn(dbScheduler) + .onErrorMap(cause -> new IOException("Failed to write range", cause)) + .doFinally(s -> range.release()) + ); } private void deleteSmallRangeWriteBatch(CappedWriteBatch writeBatch, LLRange range) throws RocksDBException { var readOpts = getReadOptions(null); readOpts.setFillCache(false); + ReleasableSlice minBound; if (range.hasMin()) { - readOpts.setIterateLowerBound(new Slice(range.getMin())); + minBound = setIterateBound(readOpts, IterateBound.LOWER, range.getMin().retain()); + } else { + minBound = EMPTY_RELEASABLE_SLICE; } + ReleasableSlice maxBound; if (range.hasMax()) { - readOpts.setIterateUpperBound(new Slice(range.getMax())); + maxBound = 
setIterateBound(readOpts, IterateBound.UPPER, range.getMax().retain()); + } else { + maxBound = EMPTY_RELEASABLE_SLICE; } try (var rocksIterator = db.newIterator(cfh, readOpts)) { if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterator.seek(range.getMin()); + rocksIterSeekTo(rocksIterator, range.getMin().retain()); } else { rocksIterator.seekToFirst(); } while (rocksIterator.isValid()) { - writeBatch.delete(cfh, rocksIterator.key()); + writeBatch.delete(cfh, LLUtils.readDirectNioBuffer(alloc, rocksIterator::key)); rocksIterator.next(); } + } finally { + minBound.release(); + maxBound.release(); + range.release(); + } + } + + private static void rocksIterSeekTo(RocksIterator rocksIterator, ByteBuf buffer) { + try { + ByteBuf directBuffer = buffer.retain(); + ByteBuffer nioBuffer = LLUtils.toDirectFast(directBuffer.retain()); + if (nioBuffer == null) { + directBuffer.release(); + directBuffer = LLUtils.toDirectCopy(buffer.retain()); + assert directBuffer.isDirect(); + nioBuffer = directBuffer.nioBuffer(); + } + try { + assert nioBuffer.isDirect(); + rocksIterator.seek(nioBuffer); + } finally { + directBuffer.release(); + } + } finally { + buffer.release(); + } + } + + private static ReleasableSlice setIterateBound(ReadOptions readOpts, IterateBound boundType, ByteBuf buffer) { + try { + ByteBuf directBuffer = buffer.retain(); + ByteBuffer nioBuffer = LLUtils.toDirectFast(directBuffer.retain()); + if (nioBuffer == null) { + // release the non-direct buffer before swapping in the direct copy, as rocksIterSeekTo does + directBuffer.release(); + directBuffer = LLUtils.toDirectCopy(buffer.retain()); + assert directBuffer.isDirect(); + nioBuffer = directBuffer.nioBuffer(); + } + AbstractSlice<?> slice; + try { + assert nioBuffer.isDirect(); + slice = new DirectSlice(nioBuffer); + if (boundType == IterateBound.LOWER) { + readOpts.setIterateLowerBound(slice); + } else { + readOpts.setIterateUpperBound(slice); + } + } catch (Throwable t) { + directBuffer.release(); + throw t; + } + return new ReleasableSlice(slice, directBuffer); + } finally { + buffer.release(); + } + } + + private static final ReleasableSlice EMPTY_RELEASABLE_SLICE = new ReleasableSlice(new Slice(new byte[0]), null) { + @Override + public void release() { + } + }; + + @Data + @AllArgsConstructor + public static class ReleasableSlice { AbstractSlice<?> slice; + @Nullable ByteBuf byteBuf; + + public void release() { + slice.clear(); + if (byteBuf != null) { + byteBuf.release(); + } } } @@ -752,103 +1147,136 @@ public class LLLocalDictionary implements LLDictionary { @Override public Mono<Long> sizeRange(@Nullable LLSnapshot snapshot, LLRange range, boolean fast) { + Mono<Long> result; if (range.isAll()) { - return Mono + result = Mono .fromCallable(() -> fast ?
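/* Hedged note: fastSizeAll() is the approximate path (its body sits outside this hunk, presumably backed by RocksDB's estimated key count), while exactSizeAll() walks every key. Callers that need an exact count must pass fast = false. */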
fastSizeAll(snapshot) : exactSizeAll(snapshot)) .onErrorMap(IOException::new) .subscribeOn(dbScheduler); } else { - return Mono + result = Mono .fromCallable(() -> { var readOpts = resolveSnapshot(snapshot); readOpts.setFillCache(false); readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); + ReleasableSlice minBound; if (range.hasMin()) { - readOpts.setIterateLowerBound(new Slice(range.getMin())); + minBound = setIterateBound(readOpts, IterateBound.LOWER, range.getMin().retain()); + } else { + minBound = EMPTY_RELEASABLE_SLICE; } + ReleasableSlice maxBound; if (range.hasMax()) { - readOpts.setIterateUpperBound(new Slice(range.getMax())); + maxBound = setIterateBound(readOpts, IterateBound.UPPER, range.getMax().retain()); + } else { + maxBound = EMPTY_RELEASABLE_SLICE; } - if (fast) { - readOpts.setIgnoreRangeDeletions(true); + try { + if (fast) { + readOpts.setIgnoreRangeDeletions(true); - } - try (var rocksIterator = db.newIterator(cfh, readOpts)) { - if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterator.seek(range.getMin()); - } else { - rocksIterator.seekToFirst(); } - long i = 0; - while (rocksIterator.isValid()) { - rocksIterator.next(); - i++; + try (var rocksIterator = db.newIterator(cfh, readOpts)) { + if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { + rocksIterSeekTo(rocksIterator, range.getMin().retain()); + } else { + rocksIterator.seekToFirst(); + } + long i = 0; + while (rocksIterator.isValid()) { + rocksIterator.next(); + i++; + } + return i; } - return i; + } finally { + minBound.release(); + maxBound.release(); } }) .onErrorMap(cause -> new IOException("Failed to get size of range " + range.toString(), cause)) .subscribeOn(dbScheduler); } + return result.doFinally(s -> range.release()); } @Override - public Mono> getOne(@Nullable LLSnapshot snapshot, LLRange range) { + public Mono> getOne(@Nullable LLSnapshot snapshot, LLRange range) { return Mono .fromCallable(() -> { var readOpts = resolveSnapshot(snapshot); + ReleasableSlice minBound; if (range.hasMin()) { - readOpts.setIterateLowerBound(new Slice(range.getMin())); + minBound = setIterateBound(readOpts, IterateBound.LOWER, range.getMin().retain()); + } else { + minBound = EMPTY_RELEASABLE_SLICE; } + ReleasableSlice maxBound; if (range.hasMax()) { - readOpts.setIterateUpperBound(new Slice(range.getMax())); + maxBound = setIterateBound(readOpts, IterateBound.UPPER, range.getMax().retain()); + } else { + maxBound = EMPTY_RELEASABLE_SLICE; } try (var rocksIterator = db.newIterator(cfh, readOpts)) { if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterator.seek(range.getMin()); + rocksIterSeekTo(rocksIterator, range.getMin().retain()); } else { rocksIterator.seekToFirst(); } - byte[] key; + ByteBuf key; if (rocksIterator.isValid()) { - key = rocksIterator.key(); - return Map.entry(key, rocksIterator.value()); + key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); + return Map.entry(key, LLUtils.readDirectNioBuffer(alloc, rocksIterator::value)); } else { return null; } + } finally { + minBound.release(); + maxBound.release(); } }) - .subscribeOn(dbScheduler); + .subscribeOn(dbScheduler) + .doFinally(s -> range.release()); } @Override - public Mono getOneKey(@Nullable LLSnapshot snapshot, LLRange range) { + public Mono getOneKey(@Nullable LLSnapshot snapshot, LLRange range) { return Mono .fromCallable(() -> { var readOpts = resolveSnapshot(snapshot); + ReleasableSlice minBound; if (range.hasMin()) { - readOpts.setIterateLowerBound(new 
Slice(range.getMin())); + minBound = setIterateBound(readOpts, IterateBound.LOWER, range.getMin().retain()); + } else { + minBound = EMPTY_RELEASABLE_SLICE; } + ReleasableSlice maxBound; if (range.hasMax()) { - readOpts.setIterateUpperBound(new Slice(range.getMax())); + maxBound = setIterateBound(readOpts, IterateBound.UPPER, range.getMax().retain()); + } else { + maxBound = EMPTY_RELEASABLE_SLICE; } try (var rocksIterator = db.newIterator(cfh, readOpts)) { if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterator.seek(range.getMin()); + rocksIterSeekTo(rocksIterator, range.getMin().retain()); } else { rocksIterator.seekToFirst(); } - byte[] key; + ByteBuf key; if (rocksIterator.isValid()) { - key = rocksIterator.key(); + key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); return key; } else { return null; } + } finally { + minBound.release(); + maxBound.release(); } }) - .subscribeOn(dbScheduler); + .subscribeOn(dbScheduler) + .doFinally(s -> range.release()); } private long fastSizeAll(@Nullable LLSnapshot snapshot) { @@ -889,8 +1317,8 @@ public class LLLocalDictionary implements LLDictionary { var commonPool = ForkJoinPool.commonPool(); var futures = IntStream .range(-1, LLUtils.LEXICONOGRAPHIC_ITERATION_SEEKS.length) - .mapToObj(idx -> Pair.of(idx == -1 ? null : LLUtils.LEXICONOGRAPHIC_ITERATION_SEEKS[idx], - LLUtils.LEXICONOGRAPHIC_ITERATION_SEEKS.length >= idx + 1 ? null + .mapToObj(idx -> Pair.of(idx == -1 ? new byte[0] : LLUtils.LEXICONOGRAPHIC_ITERATION_SEEKS[idx], + idx + 1 >= LLUtils.LEXICONOGRAPHIC_ITERATION_SEEKS.length ? null : LLUtils.LEXICONOGRAPHIC_ITERATION_SEEKS[idx + 1] )) .map(range -> (Callable) () -> { @@ -953,60 +1381,72 @@ public class LLLocalDictionary implements LLDictionary { } @Override - public Mono> removeOne(LLRange range) { + public Mono> removeOne(LLRange range) { return Mono .fromCallable(() -> { var readOpts = getReadOptions(null); + ReleasableSlice minBound; if (range.hasMin()) { - readOpts.setIterateLowerBound(new Slice(range.getMin())); + minBound = setIterateBound(readOpts, IterateBound.LOWER, range.getMin().retain()); + } else { + minBound = EMPTY_RELEASABLE_SLICE; } + ReleasableSlice maxBound; if (range.hasMax()) { - readOpts.setIterateUpperBound(new Slice(range.getMax())); + maxBound = setIterateBound(readOpts, IterateBound.UPPER, range.getMax().retain()); + } else { + maxBound = EMPTY_RELEASABLE_SLICE; } - try (RocksIterator iter = db.newIterator(cfh, readOpts)) { + try (RocksIterator rocksIterator = db.newIterator(cfh, readOpts)) { if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) { - iter.seek(range.getMin()); + rocksIterSeekTo(rocksIterator, range.getMin().retain()); } else { - iter.seekToFirst(); + rocksIterator.seekToFirst(); } - if (!iter.isValid()) { + if (!rocksIterator.isValid()) { return null; } - byte[] key = iter.key(); - byte[] value = iter.value(); - db.delete(cfh, key); + ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); + ByteBuf value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value); + dbDelete(cfh, null, key); return Map.entry(key, value); + } finally { + minBound.release(); + maxBound.release(); } }) .onErrorMap(cause -> new IOException("Failed to delete " + range.toString(), cause)) - .subscribeOn(dbScheduler); + .subscribeOn(dbScheduler) + .doFinally(s -> range.release()); } @NotNull - public static Tuple3, Optional> getRocksIterator(ReadOptions readOptions, + public static Tuple3 getRocksIterator(ReadOptions readOptions, LLRange range, RocksDB db, 
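/* Contract inferred from the iterator classes below: this helper consumes the range (it is released before returning) and transfers ownership of the two bound slices to the caller, which must release them once the RocksIterator is closed. */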
ColumnFamilyHandle cfh) { - Slice sliceMin; - Slice sliceMax; - if (range.hasMin()) { - sliceMin = new Slice(range.getMin()); - readOptions.setIterateLowerBound(sliceMin); - } else { - sliceMin = null; + try { + ReleasableSlice sliceMin; + ReleasableSlice sliceMax; + if (range.hasMin()) { + sliceMin = setIterateBound(readOptions, IterateBound.LOWER, range.getMin().retain()); + } else { + sliceMin = EMPTY_RELEASABLE_SLICE; + } + if (range.hasMax()) { + sliceMax = setIterateBound(readOptions, IterateBound.UPPER, range.getMax().retain()); + } else { + sliceMax = EMPTY_RELEASABLE_SLICE; + } + var rocksIterator = db.newIterator(cfh, readOptions); + if (!PREFER_SEEK_TO_FIRST && range.hasMin()) { + rocksIterSeekTo(rocksIterator, range.getMin().retain()); + } else { + rocksIterator.seekToFirst(); + } + return Tuples.of(rocksIterator, sliceMin, sliceMax); + } finally { + range.release(); } - if (range.hasMax()) { - sliceMax = new Slice(range.getMax()); - readOptions.setIterateUpperBound(sliceMax); - } else { - sliceMax = null; - } - var rocksIterator = db.newIterator(cfh, readOptions); - if (!PREFER_SEEK_TO_FIRST && range.hasMin()) { - rocksIterator.seek(range.getMin()); - } else { - rocksIterator.seekToFirst(); - } - return Tuples.of(rocksIterator, Optional.ofNullable(sliceMin), Optional.ofNullable(sliceMax)); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java index 9a8d823..0394112 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java @@ -1,5 +1,7 @@ package it.cavallium.dbengine.database.disk; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import it.cavallium.dbengine.database.LLRange; import java.util.Map; import java.util.Map.Entry; @@ -7,17 +9,18 @@ import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; -public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksIterator> { +public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksIterator> { public LLLocalEntryReactiveRocksIterator(RocksDB db, + ByteBufAllocator alloc, ColumnFamilyHandle cfh, LLRange range, ReadOptions readOptions) { - super(db, cfh, range, readOptions, true); + super(db, alloc, cfh, range, readOptions, true); } @Override - public Entry getEntry(byte[] key, byte[] value) { + public Entry getEntry(ByteBuf key, ByteBuf value) { return Map.entry(key, value); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java index 685733e..0f63943 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java @@ -1,5 +1,7 @@ package it.cavallium.dbengine.database.disk; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import it.cavallium.dbengine.database.LLRange; import java.util.Map; import java.util.Map.Entry; @@ -8,19 +10,18 @@ import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; public class LLLocalGroupedEntryReactiveRocksIterator extends - LLLocalGroupedReactiveRocksIterator> { + LLLocalGroupedReactiveRocksIterator> 
{ - public LLLocalGroupedEntryReactiveRocksIterator(RocksDB db, - ColumnFamilyHandle cfh, + public LLLocalGroupedEntryReactiveRocksIterator(RocksDB db, ByteBufAllocator alloc, ColumnFamilyHandle cfh, int prefixLength, LLRange range, ReadOptions readOptions, String debugName) { - super(db, cfh, prefixLength, range, readOptions, false, true); + super(db, alloc, cfh, prefixLength, range, readOptions, false, true); } @Override - public Entry getEntry(byte[] key, byte[] value) { + public Entry getEntry(ByteBuf key, ByteBuf value) { return Map.entry(key, value); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java index d881f35..4a33e2c 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java @@ -1,23 +1,26 @@ package it.cavallium.dbengine.database.disk; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import it.cavallium.dbengine.database.LLRange; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; -public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator { +public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator { public LLLocalGroupedKeyReactiveRocksIterator(RocksDB db, + ByteBufAllocator alloc, ColumnFamilyHandle cfh, int prefixLength, LLRange range, ReadOptions readOptions, String debugName) { - super(db, cfh, prefixLength, range, readOptions, true, false); + super(db, alloc, cfh, prefixLength, range, readOptions, true, false); } @Override - public byte[] getEntry(byte[] key, byte[] value) { + public ByteBuf getEntry(ByteBuf key, ByteBuf value) { return key; } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java index a575fcd..96e4b68 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java @@ -2,21 +2,24 @@ package it.cavallium.dbengine.database.disk; import static it.cavallium.dbengine.database.disk.LLLocalDictionary.getRocksIterator; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; import it.cavallium.dbengine.database.LLRange; +import it.cavallium.dbengine.database.LLUtils; import it.unimi.dsi.fastutil.objects.ObjectArrayList; -import java.util.Arrays; import java.util.List; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; import org.rocksdb.RocksMutableObject; import reactor.core.publisher.Flux; +import static io.netty.buffer.Unpooled.*; public abstract class LLLocalGroupedReactiveRocksIterator { - private static final byte[] EMPTY = new byte[0]; - private final RocksDB db; + private final ByteBufAllocator alloc; private final ColumnFamilyHandle cfh; private final int prefixLength; private final LLRange range; @@ -24,14 +27,14 @@ public abstract class LLLocalGroupedReactiveRocksIterator { private final boolean canFillCache; private final boolean readValues; - public LLLocalGroupedReactiveRocksIterator(RocksDB db, - ColumnFamilyHandle cfh, + 
public LLLocalGroupedReactiveRocksIterator(RocksDB db, ByteBufAllocator alloc, ColumnFamilyHandle cfh, int prefixLength, LLRange range, ReadOptions readOptions, boolean canFillCache, boolean readValues) { this.db = db; + this.alloc = alloc; this.cfh = cfh; this.prefixLength = prefixLength; this.range = range; @@ -50,18 +53,33 @@ public abstract class LLLocalGroupedReactiveRocksIterator { }, (tuple, sink) -> { var rocksIterator = tuple.getT1(); ObjectArrayList values = new ObjectArrayList<>(); - byte[] firstGroupKey = null; + ByteBuf firstGroupKey = null; - while (rocksIterator.isValid()) { - byte[] key = rocksIterator.key(); - if (firstGroupKey == null) { - firstGroupKey = key; - } else if (!Arrays.equals(firstGroupKey, 0, prefixLength, key, 0, prefixLength)) { - break; + try { + while (rocksIterator.isValid()) { + ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); + try { + if (firstGroupKey == null) { + firstGroupKey = key.retainedSlice(); + } else if (!ByteBufUtil.equals(firstGroupKey, 0, key, 0, prefixLength)) { + break; + } + ByteBuf value = readValues ? LLUtils.readDirectNioBuffer(alloc, rocksIterator::value) : EMPTY_BUFFER; + try { + rocksIterator.next(); + T entry = getEntry(key.retain(), value.retain()); + values.add(entry); + } finally { + value.release(); + } + } finally { + key.release(); + } + } + } finally { + if (firstGroupKey != null) { + firstGroupKey.release(); } - byte[] value = readValues ? rocksIterator.value() : EMPTY; - rocksIterator.next(); - values.add(getEntry(key, value)); } if (!values.isEmpty()) { sink.next(values); @@ -72,10 +90,11 @@ public abstract class LLLocalGroupedReactiveRocksIterator { }, tuple -> { var rocksIterator = tuple.getT1(); rocksIterator.close(); - tuple.getT2().ifPresent(RocksMutableObject::close); - tuple.getT3().ifPresent(RocksMutableObject::close); + tuple.getT2().release(); + tuple.getT3().release(); + range.release(); }); } - public abstract T getEntry(byte[] key, byte[] value); + public abstract T getEntry(ByteBuf key, ByteBuf value); } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java index fc41939..8a5576b 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java @@ -1,18 +1,22 @@ package it.cavallium.dbengine.database.disk; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufUtil; import it.cavallium.dbengine.database.LLRange; +import it.cavallium.dbengine.database.LLUtils; import java.util.Arrays; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; import org.rocksdb.RocksMutableObject; import reactor.core.publisher.Flux; +import static io.netty.buffer.Unpooled.*; public class LLLocalKeyPrefixReactiveRocksIterator { - private static final byte[] EMPTY = new byte[0]; - private final RocksDB db; + private final ByteBufAllocator alloc; private final ColumnFamilyHandle cfh; private final int prefixLength; private final LLRange range; @@ -20,14 +24,14 @@ public class LLLocalKeyPrefixReactiveRocksIterator { private final boolean canFillCache; private final String debugName; - public LLLocalKeyPrefixReactiveRocksIterator(RocksDB db, - ColumnFamilyHandle cfh, + public LLLocalKeyPrefixReactiveRocksIterator(RocksDB db, 
ByteBufAllocator alloc, ColumnFamilyHandle cfh, int prefixLength, LLRange range, ReadOptions readOptions, boolean canFillCache, String debugName) { this.db = db; + this.alloc = alloc; this.cfh = cfh; this.prefixLength = prefixLength; this.range = range; @@ -37,7 +41,7 @@ public class LLLocalKeyPrefixReactiveRocksIterator { } - public Flux flux() { + public Flux flux() { return Flux .generate(() -> { var readOptions = new ReadOptions(this.readOptions); @@ -45,32 +49,42 @@ public class LLLocalKeyPrefixReactiveRocksIterator { //readOptions.setReadaheadSize(2 * 1024 * 1024); readOptions.setFillCache(canFillCache); } - return LLLocalDictionary.getRocksIterator(readOptions, range, db, cfh); + return LLLocalDictionary.getRocksIterator(readOptions, range.retain(), db, cfh); }, (tuple, sink) -> { var rocksIterator = tuple.getT1(); - byte[] firstGroupKey = null; - - while (rocksIterator.isValid()) { - byte[] key = rocksIterator.key(); - if (firstGroupKey == null) { - firstGroupKey = key; - } else if (!Arrays.equals(firstGroupKey, 0, prefixLength, key, 0, prefixLength)) { - break; + ByteBuf firstGroupKey = null; + try { + while (rocksIterator.isValid()) { + ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); + try { + if (firstGroupKey == null) { + firstGroupKey = key.retain(); + } else if (!ByteBufUtil.equals(firstGroupKey, 0, key, 0, prefixLength)) { + break; + } + rocksIterator.next(); + } finally { + key.release(); + } + } + if (firstGroupKey != null) { + var groupKeyPrefix = firstGroupKey.slice(0, prefixLength); + sink.next(groupKeyPrefix.retain()); + } else { + sink.complete(); + } + } finally { + if (firstGroupKey != null) { + firstGroupKey.release(); } - rocksIterator.next(); - } - if (firstGroupKey != null) { - var groupKeyPrefix = Arrays.copyOf(firstGroupKey, prefixLength); - sink.next(groupKeyPrefix); - } else { - sink.complete(); } return tuple; }, tuple -> { var rocksIterator = tuple.getT1(); rocksIterator.close(); - tuple.getT2().ifPresent(RocksMutableObject::close); - tuple.getT3().ifPresent(RocksMutableObject::close); + tuple.getT2().release(); + tuple.getT3().release(); + range.release(); }); } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java index 39ddd09..948d6e4 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java @@ -1,21 +1,24 @@ package it.cavallium.dbengine.database.disk; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import it.cavallium.dbengine.database.LLRange; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; -public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterator { +public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterator { public LLLocalKeyReactiveRocksIterator(RocksDB db, + ByteBufAllocator alloc, ColumnFamilyHandle cfh, LLRange range, ReadOptions readOptions) { - super(db, cfh, range, readOptions, false); + super(db, alloc, cfh, range, readOptions, false); } @Override - public byte[] getEntry(byte[] key, byte[] value) { + public ByteBuf getEntry(ByteBuf key, ByteBuf value) { return key; } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java 
b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java index 5978a1d..1e80127 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java @@ -59,6 +59,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase { private final Scheduler dbScheduler; private final Path dbPath; + private final boolean inMemory; private final String name; private RocksDB db; private final Map handles; @@ -66,7 +67,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase { private final AtomicLong nextSnapshotNumbers = new AtomicLong(1); public LLLocalKeyValueDatabase(String name, Path path, List columns, List handles, - boolean crashIfWalError, boolean lowMemory) throws IOException { + boolean crashIfWalError, boolean lowMemory, boolean inMemory) throws IOException { Options options = openRocksDb(path, crashIfWalError, lowMemory); try { List descriptors = new LinkedList<>(); @@ -83,6 +84,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase { String dbPathString = databasesDirPath.toString() + File.separatorChar + path.getFileName(); Path dbPath = Paths.get(dbPathString); this.dbPath = dbPath; + this.inMemory = inMemory; this.name = name; this.dbScheduler = Schedulers.newBoundedElastic(lowMemory ? Runtime.getRuntime().availableProcessors() : Math.max(8, Runtime.getRuntime().availableProcessors()), @@ -92,12 +94,17 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase { true ); - createIfNotExists(descriptors, options, dbPath, dbPathString); + createIfNotExists(descriptors, options, inMemory, this.dbPath, dbPathString); // Create all column families that don't exist - createAllColumns(descriptors, options, dbPathString); + createAllColumns(descriptors, options, inMemory, dbPathString); // a factory method that returns a RocksDB instance - this.db = RocksDB.open(new DBOptions(options), dbPathString, descriptors, handles); + this.db = RocksDB.open(new DBOptions(options), + dbPathString, + inMemory ? 
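/* In-memory mode skips the on-disk column-family bootstrap: the database is opened with only the default column family, and createInMemoryColumns() below creates the remaining families directly on the live RocksDB instance. */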
List.of(DEFAULT_COLUMN_FAMILY) : descriptors, + handles + ); + createInMemoryColumns(descriptors, inMemory, handles); this.handles = new HashMap<>(); for (int i = 0; i < columns.size(); i++) { this.handles.put(columns.get(i), handles.get(i)); @@ -252,8 +259,10 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase { return options; } - private void createAllColumns(List totalDescriptors, Options options, - String dbPathString) throws RocksDBException { + private void createAllColumns(List totalDescriptors, Options options, boolean inMemory, String dbPathString) throws RocksDBException { + if (inMemory) { + return; + } List columnFamiliesToCreate = new LinkedList<>(); for (ColumnFamilyDescriptor descriptor : totalDescriptors) { @@ -293,8 +302,35 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase { flushAndCloseDb(db, handles); } - private void createIfNotExists(List descriptors, Options options, - Path dbPath, String dbPathString) throws RocksDBException { + private void createInMemoryColumns(List totalDescriptors, + boolean inMemory, + List handles) + throws RocksDBException { + if (!inMemory) { + return; + } + List columnFamiliesToCreate = new LinkedList<>(); + + for (ColumnFamilyDescriptor descriptor : totalDescriptors) { + columnFamiliesToCreate.add(descriptor.getName()); + } + + for (byte[] name : columnFamiliesToCreate) { + if (!Arrays.equals(name, DEFAULT_COLUMN_FAMILY.getName())) { + var descriptor = new ColumnFamilyDescriptor(name); + handles.add(db.createColumnFamily(descriptor)); + } + } + } + + private void createIfNotExists(List descriptors, + Options options, + boolean inMemory, + Path dbPath, + String dbPathString) throws RocksDBException { + if (inMemory) { + return; + } if (Files.notExists(dbPath)) { // Check if handles are all different var descriptorsSet = new HashSet<>(descriptors); @@ -318,7 +354,9 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase { handles.add(db.createColumnFamily(columnFamilyDescriptor)); } - flushAndCloseDb(db, handles); + if (!inMemory) { + flushAndCloseDb(db, handles); + } } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalLuceneIndex.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalLuceneIndex.java index e56e15f..6fa0d72 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalLuceneIndex.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalLuceneIndex.java @@ -53,6 +53,9 @@ import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.search.similarities.TFIDFSimilarity; import org.apache.lucene.store.Directory; import org.apache.lucene.store.FSDirectory; +import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.store.RAMDirectory; +import org.apache.solr.core.RAMDirectoryFactory; import org.jetbrains.annotations.Nullable; import org.warp.commonutils.log.Logger; import org.warp.commonutils.log.LoggerFactory; @@ -113,13 +116,12 @@ public class LLLocalLuceneIndex implements LLLuceneIndex { TextFieldsSimilarity similarity, Duration queryRefreshDebounceTime, Duration commitDebounceTime, - boolean lowMemory, - @Nullable LLSearchCollectionStatisticsGetter distributedCollectionStatisticsGetter) throws IOException { + boolean lowMemory, boolean inMemory, @Nullable LLSearchCollectionStatisticsGetter distributedCollectionStatisticsGetter) throws IOException { if (name.length() == 0) { throw new IOException("Empty lucene database name"); } Path directoryPath = luceneBasePath.resolve(name + ".lucene.db"); 
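The core functional change in this constructor is the directory selection right below. A minimal sketch of that pattern, assuming the Lucene 8.x API this project already depends on (RAMDirectory still exists there but is deprecated in favor of ByteBuffersDirectory; the class and method names in this sketch are illustrative, not from the patch):

import java.io.IOException;
import java.nio.file.Path;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.RAMDirectory;

public final class LuceneDirectories {

	// Heap-backed directory for tests and throwaway indexes, filesystem-backed otherwise.
	public static Directory open(Path directoryPath, boolean inMemory) throws IOException {
		return inMemory ? new RAMDirectory() : FSDirectory.open(directoryPath);
	}
}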
- this.directory = FSDirectory.open(directoryPath); + this.directory = inMemory ? new RAMDirectory() : FSDirectory.open(directoryPath); this.luceneIndexName = name; this.snapshotter = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy()); this.lowMemory = lowMemory; diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMultiLuceneIndex.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMultiLuceneIndex.java index 5c7faed..e629d88 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMultiLuceneIndex.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMultiLuceneIndex.java @@ -58,7 +58,7 @@ public class LLLocalMultiLuceneIndex implements LLLuceneIndex { TextFieldsSimilarity textFieldsSimilarity, Duration queryRefreshDebounceTime, Duration commitDebounceTime, - boolean lowMemory) throws IOException { + boolean lowMemory, boolean inMemory) throws IOException { if (instancesCount <= 1 || instancesCount > 100) { throw new IOException("Unsupported instances count: " + instancesCount); @@ -79,8 +79,7 @@ public class LLLocalMultiLuceneIndex implements LLLuceneIndex { textFieldsSimilarity, queryRefreshDebounceTime, commitDebounceTime, - lowMemory, - (indexSearcher, field, distributedPre, actionId) -> distributedCustomCollectionStatistics(finalI, + lowMemory, inMemory, (indexSearcher, field, distributedPre, actionId) -> distributedCustomCollectionStatistics(finalI, indexSearcher, field, distributedPre, diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java index 1054a66..1839393 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java @@ -2,29 +2,34 @@ package it.cavallium.dbengine.database.disk; import static it.cavallium.dbengine.database.disk.LLLocalDictionary.getRocksIterator; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; import it.cavallium.dbengine.database.LLRange; +import it.cavallium.dbengine.database.LLUtils; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; import org.rocksdb.RocksMutableObject; import reactor.core.publisher.Flux; +import static io.netty.buffer.Unpooled.*; public abstract class LLLocalReactiveRocksIterator { - private static final byte[] EMPTY = new byte[0]; - private final RocksDB db; + private final ByteBufAllocator alloc; private final ColumnFamilyHandle cfh; private final LLRange range; private final ReadOptions readOptions; private final boolean readValues; public LLLocalReactiveRocksIterator(RocksDB db, + ByteBufAllocator alloc, ColumnFamilyHandle cfh, LLRange range, ReadOptions readOptions, boolean readValues) { this.db = db; + this.alloc = alloc; this.cfh = cfh; this.range = range; this.readOptions = readOptions; @@ -39,14 +44,22 @@ public abstract class LLLocalReactiveRocksIterator { readOptions.setReadaheadSize(2 * 1024 * 1024); readOptions.setFillCache(false); } - return getRocksIterator(readOptions, range, db, cfh); + return getRocksIterator(readOptions, range.retain(), db, cfh); }, (tuple, sink) -> { var rocksIterator = tuple.getT1(); if (rocksIterator.isValid()) { - byte[] key = rocksIterator.key(); - byte[] value = readValues ? 
rocksIterator.value() : EMPTY; - rocksIterator.next(); - sink.next(getEntry(key, value)); + ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key); + try { + ByteBuf value = readValues ? LLUtils.readDirectNioBuffer(alloc, rocksIterator::value) : EMPTY_BUFFER; + try { + rocksIterator.next(); + sink.next(getEntry(key.retain(), value.retain())); + } finally { + value.release(); + } + } finally { + key.release(); + } } else { sink.complete(); } @@ -54,10 +67,10 @@ public abstract class LLLocalReactiveRocksIterator { }, tuple -> { var rocksIterator = tuple.getT1(); rocksIterator.close(); - tuple.getT2().ifPresent(RocksMutableObject::close); - tuple.getT3().ifPresent(RocksMutableObject::close); + tuple.getT2().release(); + tuple.getT3().release(); }); } - public abstract T getEntry(byte[] key, byte[] value); + public abstract T getEntry(ByteBuf key, ByteBuf value); } diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java b/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java index 0ad8f87..4ac4344 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java @@ -3,13 +3,14 @@ package it.cavallium.dbengine.database.serialization; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufInputStream; import io.netty.buffer.ByteBufOutputStream; +import io.netty.buffer.PooledByteBufAllocator; import io.netty.buffer.Unpooled; import java.io.IOError; import java.io.IOException; import org.jetbrains.annotations.NotNull; import org.warp.commonutils.error.IndexOutOfBoundsException; -public class CodecSerializer implements Serializer { +public class CodecSerializer implements Serializer { private final Codecs deserializationCodecs; private final Codec serializationCodec; @@ -34,9 +35,8 @@ public class CodecSerializer implements Serializer { } @Override - public @NotNull A deserialize(byte @NotNull [] serialized) { - ByteBuf buf = Unpooled.wrappedBuffer(serialized); - try (var is = new ByteBufInputStream(buf)) { + public @NotNull A deserialize(@NotNull ByteBuf serialized) { + try (var is = new ByteBufInputStream(serialized)) { int codecId; if (microCodecs) { codecId = is.readUnsignedByte(); @@ -48,12 +48,14 @@ public class CodecSerializer implements Serializer { } catch (IOException ex) { // This shouldn't happen throw new IOError(ex); + } finally { + serialized.release(); } } @Override - public byte @NotNull [] serialize(@NotNull A deserialized) { - ByteBuf buf = Unpooled.buffer(256); + public @NotNull ByteBuf serialize(@NotNull A deserialized) { + ByteBuf buf = PooledByteBufAllocator.DEFAULT.directBuffer(); try (var os = new ByteBufOutputStream(buf)) { if (microCodecs) { os.writeByte(serializationCodecId); @@ -61,14 +63,11 @@ public class CodecSerializer implements Serializer { os.writeInt(serializationCodecId); } serializationCodec.serialize(os, deserialized); - os.flush(); - var bytes = new byte[buf.readableBytes()]; - buf.readBytes(bytes); - return bytes; } catch (IOException ex) { // This shouldn't happen throw new IOError(ex); } + return buf; } @SuppressWarnings("unused") diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java b/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java index d4da37b..01ca3a0 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java +++ 
b/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java @@ -1,5 +1,9 @@ package it.cavallium.dbengine.database.serialization; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.PooledByteBufAllocator; +import java.nio.charset.StandardCharsets; import org.jetbrains.annotations.NotNull; public interface Serializer { @@ -8,19 +12,50 @@ public interface Serializer { @NotNull B serialize(@NotNull A deserialized); - Serializer NOOP_SERIALIZER = new Serializer<>() { + Serializer NOOP_SERIALIZER = new Serializer<>() { @Override - public byte @NotNull [] deserialize(byte @NotNull [] serialized) { - return serialized; + public @NotNull ByteBuf deserialize(@NotNull ByteBuf serialized) { + try { + return serialized.retainedSlice(); + } finally { + serialized.release(); + } } @Override - public byte @NotNull [] serialize(byte @NotNull [] deserialized) { - return deserialized; + public @NotNull ByteBuf serialize(@NotNull ByteBuf deserialized) { + try { + return deserialized.retainedSlice(); + } finally { + deserialized.release(); + } } }; - static Serializer noop() { + Serializer UTF8_SERIALIZER = new Serializer<>() { + @Override + public @NotNull String deserialize(@NotNull ByteBuf serialized) { + try { + return serialized.toString(StandardCharsets.UTF_8); + } finally { + serialized.release(); + } + } + + @Override + public @NotNull ByteBuf serialize(@NotNull String deserialized) { + // UTF-8 uses max. 3 bytes per char, so calculate the worst case. + ByteBuf buf = PooledByteBufAllocator.DEFAULT.directBuffer(ByteBufUtil.utf8MaxBytes(deserialized)); + ByteBufUtil.writeUtf8(buf, deserialized); + return buf; + } + }; + + static Serializer noop() { return NOOP_SERIALIZER; } + + static Serializer utf8() { + return UTF8_SERIALIZER; + } } diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java b/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java index c034d00..148a3f6 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java +++ b/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java @@ -2,6 +2,12 @@ package it.cavallium.dbengine.database.serialization; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.PooledByteBufAllocator; +import java.io.NotSerializableException; +import java.nio.charset.StandardCharsets; +import org.apache.commons.lang3.SerializationException; import org.jetbrains.annotations.NotNull; @SuppressWarnings("unused") @@ -9,18 +15,31 @@ public interface SerializerFixedBinaryLength extends Serializer { int getSerializedBinaryLength(); - static SerializerFixedBinaryLength noop(int length) { + static SerializerFixedBinaryLength noop(int length) { return new SerializerFixedBinaryLength<>() { @Override - public byte @NotNull [] deserialize(byte @NotNull [] serialized) { - assert serialized.length == getSerializedBinaryLength(); - return serialized; + public @NotNull ByteBuf deserialize(@NotNull ByteBuf serialized) { + try { + if (serialized.readableBytes() != getSerializedBinaryLength()) { + throw new IllegalArgumentException( + "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with " + + serialized.readableBytes() + " bytes instead"); + } + return serialized.retain(); + } finally { + 
serialized.release(); + } } @Override - public byte @NotNull [] serialize(byte @NotNull [] deserialized) { - assert deserialized.length == getSerializedBinaryLength(); - return deserialized; + public @NotNull ByteBuf serialize(@NotNull ByteBuf deserialized) { + ByteBuf buf = deserialized.retain(); + if (buf.readableBytes() != getSerializedBinaryLength()) { + throw new IllegalArgumentException( + "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to serialize an element with " + + buf.readableBytes() + " bytes instead"); + } + return buf; } @Override @@ -30,17 +49,65 @@ public interface SerializerFixedBinaryLength extends Serializer { }; } - static SerializerFixedBinaryLength intSerializer() { + static SerializerFixedBinaryLength utf8(int length) { return new SerializerFixedBinaryLength<>() { @Override - public @NotNull Integer deserialize(byte @NotNull [] serialized) { - assert serialized.length == getSerializedBinaryLength(); - return Ints.fromByteArray(serialized); + public @NotNull String deserialize(@NotNull ByteBuf serialized) { + try { + if (serialized.readableBytes() != getSerializedBinaryLength()) { + throw new SerializationException( + "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with " + + serialized.readableBytes() + " bytes instead"); + } + return serialized.toString(StandardCharsets.UTF_8); + } finally { + serialized.release(); + } } @Override - public byte @NotNull [] serialize(@NotNull Integer deserialized) { - return Ints.toByteArray(deserialized); + public @NotNull ByteBuf serialize(@NotNull String deserialized) { + // UTF-8 uses max. 3 bytes per char, so calculate the worst case. + ByteBuf buf = PooledByteBufAllocator.DEFAULT.directBuffer(ByteBufUtil.utf8MaxBytes(deserialized)); + try { + ByteBufUtil.writeUtf8(buf, deserialized); + if (buf.readableBytes() != getSerializedBinaryLength()) { + throw new SerializationException("Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to serialize an element with " + + buf.readableBytes() + " bytes instead"); + } + return buf.retain(); + } finally { + buf.release(); + } + } + + @Override + public int getSerializedBinaryLength() { + return length; + } + }; + } + + static SerializerFixedBinaryLength intSerializer() { + return new SerializerFixedBinaryLength<>() { + @Override + public @NotNull Integer deserialize(@NotNull ByteBuf serialized) { + try { + if (serialized.readableBytes() != getSerializedBinaryLength()) { + throw new IllegalArgumentException( + "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with " + + serialized.readableBytes() + " bytes instead"); + } + return serialized.readInt(); + } finally { + serialized.release(); + } + } + + @Override + public @NotNull ByteBuf serialize(@NotNull Integer deserialized) { + ByteBuf buf = PooledByteBufAllocator.DEFAULT.directBuffer(Integer.BYTES, Integer.BYTES); + return buf.writeInt(deserialized); } @Override @@ -50,17 +117,26 @@ public interface SerializerFixedBinaryLength extends Serializer { }; } - static SerializerFixedBinaryLength longSerializer() { + static SerializerFixedBinaryLength longSerializer() { return new SerializerFixedBinaryLength<>() { @Override - public @NotNull Long deserialize(byte @NotNull [] serialized) { - assert serialized.length == getSerializedBinaryLength(); - return Longs.fromByteArray(serialized); + public @NotNull Long deserialize(@NotNull ByteBuf serialized) { + try { + if (serialized.readableBytes() 
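/* The strict length check replaces the old assert: fixed-length values take part in composed range-scan keys, so a silently mis-sized value would corrupt the key layout. (Rationale inferred from the surrounding collections code, not stated by the patch.) */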
!= getSerializedBinaryLength()) { + throw new IllegalArgumentException( + "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with " + + serialized.readableBytes() + " bytes instead"); + } + return serialized.readLong(); + } finally { + serialized.release(); + } } @Override - public byte @NotNull [] serialize(@NotNull Long deserialized) { - return Longs.toByteArray(deserialized); + public @NotNull ByteBuf serialize(@NotNull Long deserialized) { + ByteBuf buf = PooledByteBufAllocator.DEFAULT.directBuffer(Long.BYTES, Long.BYTES); + return buf.writeLong(deserialized); } @Override diff --git a/src/main/java/it/cavallium/dbengine/database/disk/CappedWriteBatch.java b/src/main/java/org/rocksdb/CappedWriteBatch.java similarity index 53% rename from src/main/java/it/cavallium/dbengine/database/disk/CappedWriteBatch.java rename to src/main/java/org/rocksdb/CappedWriteBatch.java index 037f5fc..1cd462b 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/CappedWriteBatch.java +++ b/src/main/java/org/rocksdb/CappedWriteBatch.java @@ -1,6 +1,11 @@ -package it.cavallium.dbengine.database.disk; +package org.rocksdb; +import io.netty.buffer.ByteBuf; +import it.cavallium.dbengine.database.LLUtils; import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import org.rocksdb.AbstractWriteBatch; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; @@ -10,13 +15,13 @@ import org.rocksdb.WriteOptions; import org.warp.commonutils.concurrency.atomicity.NotAtomic; @NotAtomic -public class CappedWriteBatch implements WriteBatchInterface, AutoCloseable { +public class CappedWriteBatch extends WriteBatch { + // Note: this class was moved into org.rocksdb so that it can extend WriteBatch directly and reach the protected nativeHandle_ and removeDirect members used by the ByteBuf-based delete below. private final RocksDB db; private final int cap; private final WriteOptions writeOptions; - - private final WriteBatch writeBatch; + + private final List buffersToRelease; /** * @param cap The limit of operations */ @@ -26,158 +31,224 @@ public class CappedWriteBatch implements WriteBatchInterface, AutoCloseable { int reservedWriteBatchSize, long maxWriteBatchSize, WriteOptions writeOptions) { + super(reservedWriteBatchSize); this.db = db; this.cap = cap; this.writeOptions = writeOptions; - this.writeBatch = new WriteBatch(reservedWriteBatchSize); - this.writeBatch.setMaxBytes(maxWriteBatchSize); + this.setMaxBytes(maxWriteBatchSize); + this.buffersToRelease = new ArrayList<>(); } private synchronized void flushIfNeeded(boolean force) throws RocksDBException { - if (this.writeBatch.count() >= (force ?
1 : cap)) { + db.write(writeOptions, this); + this.clear(); + releaseAllBuffers(); } } + private synchronized void releaseAllBuffers() { + for (ByteBuf byteBuffer : buffersToRelease) { + byteBuffer.release(); + } + buffersToRelease.clear(); + } + @Override public synchronized int count() { - return writeBatch.count(); + return super.count(); } @Override public synchronized void put(byte[] key, byte[] value) throws RocksDBException { - writeBatch.put(key, value); + super.put(key, value); flushIfNeeded(false); } @Override public synchronized void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) throws RocksDBException { - writeBatch.put(columnFamilyHandle, key, value); + super.put(columnFamilyHandle, key, value); flushIfNeeded(false); } @Override public synchronized void put(ByteBuffer key, ByteBuffer value) throws RocksDBException { - writeBatch.put(key, value); + super.put(key, value); flushIfNeeded(false); } @Override public synchronized void put(ColumnFamilyHandle columnFamilyHandle, ByteBuffer key, ByteBuffer value) throws RocksDBException { - writeBatch.put(columnFamilyHandle, key, value); + super.put(columnFamilyHandle, key, value); + flushIfNeeded(false); + } + + public synchronized void put(ColumnFamilyHandle columnFamilyHandle, ByteBuf key, ByteBuf value) throws RocksDBException { + buffersToRelease.add(key); + buffersToRelease.add(value); + // RocksDB needs direct NIO buffers: try a zero-copy direct view first (LLUtils.toDirectFast) and fall back to a direct copy for heap buffers. Every buffer is queued in buffersToRelease so that it stays alive until the batch has been written to the database. + ByteBuf keyDirectBuf = key.retain(); + ByteBuffer keyNioBuffer = LLUtils.toDirectFast(keyDirectBuf.retain()); + if (keyNioBuffer == null) { + keyDirectBuf.release(); + keyDirectBuf = LLUtils.toDirectCopy(key.retain()); + keyNioBuffer = keyDirectBuf.nioBuffer(); + } + try { + assert keyNioBuffer.isDirect(); + + ByteBuf valueDirectBuf = value.retain(); + ByteBuffer valueNioBuffer = LLUtils.toDirectFast(valueDirectBuf.retain()); + if (valueNioBuffer == null) { + valueDirectBuf.release(); + valueDirectBuf = LLUtils.toDirectCopy(value.retain()); + valueNioBuffer = valueDirectBuf.nioBuffer(); + } + try { + assert valueNioBuffer.isDirect(); + super.put(columnFamilyHandle, keyNioBuffer, valueNioBuffer); + } finally { + buffersToRelease.add(valueDirectBuf); + } + } finally { + buffersToRelease.add(keyDirectBuf); + } flushIfNeeded(false); } @Override public synchronized void merge(byte[] key, byte[] value) throws RocksDBException { - writeBatch.merge(key, value); + super.merge(key, value); flushIfNeeded(false); } @Override public synchronized void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) throws RocksDBException { - writeBatch.merge(columnFamilyHandle, key, value); + super.merge(columnFamilyHandle, key, value); flushIfNeeded(false); } @Deprecated @Override public synchronized void remove(byte[] key) throws RocksDBException { - writeBatch.remove(key); + super.remove(key); flushIfNeeded(false); } @Deprecated @Override public synchronized void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key) throws RocksDBException { - writeBatch.remove(columnFamilyHandle, key); + super.remove(columnFamilyHandle, key); flushIfNeeded(false); } @Override public synchronized void delete(byte[] key) throws RocksDBException { - writeBatch.delete(key); + super.delete(key); flushIfNeeded(false); } @Override public synchronized void delete(ColumnFamilyHandle columnFamilyHandle, byte[] key) throws RocksDBException { - writeBatch.delete(columnFamilyHandle, key); + super.delete(columnFamilyHandle, key); + flushIfNeeded(false); + } + + // The same direct-buffer fallback as put(), but for deletes; removeDirect is WriteBatch's protected native method, reachable because this class is in org.rocksdb. + public synchronized void delete(ColumnFamilyHandle columnFamilyHandle, ByteBuf key) throws
RocksDBException { + buffersToRelease.add(key); + ByteBuf keyDirectBuf = key.retain(); + ByteBuffer keyNioBuffer = LLUtils.toDirectFast(keyDirectBuf.retain()); + if (keyNioBuffer == null) { + keyDirectBuf.release(); + keyDirectBuf = LLUtils.toDirectCopy(key.retain()); + keyNioBuffer = keyDirectBuf.nioBuffer(); + } + try { + assert keyNioBuffer.isDirect(); + removeDirect(nativeHandle_, + keyNioBuffer, + keyNioBuffer.position(), + keyNioBuffer.remaining(), + columnFamilyHandle.nativeHandle_ + ); + keyNioBuffer.position(keyNioBuffer.limit()); + } finally { + buffersToRelease.add(keyDirectBuf); + } flushIfNeeded(false); } @Override public synchronized void singleDelete(byte[] key) throws RocksDBException { - writeBatch.singleDelete(key); + super.singleDelete(key); flushIfNeeded(false); } @Override public synchronized void singleDelete(ColumnFamilyHandle columnFamilyHandle, byte[] key) throws RocksDBException { - writeBatch.singleDelete(columnFamilyHandle, key); + super.singleDelete(columnFamilyHandle, key); flushIfNeeded(false); } @Override public synchronized void remove(ByteBuffer key) throws RocksDBException { - writeBatch.remove(key); + super.remove(key); flushIfNeeded(false); } @Override public synchronized void remove(ColumnFamilyHandle columnFamilyHandle, ByteBuffer key) throws RocksDBException { - writeBatch.remove(columnFamilyHandle, key); + super.remove(columnFamilyHandle, key); flushIfNeeded(false); } @Override public synchronized void deleteRange(byte[] beginKey, byte[] endKey) throws RocksDBException { - writeBatch.deleteRange(beginKey, endKey); + super.deleteRange(beginKey, endKey); flushIfNeeded(false); } @Override public synchronized void deleteRange(ColumnFamilyHandle columnFamilyHandle, byte[] beginKey, byte[] endKey) throws RocksDBException { - writeBatch.deleteRange(columnFamilyHandle, beginKey, endKey); + super.deleteRange(columnFamilyHandle, beginKey, endKey); flushIfNeeded(false); } @Override public synchronized void putLogData(byte[] blob) throws RocksDBException { - writeBatch.putLogData(blob); + super.putLogData(blob); flushIfNeeded(false); } @Override public synchronized void clear() { - writeBatch.clear(); + super.clear(); + releaseAllBuffers(); } @Override public synchronized void setSavePoint() { - writeBatch.setSavePoint(); + super.setSavePoint(); } @Override public synchronized void rollbackToSavePoint() throws RocksDBException { - writeBatch.rollbackToSavePoint(); + super.rollbackToSavePoint(); } @Override public synchronized void popSavePoint() throws RocksDBException { - writeBatch.popSavePoint(); + super.popSavePoint(); } @Override public synchronized void setMaxBytes(long maxBytes) { - writeBatch.setMaxBytes(maxBytes); + super.setMaxBytes(maxBytes); } @Override public synchronized WriteBatch getWriteBatch() { - return writeBatch; + return this; } public synchronized void writeToDbAndClose() throws RocksDBException { @@ -186,6 +257,7 @@ public class CappedWriteBatch implements WriteBatchInterface, AutoCloseable { @Override public synchronized void close() { - writeBatch.close(); + super.close(); + releaseAllBuffers(); } } diff --git a/src/test/java/it/cavallium/dbengine/client/DbTestUtils.java b/src/test/java/it/cavallium/dbengine/client/DbTestUtils.java new file mode 100644 index 0000000..4609e1c --- /dev/null +++ b/src/test/java/it/cavallium/dbengine/client/DbTestUtils.java @@ -0,0 +1,106 @@ +package it.cavallium.dbengine.client; + +import it.cavallium.dbengine.database.Column; +import it.cavallium.dbengine.database.LLDictionary; +import 
it.cavallium.dbengine.database.LLKeyValueDatabase; +import it.cavallium.dbengine.database.UpdateMode; +import it.cavallium.dbengine.database.collections.DatabaseMapDictionary; +import it.cavallium.dbengine.database.collections.DatabaseMapDictionaryDeep; +import it.cavallium.dbengine.database.collections.DatabaseMapDictionaryHashed; +import it.cavallium.dbengine.database.collections.SubStageGetterHashMap; +import it.cavallium.dbengine.database.collections.SubStageGetterMap; +import it.cavallium.dbengine.database.disk.LLLocalDatabaseConnection; +import it.cavallium.dbengine.database.serialization.Serializer; +import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletionException; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import org.reactivestreams.Publisher; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.core.scheduler.Schedulers; + +public class DbTestUtils { + + public static final AtomicInteger dbId = new AtomicInteger(0); + + public static Flux tempDb(Function> action) { + var wrkspcPath = Path.of("/tmp/.cache/tempdb-" + dbId.incrementAndGet() + "/"); + return Flux.usingWhen(Mono + .fromCallable(() -> { + if (Files.exists(wrkspcPath)) { + Files.walk(wrkspcPath).sorted(Comparator.reverseOrder()).forEach(file -> { + try { + Files.delete(file); + } catch (IOException ex) { + throw new CompletionException(ex); + } + }); + } + Files.createDirectories(wrkspcPath); + return null; + }) + .subscribeOn(Schedulers.boundedElastic()) + .then(new LLLocalDatabaseConnection(wrkspcPath, true).connect()) + .flatMap(conn -> conn.getDatabase("testdb", + List.of(Column.dictionary("testmap"), Column.special("ints"), Column.special("longs")), + false, true + )), + action, + db -> db.close().then(Mono.fromCallable(() -> { + if (Files.exists(wrkspcPath)) { + Files.walk(wrkspcPath).sorted(Comparator.reverseOrder()).forEach(file -> { + try { + Files.delete(file); + } catch (IOException ex) { + throw new CompletionException(ex); + } + }); + } + return null; + }).subscribeOn(Schedulers.boundedElastic())) + ); + } + public static Mono tempDictionary(LLKeyValueDatabase database, UpdateMode updateMode) { + return tempDictionary(database, "testmap", updateMode); + } + + public static Mono tempDictionary(LLKeyValueDatabase database, + String name, + UpdateMode updateMode) { + return database.getDictionary(name, updateMode); + } + + public static DatabaseMapDictionary tempDatabaseMapDictionaryMap( + LLDictionary dictionary, + int keyBytes) { + return DatabaseMapDictionary.simple(dictionary, SerializerFixedBinaryLength.utf8(keyBytes), Serializer.utf8()); + } + + public static DatabaseMapDictionaryDeep, DatabaseMapDictionary> tempDatabaseMapDictionaryDeepMap( + LLDictionary dictionary, + int key1Bytes, + int key2Bytes) { + return DatabaseMapDictionaryDeep.deepTail(dictionary, + SerializerFixedBinaryLength.utf8(key1Bytes), + key2Bytes, + new SubStageGetterMap<>(SerializerFixedBinaryLength.utf8(key2Bytes), Serializer.UTF8_SERIALIZER) + ); + } + + public static DatabaseMapDictionaryHashed tempDatabaseMapDictionaryHashMap( + LLDictionary dictionary) { + return DatabaseMapDictionaryHashed.simple(dictionary, + Serializer.utf8(), + Serializer.utf8(), + String::hashCode, + SerializerFixedBinaryLength.intSerializer() + ); + } 
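+ + // Note: the stages built by these helpers own Netty buffers, so the caller is responsible for calling release() on them when done; the tests do this with doFinally(s -> map.release()).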
+} diff --git a/src/test/java/it/cavallium/dbengine/client/Database.java b/src/test/java/it/cavallium/dbengine/client/OldDatabaseTests.java similarity index 66% rename from src/test/java/it/cavallium/dbengine/client/Database.java rename to src/test/java/it/cavallium/dbengine/client/OldDatabaseTests.java index 9ba632f..1243182 100644 --- a/src/test/java/it/cavallium/dbengine/client/Database.java +++ b/src/test/java/it/cavallium/dbengine/client/OldDatabaseTests.java @@ -2,15 +2,19 @@ package it.cavallium.dbengine.client; import static it.cavallium.dbengine.client.CompositeDatabasePartLocation.CompositeDatabasePartType.KV_DATABASE; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; import it.cavallium.dbengine.database.Column; import it.cavallium.dbengine.database.LLKeyValueDatabase; import it.cavallium.dbengine.database.UpdateMode; +import it.cavallium.dbengine.database.collections.DatabaseMapDictionary; import it.cavallium.dbengine.database.collections.DatabaseMapDictionaryDeep; import it.cavallium.dbengine.database.collections.SubStageGetterMap; import it.cavallium.dbengine.database.disk.LLLocalDatabaseConnection; import it.cavallium.dbengine.database.serialization.Serializer; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; import java.io.IOException; +import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -20,6 +24,7 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.concurrent.CompletionException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Collectors; import org.jetbrains.annotations.NotNull; import org.junit.jupiter.api.Test; @@ -29,7 +34,61 @@ import reactor.core.scheduler.Schedulers; import reactor.test.StepVerifier; import reactor.util.function.Tuples; -public class Database { +public class OldDatabaseTests { + + @Test + public void testDatabaseAddKeysAndCheckSize() { + LinkedHashSet originalKeys = new LinkedHashSet<>(List.of("K1a", "K1b", "K1c")); + + StepVerifier + .create( + tempDb() + .flatMap(db -> db + .getDictionary("testmap", UpdateMode.DISALLOW) + .map(dictionary -> DatabaseMapDictionary.simple(dictionary, + new FixedStringSerializer(3), + Serializer.noop() + )) + .flatMap(collection -> Flux + .fromIterable(originalKeys) + .flatMap(k1 -> collection.putValue(k1, DUMMY_VALUE)) + .then(collection.leavesCount(null, false)) + ) + ) + ) + .expectNext((long) originalKeys.size()) + .verifyComplete(); + } + + @Test + public void testDeepDatabaseAddKeysAndCheckSize() { + LinkedHashSet originalSuperKeys = new LinkedHashSet<>(List.of("K1a", "K1b", "K1c")); + LinkedHashSet originalSubKeys = new LinkedHashSet<>(List.of("K2aa", "K2bb", "K2cc")); + + StepVerifier + .create( + tempDb() + .flatMap(db -> db + .getDictionary("testmap", UpdateMode.DISALLOW) + .map(dictionary -> DatabaseMapDictionaryDeep.deepTail(dictionary, + new FixedStringSerializer(3), + 4, + new SubStageGetterMap<>(new FixedStringSerializer(4), Serializer.noop()) + )) + .flatMap(collection -> Flux + .fromIterable(originalSuperKeys) + .flatMap(k1 -> collection.at(null, k1)) + .flatMap(k1at -> Flux + .fromIterable(originalSubKeys) + .flatMap(k2 -> k1at.putValue(k2, DUMMY_VALUE)) + ) + .then(collection.leavesCount(null, false)) + ) + ) + ) + .expectNext((long) originalSuperKeys.size() * originalSubKeys.size()) + .verifyComplete(); + } @Test public void testDeepDatabaseAddKeysAndConvertToLongerOnes() { @@ -53,7 +112,7 @@ public class 
Database { } public static Mono tempDb() { - var wrkspcPath = Path.of("/tmp/.cache/tempdb/"); + var wrkspcPath = Path.of("/tmp/.cache/tempdb-" + DbTestUtils.dbId.incrementAndGet() + "/"); return Mono .fromCallable(() -> { if (Files.exists(wrkspcPath)) { @@ -72,10 +131,16 @@ public class Database { }) .subscribeOn(Schedulers.boundedElastic()) .then(new LLLocalDatabaseConnection(wrkspcPath, true).connect()) - .flatMap(conn -> conn.getDatabase("testdb", List.of(Column.dictionary("testmap")), false)); + .flatMap(conn -> conn.getDatabase("testdb", List.of(Column.dictionary("testmap")), false, true)); } - private static final byte[] DUMMY_VALUE = new byte[] {0x01, 0x03}; + private static final ByteBuf DUMMY_VALUE; + static { + ByteBuf buf = Unpooled.directBuffer(2, 2); + buf.writeByte(0x01); + buf.writeByte(0x03); + DUMMY_VALUE = buf; + } private Flux> addKeysAndConvertToLongerOnes(LLKeyValueDatabase db, LinkedHashSet originalSuperKeys, @@ -157,7 +222,7 @@ public class Database { ); } - private static class FixedStringSerializer implements SerializerFixedBinaryLength { + private static class FixedStringSerializer implements SerializerFixedBinaryLength { private final int size; @@ -171,13 +236,21 @@ public class Database { } @Override - public @NotNull String deserialize(byte @NotNull [] serialized) { - return new String(serialized, StandardCharsets.US_ASCII); + public @NotNull String deserialize(ByteBuf serialized) { + try { + return serialized.toString(StandardCharsets.US_ASCII); + } finally { + serialized.release(); + } } @Override - public byte @NotNull [] serialize(@NotNull String deserialized) { - return deserialized.getBytes(StandardCharsets.US_ASCII); + public ByteBuf serialize(@NotNull String deserialized) { + var serialized = deserialized.getBytes(StandardCharsets.US_ASCII); + var serializedBuf = Unpooled.directBuffer(serialized.length, serialized.length); + serializedBuf.writeBytes(serialized); + assert serializedBuf.isDirect(); + return serializedBuf; } } } diff --git a/src/test/java/it/cavallium/dbengine/client/TestDictionaryMap.java b/src/test/java/it/cavallium/dbengine/client/TestDictionaryMap.java new file mode 100644 index 0000000..0c97549 --- /dev/null +++ b/src/test/java/it/cavallium/dbengine/client/TestDictionaryMap.java @@ -0,0 +1,622 @@ +package it.cavallium.dbengine.client; + +import static it.cavallium.dbengine.client.DbTestUtils.*; + +import it.cavallium.dbengine.database.LLDictionary; +import it.cavallium.dbengine.database.UpdateMode; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListSet; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import reactor.core.publisher.Flux; +import reactor.core.publisher.Mono; +import reactor.test.StepVerifier; +import reactor.test.StepVerifier.FirstStep; +import reactor.test.StepVerifier.Step; +import reactor.util.function.Tuple2; +import reactor.util.function.Tuple3; +import reactor.util.function.Tuple4; +import reactor.util.function.Tuples; + +public class TestDictionaryMap { + + private static Stream provideArgumentsCreate() { + return Arrays.stream(UpdateMode.values()).map(Arguments::of); + } + + @ParameterizedTest + 
@MethodSource("provideArgumentsCreate") + public void testCreate(UpdateMode updateMode) { + StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .flatMap(LLDictionary::clear) + .then() + )) + .verifyComplete(); + } + + private static Stream provideArgumentsPut() { + var goodKeys = Set.of("12345", "zebra"); + var badKeys = Set.of("", "a", "aaaa", "aaaaaa"); + Set> keys = Stream.concat( + goodKeys.stream().map(s -> Tuples.of(s, false)), + badKeys.stream().map(s -> Tuples.of(s, true)) + ).collect(Collectors.toSet()); + var values = Set.of("a", "", "\0", "\0\0", "z", "azzszgzczqz", "bzzazazqzeztzgzzhz!"); + + return keys + .stream() + .flatMap(keyTuple -> { + Stream strm; + if (keyTuple.getT2()) { + strm = values.stream().limit(1); + } else { + strm = values.stream(); + } + return strm.map(val -> Tuples.of(keyTuple.getT1(), val, keyTuple.getT2())); + }) + .flatMap(entryTuple -> Arrays.stream(UpdateMode.values()).map(updateMode -> Tuples.of(updateMode, + entryTuple.getT1(), + entryTuple.getT2(), + entryTuple.getT3() + ))) + .map(fullTuple -> Arguments.of(fullTuple.getT1(), fullTuple.getT2(), fullTuple.getT3(), fullTuple.getT4())); + } + + @ParameterizedTest + @MethodSource("provideArgumentsPut") + public void testPut(UpdateMode updateMode, String key, String value, boolean shouldFail) { + var stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMap(map -> map + .putValue(key, value) + .then(map.getValue(null, key)) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + stpVer.expectNext(value).verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPut") + public void testAtSetAtGet(UpdateMode updateMode, String key, String value, boolean shouldFail) { + var stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMap(map -> map + .at(null, key).flatMap(v -> v.set(value).doFinally(s -> v.release())) + .then(map.at(null, key).flatMap(v -> v.get(null).doFinally(s -> v.release()))) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + stpVer.expectNext(value).verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPut") + public void testPutAndGetPrevious(UpdateMode updateMode, String key, String value, boolean shouldFail) { + var stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.putValueAndGetPrevious(key, "error?"), + map.putValueAndGetPrevious(key, value), + map.putValueAndGetPrevious(key, value) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + stpVer.expectNext("error?").expectNext(value).verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPut") + public void testPutValueRemoveAndGetPrevious(UpdateMode updateMode, String key, String value, boolean shouldFail) { + var stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.removeAndGetPrevious(key), + map.putValue(key, value).then(map.removeAndGetPrevious(key)), + map.removeAndGetPrevious(key) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + 
stpVer.expectNext(value).verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPut") + public void testPutValueRemoveAndGetStatus(UpdateMode updateMode, String key, String value, boolean shouldFail) { + var stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.removeAndGetStatus(key), + map.putValue(key, value).then(map.removeAndGetStatus(key)), + map.removeAndGetStatus(key) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + stpVer.expectNext(false, true, false).verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPut") + public void testUpdate(UpdateMode updateMode, String key, String value, boolean shouldFail) { + var stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.updateValue(key, old -> { + assert old == null; + return "error?"; + }), + map.updateValue(key, false, old -> { + assert Objects.equals(old, "error?"); + return "error?"; + }), + map.updateValue(key, true, old -> { + assert Objects.equals(old, "error?"); + return "error?"; + }), + map.updateValue(key, true, old -> { + assert Objects.equals(old, "error?"); + return value; + }), + map.updateValue(key, true, old -> { + assert Objects.equals(old, value); + return value; + }) + ) + .doFinally(s -> map.release()) + ) + )); + if (updateMode == UpdateMode.DISALLOW || shouldFail) { + stpVer.verifyError(); + } else { + stpVer.expectNext(true, false, false, true, false).verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPut") + public void testUpdateGet(UpdateMode updateMode, String key, String value, boolean shouldFail) { + var stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.updateValue(key, old -> { + assert old == null; + return "error?"; + }).then(map.getValue(null, key)), + map.updateValue(key, false, old -> { + assert Objects.equals(old, "error?"); + return "error?"; + }).then(map.getValue(null, key)), + map.updateValue(key, true, old -> { + assert Objects.equals(old, "error?"); + return "error?"; + }).then(map.getValue(null, key)), + map.updateValue(key, true, old -> { + assert Objects.equals(old, "error?"); + return value; + }).then(map.getValue(null, key)), + map.updateValue(key, true, old -> { + assert Objects.equals(old, value); + return value; + }).then(map.getValue(null, key)) + ) + .doFinally(s -> map.release()) + ) + )); + if (updateMode == UpdateMode.DISALLOW || shouldFail) { + stpVer.verifyError(); + } else { + stpVer.expectNext("error?", "error?", "error?", value, value).verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPut") + public void testPutAndGetStatus(UpdateMode updateMode, String key, String value, boolean shouldFail) { + var stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.putValueAndGetStatus(key, "error?").single(), + map.putValueAndGetStatus(key, value).single(), + map.putValueAndGetStatus(key, value).single(), + map.remove(key), + map.putValueAndGetStatus(key, "error?").single() + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + 
stpVer.verifyError(); + } else { + stpVer.expectNext(false, true, true, false).verifyComplete(); + } + } + + private static Stream provideArgumentsPutMulti() { + var goodKeys = Set.of(Set.of("12345", "67890"), Set.of("zebra"), Set.of()); + var badKeys = Set.of(Set.of("", "12345"), Set.of("12345", "a"), Set.of("45678", "aaaa"), Set.of("aaaaaa", "capra")); + Set, Boolean>> keys = Stream.concat( + goodKeys.stream().map(s -> Tuples.of(s, false)), + badKeys.stream().map(s -> Tuples.of(s, true)) + ).collect(Collectors.toSet()); + var values = Set.of("a", "", "\0", "\0\0", "z", "azzszgzczqz", "bzzazazqzeztzgzzhz!"); + + return keys + .stream() + .map(keyTuple -> keyTuple.mapT1(ks -> Flux + .zip(Flux.fromIterable(ks), Flux.fromIterable(values)) + .collectMap(Tuple2::getT1, Tuple2::getT2) + .block() + )) + .flatMap(entryTuple -> Arrays.stream(UpdateMode.values()).map(updateMode -> Tuples.of(updateMode, + entryTuple.getT1(), + entryTuple.getT2() + ))) + .map(fullTuple -> Arguments.of(fullTuple.getT1(), fullTuple.getT2(), fullTuple.getT3())); + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testPutMultiGetMulti(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step> stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.putMulti(Flux.fromIterable(entries.entrySet())).then(Mono.empty()), + map.getMulti(null, Flux.fromIterable(entries.keySet())) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + entries.forEach((k, v) -> remainingEntries.add(Map.entry(k, v))); + for (Entry ignored : remainingEntries) { + stpVer = stpVer.expectNextMatches(remainingEntries::remove); + } + stpVer.verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testSetAllValuesGetMulti(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step> stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> map + .setAllValues(Flux.fromIterable(entries.entrySet())) + .thenMany(map.getMulti(null, Flux.fromIterable(entries.keySet()))) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + entries.forEach((k, v) -> remainingEntries.add(Map.entry(k, v))); + for (Entry ignored : remainingEntries) { + stpVer = stpVer.expectNextMatches(remainingEntries::remove); + } + stpVer.verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testSetAllValuesAndGetPrevious(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step> stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.setAllValuesAndGetPrevious(Flux.fromIterable(entries.entrySet())), + map.setAllValuesAndGetPrevious(Flux.fromIterable(entries.entrySet())) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + entries.forEach((k, v) -> remainingEntries.add(Map.entry(k, v))); + for (Entry ignored : remainingEntries) { + stpVer = 
stpVer.expectNextMatches(remainingEntries::remove); + } + stpVer.verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testSetGetMulti(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step> stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.set(entries).then(Mono.empty()), + map.getMulti(null, Flux.fromIterable(entries.keySet())) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + entries.forEach((k, v) -> remainingEntries.add(Map.entry(k, v))); + for (Entry ignored : remainingEntries) { + stpVer = stpVer.expectNextMatches(remainingEntries::remove); + } + stpVer.verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testSetAndGetStatus(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> { + Mono removalMono; + if (entries.isEmpty()) { + removalMono = Mono.empty(); + } else { + removalMono = map.remove(entries.keySet().stream().findAny().orElseThrow()); + } + return Flux + .concat( + map.setAndGetChanged(entries).single(), + map.setAndGetChanged(entries).single(), + removalMono.then(Mono.empty()), + map.setAndGetChanged(entries).single() + ) + .doFinally(s -> map.release()); + }) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + stpVer.expectNext(!entries.isEmpty(), false, !entries.isEmpty()).verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testSetAndGetPrevious(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step> stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat(map.setAndGetPrevious(entries), map.setAndGetPrevious(entries)) + .map(Map::entrySet) + .flatMap(Flux::fromIterable) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + entries.forEach((k, v) -> remainingEntries.add(Map.entry(k, v))); + for (Entry ignored : remainingEntries) { + stpVer = stpVer.expectNextMatches(remainingEntries::remove); + } + stpVer.verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testSetClearAndGetPreviousGet(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step> stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat(map.set(entries).then(Mono.empty()), map.clearAndGetPrevious(), map.get(null)) + .map(Map::entrySet) + .flatMap(Flux::fromIterable) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + entries.forEach((k, v) -> remainingEntries.add(Map.entry(k, v))); + for (Entry ignored : remainingEntries) { + stpVer = stpVer.expectNextMatches(remainingEntries::remove); + } + stpVer.verifyComplete(); + } + } + + 
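// The multi-entry tests make no assumption about emission order: each emitted entry is matched with expectNextMatches(remainingEntries::remove) until the expected set is drained, so complete but reordered results still pass. +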
@ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testPutMultiGetAllValues(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step> stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.putMulti(Flux.fromIterable(entries.entrySet())).then(Mono.empty()), + map.getAllValues(null) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + entries.forEach((k, v) -> remainingEntries.add(Map.entry(k, v))); + for (Entry ignored : remainingEntries) { + stpVer = stpVer.expectNextMatches(remainingEntries::remove); + } + stpVer.verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testPutMultiGet(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step> stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.putMulti(Flux.fromIterable(entries.entrySet())).then(Mono.empty()), + map.get(null) + .map(Map::entrySet) + .flatMapMany(Flux::fromIterable) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + entries.forEach((k, v) -> remainingEntries.add(Map.entry(k, v))); + for (Entry ignored : remainingEntries) { + stpVer = stpVer.expectNextMatches(remainingEntries::remove); + } + stpVer.verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testPutMultiGetAllStagesGet(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step> stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.putMulti(Flux.fromIterable(entries.entrySet())).then(Mono.empty()), + map + .getAllStages(null) + .flatMap(stage -> stage + .getValue() + .get(null) + .map(val -> Map.entry(stage.getKey(), val)) + .doFinally(s -> stage.getValue().release()) + ) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.verifyError(); + } else { + entries.forEach((k, v) -> remainingEntries.add(Map.entry(k, v))); + for (Entry ignored : remainingEntries) { + stpVer = stpVer.expectNextMatches(remainingEntries::remove); + } + stpVer.verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testPutMultiIsEmpty(UpdateMode updateMode, Map entries, boolean shouldFail) { + var remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.isEmpty(null), + map.putMulti(Flux.fromIterable(entries.entrySet())).then(Mono.empty()), + map.isEmpty(null) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.expectNext(true).verifyError(); + } else { + stpVer.expectNext(true, entries.isEmpty()).verifyComplete(); + } + } + + @ParameterizedTest + @MethodSource("provideArgumentsPutMulti") + public void testPutMultiClear(UpdateMode updateMode, Map entries, boolean shouldFail) { + var 
remainingEntries = new ConcurrentHashMap, Boolean>().keySet(true); + Step stpVer = StepVerifier + .create(tempDb(db -> tempDictionary(db, updateMode) + .map(dict -> tempDatabaseMapDictionaryMap(dict, 5)) + .flatMapMany(map -> Flux + .concat( + map.isEmpty(null), + map.putMulti(Flux.fromIterable(entries.entrySet())).then(Mono.empty()), + map.isEmpty(null), + map.clear().then(Mono.empty()), + map.isEmpty(null) + ) + .doFinally(s -> map.release()) + ) + )); + if (shouldFail) { + stpVer.expectNext(true).verifyError(); + } else { + stpVer.expectNext(true, entries.isEmpty(), true).verifyComplete(); + } + } +} diff --git a/src/test/java/it/cavallium/dbengine/client/TestSingletons.java b/src/test/java/it/cavallium/dbengine/client/TestSingletons.java new file mode 100644 index 0000000..b4542f5 --- /dev/null +++ b/src/test/java/it/cavallium/dbengine/client/TestSingletons.java @@ -0,0 +1,120 @@ +package it.cavallium.dbengine.client; + +import static it.cavallium.dbengine.client.DbTestUtils.tempDb; + +import it.cavallium.dbengine.database.LLKeyValueDatabase; +import it.cavallium.dbengine.database.collections.DatabaseInt; +import it.cavallium.dbengine.database.collections.DatabaseLong; +import java.util.stream.Stream; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.ArgumentsProvider; +import org.junit.jupiter.params.provider.ArgumentsSource; +import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; +import reactor.core.publisher.Mono; +import reactor.test.StepVerifier; + +public class TestSingletons { + + private static Stream provideNumberWithRepeats() { + return Stream.of( + Arguments.of(Integer.MIN_VALUE, 2), + Arguments.of(-11, 2), + Arguments.of(0, 3), + Arguments.of(102, 5) + ); + } + + private static Stream provideLongNumberWithRepeats() { + return Stream.of( + Arguments.of(Long.MIN_VALUE, 2), + Arguments.of(-11L, 2), + Arguments.of(0L, 3), + Arguments.of(102L, 5) + ); + } + + @Test + public void testCreateInteger() { + StepVerifier + .create(tempDb(db -> tempInt(db, "test", 0) + .flatMap(dbInt -> dbInt.get(null)) + .then() + )) + .verifyComplete(); + } + + @Test + public void testCreateLong() { + StepVerifier + .create(tempDb(db -> tempLong(db, "test", 0) + .flatMap(dbLong -> dbLong.get(null)) + .then() + )) + .verifyComplete(); + } + + @ParameterizedTest + @ValueSource(ints = {Integer.MIN_VALUE, -192, -2, -1, 0, 1, 2, 1292, Integer.MAX_VALUE}) + public void testDefaultValueInteger(int i) { + StepVerifier + .create(tempDb(db -> tempInt(db, "test", i) + .flatMap(dbInt -> dbInt.get(null)) + )) + .expectNext(i) + .verifyComplete(); + } + + @ParameterizedTest + @ValueSource(longs = {Long.MIN_VALUE, -192, -2, -1, 0, 1, 2, 1292, Long.MAX_VALUE}) + public void testDefaultValueLong(long i) { + StepVerifier + .create(tempDb(db -> tempLong(db, "test", i) + .flatMap(dbLong -> dbLong.get(null)) + )) + .expectNext(i) + .verifyComplete(); + } + + @ParameterizedTest + @MethodSource("provideNumberWithRepeats") + public void testSetInteger(Integer i, Integer repeats) { + StepVerifier + .create(tempDb(db -> tempInt(db, "test", 0) + .flatMap(dbInt -> Mono + .defer(() -> dbInt.set((int) System.currentTimeMillis())) + .repeat(repeats) + .then(dbInt.set(i)) + .then(dbInt.get(null))) + )) + .expectNext(i) + .verifyComplete(); + } + + @ParameterizedTest + @MethodSource("provideLongNumberWithRepeats") + public void testSetLong(Long i, 
Integer repeats) { + StepVerifier + .create(tempDb(db -> tempLong(db, "test", 0) + .flatMap(dbLong -> Mono + .defer(() -> dbLong.set(System.currentTimeMillis())) + .repeat(repeats) + .then(dbLong.set(i)) + .then(dbLong.get(null))) + )) + .expectNext(i) + .verifyComplete(); + } + + public static Mono tempInt(LLKeyValueDatabase database, String name, int defaultValue) { + return database + .getInteger("ints", name, defaultValue); + } + + public static Mono tempLong(LLKeyValueDatabase database, String name, long defaultValue) { + return database + .getLong("longs", name, defaultValue); + } +} diff --git a/src/test/java/it/cavallium/dbengine/database/collections/TestRanges.java b/src/test/java/it/cavallium/dbengine/database/collections/TestRanges.java index 88917c4..c9dcff5 100644 --- a/src/test/java/it/cavallium/dbengine/database/collections/TestRanges.java +++ b/src/test/java/it/cavallium/dbengine/database/collections/TestRanges.java @@ -1,10 +1,21 @@ package it.cavallium.dbengine.database.collections; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.PooledByteBufAllocator; +import io.netty.buffer.Unpooled; +import it.cavallium.dbengine.database.LLUtils; import java.util.Arrays; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import static io.netty.buffer.Unpooled.*; public class TestRanges { + + @Test + public void testDirectBuffer() { + Assertions.assertTrue(wrappedBuffer(Unpooled.directBuffer(10, 10), Unpooled.buffer(10, 10)).isDirect()); + } + @Test public void testNextRangeKey() { testNextRangeKey(new byte[] {0x00, 0x00, 0x00}); @@ -21,11 +32,21 @@ public class TestRanges { public void testNextRangeKey(byte[] prefixKey) { - byte[] firstRangeKey = DatabaseMapDictionaryDeep.firstRangeKey(prefixKey, prefixKey.length, 7, 3); - byte[] nextRangeKey = DatabaseMapDictionaryDeep.nextRangeKey(prefixKey, prefixKey.length, 7, 3); + byte[] firstRangeKey = LLUtils.toArray(DatabaseMapDictionaryDeep.firstRangeKey(PooledByteBufAllocator.DEFAULT, + LLUtils.convertToDirectByteBuf(PooledByteBufAllocator.DEFAULT, wrappedBuffer(prefixKey)), + prefixKey.length, + 7, + 3 + )); + byte[] nextRangeKey = LLUtils.toArray(DatabaseMapDictionaryDeep.nextRangeKey(PooledByteBufAllocator.DEFAULT, + LLUtils.convertToDirectByteBuf(PooledByteBufAllocator.DEFAULT, wrappedBuffer(prefixKey)), + prefixKey.length, + 7, + 3 + )); if (Arrays.equals(prefixKey, new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF})) { - Assertions.assertArrayEquals(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, nextRangeKey); + Assertions.assertArrayEquals(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, 0}, nextRangeKey); } else { long biPrefix = 0; var s = 0; @@ -64,11 +85,23 @@ public class TestRanges { public void testNextRangeKeyWithSuffix(byte[] prefixKey, byte[] suffixKey) { - byte[] firstRangeKey = DatabaseMapDictionaryDeep.firstRangeKey(prefixKey, suffixKey, prefixKey.length, 3, 7); - byte[] nextRangeKey = DatabaseMapDictionaryDeep.nextRangeKey(prefixKey, suffixKey, prefixKey.length, 3, 7); + byte[] firstRangeKey = LLUtils.toArray(DatabaseMapDictionaryDeep.firstRangeKey(ByteBufAllocator.DEFAULT, + LLUtils.convertToDirectByteBuf(PooledByteBufAllocator.DEFAULT, wrappedBuffer(prefixKey)), + LLUtils.convertToDirectByteBuf(PooledByteBufAllocator.DEFAULT, wrappedBuffer(suffixKey)), + prefixKey.length, + 3, + 7 + )); + byte[] nextRangeKey 
= LLUtils.toArray(DatabaseMapDictionaryDeep.nextRangeKey(ByteBufAllocator.DEFAULT, + LLUtils.convertToDirectByteBuf(PooledByteBufAllocator.DEFAULT, wrappedBuffer(prefixKey)), + LLUtils.convertToDirectByteBuf(PooledByteBufAllocator.DEFAULT, wrappedBuffer(suffixKey)), + prefixKey.length, + 3, + 7 + )); if (Arrays.equals(prefixKey, new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF}) && Arrays.equals(suffixKey, new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF})) { - Assertions.assertArrayEquals(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, 0, 0, 0, 0, 0, 0, 0, 0}, nextRangeKey); + Assertions.assertArrayEquals(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, 0}, nextRangeKey); } else { long biPrefix = 0; var s = 0;