diff --git a/pom.xml b/pom.xml
index 86e25de..d429d33 100644
--- a/pom.xml
+++ b/pom.xml
@@ -5,7 +5,7 @@
   it.cavallium
   dbengine
-  3.0.${revision}
+  4.0.${revision}
   jar
@@ -13,7 +13,8 @@
   0-SNAPSHOT
   false
   1.9.5
-  9.4.2
+  9.5.0
+  7.9.2
   5.9.0
   1.0.244
@@ -96,13 +97,6 @@
   hamcrest-library
   2.2
-
-  io.projectreactor
-  reactor-bom
-  2022.0.2
-  pom
-  import
-
   io.netty
   netty-bom
@@ -113,33 +107,11 @@
-
-  io.projectreactor
-  reactor-tools
-  original
-  runtime
-  3.5.1
-
   com.google.guava
   guava
   31.1-jre
-
-  io.netty
-  netty5-buffer
-  5.0.0.Alpha5
-
-  io.netty
-  netty-common
-
-  io.netty
-  netty-buffer
-
-
   io.netty
   netty-buffer
@@ -155,11 +127,6 @@
   io.netty
   netty-handler
-
-  io.projectreactor.netty.incubator
-  reactor-netty-incubator-quic
-  0.1.2
   org.yaml
@@ -266,7 +233,7 @@
   org.rocksdb
   rocksdbjni
-  7.9.2
+  ${rocksdb.version}
   org.apache.lucene
@@ -356,40 +323,6 @@
-
-  io.projectreactor.netty
-  reactor-netty
-
-  io.netty.incubator
-  netty-incubator-codec-native-quic
-
-  io.netty
-  reactor-netty-core
-
-  io.netty
-  netty-common
-
-  io.netty
-  netty-codec
-
-  io.netty
-  netty-handler
-
-  io.netty
-  netty-transport
-
-  io.netty
-  netty-buffer
-
-
   org.bouncycastle
   bcpkix-jdk15on
@@ -457,11 +390,6 @@
   3.12.0
   compile
-
-  io.projectreactor
-  reactor-test
-  test
-
   src/test/java
@@ -638,39 +566,4 @@
-
-  reactor-agent
-
-  false
-
-  reactor.agent.enable
-  true
-
-
-  net.bytebuddy
-  byte-buddy-maven-plugin
-  1.12.22
-
-
-  transform
-
-
-
-  reactor.tools.agent.ReactorDebugByteBuddyPlugin
-
-
-
-
diff --git a/src/main/data-generator/quic-rpc.yaml b/src/main/data-generator/quic-rpc.yaml
index f863853..719556a 100644
--- a/src/main/data-generator/quic-rpc.yaml
+++ b/src/main/data-generator/quic-rpc.yaml
@@ -77,8 +77,6 @@ superTypesData:
     NIOFSDirectory,
     RAFFSDirectory,
     DirectIOFSDirectory,
-    RocksDBStandaloneDirectory,
-    RocksDBSharedDirectory,
     NRTCachingDirectory
   ]
   StandardFSDirectoryOptions: [
@@ -90,7 +88,6 @@
     MemoryMappedFSDirectory,
     NIOFSDirectory,
     RAFFSDirectory,
-    RocksDBStandaloneDirectory,
     StandardFSDirectoryOptions
   ]
   Filter: [
@@ -134,8 +131,8 @@ customTypesData:
     serializer: it.cavallium.dbengine.database.remote.LLSnapshotSerializer
   Bytes:
-    javaClass: it.unimi.dsi.fastutil.bytes.ByteList
-    serializer: it.cavallium.dbengine.database.remote.ByteListSerializer
+    javaClass: it.cavallium.dbengine.buffers.Buf
+    serializer: it.cavallium.dbengine.database.remote.BufSerializer
   StringMap:
     javaClass: java.util.Map
     serializer: it.cavallium.dbengine.database.remote.StringMapSerializer
@@ -248,7 +245,6 @@
       lowMemory: boolean
       useDirectIO: boolean
       allowMemoryMapping: boolean
-      allowNettyDirect: boolean
       optimistic: boolean
       maxOpenFiles: -int
       blockCache: -long
@@ -331,7 +327,6 @@
       indexWriterMaxBufferedDocs: -int
      applyAllDeletes: -boolean
       writeAllDeletes: -boolean
-      allowNonVolatileCollection: boolean
       maxInMemoryResultEntries: int
       mergePolicy: TieredMergePolicy
   TieredMergePolicy:
@@ -359,14 +354,6 @@
       delegate: StandardFSDirectoryOptions
       mergeBufferSize: -int
       minBytesDirect: -long
-  RocksDBStandaloneDirectory:
-    data:
-      managedPath: Path
-      blockSize: int
-  RocksDBSharedDirectory:
-    data:
-      managedPath: Path
-      blockSize: int
   NRTCachingDirectory:
     data:
       delegate: LuceneDirectoryOptions
diff --git a/src/main/java/it/cavallium/dbengine/MetricUtils.java b/src/main/java/it/cavallium/dbengine/MetricUtils.java
deleted file mode 100644
index 039860b..0000000
--- a/src/main/java/it/cavallium/dbengine/MetricUtils.java
+++ /dev/null
@@ -1,55 +0,0 @@
-package it.cavallium.dbengine;
-
-import io.netty5.buffer.pool.PoolArenaMetric;
-import io.netty5.buffer.pool.PooledBufferAllocator;
-import java.lang.invoke.MethodHandle;
-import java.lang.invoke.MethodHandles;
-import java.lang.invoke.MethodType;
-import java.util.List;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
-
-/**
- * Netty5 hides some metrics. This utility class can read them.
- */
-public class MetricUtils {
-
-  private static final Logger LOG = LogManager.getLogger(MetricUtils.class);
-  private static final MethodHandle GET_ARENA_METRICS;
-
-  static {
-    var lookup = MethodHandles.lookup();
-
-    // Get the method handle that returns the metrics of each pool arena
-    MethodHandle handle = null;
-    try {
-      // Find the class
-      var pooledBufferClass = Class.forName("io.netty5.buffer.pool.PooledBufferAllocatorMetric");
-      // Find the handle of the method
-      handle = lookup.findVirtual(pooledBufferClass, "arenaMetrics", MethodType.methodType(List.class));
-    } catch (NoSuchMethodException | IllegalAccessException | ClassNotFoundException ex) {
-      logMetricsNotAccessible(ex);
-    }
-    GET_ARENA_METRICS = handle;
-  }
-
-  private static void logMetricsNotAccessible(Throwable ex) {
-    LOG.debug("Failed to open pooled buffer allocator metrics", ex);
-  }
-
-  /**
-   * Get the metrics of each pool arena of a pooled allocator
-   * @param allocator Pooled allocator
-   * @return A list of {@link PoolArenaMetric}
-   */
-  @SuppressWarnings("unchecked")
-  public static List<PoolArenaMetric> getPoolArenaMetrics(PooledBufferAllocator allocator) {
-    var metric = allocator.metric();
-    try {
-      // Invoke the method to get the metrics
-      return (List<PoolArenaMetric>) GET_ARENA_METRICS.invoke(metric);
-    } catch (Throwable e) {
-      return List.of();
-    }
-  }
-}
diff --git a/src/main/java/it/cavallium/dbengine/buffers/Buf.java b/src/main/java/it/cavallium/dbengine/buffers/Buf.java
new file mode 100644
index 0000000..2e678d2
--- /dev/null
+++ b/src/main/java/it/cavallium/dbengine/buffers/Buf.java
@@ -0,0 +1,182 @@
+package it.cavallium.dbengine.buffers;
+
+import com.google.common.primitives.Ints;
+import com.google.common.primitives.Longs;
+import it.unimi.dsi.fastutil.bytes.ByteArrayList;
+import it.unimi.dsi.fastutil.bytes.ByteList;
+import java.nio.charset.Charset;
+import java.nio.charset.StandardCharsets;
+import java.util.RandomAccess;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.warp.commonutils.stream.SafeByteArrayInputStream;
+import org.warp.commonutils.stream.SafeByteArrayOutputStream;
+import org.warp.commonutils.stream.SafeDataOutput;
+
+public interface Buf extends ByteList, RandomAccess {
+
+  static Buf wrap(ByteList bytes) {
+    if (bytes instanceof Buf buf) {
+      return buf;
+    } else if (bytes instanceof ByteArrayList byteArrayList) {
+      return ByteListBuf.wrap(byteArrayList.elements(), byteArrayList.size());
+    } else {
+      return ByteListBuf.wrap(bytes.toByteArray());
+    }
+  }
+
+  static Buf wrap(ByteList bytes, int from, int to) {
+    if (bytes instanceof Buf buf) {
+      return buf.subList(from, to);
+    } else if (bytes instanceof ByteArrayList byteArrayList) {
+      return ByteListBuf.wrap(byteArrayList.elements(), byteArrayList.size()).subList(from, to);
+    } else {
+      return ByteListBuf.wrap(bytes.toByteArray()).subList(from, to);
+    }
+  }
+
+  static Buf wrap(byte[] bytes) {
+    return ByteListBuf.wrap(bytes);
+  }
+
+  static Buf wrap(byte[] bytes, int from, int to) {
+    return ByteListBuf.wrap(bytes, to).subList(from, to);
+  }
+
+  static Buf create(int initialCapacity) {
+    return new ByteListBuf(initialCapacity);
+  }
+
+  static Buf copyOf(byte[] original) {
+    return new ByteListBuf(original);
+  }
+
+  static Buf create() {
+    return new ByteListBuf();
+  }
+
+  static Buf wrap(byte[] array, int length) {
+    return ByteListBuf.wrap(array, length);
+  }
+
+  static Buf createZeroes(int length) {
+    return ByteListBuf.wrap(new byte[length], length);
+  }
+
+  /**
+   * Get this element as an array, converting it if needed
+   */
+  byte @NotNull [] asArray();
+
+  /**
+   * Get this element as an array, only if it's already an array, otherwise return null
+   */
+  byte @Nullable [] asArrayStrict();
+
+  /**
+   * Get this element as an array with equal or bigger size, converting it if needed.
+   * The returned array may be bigger than expected!
+   */
+  byte @Nullable [] asUnboundedArray();
+
+  /**
+   * Get this element as an array with equal or bigger size, only if it's already an array, otherwise return null.
+   * The returned array may be bigger than expected!
+   */
+  byte @Nullable [] asUnboundedArrayStrict();
+
+  boolean isMutable();
+
+  void freeze();
+
+  @Override
+  Buf subList(int from, int to);
+
+  Buf copy();
+
+  SafeByteArrayInputStream binaryInputStream();
+
+  void writeTo(SafeDataOutput dataOutput);
+
+  default long getLong(int i) {
+    return Longs.fromBytes(getByte(i),
+        getByte(i + 1),
+        getByte(i + 2),
+        getByte(i + 3),
+        getByte(i + 4),
+        getByte(i + 5),
+        getByte(i + 6),
+        getByte(i + 7)
+    );
+  }
+
+  default int getInt(int i) {
+    return Ints.fromBytes(getByte(i),
+        getByte(i + 1),
+        getByte(i + 2),
+        getByte(i + 3)
+    );
+  }
+
+  default float getFloat(int i) {
+    return Float.intBitsToFloat(getInt(i));
+  }
+
+  default double getDouble(int i) {
+    return Double.longBitsToDouble(getLong(i));
+  }
+
+  default boolean getBoolean(int i) {
+    return getByte(i) != 0;
+  }
+
+  default void setBoolean(int i, boolean val) {
+    set(i, val ? (byte) 1 : 0);
+  }
+
+  default void setByte(int i, byte val) {
+    set(i, val);
+  }
+
+  default void setInt(int i, int val) {
+    set(i, (byte) (val >> 24));
+    set(i + 1, (byte) (val >> 16));
+    set(i + 2, (byte) (val >> 8));
+    set(i + 3, (byte) val);
+  }
+
+  default void setLong(int i, long val) {
+    set(i, (byte) (val >> 56));
+    set(i + 1, (byte) (val >> 48));
+    set(i + 2, (byte) (val >> 40));
+    set(i + 3, (byte) (val >> 32));
+    set(i + 4, (byte) (val >> 24));
+    set(i + 5, (byte) (val >> 16));
+    set(i + 6, (byte) (val >> 8));
+    set(i + 7, (byte) val);
+  }
+
+  default void setFloat(int i, float val) {
+    setInt(i, Float.floatToRawIntBits(val));
+  }
+
+  default void setDouble(int i, double val) {
+    setLong(i, Double.doubleToRawLongBits(val));
+  }
+
+  default SafeByteArrayOutputStream binaryOutputStream() {
+    return binaryOutputStream(0, size());
+  }
+
+  default SafeByteArrayOutputStream binaryOutputStream(int from) {
+    return binaryOutputStream(from, size());
+  }
+
+  SafeByteArrayOutputStream binaryOutputStream(int from, int to);
+
+  boolean equals(int aStartIndex, Buf b, int bStartIndex, int length);
+
+  boolean equals(int aStartIndex, byte[] b, int bStartIndex, int length);
+
+  default String toString(Charset charset) {
+    return new String(this.asArray(), charset);
+  }
+}
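Not part of the patch — a usage sketch of the Buf interface introduced above. All methods used (wrap, setLong/getLong, setInt/getInt, subList, freeze, isMutable) are declared in the interface; the key-layout scenario itself is invented:

import it.cavallium.dbengine.buffers.Buf;

public class BufExample {
  public static void main(String[] args) {
    // Wrap an existing array without copying; the array backs the buffer
    Buf key = Buf.wrap(new byte[Long.BYTES + Integer.BYTES]);
    key.setLong(0, 42L);          // big-endian, matching getLong/setLong above
    key.setInt(Long.BYTES, 7);
    assert key.getLong(0) == 42L && key.getInt(Long.BYTES) == 7;

    // Sub-ranges are views over the same backing array, not copies
    Buf intPart = key.subList(Long.BYTES, Long.BYTES + Integer.BYTES);
    assert intPart.getInt(0) == 7;

    // freeze() marks the buffer immutable for later consumers
    key.freeze();
    assert !key.isMutable();
  }
}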
diff --git a/src/main/java/it/cavallium/dbengine/buffers/BufDataInput.java b/src/main/java/it/cavallium/dbengine/buffers/BufDataInput.java
new file mode 100644
index 0000000..13db4ac
--- /dev/null
+++ b/src/main/java/it/cavallium/dbengine/buffers/BufDataInput.java
@@ -0,0 +1,42 @@
+package it.cavallium.dbengine.buffers;
+
+import org.jetbrains.annotations.NotNull;
+import org.warp.commonutils.stream.SafeByteArrayInputStream;
+import org.warp.commonutils.stream.SafeDataInputStream;
+
+public class BufDataInput extends SafeDataInputStream {
+
+  /**
+   * Creates a DataInputStream that uses the specified underlying InputStream.
+   *
+   * @param in the specified input stream
+   */
+  private BufDataInput(@NotNull SafeByteArrayInputStream in) {
+    super(in);
+  }
+
+  public static BufDataInput create(Buf byteList) {
+    return new BufDataInput(byteList.binaryInputStream());
+  }
+
+  @Deprecated
+  @Override
+  public void close() {
+  }
+
+  @Override
+  public void mark(int readlimit) {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public void reset() {
+    throw new UnsupportedOperationException();
+  }
+
+  @Override
+  public boolean markSupported() {
+    return false;
+  }
+}
diff --git a/src/main/java/it/cavallium/dbengine/buffers/BufDataOutput.java b/src/main/java/it/cavallium/dbengine/buffers/BufDataOutput.java
new file mode 100644
index 0000000..acd1260
--- /dev/null
+++ b/src/main/java/it/cavallium/dbengine/buffers/BufDataOutput.java
@@ -0,0 +1,218 @@
+package it.cavallium.dbengine.buffers;
+
+import it.unimi.dsi.fastutil.Arrays;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.Objects;
+import org.jetbrains.annotations.NotNull;
+import org.warp.commonutils.stream.SafeByteArrayOutputStream;
+import org.warp.commonutils.stream.SafeDataOutputStream;
+
+public class BufDataOutput implements DataOutput {
+
+  private final SafeByteArrayOutputStream buf;
+  private final SafeDataOutputStream dOut;
+  private final int limit;
+
+  private BufDataOutput(SafeByteArrayOutputStream buf) {
+    this.buf = buf;
+    this.dOut = new SafeDataOutputStream(buf);
+    limit = Integer.MAX_VALUE;
+  }
+
+  private BufDataOutput(SafeByteArrayOutputStream buf, int maxSize) {
+    this.buf = buf;
+    this.dOut = new SafeDataOutputStream(buf);
+    this.limit = maxSize;
+  }
+
+  public static BufDataOutput createLimited(int maxSize, int hint) {
+    if (hint >= 0) {
+      if (maxSize < 0 || maxSize == Integer.MAX_VALUE) {
+        return create(hint);
+      } else {
+        return new BufDataOutput(new SafeByteArrayOutputStream(Math.min(maxSize, hint)), maxSize);
+      }
+    } else {
+      return createLimited(maxSize);
+    }
+  }
+
+  public static BufDataOutput createLimited(int maxSize) {
+    if (maxSize < 0 || maxSize == Integer.MAX_VALUE) {
+      return create();
+    } else {
+      return new BufDataOutput(new SafeByteArrayOutputStream(maxSize), maxSize);
+    }
+  }
+
+  public static BufDataOutput create() {
+    return new BufDataOutput(new SafeByteArrayOutputStream());
+  }
+
+  public static BufDataOutput create(int hint) {
+    if (hint >= 0) {
+      return new BufDataOutput(new SafeByteArrayOutputStream(hint));
+    } else {
+      return create();
+    }
+  }
+
+  public static BufDataOutput wrap(Buf buf, int from, int to) {
+    Arrays.ensureFromTo(buf.size(), from, to);
+    if (buf.isEmpty()) {
+      return createLimited(0);
+    } else {
+      return new BufDataOutput(buf.binaryOutputStream(from), to - from);
+    }
+  }
+
+  public static BufDataOutput wrap(Buf buf) {
+    if (buf.isEmpty()) {
+      return createLimited(0);
+    } else {
+      return new BufDataOutput(buf.binaryOutputStream(), buf.size());
+    }
+  }
+
+  private IllegalStateException unreachable(IOException ex) {
+    return new IllegalStateException(ex);
+  }
+
+  @Override
+  public void write(int b) {
+    checkOutOfBounds(1);
+    dOut.write(b);
+  }
+
+  private void checkOutOfBounds(int delta) {
+    if (dOut.size() + delta > limit) {
+      throw new IndexOutOfBoundsException(limit);
+    }
+  }
+
+  @Override
+  public void write(byte @NotNull [] b) {
+    checkOutOfBounds(b.length);
+    dOut.write(b);
+  }
+
+  @Override
+  public void write(byte @NotNull [] b, int off, int len) {
+    checkOutOfBounds(Math.max(0, Math.min(b.length - off, len)));
+    dOut.write(b, off, len);
+  }
+
+  @Override
+  public void writeBoolean(boolean v) {
+    checkOutOfBounds(1);
+    dOut.writeBoolean(v);
+  }
+
+  @Override
+  public void writeByte(int v) {
+    checkOutOfBounds(Byte.BYTES);
+    dOut.writeByte(v);
+  }
+
+  @Override
+  public void writeShort(int v) {
+    checkOutOfBounds(Short.BYTES);
+    dOut.writeShort(v);
+  }
+
+  @Override
+  public void writeChar(int v) {
+    checkOutOfBounds(Character.BYTES);
+    dOut.writeChar(v);
+  }
+
+  @Override
+  public void writeInt(int v) {
+    checkOutOfBounds(Integer.BYTES);
+    dOut.writeInt(v);
+  }
+
+  @Override
+  public void writeLong(long v) {
+    checkOutOfBounds(Long.BYTES);
+    dOut.writeLong(v);
+  }
+
+  @Override
+  public void writeFloat(float v) {
+    checkOutOfBounds(Float.BYTES);
+    dOut.writeFloat(v);
+  }
+
+  @Override
+  public void writeDouble(double v) {
+    checkOutOfBounds(Double.BYTES);
+    dOut.writeDouble(v);
+  }
+
+  public void ensureWritable(int size) {
+    dOut.flush();
+    buf.ensureWritable(size);
+  }
+
+  @Override
+  public void writeBytes(@NotNull String s) {
+    checkOutOfBounds(s.length() * Byte.BYTES);
+    dOut.writeBytes(s);
+  }
+
+  // todo: check
+  public void writeBytes(Buf deserialized) {
+    checkOutOfBounds(deserialized.size());
+    deserialized.writeTo(dOut);
+  }
+
+  public void writeBytes(byte[] b, int off, int len) {
+    write(b, off, len);
+  }
+
+  @Override
+  public void writeChars(@NotNull String s) {
+    checkOutOfBounds(Character.BYTES * s.length());
+    dOut.writeChars(s);
+  }
+
+  @Override
+  public void writeUTF(@NotNull String s) {
+    throw new UnsupportedOperationException();
+  }
+
+  public Buf asList() {
+    dOut.flush();
+    return Buf.wrap(this.buf.array, this.buf.length);
+  }
+
+  @Override
+  public String toString() {
+    return dOut.toString();
+  }
+
+  @Override
+  public int hashCode() {
+    return dOut.hashCode();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+
+    BufDataOutput that = (BufDataOutput) o;
+
+    return Objects.equals(dOut, that.dOut);
+  }
+
+  public int size() {
+    return dOut.size();
+  }
+}
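Not part of the patch — a round-trip sketch for the two stream wrappers above. It assumes SafeDataInputStream exposes the usual DataInput read methods (readInt/readLong) without checked exceptions; everything else comes from the classes in this changeset:

import it.cavallium.dbengine.buffers.Buf;
import it.cavallium.dbengine.buffers.BufDataInput;
import it.cavallium.dbengine.buffers.BufDataOutput;

public class BufStreamsExample {
  public static void main(String[] args) {
    // The limit makes overruns fail fast instead of silently growing the buffer
    BufDataOutput out = BufDataOutput.createLimited(Integer.BYTES + Long.BYTES);
    out.writeInt(123);
    out.writeLong(456L);
    Buf serialized = out.asList();

    BufDataInput in = BufDataInput.create(serialized);
    assert in.readInt() == 123;
    assert in.readLong() == 456L;

    // One byte past the declared limit -> IndexOutOfBoundsException
    try {
      out.writeByte(0);
      throw new AssertionError("expected IndexOutOfBoundsException");
    } catch (IndexOutOfBoundsException expected) {
      // checkOutOfBounds(Byte.BYTES) rejected the write
    }
  }
}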
diff --git a/src/main/java/it/cavallium/dbengine/buffers/ByteListBuf.java b/src/main/java/it/cavallium/dbengine/buffers/ByteListBuf.java
new file mode 100644
index 0000000..3afbb85
--- /dev/null
+++ b/src/main/java/it/cavallium/dbengine/buffers/ByteListBuf.java
@@ -0,0 +1,467 @@
+package it.cavallium.dbengine.buffers;
+
+import it.unimi.dsi.fastutil.bytes.AbstractByteList;
+import it.unimi.dsi.fastutil.bytes.ByteArrayList;
+import it.unimi.dsi.fastutil.bytes.ByteCollection;
+import it.unimi.dsi.fastutil.bytes.ByteConsumer;
+import it.unimi.dsi.fastutil.bytes.ByteIterator;
+import it.unimi.dsi.fastutil.bytes.ByteIterators;
+import it.unimi.dsi.fastutil.bytes.ByteList;
+import it.unimi.dsi.fastutil.bytes.ByteListIterator;
+import it.unimi.dsi.fastutil.bytes.ByteSpliterator;
+import it.unimi.dsi.fastutil.bytes.ByteSpliterators;
+import java.io.Serial;
+import java.nio.charset.Charset;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Iterator;
+import java.util.NoSuchElementException;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
+import org.warp.commonutils.stream.SafeByteArrayInputStream;
+import org.warp.commonutils.stream.SafeByteArrayOutputStream;
+import org.warp.commonutils.stream.SafeDataOutput;
+
+class ByteListBuf extends ByteArrayList implements Buf {
+
+  private boolean mutable = true;
+
+  protected ByteListBuf(byte[] a, boolean wrapped) {
+    super(a, wrapped);
+  }
+
+  public ByteListBuf(int capacity) {
+    super(capacity);
+  }
+
+  public ByteListBuf() {
+  }
+
+  public ByteListBuf(Collection<? extends Byte> c) {
+    super(c);
+  }
+
+  public ByteListBuf(ByteCollection c) {
+    super(c);
+  }
+
+  public ByteListBuf(ByteList l) {
+    super(l);
+  }
+
+  public ByteListBuf(byte[] a) {
+    super(a);
+  }
+
+  public ByteListBuf(byte[] a, int offset, int length) {
+    super(a, offset, length);
+  }
+
+  public ByteListBuf(Iterator<? extends Byte> i) {
+    super(i);
+  }
+
+  public ByteListBuf(ByteIterator i) {
+    super(i);
+  }
+
+  /**
+   * Wraps a given array into an array list of given size.
+   *
+   * <p>Note it is guaranteed that the type of the array returned by {@link #elements()} will be the same
+   * (see the comments in the class documentation).
+   *
+   * @param a an array to wrap.
+   * @param length the length of the resulting array list.
+   * @return a new array list of the given size, wrapping the given array.
+   */
+  public static ByteListBuf wrap(final byte[] a, final int length) {
+    if (length > a.length) throw new IllegalArgumentException("The specified length (" + length + ") is greater than the array size (" + a.length + ")");
+    final ByteListBuf l = new ByteListBuf(a, true);
+    l.size = length;
+    return l;
+  }
+
+  /**
+   * Wraps a given array into an array list.
+   *
+   * <p>Note it is guaranteed that the type of the array returned by {@link #elements()} will be the same
+   * (see the comments in the class documentation).
+   *
+   * @param a an array to wrap.
+   * @return a new array list wrapping the given array.
+   */
+  public static ByteListBuf wrap(final byte[] a) {
+    return wrap(a, a.length);
+  }
+
+  /**
+   * Creates a new empty array list.
+   *
+   * @return a new empty array list.
+   */
+  public static ByteListBuf of() {
+    return new ByteListBuf();
+  }
+
+  /**
+   * Creates an array list using an array of elements.
+   *
+   * @param init the array that will become the new backing array of the array list.
+   * @return a new array list backed by the given array.
+   * @see #wrap
+   */
+  public static ByteListBuf of(final byte... init) {
+    return wrap(init);
+  }
+
+  @Override
+  public byte @NotNull [] asArray() {
+    if (this.size() == a.length) {
+      return this.a;
+    } else {
+      return this.toByteArray();
+    }
+  }
+
+  @Override
+  public byte @Nullable [] asArrayStrict() {
+    if (this.size() == a.length) {
+      return a;
+    } else {
+      return null;
+    }
+  }
+
+  @Override
+  public byte @Nullable [] asUnboundedArray() {
+    return a;
+  }
+
+  @Override
+  public byte @Nullable [] asUnboundedArrayStrict() {
+    return a;
+  }
+
+  @Override
+  public boolean isMutable() {
+    return mutable;
+  }
+
+  @Override
+  public void freeze() {
+    mutable = false;
+  }
+
+  @Override
+  public Buf subList(int from, int to) {
+    if (from == 0 && to == size()) return this;
+    ensureIndex(from);
+    ensureIndex(to);
+    if (from > to) throw new IndexOutOfBoundsException("Start index (" + from + ") is greater than end index (" + to + ")");
+    return new SubList(from, to);
+  }
+
+  @Override
+  public Buf copy() {
+    var copied = ByteListBuf.wrap(this.a.clone());
+    copied.size = this.size;
+    return copied;
+  }
+
+  @Override
+  public SafeByteArrayInputStream binaryInputStream() {
+    return new SafeByteArrayInputStream(this.a, 0, this.size);
+  }
+
+  @Override
+  public void writeTo(SafeDataOutput dataOutput) {
+    dataOutput.write(this.a, 0, this.size);
+  }
+
+  @Override
+  public SafeByteArrayOutputStream binaryOutputStream(int from, int to) {
+    it.unimi.dsi.fastutil.Arrays.ensureFromTo(size, from, to);
+    return new SafeByteArrayOutputStream(a, from, to);
+  }
+
+  @Override
+  public boolean equals(int aStartIndex, Buf b, int bStartIndex, int length) {
+    return b.equals(bStartIndex, this.a, aStartIndex, length);
+  }
+
+  @Override
+  public boolean equals(int aStartIndex, byte[] b, int bStartIndex, int length) {
+    if (aStartIndex < 0) return false;
+    if (aStartIndex + length > this.size) {
+      return false;
+    }
+    return Arrays.equals(a, aStartIndex, aStartIndex + length, b, bStartIndex, bStartIndex + length);
+  }
+
+  @Override
+  public String toString(Charset charset) {
+    return new String(a, 0, size, charset);
+  }
+
+  private class SubList extends AbstractByteList.ByteRandomAccessSubList implements Buf {
+    @Serial
+    private static final long serialVersionUID = -3185226345314976296L;
+
+    private boolean subMutable = true;
+
+    protected SubList(int from, int to) {
+      super(ByteListBuf.this, from, to);
+    }
+
+    // Most of the inherited methods should be fine, but we can override a few of them for performance.
+    // Needed because we can't access the parent class' instance variables directly in a different
+    // instance of SubList.
+    private byte[] getParentArray() {
+      return a;
+    }
+
+    @Override
+    public @NotNull Buf subList(int from, int to) {
+      it.unimi.dsi.fastutil.Arrays.ensureFromTo(a.length, from, to);
+      if (from > to) throw new IllegalArgumentException("Start index (" + from + ") is greater than end index (" + to + ")");
+      // Sadly we have to rewrap this, because if there is a sublist of a sublist, and the
+      // subsublist adds, both sublists need to update their "to" value.
+      return new SubList(from, to);
+    }
+
+    @Override
+    public Buf copy() {
+      return Buf.wrap(Arrays.copyOfRange(a, from, to));
+    }
+
+    @Override
+    public SafeByteArrayInputStream binaryInputStream() {
+      return new SafeByteArrayInputStream(a, from, size());
+    }
+
+    @Override
+    public void writeTo(SafeDataOutput dataOutput) {
+      dataOutput.write(a, from, size());
+    }
+
+    @Override
+    public SafeByteArrayOutputStream binaryOutputStream(int from, int to) {
+      it.unimi.dsi.fastutil.Arrays.ensureFromTo(size(), from, to);
+      return new SafeByteArrayOutputStream(a, from + this.from, to + this.from);
+    }
+
+    @Override
+    public boolean equals(int aStartIndex, Buf b, int bStartIndex, int length) {
+      return b.equals(bStartIndex, a, aStartIndex + from, length);
+    }
+
+    @Override
+    public boolean equals(int aStartIndex, byte[] b, int bStartIndex, int length) {
+      var aFrom = from + aStartIndex;
+      var aTo = from + aStartIndex + length;
+      if (aFrom < from) return false;
+      if (aTo > to) return false;
+      return Arrays.equals(a, aFrom, aTo, b, bStartIndex, bStartIndex + length);
+    }
+
+    @Override
+    public byte getByte(int i) {
+      ensureRestrictedIndex(i);
+      return a[i + from];
+    }
+
+    @Override
+    public byte @NotNull [] asArray() {
+      if (this.from == 0 && this.to == a.length) {
+        return a;
+      } else {
+        return toByteArray();
+      }
+    }
+
+    @Override
+    public byte @Nullable [] asArrayStrict() {
+      if (this.from == 0 && this.to == a.length) {
+        return a;
+      } else {
+        return null;
+      }
+    }
+
+    @Override
+    public byte @Nullable [] asUnboundedArray() {
+      if (from == 0) {
+        return a;
+      } else {
+        return toByteArray();
+      }
+    }
+
+    @Override
+    public byte @Nullable [] asUnboundedArrayStrict() {
+      if (from == 0) {
+        return a;
+      } else {
+        return null;
+      }
+    }
+
+    @Override
+    public boolean isMutable() {
+      return mutable && subMutable;
+    }
+
+    @Override
+    public void freeze() {
+      subMutable = false;
+    }
+
+    private final class SubListIterator extends ByteIterators.AbstractIndexBasedListIterator {
+      // We are using pos == 0 to be 0 relative to SubList.from (meaning you need to do a[from + i] when
+      // accessing array).
+      SubListIterator(int index) {
+        super(0, index);
+      }
+
+      @Override
+      protected byte get(int i) {
+        return a[from + i];
+      }
+
+      @Override
+      protected void add(int i, byte k) {
+        ByteListBuf.SubList.this.add(i, k);
+      }
+
+      @Override
+      protected void set(int i, byte k) {
+        ByteListBuf.SubList.this.set(i, k);
+      }
+
+      @Override
+      protected void remove(int i) {
+        ByteListBuf.SubList.this.removeByte(i);
+      }
+
+      @Override
+      protected int getMaxPos() {
+        return to - from;
+      }
+
+      @Override
+      public byte nextByte() {
+        if (!hasNext()) throw new NoSuchElementException();
+        return a[from + (lastReturned = pos++)];
+      }
+
+      @Override
+      public byte previousByte() {
+        if (!hasPrevious()) throw new NoSuchElementException();
+        return a[from + (lastReturned = --pos)];
+      }
+
+      @Override
+      public void forEachRemaining(final ByteConsumer action) {
+        final int max = to - from;
+        while (pos < max) {
+          action.accept(a[from + (lastReturned = pos++)]);
+        }
+      }
+    }
+
+    @Override
+    public @NotNull ByteListIterator listIterator(int index) {
+      return new ByteListBuf.SubList.SubListIterator(index);
+    }
+
+    private final class SubListSpliterator extends ByteSpliterators.LateBindingSizeIndexBasedSpliterator {
+      // We are using pos == 0 to be 0 relative to real array 0
+      SubListSpliterator() {
+        super(from);
+      }
+
+      private SubListSpliterator(int pos, int maxPos) {
+        super(pos, maxPos);
+      }
+
+      @Override
+      protected int getMaxPosFromBackingStore() {
+        return to;
+      }
+
+      @Override
+      protected byte get(int i) {
+        return a[i];
+      }
+
+      @Override
+      protected ByteListBuf.SubList.SubListSpliterator makeForSplit(int pos, int maxPos) {
+        return new ByteListBuf.SubList.SubListSpliterator(pos, maxPos);
+      }
+
+      @Override
+      public boolean tryAdvance(final ByteConsumer action) {
+        if (pos >= getMaxPos()) return false;
+        action.accept(a[pos++]);
+        return true;
+      }
+
+      @Override
+      public void forEachRemaining(final ByteConsumer action) {
+        final int max = getMaxPos();
+        while (pos < max) {
+          action.accept(a[pos++]);
+        }
+      }
+    }
+
+    @Override
+    public ByteSpliterator spliterator() {
+      return new ByteListBuf.SubList.SubListSpliterator();
+    }
+
+    boolean contentsEquals(byte[] otherA, int otherAFrom, int otherATo) {
+      if (a == otherA && from == otherAFrom && to == otherATo) return true;
+      return Arrays.equals(a, from, to, otherA, otherAFrom, otherATo);
+    }
+
+    @Override
+    public boolean equals(Object o) {
+      if (o == this) return true;
+      if (o == null) return false;
+      if (!(o instanceof java.util.List)) return false;
+      if (o instanceof ByteListBuf other) {
+        return contentsEquals(other.a, 0, other.size());
+      }
+      if (o instanceof SubList other) {
+        return contentsEquals(other.getParentArray(), other.from, other.to);
+      }
+      return super.equals(o);
+    }
+
+    int contentsCompareTo(byte[] otherA, int otherAFrom, int otherATo) {
+      if (a == otherA && from == otherAFrom && to == otherATo) return 0;
+      return Arrays.compareUnsigned(a, from, to, otherA, otherAFrom, otherATo);
+    }
+
+    @Override
+    public int compareTo(final java.util.@NotNull List<? extends Byte> l) {
+      if (l instanceof ByteListBuf other) {
+        return contentsCompareTo(other.a, 0, other.size());
+      }
+      if (l instanceof ByteListBuf.SubList other) {
+        return contentsCompareTo(other.getParentArray(), other.from, other.to);
+      }
+      return super.compareTo(l);
+    }
+
+    @Override
+    public String toString(Charset charset) {
+      return new String(a, from, to, charset);
+    }
+  }
+}
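Not part of the patch — a sketch of what the SubList overrides above buy callers: range equality and sub-views avoid copying, while copy() detaches from the backing array. Everything used here is declared in Buf/ByteListBuf; the data is invented:

import it.cavallium.dbengine.buffers.Buf;

public class PrefixCompareExample {
  public static void main(String[] args) {
    Buf a = Buf.wrap(new byte[] {1, 2, 3, 4, 5});
    Buf b = Buf.wrap(new byte[] {9, 1, 2, 3, 9});

    // Compare a[0..3) with b[1..4) in place, no intermediate arrays
    boolean samePrefix = a.equals(0, b.subList(1, 4), 0, 3);
    assert samePrefix;

    // copy() detaches from the backing array, views do not
    Buf view = a.subList(0, 3);
    Buf copy = view.copy();
    a.set(0, (byte) 7);
    assert view.getByte(0) == 7 && copy.getByte(0) == 1;
  }
}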
diff --git a/src/main/java/it/cavallium/dbengine/client/Backuppable.java b/src/main/java/it/cavallium/dbengine/client/Backuppable.java
index a83a6c9..eed25fc 100644
--- a/src/main/java/it/cavallium/dbengine/client/Backuppable.java
+++ b/src/main/java/it/cavallium/dbengine/client/Backuppable.java
@@ -1,8 +1,7 @@
 package it.cavallium.dbengine.client;
 
+import java.io.IOException;
 import java.util.concurrent.atomic.AtomicInteger;
-import reactor.core.publisher.Mono;
-import reactor.core.publisher.SignalType;
 
 public abstract class Backuppable implements IBackuppable {
 
@@ -13,29 +12,29 @@ public abstract class Backuppable implements IBackuppable {
   private final AtomicInteger state = new AtomicInteger();
 
   @Override
-  public final Mono<Void> pauseForBackup() {
-    return Mono.defer(() -> {
-      if (state.compareAndSet(State.RUNNING.ordinal(), State.PAUSING.ordinal())) {
-        return onPauseForBackup().doFinally(type -> state.compareAndSet(State.PAUSING.ordinal(),
-            type == SignalType.ON_ERROR ? State.RUNNING.ordinal() : State.PAUSED.ordinal()
-        ));
-      } else {
-        return Mono.empty();
+  public final void pauseForBackup() {
+    if (state.compareAndSet(State.RUNNING.ordinal(), State.PAUSING.ordinal())) {
+      try {
+        onPauseForBackup();
+        state.compareAndSet(State.PAUSING.ordinal(), State.PAUSED.ordinal());
+      } catch (Throwable ex) {
+        state.compareAndSet(State.PAUSING.ordinal(), State.RUNNING.ordinal());
+        throw ex;
       }
-    });
+    }
   }
 
   @Override
-  public final Mono<Void> resumeAfterBackup() {
-    return Mono.defer(() -> {
-      if (state.compareAndSet(State.PAUSED.ordinal(), State.RESUMING.ordinal())) {
-        return onResumeAfterBackup().doFinally(type -> state.compareAndSet(State.RESUMING.ordinal(),
-            type == SignalType.ON_ERROR ? State.PAUSED.ordinal() : State.RUNNING.ordinal()
-        ));
-      } else {
-        return Mono.empty();
+  public final void resumeAfterBackup() {
+    if (state.compareAndSet(State.PAUSED.ordinal(), State.RESUMING.ordinal())) {
+      try {
+        onResumeAfterBackup();
+        state.compareAndSet(State.RESUMING.ordinal(), State.RUNNING.ordinal());
+      } catch (Throwable ex) {
+        state.compareAndSet(State.RESUMING.ordinal(), State.PAUSED.ordinal());
+        throw ex;
       }
-    });
+    }
   }
 
   @Override
@@ -47,9 +46,9 @@ public abstract class Backuppable implements IBackuppable {
     return State.values()[state.get()];
   }
 
-  protected abstract Mono<Void> onPauseForBackup();
+  protected abstract void onPauseForBackup();
 
-  protected abstract Mono<Void> onResumeAfterBackup();
+  protected abstract void onResumeAfterBackup();
 
   public final void setStopped() {
     state.set(State.STOPPED.ordinal());
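Not part of the patch — a minimal sketch of the new synchronous backup contract. FileStore and BackupJob are hypothetical; Backuppable supplies the CAS-based state machine, so a subclass only fills in the two hooks, and a thrown exception rolls the state back (PAUSING -> RUNNING, RESUMING -> PAUSED) as in the catch blocks above:

import it.cavallium.dbengine.client.Backuppable;

class FileStore extends Backuppable {

  @Override
  protected void onPauseForBackup() {
    // flush pending writes and stop accepting new ones
  }

  @Override
  protected void onResumeAfterBackup() {
    // reopen for writes
  }
}

class BackupJob {
  static void run(FileStore store) {
    store.pauseForBackup();        // RUNNING -> PAUSING -> PAUSED
    try {
      // ... copy files while the store is quiescent ...
    } finally {
      store.resumeAfterBackup();   // PAUSED -> RESUMING -> RUNNING
    }
  }
}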
diff --git a/src/main/java/it/cavallium/dbengine/client/BadBlock.java b/src/main/java/it/cavallium/dbengine/client/BadBlock.java
index 7363a7a..aee9a2c 100644
--- a/src/main/java/it/cavallium/dbengine/client/BadBlock.java
+++ b/src/main/java/it/cavallium/dbengine/client/BadBlock.java
@@ -1,8 +1,8 @@
 package it.cavallium.dbengine.client;
 
+import it.cavallium.dbengine.buffers.Buf;
 import it.cavallium.dbengine.rpc.current.data.Column;
-import it.unimi.dsi.fastutil.bytes.ByteList;
 import org.jetbrains.annotations.Nullable;
 
-public record BadBlock(String databaseName, @Nullable Column column, @Nullable ByteList rawKey,
+public record BadBlock(String databaseName, @Nullable Column column, @Nullable Buf rawKey,
     @Nullable Throwable ex) {}
diff --git a/src/main/java/it/cavallium/dbengine/client/CastMapper.java b/src/main/java/it/cavallium/dbengine/client/CastMapper.java
index 8f09a10..12f6aba 100644
--- a/src/main/java/it/cavallium/dbengine/client/CastMapper.java
+++ b/src/main/java/it/cavallium/dbengine/client/CastMapper.java
@@ -1,14 +1,14 @@
 package it.cavallium.dbengine.client;
 
-import it.cavallium.dbengine.client.Mapper;
-
 public class CastMapper<T, U> implements Mapper<T, U> {
 
+  @SuppressWarnings("unchecked")
   @Override
   public U map(T key) {
     return (U) key;
   }
 
+  @SuppressWarnings("unchecked")
   @Override
   public T unmap(U key) {
     return (T) key;
diff --git a/src/main/java/it/cavallium/dbengine/client/CompositeDatabase.java b/src/main/java/it/cavallium/dbengine/client/CompositeDatabase.java
index 5540a8f..8d51878 100644
--- a/src/main/java/it/cavallium/dbengine/client/CompositeDatabase.java
+++ b/src/main/java/it/cavallium/dbengine/client/CompositeDatabase.java
@@ -1,36 +1,32 @@
 package it.cavallium.dbengine.client;
 
 import io.micrometer.core.instrument.MeterRegistry;
-import io.netty5.buffer.BufferAllocator;
 import it.cavallium.dbengine.database.DatabaseOperations;
 import it.cavallium.dbengine.database.DatabaseProperties;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
+import java.util.stream.Stream;
 
 public interface CompositeDatabase extends DatabaseProperties, DatabaseOperations {
 
-  Mono<Void> preClose();
+  void preClose();
 
-  Mono<Void> close();
+  void close();
 
   /**
    * Can return SnapshotException
    */
-  Mono<CompositeSnapshot> takeSnapshot();
+  CompositeSnapshot takeSnapshot();
 
   /**
    * Can return SnapshotException
    */
-  Mono<Void> releaseSnapshot(CompositeSnapshot snapshot);
-
-  BufferAllocator getAllocator();
+  void releaseSnapshot(CompositeSnapshot snapshot);
 
   MeterRegistry getMeterRegistry();
 
   /**
    * Find corrupted items
    */
-  Flux<BadBlock> badBlocks();
+  Stream<BadBlock> badBlocks();
 
-  Mono<Void> verifyChecksum();
+  void verifyChecksum();
 }
diff --git a/src/main/java/it/cavallium/dbengine/client/CountedStream.java b/src/main/java/it/cavallium/dbengine/client/CountedStream.java
deleted file mode 100644
index 7035bb6..0000000
--- a/src/main/java/it/cavallium/dbengine/client/CountedStream.java
+++ /dev/null
@@ -1,47 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import java.util.Collection;
-import java.util.List;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
-public class CountedStream<T> {
-
-  private final Flux<T> stream;
-  private final long count;
-
-  public CountedStream(Flux<T> stream, long count) {
-    this.stream = stream;
-    this.count = count;
-  }
-
-  public Flux<T> getStream() {
-    return stream;
-  }
-
-  public long getCount() {
-    return count;
-  }
-
-  @SafeVarargs
-  public static <T> CountedStream<T> merge(CountedStream<T>... stream) {
-    return merge(List.of(stream));
-  }
-
-  public static <T> CountedStream<T> merge(Collection<CountedStream<T>> stream) {
-    return stream
-        .stream()
-        .reduce((a, b) -> new CountedStream<>(Flux.merge(a.getStream(), b.getStream()), a.getCount() + b.getCount()))
-        .orElseGet(() -> new CountedStream<>(Flux.empty(), 0));
-  }
-
-  public static <T> Mono<CountedStream<T>> merge(Flux<CountedStream<T>> stream) {
-    return stream
-        .reduce((a, b) -> new CountedStream<>(Flux.merge(a.getStream(), b.getStream()), a.getCount() + b.getCount()))
-        .switchIfEmpty(Mono.fromSupplier(() -> new CountedStream<>(Flux.empty(), 0)));
-  }
-
-  public Mono<List<T>> collectList() {
-    return stream.collectList();
-  }
-}
diff --git a/src/main/java/it/cavallium/dbengine/client/DefaultDatabaseOptions.java b/src/main/java/it/cavallium/dbengine/client/DefaultDatabaseOptions.java
index 196feb9..b5007c5 100644
--- a/src/main/java/it/cavallium/dbengine/client/DefaultDatabaseOptions.java
+++ b/src/main/java/it/cavallium/dbengine/client/DefaultDatabaseOptions.java
@@ -58,7 +58,6 @@ public class DefaultDatabaseOptions {
       false,
       false,
       true,
-      true,
       Nullableint.empty(),
       Nullablelong.empty(),
       Nullablelong.empty(),
diff --git a/src/main/java/it/cavallium/dbengine/client/HitEntry.java b/src/main/java/it/cavallium/dbengine/client/HitEntry.java
index 701de4a..b03e1c4 100644
--- a/src/main/java/it/cavallium/dbengine/client/HitEntry.java
+++ b/src/main/java/it/cavallium/dbengine/client/HitEntry.java
@@ -1,9 +1,9 @@
 package it.cavallium.dbengine.client;
 
 import org.jetbrains.annotations.NotNull;
-import reactor.core.publisher.Mono;
+import org.jetbrains.annotations.Nullable;
 
-public record HitEntry<T, U>(T key, U value, float score)
+public record HitEntry<T, U>(T key, @Nullable U value, float score)
     implements Comparable<HitEntry<T, U>> {
 
   @Override
diff --git a/src/main/java/it/cavallium/dbengine/client/HitKey.java b/src/main/java/it/cavallium/dbengine/client/HitKey.java
index 7c1cd8a..122f702 100644
--- a/src/main/java/it/cavallium/dbengine/client/HitKey.java
+++ b/src/main/java/it/cavallium/dbengine/client/HitKey.java
@@ -1,16 +1,13 @@
 package it.cavallium.dbengine.client;
 
 import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
-import java.util.Comparator;
 import java.util.function.Function;
 import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Mono;
 
 public record HitKey<T>(T key, float score) implements Comparable<HitKey<T>> {
 
-  public <U> Mono<HitEntry<T, U>> withValue(Function<T, Mono<U>> valueGetter) {
-    return valueGetter.apply(key).map(value -> new HitEntry<>(key, value, score));
+  public <U> HitEntry<T, U> withValue(Function<T, U> valueGetter) {
+    return new HitEntry<>(key, valueGetter.apply(key), score);
  }
 
   public HitEntry<T, Nothing> withNullValue() {
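Not part of the patch — the new withValue contract in a few lines: the value getter is applied eagerly instead of returning a Mono. The Map stands in for a real value lookup:

import it.cavallium.dbengine.client.HitEntry;
import it.cavallium.dbengine.client.HitKey;
import java.util.Map;

public class HitKeyExample {
  public static void main(String[] args) {
    // Hypothetical lookup table standing in for a real value getter
    Map<Long, String> store = Map.of(1L, "first", 2L, "second");

    HitKey<Long> hit = new HitKey<>(2L, 0.9f);

    // Before: valueGetter returned Mono<U> and withValue returned Mono<HitEntry<T, U>>.
    // Now the lookup runs inline:
    HitEntry<Long, String> entry = hit.withValue(store::get);

    assert "second".equals(entry.value());
    assert entry.score() == 0.9f;
  }
}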
diff --git a/src/main/java/it/cavallium/dbengine/client/Hits.java b/src/main/java/it/cavallium/dbengine/client/Hits.java
index 684a403..1ecc5e6 100644
--- a/src/main/java/it/cavallium/dbengine/client/Hits.java
+++ b/src/main/java/it/cavallium/dbengine/client/Hits.java
@@ -4,29 +4,25 @@ import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
 import it.cavallium.dbengine.database.DiscardingCloseable;
 import it.cavallium.dbengine.database.SafeCloseable;
 import it.cavallium.dbengine.database.collections.ValueGetter;
-import it.cavallium.dbengine.database.collections.ValueTransformer;
 import it.cavallium.dbengine.lucene.LuceneCloseable;
 import it.cavallium.dbengine.utils.SimpleResource;
-import java.util.Map.Entry;
-import java.util.Optional;
 import java.util.function.Function;
+import java.util.stream.Stream;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
 
 public class Hits<T> extends SimpleResource implements DiscardingCloseable {
 
   private static final Logger LOG = LogManager.getLogger(Hits.class);
-  private static final Hits<?> EMPTY_HITS = new Hits<>(Flux.empty(), TotalHitsCount.of(0, true), false);
-  private final Flux<T> results;
+  private static final Hits<?> EMPTY_HITS = new Hits<>(Stream.empty(), TotalHitsCount.of(0, true), false);
+  private final Stream<T> results;
   private final TotalHitsCount totalHitsCount;
 
-  public Hits(Flux<T> results, TotalHitsCount totalHitsCount) {
+  public Hits(Stream<T> results, TotalHitsCount totalHitsCount) {
     this(results, totalHitsCount, true);
   }
 
-  private Hits(Flux<T> results, TotalHitsCount totalHitsCount, boolean canClose) {
+  private Hits(Stream<T> results, TotalHitsCount totalHitsCount, boolean canClose) {
     super(canClose);
     this.results = results;
     this.totalHitsCount = totalHitsCount;
@@ -37,44 +33,16 @@ public class Hits<T> extends SimpleResource implements DiscardingCloseable {
     return (Hits<T>) EMPTY_HITS;
   }
 
-  public static <T, U> Function<Hits<HitKey<T>>, Hits<LazyHitEntry<T, U>>> generateMapper(
+  public static <T, U> Function<Hits<HitKey<T>>, Hits<HitEntry<T, U>>> generateMapper(
       ValueGetter<T, U> valueGetter) {
     return result -> {
       var hitsToTransform = result.results()
-          .map(hit -> new LazyHitEntry<>(Mono.just(hit.key()), valueGetter.get(hit.key()), hit.score()));
+          .map(hit -> new HitEntry<>(hit.key(), valueGetter.get(hit.key()), hit.score()));
       return Hits.withResource(hitsToTransform, result.totalHitsCount(), result);
     };
   }
 
-  public static <T, U> Function<Hits<HitKey<T>>, Hits<LazyHitEntry<T, U>>> generateMapper(
-      ValueTransformer<T, U> valueTransformer) {
-    return result -> {
-      try {
-        var sharedHitsFlux = result.results().publish().refCount(3);
-        var scoresFlux = sharedHitsFlux.map(HitKey::score);
-        var keysFlux = sharedHitsFlux.map(HitKey::key);
-
-        var valuesFlux = valueTransformer.transform(keysFlux);
-
-        var transformedFlux = Flux.zip((Object[] data) -> {
-          //noinspection unchecked
-          var keyMono = Mono.just((T) data[0]);
-          //noinspection unchecked
-          var val = (Entry<T, Optional<U>>) data[1];
-          var valMono = Mono.justOrEmpty(val.getValue());
-          var score = (Float) data[2];
-          return new LazyHitEntry<>(keyMono, valMono, score);
-        }, keysFlux, valuesFlux, scoresFlux);
-
-        return Hits.withResource(transformedFlux, result.totalHitsCount(), result);
-      } catch (Throwable t) {
-        result.close();
-        throw t;
-      }
-    };
-  }
-
-  public static <T> Hits<T> withResource(Flux<T> hits, TotalHitsCount count, SafeCloseable resource) {
+  public static <T> Hits<T> withResource(Stream<T> hits, TotalHitsCount count, SafeCloseable resource) {
     if (resource instanceof LuceneCloseable luceneCloseable) {
       return new LuceneHits<>(hits, count, luceneCloseable);
     } else {
@@ -82,7 +50,7 @@ public class Hits<T> extends SimpleResource implements DiscardingCloseable {
   }
 
-  public Flux<T> results() {
+  public Stream<T> results() {
     ensureOpen();
     return results;
   }
@@ -105,7 +73,7 @@ public class Hits<T> extends SimpleResource implements DiscardingCloseable {
 
     private final LuceneCloseable resource;
 
-    public LuceneHits(Flux<U> hits, TotalHitsCount count, LuceneCloseable resource) {
+    public LuceneHits(Stream<U> hits, TotalHitsCount count, LuceneCloseable resource) {
       super(hits, count);
       this.resource = resource;
     }
@@ -125,7 +93,7 @@ public class Hits<T> extends SimpleResource implements DiscardingCloseable {
 
     private final SafeCloseable resource;
 
-    public CloseableHits(Flux<U> hits, TotalHitsCount count, SafeCloseable resource) {
+    public CloseableHits(Stream<U> hits, TotalHitsCount count, SafeCloseable resource) {
       super(hits, count);
       this.resource = resource;
     }
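Not part of the patch — a sketch of generateMapper with the Stream-based Hits API. It assumes ValueGetter is a functional interface whose get(T) now returns the value directly (the code above calls valueGetter.get(hit.key()) synchronously); the data is invented:

import it.cavallium.dbengine.client.HitEntry;
import it.cavallium.dbengine.client.HitKey;
import it.cavallium.dbengine.client.Hits;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import java.util.Map;
import java.util.stream.Stream;

public class HitsMapperExample {
  public static void main(String[] args) {
    Map<Long, String> store = Map.of(1L, "a", 2L, "b");

    Hits<HitKey<Long>> keyHits = new Hits<>(
        Stream.of(new HitKey<>(1L, 1.0f), new HitKey<>(2L, 0.5f)),
        TotalHitsCount.of(2, true));

    // The returned Hits keeps the source as its closeable resource
    Hits<HitEntry<Long, String>> entries =
        Hits.<Long, String>generateMapper(store::get).apply(keyHits);

    entries.results().forEach(e -> System.out.println(e.key() + " -> " + e.value()));
  }
}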
diff --git a/src/main/java/it/cavallium/dbengine/client/IBackuppable.java b/src/main/java/it/cavallium/dbengine/client/IBackuppable.java
index 2427f07..fa6ed80 100644
--- a/src/main/java/it/cavallium/dbengine/client/IBackuppable.java
+++ b/src/main/java/it/cavallium/dbengine/client/IBackuppable.java
@@ -1,12 +1,10 @@
 package it.cavallium.dbengine.client;
 
-import reactor.core.publisher.Mono;
-
 public interface IBackuppable {
 
-  Mono<Void> pauseForBackup();
+  void pauseForBackup();
 
-  Mono<Void> resumeAfterBackup();
+  void resumeAfterBackup();
 
   boolean isPaused();
 }
diff --git a/src/main/java/it/cavallium/dbengine/client/IndexAction.java b/src/main/java/it/cavallium/dbengine/client/IndexAction.java
deleted file mode 100644
index ec5d680..0000000
--- a/src/main/java/it/cavallium/dbengine/client/IndexAction.java
+++ /dev/null
@@ -1,128 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import it.cavallium.dbengine.client.IndexAction.Add;
-import it.cavallium.dbengine.client.IndexAction.AddMulti;
-import it.cavallium.dbengine.client.IndexAction.Update;
-import it.cavallium.dbengine.client.IndexAction.UpdateMulti;
-import it.cavallium.dbengine.client.IndexAction.Delete;
-import it.cavallium.dbengine.client.IndexAction.DeleteAll;
-import it.cavallium.dbengine.client.IndexAction.TakeSnapshot;
-import it.cavallium.dbengine.client.IndexAction.ReleaseSnapshot;
-import it.cavallium.dbengine.client.IndexAction.Flush;
-import it.cavallium.dbengine.client.IndexAction.Refresh;
-import it.cavallium.dbengine.client.IndexAction.Close;
-import it.cavallium.dbengine.database.LLUpdateDocument;
-import it.cavallium.dbengine.database.LLSnapshot;
-import it.cavallium.dbengine.database.LLTerm;
-import java.util.Map;
-import java.util.Map.Entry;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.MonoSink;
-
-sealed interface IndexAction permits Add, AddMulti, Update, UpdateMulti, Delete, DeleteAll, TakeSnapshot,
-    ReleaseSnapshot, Flush, Refresh, Close {
-
-  IndexActionType getType();
-
-  final record Add(LLTerm key, LLUpdateDocument doc, MonoSink<Void> addedFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.ADD;
-    }
-  }
-
-  final record AddMulti(Flux<Entry<LLTerm, LLUpdateDocument>> docsFlux, MonoSink<Void> addedMultiFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.ADD_MULTI;
-    }
-  }
-
-  final record Update(LLTerm key, LLUpdateDocument doc, MonoSink<Void> updatedFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.UPDATE;
-    }
-  }
-
-  final record UpdateMulti(Map<LLTerm, LLUpdateDocument> docs, MonoSink<Void> updatedMultiFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.UPDATE_MULTI;
-    }
-  }
-
-  final record Delete(LLTerm key, MonoSink<Void> deletedFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.DELETE;
-    }
-  }
-
-  final record DeleteAll(MonoSink<Void> deletedAllFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.DELETE_ALL;
-    }
-  }
-
-  final record TakeSnapshot(MonoSink<LLSnapshot> snapshotFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.TAKE_SNAPSHOT;
-    }
-  }
-
-  final record ReleaseSnapshot(LLSnapshot snapshot, MonoSink<Void> releasedFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.RELEASE_SNAPSHOT;
-    }
-  }
-
-  final record Flush(MonoSink<Void> flushFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.FLUSH;
-    }
-  }
-
-  final record Refresh(boolean force, MonoSink<Void> refreshFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.REFRESH;
-    }
-  }
-
-  final record Close(MonoSink<Void> closeFuture) implements IndexAction {
-
-    @Override
-    public IndexActionType getType() {
-      return IndexActionType.CLOSE;
-    }
-  }
-
-  enum IndexActionType {
-    ADD,
-    ADD_MULTI,
-    UPDATE,
-    UPDATE_MULTI,
-    DELETE,
-    DELETE_ALL,
-    TAKE_SNAPSHOT,
-    RELEASE_SNAPSHOT,
-    FLUSH,
-    REFRESH,
-    CLOSE
-  }
-}
diff --git a/src/main/java/it/cavallium/dbengine/client/Indicizer.java b/src/main/java/it/cavallium/dbengine/client/Indicizer.java
index 2a97c92..9d6118e 100644
--- a/src/main/java/it/cavallium/dbengine/client/Indicizer.java
+++ b/src/main/java/it/cavallium/dbengine/client/Indicizer.java
@@ -4,40 +4,33 @@
 import com.google.common.collect.Multimap;
 import com.google.common.collect.Multimaps;
 import it.cavallium.dbengine.database.LLIndexRequest;
 import it.cavallium.dbengine.database.LLSoftUpdateDocument;
-import it.cavallium.dbengine.database.LLUpdateDocument;
 import it.cavallium.dbengine.database.LLTerm;
+import it.cavallium.dbengine.database.LLUpdateDocument;
 import it.cavallium.dbengine.database.LLUpdateFields;
-import it.cavallium.dbengine.database.LLUtils;
 import it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers;
 import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities;
 import java.util.Map;
-import java.util.Set;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.util.BytesRef;
 import org.jetbrains.annotations.NotNull;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-import reactor.util.function.Tuple2;
 
 public abstract class Indicizer<T, U> {
 
   /**
    * Transform a value to an IndexRequest.
    */
-  public abstract @NotNull Mono<LLIndexRequest> toIndexRequest(@NotNull T key, @NotNull U value);
+  public abstract @NotNull LLIndexRequest toIndexRequest(@NotNull T key, @NotNull U value);
 
-  public final @NotNull Mono<LLUpdateDocument> toDocument(@NotNull T key, @NotNull U value) {
-    return toIndexRequest(key, value).map(req -> {
-      if (req instanceof LLUpdateFields updateFields) {
-        return new LLUpdateDocument(updateFields.items());
-      } else if (req instanceof LLUpdateDocument updateDocument) {
-        return updateDocument;
-      } else if (req instanceof LLSoftUpdateDocument softUpdateDocument) {
-        return new LLUpdateDocument(softUpdateDocument.items());
-      } else {
-        throw new UnsupportedOperationException("Unexpected request type: " + req);
-      }
-    });
+  public final @NotNull LLUpdateDocument toDocument(@NotNull T key, @NotNull U value) {
+    var req = toIndexRequest(key, value);
+    if (req instanceof LLUpdateFields updateFields) {
+      return new LLUpdateDocument(updateFields.items());
+    } else if (req instanceof LLUpdateDocument updateDocument) {
+      return updateDocument;
+    } else if (req instanceof LLSoftUpdateDocument softUpdateDocument) {
+      return new LLUpdateDocument(softUpdateDocument.items());
+    } else {
+      throw new UnsupportedOperationException("Unexpected request type: " + req);
+    }
   }
 
   public abstract @NotNull LLTerm toIndex(@NotNull T key);
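Not part of the patch — the call-site effect of the Indicizer change in a compilable form: toDocument is now a plain synchronous call, so failures surface as exceptions rather than Mono error signals. The helper below is hypothetical:

import it.cavallium.dbengine.client.Indicizer;
import it.cavallium.dbengine.database.LLUpdateDocument;

public final class IndicizerUsage {

  public static <T, U> LLUpdateDocument convert(Indicizer<T, U> indicizer, T key, U value) {
    // toDocument now runs inline (it returned Mono<LLUpdateDocument> before) and
    // normalizes LLUpdateFields / LLSoftUpdateDocument requests into LLUpdateDocument.
    return indicizer.toDocument(key, value);
  }
}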
diff --git a/src/main/java/it/cavallium/dbengine/client/IndicizerAnalyzers.java b/src/main/java/it/cavallium/dbengine/client/IndicizerAnalyzers.java
index dca3903..5b6a404 100644
--- a/src/main/java/it/cavallium/dbengine/client/IndicizerAnalyzers.java
+++ b/src/main/java/it/cavallium/dbengine/client/IndicizerAnalyzers.java
@@ -1,7 +1,6 @@
 package it.cavallium.dbengine.client;
 
 import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer;
-import it.cavallium.dbengine.rpc.current.serializers.IndicizerAnalyzersSerializer;
 import java.util.Map;
 
 public class IndicizerAnalyzers {
diff --git a/src/main/java/it/cavallium/dbengine/client/IntOpenHashSetJsonAdapter.java b/src/main/java/it/cavallium/dbengine/client/IntOpenHashSetJsonAdapter.java
index 8ac66f8..13c9db2 100644
--- a/src/main/java/it/cavallium/dbengine/client/IntOpenHashSetJsonAdapter.java
+++ b/src/main/java/it/cavallium/dbengine/client/IntOpenHashSetJsonAdapter.java
@@ -2,7 +2,6 @@ package it.cavallium.dbengine.client;
 
 import com.squareup.moshi.JsonReader;
 import com.squareup.moshi.JsonWriter;
-import it.cavallium.data.generator.nativedata.Int52;
 import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
 import java.io.IOException;
 import org.jetbrains.annotations.NotNull;
diff --git a/src/main/java/it/cavallium/dbengine/client/LazyHitEntry.java b/src/main/java/it/cavallium/dbengine/client/LazyHitEntry.java
deleted file mode 100644
index 03cf552..0000000
--- a/src/main/java/it/cavallium/dbengine/client/LazyHitEntry.java
+++ /dev/null
@@ -1,15 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import org.jetbrains.annotations.NotNull;
-import reactor.core.publisher.Mono;
-
-public record LazyHitEntry<T, U>(Mono<T> key, Mono<U> value, float score) {
-
-  public Mono<HitEntry<T, U>> resolve() {
-    return Mono.zip(key, value, (k, v) -> new HitEntry<>(k, v, score));
-  }
-
-  public Mono<HitKey<T>> resolveKey() {
-    return key.map(k -> new HitKey<>(k, score));
-  }
-}
diff --git a/src/main/java/it/cavallium/dbengine/client/LazyHitKey.java b/src/main/java/it/cavallium/dbengine/client/LazyHitKey.java
deleted file mode 100644
index c6c101f..0000000
--- a/src/main/java/it/cavallium/dbengine/client/LazyHitKey.java
+++ /dev/null
@@ -1,19 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import java.util.function.Function;
-import reactor.core.publisher.Mono;
-
-public record LazyHitKey<T>(Mono<T> key, float score) {
-
-  public <U> LazyHitEntry<T, U> withValue(Function<T, Mono<U>> valueGetter) {
-    return new LazyHitEntry<>(key, key.flatMap(valueGetter), score);
-  }
-
-  public Mono<HitKey<T>> resolve() {
-    return key.map(k -> new HitKey<>(k, score));
-  }
-
-  public <U> Mono<HitEntry<T, U>> resolveWithValue(Function<T, Mono<U>> valueGetter) {
-    return resolve().flatMap(key -> key.withValue(valueGetter));
-  }
-}
diff --git a/src/main/java/it/cavallium/dbengine/client/LuceneIndex.java b/src/main/java/it/cavallium/dbengine/client/LuceneIndex.java
index 46af19f..d9d46cb 100644
--- a/src/main/java/it/cavallium/dbengine/client/LuceneIndex.java
+++ b/src/main/java/it/cavallium/dbengine/client/LuceneIndex.java
@@ -1,78 +1,71 @@
 package it.cavallium.dbengine.client;
 
-import io.netty5.util.Send;
 import it.cavallium.dbengine.client.query.ClientQueryParams;
 import it.cavallium.dbengine.client.query.current.data.Query;
 import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
 import it.cavallium.dbengine.database.Delta;
 import it.cavallium.dbengine.database.LLSnapshottable;
-import it.cavallium.dbengine.database.collections.ValueGetter;
-import it.cavallium.dbengine.database.collections.ValueTransformer;
 import it.cavallium.dbengine.lucene.collector.Buckets;
 import it.cavallium.dbengine.lucene.searcher.BucketParams;
-import it.unimi.dsi.fastutil.doubles.DoubleArrayList;
 import java.util.List;
 import java.util.Map.Entry;
+import java.util.stream.Stream;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
 
 public interface LuceneIndex<T, U> extends LLSnapshottable {
 
-  Mono<Void> addDocument(T key, U value);
+  void addDocument(T key, U value);
 
-  Mono<Long> addDocuments(boolean atomic, Flux<Entry<T, U>> entries);
+  long addDocuments(boolean atomic, Stream<Entry<T, U>> entries);
 
-  Mono<Void> deleteDocument(T key);
+  void deleteDocument(T key);
 
-  Mono<Void> updateDocument(T key, @NotNull U value);
+  void updateDocument(T key, @NotNull U value);
 
-  Mono<Long> updateDocuments(Flux<Entry<T, U>> entries);
+  long updateDocuments(Stream<Entry<T, U>> entries);
 
-  default Mono<Void> updateOrDeleteDocument(T key, @Nullable U value) {
+  default void updateOrDeleteDocument(T key, @Nullable U value) {
     if (value == null) {
-      return deleteDocument(key);
+      deleteDocument(key);
     } else {
-      return updateDocument(key, value);
+      updateDocument(key, value);
     }
   }
 
-  default Mono<Void> updateOrDeleteDocumentIfModified(T key, @NotNull Delta<U> delta) {
-    return updateOrDeleteDocumentIfModified(key, delta.current(), delta.isModified());
+  default void updateOrDeleteDocumentIfModified(T key, @NotNull Delta<U> delta) {
+    updateOrDeleteDocumentIfModified(key, delta.current(), delta.isModified());
   }
 
-  default Mono<Void> updateOrDeleteDocumentIfModified(T key, @Nullable U currentValue, boolean modified) {
+  default void updateOrDeleteDocumentIfModified(T key, @Nullable U currentValue, boolean modified) {
     if (modified) {
-      return updateOrDeleteDocument(key, currentValue);
-    } else {
-      return Mono.empty();
+      updateOrDeleteDocument(key, currentValue);
     }
   }
 
-  Mono<Void> deleteAll();
+  void deleteAll();
 
-  Mono<Hits<HitKey<T>>> moreLikeThis(ClientQueryParams queryParams, T key,
+  Hits<HitKey<T>> moreLikeThis(ClientQueryParams queryParams, T key,
       U mltDocumentValue);
 
-  Mono<Hits<HitKey<T>>> search(ClientQueryParams queryParams);
+  Hits<HitKey<T>> search(ClientQueryParams queryParams);
 
-  Mono<Buckets> computeBuckets(@Nullable CompositeSnapshot snapshot,
+  Buckets computeBuckets(@Nullable CompositeSnapshot snapshot,
      @NotNull List<Query> queries,
      @Nullable Query normalizationQuery,
      BucketParams bucketParams);
 
-  Mono<TotalHitsCount> count(@Nullable CompositeSnapshot snapshot, Query query);
+  TotalHitsCount count(@Nullable CompositeSnapshot snapshot, Query query);
 
   boolean isLowMemoryMode();
 
   void close();
 
-  Mono<Void> flush();
+  void flush();
 
-  Mono<Void> waitForMerges();
+  void waitForMerges();
 
-  Mono<Void> waitForLastMerges();
+  void waitForLastMerges();
 
-  Mono<Void> refresh(boolean force);
+  void refresh(boolean force);
 }
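Not part of the patch — a sketch of driving the reworked LuceneIndex without Reactor. Long/String stand in for real key/value types that have a concrete Indicizer; every method used is declared in the interface above:

import it.cavallium.dbengine.client.LuceneIndex;
import java.util.Map;
import java.util.stream.Stream;

public class IndexingJob {
  // Before this change each call returned a Mono/Flux that had to be composed
  // and subscribed; now the methods block and Flux inputs become Streams.
  public static void run(LuceneIndex<Long, String> index) {
    index.addDocument(1L, "first document");

    long added = index.addDocuments(false,
        Stream.of(Map.entry(2L, "second"), Map.entry(3L, "third")));

    index.flush();
    index.refresh(true);

    System.out.println("added " + added + " documents");
  }
}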
luceneIndex.update(indicizer.toIndex(key), doc)); + public void updateDocument(T key, @NotNull U value) { + luceneIndex.update(indicizer.toIndex(key), indicizer.toIndexRequest(key, value)); } @Override - public Mono updateDocuments(Flux> entries) { - Flux> mappedEntries = entries - .flatMap(entry -> Mono - .zip(Mono.just(indicizer.toIndex(entry.getKey())), - indicizer.toDocument(entry.getKey(), entry.getValue()).single(), - Map::entry - ) - .single() - ) - .log("impl-update-documents", Level.FINEST, false, SignalType.ON_NEXT, SignalType.ON_COMPLETE); - return luceneIndex.updateDocuments(mappedEntries); + public long updateDocuments(Stream> entries) { + return luceneIndex.updateDocuments(entries.map(entry -> + Map.entry(indicizer.toIndex(entry.getKey()), indicizer.toDocument(entry.getKey(), entry.getValue())))); } @Override - public Mono deleteAll() { - return luceneIndex.deleteAll(); + public void deleteAll() { + luceneIndex.deleteAll(); } @Override - public Mono>> moreLikeThis(ClientQueryParams queryParams, + public Hits> moreLikeThis(ClientQueryParams queryParams, T key, U mltDocumentValue) { var mltDocumentFields = indicizer.getMoreLikeThisDocumentFields(key, mltDocumentValue); - return luceneIndex + var results = luceneIndex .moreLikeThis(resolveSnapshot(queryParams.snapshot()), queryParams.toQueryParams(), indicizer.getKeyFieldName(), mltDocumentFields ) - .collectList() - .mapNotNull(shards -> mergeResults(queryParams, shards)) - .map(llSearchResult -> mapResults(llSearchResult)) - .defaultIfEmpty(Hits.empty()) - .doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard); + .toList(); + LLSearchResultShard mergedResults = mergeResults(queryParams, results); + if (mergedResults != null) { + return mapResults(mergedResults); + } else { + return Hits.empty(); + } } @Override - public Mono>> search(ClientQueryParams queryParams) { - return luceneIndex + public Hits> search(ClientQueryParams queryParams) { + var results = luceneIndex .search(resolveSnapshot(queryParams.snapshot()), queryParams.toQueryParams(), indicizer.getKeyFieldName() ) - .collectList() - .mapNotNull(shards -> mergeResults(queryParams, shards)) - .map(llSearchResult -> mapResults(llSearchResult)) - .defaultIfEmpty(Hits.empty()) - .doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard); + .toList(); + + var mergedResults = mergeResults(queryParams, results); + if (mergedResults != null) { + return mapResults(mergedResults); + } else { + return Hits.empty(); + } } @Override - public Mono computeBuckets(@Nullable CompositeSnapshot snapshot, + public Buckets computeBuckets(@Nullable CompositeSnapshot snapshot, @NotNull List query, @Nullable Query normalizationQuery, BucketParams bucketParams) { - return luceneIndex.computeBuckets(resolveSnapshot(snapshot), query, - normalizationQuery, bucketParams).single(); + return luceneIndex.computeBuckets(resolveSnapshot(snapshot), query, normalizationQuery, bucketParams); } private Hits> mapResults(LLSearchResultShard llSearchResult) { - Flux> scoresWithKeysFlux = llSearchResult.results() + Stream> scoresWithKeysFlux = llSearchResult.results() .map(hit -> new HitKey<>(indicizer.getKey(hit.key()), hit.score())); if (llSearchResult instanceof LuceneCloseable luceneCloseable) { @@ -156,10 +142,8 @@ public class LuceneIndexImpl implements LuceneIndex { } @Override - public Mono count(@Nullable CompositeSnapshot snapshot, Query query) { - return luceneIndex - .count(resolveSnapshot(snapshot), query, MAX_COUNT_TIME) - .doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard); + 
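A companion sketch for the bulk path in the implementation above: `updateDocuments` now takes a `java.util.stream.Stream`, which is lazy and single-use, so the mapping to term/document pairs runs only while the index drains the stream. Assuming the same hypothetical `LuceneIndex<String, String>` and a `Map<String, String>` named `documents`:

    // The entry stream is consumed exactly once, inside updateDocuments:
    long updatedCount = index.updateDocuments(documents.entrySet().stream());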
diff --git a/src/main/java/it/cavallium/dbengine/client/MappedSerializer.java b/src/main/java/it/cavallium/dbengine/client/MappedSerializer.java
index 91d8f74..021be3b 100644
--- a/src/main/java/it/cavallium/dbengine/client/MappedSerializer.java
+++ b/src/main/java/it/cavallium/dbengine/client/MappedSerializer.java
@@ -1,11 +1,11 @@
 package it.cavallium.dbengine.client;
 
-import io.netty5.buffer.Buffer;
-import io.netty5.util.Send;
+import it.cavallium.dbengine.buffers.Buf;
+import it.cavallium.dbengine.buffers.BufDataInput;
+import it.cavallium.dbengine.buffers.BufDataOutput;
 import it.cavallium.dbengine.database.serialization.SerializationException;
 import it.cavallium.dbengine.database.serialization.Serializer;
 import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
 
 public class MappedSerializer<A, B> implements Serializer<B> {
 
@@ -19,13 +19,13 @@ public class MappedSerializer<A, B> implements Serializer<B> {
   }
 
   @Override
-  public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
-    return keyMapper.map(serializer.deserialize(serialized));
+  public @NotNull B deserialize(@NotNull BufDataInput in) throws SerializationException {
+    return keyMapper.map(serializer.deserialize(in));
   }
 
   @Override
-  public void serialize(@NotNull B deserialized, Buffer output) throws SerializationException {
-    serializer.serialize(keyMapper.unmap(deserialized), output);
+  public void serialize(@NotNull B deserialized, BufDataOutput out) throws SerializationException {
+    serializer.serialize(keyMapper.unmap(deserialized), out);
   }
 
   @Override
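Serializers now read from `BufDataInput` and write to `BufDataOutput` instead of handling netty `Buffer` lifecycles. A minimal sketch of a custom serializer against the new types, assuming BufDataInput/BufDataOutput expose the usual DataInput/DataOutput-style primitives used below, and ignoring any extra members (such as a serialized-size hint) the real interface may declare:

    import it.cavallium.dbengine.buffers.BufDataInput;
    import it.cavallium.dbengine.buffers.BufDataOutput;
    import it.cavallium.dbengine.database.serialization.SerializationException;
    import it.cavallium.dbengine.database.serialization.Serializer;
    import java.nio.charset.StandardCharsets;
    import org.jetbrains.annotations.NotNull;

    public class Utf8Serializer implements Serializer<String> {

      @Override
      public @NotNull String deserialize(@NotNull BufDataInput in) throws SerializationException {
        int length = in.readInt();           // length prefix first
        byte[] data = new byte[length];
        in.readFully(data);                  // then the UTF-8 payload
        return new String(data, StandardCharsets.UTF_8);
      }

      @Override
      public void serialize(@NotNull String deserialized, BufDataOutput out) throws SerializationException {
        byte[] data = deserialized.getBytes(StandardCharsets.UTF_8);
        out.writeInt(data.length);
        out.write(data);
      }
    }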
diff --git a/src/main/java/it/cavallium/dbengine/client/MappedSerializerFixedLength.java b/src/main/java/it/cavallium/dbengine/client/MappedSerializerFixedLength.java
index ebe9090..36fc752 100644
--- a/src/main/java/it/cavallium/dbengine/client/MappedSerializerFixedLength.java
+++ b/src/main/java/it/cavallium/dbengine/client/MappedSerializerFixedLength.java
@@ -1,11 +1,11 @@
 package it.cavallium.dbengine.client;
 
-import io.netty5.buffer.Buffer;
-import io.netty5.util.Send;
+import it.cavallium.dbengine.buffers.Buf;
+import it.cavallium.dbengine.buffers.BufDataInput;
+import it.cavallium.dbengine.buffers.BufDataOutput;
 import it.cavallium.dbengine.database.serialization.SerializationException;
 import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
 import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
 
 public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryLength<B> {
 
@@ -19,13 +19,13 @@ public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryLength<B> {
   }
 
   @Override
-  public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
-    return keyMapper.map(fixedLengthSerializer.deserialize(serialized));
+  public @NotNull B deserialize(@NotNull BufDataInput in) throws SerializationException {
+    return keyMapper.map(fixedLengthSerializer.deserialize(in));
   }
 
   @Override
-  public void serialize(@NotNull B deserialized, Buffer output) throws SerializationException {
-    fixedLengthSerializer.serialize(keyMapper.unmap(deserialized), output);
+  public void serialize(@NotNull B deserialized, BufDataOutput out) throws SerializationException {
+    fixedLengthSerializer.serialize(keyMapper.unmap(deserialized), out);
   }
 
   @Override
diff --git a/src/main/java/it/cavallium/dbengine/client/NoMapper.java b/src/main/java/it/cavallium/dbengine/client/NoMapper.java
index 1b91787..98b3279 100644
--- a/src/main/java/it/cavallium/dbengine/client/NoMapper.java
+++ b/src/main/java/it/cavallium/dbengine/client/NoMapper.java
@@ -1,7 +1,5 @@
 package it.cavallium.dbengine.client;
 
-import it.cavallium.dbengine.client.Mapper;
-
 public class NoMapper<T> implements Mapper<T, T> {
 
   @Override
diff --git a/src/main/java/it/cavallium/dbengine/client/UninterruptibleScheduler.java b/src/main/java/it/cavallium/dbengine/client/UninterruptibleScheduler.java
deleted file mode 100644
index ba05044..0000000
--- a/src/main/java/it/cavallium/dbengine/client/UninterruptibleScheduler.java
+++ /dev/null
@@ -1,90 +0,0 @@
-package it.cavallium.dbengine.client;
-
-import java.util.concurrent.TimeUnit;
-import org.jetbrains.annotations.NotNull;
-import reactor.core.Disposable;
-import reactor.core.scheduler.Scheduler;
-
-public class UninterruptibleScheduler {
-
-  public static Scheduler uninterruptibleScheduler(Scheduler scheduler) {
-    return new Scheduler() {
-      @Override
-      public @NotNull Disposable schedule(@NotNull Runnable task) {
-        scheduler.schedule(task);
-        return () -> {};
-      }
-
-      @Override
-      public @NotNull Disposable schedule(@NotNull Runnable task, long delay, @NotNull TimeUnit unit) {
-        scheduler.schedule(task, delay, unit);
-        return () -> {};
-      }
-
-      @Override
-      public @NotNull Disposable schedulePeriodically(@NotNull Runnable task,
-          long initialDelay,
-          long period,
-          @NotNull TimeUnit unit) {
-        scheduler.schedulePeriodically(task, initialDelay, period, unit);
-        return () -> {};
-      }
-
-      @Override
-      public boolean isDisposed() {
-        return scheduler.isDisposed();
-      }
-
-      @Override
-      public void dispose() {
-        scheduler.dispose();
-      }
-
-      @Override
-      public void start() {
-        scheduler.start();
-      }
-
-      @Override
-      public long now(@NotNull TimeUnit unit) {
-        return Scheduler.super.now(unit);
-      }
-
-      @Override
-      public @NotNull Worker createWorker() {
-        var worker = scheduler.createWorker();
-        return new Worker() {
-          @Override
-          public @NotNull Disposable schedule(@NotNull Runnable task) {
-            worker.schedule(task);
-            return () -> {};
-          }
-
-          @Override
-          public void dispose() {
-          }
-
-          @Override
-          public boolean isDisposed() {
-            return worker.isDisposed();
-          }
-
-          @Override
-          public @NotNull Disposable schedule(@NotNull Runnable task, long delay, @NotNull TimeUnit unit) {
-            worker.schedule(task, delay, unit);
-            return () -> {};
-          }
-
-          @Override
-          public @NotNull Disposable schedulePeriodically(@NotNull Runnable task,
-              long initialDelay,
-              long period,
-              @NotNull TimeUnit unit) {
-            worker.schedulePeriodically(task, initialDelay, period, unit);
-            return () -> {};
-          }
-        };
-      }
-    };
-  }
-}
diff --git a/src/main/java/it/cavallium/dbengine/client/query/ClientQueryParams.java b/src/main/java/it/cavallium/dbengine/client/query/ClientQueryParams.java
index 9b36a12..0e04007 100644
--- a/src/main/java/it/cavallium/dbengine/client/query/ClientQueryParams.java
+++ b/src/main/java/it/cavallium/dbengine/client/query/ClientQueryParams.java
@@ -1,7 +1,6 @@
 package it.cavallium.dbengine.client.query;
 
 import io.soabase.recordbuilder.core.RecordBuilder;
-import it.cavallium.data.generator.nativedata.Nullablefloat;
 import it.cavallium.dbengine.client.CompositeSnapshot;
 import it.cavallium.dbengine.client.Sort;
 import it.cavallium.dbengine.client.query.current.data.NoSort;
diff --git a/src/main/java/it/cavallium/dbengine/client/query/QueryMoshi.java b/src/main/java/it/cavallium/dbengine/client/query/QueryMoshi.java
index 722fa55..e711b5c 100644
--- a/src/main/java/it/cavallium/dbengine/client/query/QueryMoshi.java
+++ b/src/main/java/it/cavallium/dbengine/client/query/QueryMoshi.java
@@ -1,12 +1,19 @@
 package it.cavallium.dbengine.client.query;
 
 import com.squareup.moshi.JsonAdapter;
+import it.cavallium.dbengine.buffers.Buf;
 import it.cavallium.dbengine.client.IntOpenHashSetJsonAdapter;
 import it.cavallium.dbengine.client.query.current.CurrentVersion;
 import it.cavallium.dbengine.client.query.current.IBaseType;
 import it.cavallium.dbengine.client.query.current.IType;
+import it.cavallium.dbengine.utils.BooleanListJsonAdapter;
+import it.cavallium.dbengine.utils.ByteListJsonAdapter;
+import it.cavallium.dbengine.utils.CharListJsonAdapter;
+import it.cavallium.dbengine.utils.IntListJsonAdapter;
+import it.cavallium.dbengine.utils.LongListJsonAdapter;
+import it.cavallium.dbengine.utils.MoshiPolymorphic;
+import it.cavallium.dbengine.utils.ShortListJsonAdapter;
 import it.unimi.dsi.fastutil.booleans.BooleanList;
-import it.unimi.dsi.fastutil.bytes.ByteList;
 import it.unimi.dsi.fastutil.chars.CharList;
 import it.unimi.dsi.fastutil.ints.IntList;
 import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
@@ -18,13 +25,6 @@ import it.unimi.dsi.fastutil.shorts.ShortList;
 import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
-import it.cavallium.dbengine.utils.BooleanListJsonAdapter;
-import it.cavallium.dbengine.utils.ByteListJsonAdapter;
-import it.cavallium.dbengine.utils.CharListJsonAdapter;
-import it.cavallium.dbengine.utils.IntListJsonAdapter;
-import it.cavallium.dbengine.utils.LongListJsonAdapter;
-import it.cavallium.dbengine.utils.MoshiPolymorphic;
-import it.cavallium.dbengine.utils.ShortListJsonAdapter;
 
 public class QueryMoshi extends MoshiPolymorphic<IType> {
 
@@ -57,7 +57,7 @@ public class QueryMoshi extends MoshiPolymorphic<IType> {
    this.concreteClasses = concreteClasses;
    Object2ObjectMap<Class<?>, JsonAdapter<?>> extraAdapters = new Object2ObjectOpenHashMap<>();
    extraAdapters.put(BooleanList.class, new BooleanListJsonAdapter());
-    extraAdapters.put(ByteList.class, new ByteListJsonAdapter());
+    extraAdapters.put(Buf.class, new ByteListJsonAdapter());
    extraAdapters.put(ShortList.class, new ShortListJsonAdapter());
    extraAdapters.put(CharList.class, new CharListJsonAdapter());
    extraAdapters.put(IntList.class, new IntListJsonAdapter());
diff --git a/src/main/java/it/cavallium/dbengine/client/query/QueryParser.java b/src/main/java/it/cavallium/dbengine/client/query/QueryParser.java
index 84cf9f2..36bb21f 100644
--- a/src/main/java/it/cavallium/dbengine/client/query/QueryParser.java
+++ b/src/main/java/it/cavallium/dbengine/client/query/QueryParser.java
@@ -51,16 +51,7 @@ import java.util.Map;
 import java.util.function.Function;
 import java.util.stream.Collectors;
 import org.apache.lucene.analysis.Analyzer;
-import org.apache.lucene.analysis.LowerCaseFilter;
-import org.apache.lucene.analysis.StopFilter;
-import org.apache.lucene.analysis.TokenStream;
-import org.apache.lucene.analysis.Tokenizer;
-import org.apache.lucene.analysis.core.KeywordTokenizer;
-import org.apache.lucene.analysis.en.EnglishPossessiveFilter;
-import org.apache.lucene.analysis.en.PorterStemFilter;
 import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
-import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter;
-import org.apache.lucene.analysis.standard.StandardTokenizer;
 import org.apache.lucene.document.DoublePoint;
 import org.apache.lucene.document.FloatPoint;
 import org.apache.lucene.document.IntPoint;
@@ -89,7 +80,7 @@ public class QueryParser {
      return null;
    }
    switch (query.getBaseType$()) {
-      case StandardQuery:
+      case StandardQuery -> {
        var standardQuery = (it.cavallium.dbengine.client.query.current.data.StandardQuery) query;
 
        // Fix the analyzer
@@ -98,19 +89,12 @@
            .stream()
            .collect(Collectors.toMap(Function.identity(), term -> new NoOpAnalyzer()));
        analyzer = new PerFieldAnalyzerWrapper(analyzer, customAnalyzers);
-
        var standardQueryParser = new StandardQueryParser(analyzer);
-
-        standardQueryParser.setPointsConfigMap(standardQuery
-            .pointsConfig()
-            .stream()
-            .collect(Collectors.toMap(
-                PointConfig::field,
-                pointConfig -> new PointsConfig(
-                    toNumberFormat(pointConfig.data().numberFormat()),
-                    toType(pointConfig.data().type())
-                )
-            )));
+        standardQueryParser.setPointsConfigMap(standardQuery.pointsConfig().stream().collect(
+            Collectors.toMap(PointConfig::field, pointConfig ->
+                new PointsConfig(toNumberFormat(pointConfig.data().numberFormat()), toType(pointConfig.data().type()))
+            ))
+        );
        var defaultFields = standardQuery.defaultFields();
        try {
          Query parsed;
@@ -126,7 +110,8 @@
        } catch (QueryNodeException e) {
          throw new IllegalStateException("Can't parse query expression \"" + standardQuery.query() + "\"", e);
        }
-      case BooleanQuery:
+      }
+      case BooleanQuery -> {
        var booleanQuery = (it.cavallium.dbengine.client.query.current.data.BooleanQuery) query;
        var bq = new Builder();
        for (BooleanQueryPart part : booleanQuery.parts()) {
@@ -141,101 +126,127 @@
        }
        bq.setMinimumNumberShouldMatch(booleanQuery.minShouldMatch());
        return bq.build();
-      case IntPointExactQuery:
+      }
+      case IntPointExactQuery -> {
        var intPointExactQuery = (IntPointExactQuery) query;
        return IntPoint.newExactQuery(intPointExactQuery.field(), intPointExactQuery.value());
-      case IntNDPointExactQuery:
+      }
+      case IntNDPointExactQuery -> {
        var intndPointExactQuery = (IntNDPointExactQuery) query;
        var intndValues = intndPointExactQuery.value().toIntArray();
        return IntPoint.newRangeQuery(intndPointExactQuery.field(), intndValues, intndValues);
-      case LongPointExactQuery:
+      }
+      case LongPointExactQuery -> {
        var longPointExactQuery = (LongPointExactQuery) query;
        return LongPoint.newExactQuery(longPointExactQuery.field(), longPointExactQuery.value());
-      case FloatPointExactQuery:
+      }
+      case FloatPointExactQuery -> {
        var floatPointExactQuery = (FloatPointExactQuery) query;
        return FloatPoint.newExactQuery(floatPointExactQuery.field(), floatPointExactQuery.value());
-      case DoublePointExactQuery:
+      }
+      case DoublePointExactQuery -> {
        var doublePointExactQuery = (DoublePointExactQuery) query;
        return DoublePoint.newExactQuery(doublePointExactQuery.field(), doublePointExactQuery.value());
-      case LongNDPointExactQuery:
+      }
+      case LongNDPointExactQuery -> {
        var longndPointExactQuery = (LongNDPointExactQuery) query;
        var longndValues = longndPointExactQuery.value().toLongArray();
        return LongPoint.newRangeQuery(longndPointExactQuery.field(), longndValues, longndValues);
-      case FloatNDPointExactQuery:
+      }
+      case FloatNDPointExactQuery -> {
        var floatndPointExactQuery = (FloatNDPointExactQuery) query;
        var floatndValues = floatndPointExactQuery.value().toFloatArray();
        return FloatPoint.newRangeQuery(floatndPointExactQuery.field(), floatndValues, floatndValues);
-      case DoubleNDPointExactQuery:
+      }
+      case DoubleNDPointExactQuery -> {
        var doublendPointExactQuery = (DoubleNDPointExactQuery) query;
        var doublendValues = doublendPointExactQuery.value().toDoubleArray();
        return DoublePoint.newRangeQuery(doublendPointExactQuery.field(), doublendValues, doublendValues);
-      case IntPointSetQuery:
+      }
+      case IntPointSetQuery -> {
        var intPointSetQuery = (IntPointSetQuery) query;
        return IntPoint.newSetQuery(intPointSetQuery.field(), intPointSetQuery.values().toIntArray());
-      case LongPointSetQuery:
+      }
+      case LongPointSetQuery -> {
        var longPointSetQuery = (LongPointSetQuery) query;
        return LongPoint.newSetQuery(longPointSetQuery.field(), longPointSetQuery.values().toLongArray());
-      case FloatPointSetQuery:
+      }
+      case FloatPointSetQuery -> {
        var floatPointSetQuery = (FloatPointSetQuery) query;
        return FloatPoint.newSetQuery(floatPointSetQuery.field(), floatPointSetQuery.values().toFloatArray());
-      case DoublePointSetQuery:
+      }
+      case DoublePointSetQuery -> {
        var doublePointSetQuery = (DoublePointSetQuery) query;
        return DoublePoint.newSetQuery(doublePointSetQuery.field(), doublePointSetQuery.values().toDoubleArray());
-      case TermQuery:
+      }
+      case TermQuery -> {
        var termQuery = (TermQuery) query;
        return new org.apache.lucene.search.TermQuery(toTerm(termQuery.term()));
-      case IntTermQuery:
+      }
+      case IntTermQuery -> {
        var intTermQuery = (IntTermQuery) query;
        return new org.apache.lucene.search.TermQuery(new Term(intTermQuery.field(),
            IntPoint.pack(intTermQuery.value())
        ));
-      case IntNDTermQuery:
+      }
+      case IntNDTermQuery -> {
        var intNDTermQuery = (IntNDTermQuery) query;
        return new org.apache.lucene.search.TermQuery(new Term(intNDTermQuery.field(),
            IntPoint.pack(intNDTermQuery.value().toIntArray())
        ));
-      case LongTermQuery:
+      }
+      case LongTermQuery -> {
        var longTermQuery = (LongTermQuery) query;
        return new org.apache.lucene.search.TermQuery(new Term(longTermQuery.field(),
            LongPoint.pack(longTermQuery.value())
        ));
-      case LongNDTermQuery:
+      }
+      case LongNDTermQuery -> {
        var longNDTermQuery = (LongNDTermQuery) query;
        return new org.apache.lucene.search.TermQuery(new Term(longNDTermQuery.field(),
            LongPoint.pack(longNDTermQuery.value().toLongArray())
        ));
-      case FloatTermQuery:
+      }
+      case FloatTermQuery -> {
        var floatTermQuery = (FloatTermQuery) query;
        return new org.apache.lucene.search.TermQuery(new Term(floatTermQuery.field(),
            FloatPoint.pack(floatTermQuery.value())
        ));
-      case FloatNDTermQuery:
+      }
+      case FloatNDTermQuery -> {
        var floatNDTermQuery = (FloatNDTermQuery) query;
        return new org.apache.lucene.search.TermQuery(new Term(floatNDTermQuery.field(),
            FloatPoint.pack(floatNDTermQuery.value().toFloatArray())
        ));
-      case DoubleTermQuery:
+      }
+      case DoubleTermQuery -> {
        var doubleTermQuery = (DoubleTermQuery) query;
        return new org.apache.lucene.search.TermQuery(new Term(doubleTermQuery.field(),
            DoublePoint.pack(doubleTermQuery.value())
        ));
-      case DoubleNDTermQuery:
+      }
+      case DoubleNDTermQuery -> {
        var doubleNDTermQuery = (DoubleNDTermQuery) query;
        return new org.apache.lucene.search.TermQuery(new Term(doubleNDTermQuery.field(),
            DoublePoint.pack(doubleNDTermQuery.value().toDoubleArray())
        ));
-      case FieldExistsQuery:
+      }
+      case FieldExistsQuery -> {
        var fieldExistQuery = (FieldExistsQuery) query;
        return new org.apache.lucene.search.FieldExistsQuery(fieldExistQuery.field());
-      case BoostQuery:
+      }
+      case BoostQuery -> {
        var boostQuery = (BoostQuery) query;
        return new org.apache.lucene.search.BoostQuery(toQuery(boostQuery.query(), analyzer), boostQuery.scoreBoost());
-      case ConstantScoreQuery:
+      }
+      case ConstantScoreQuery -> {
        var constantScoreQuery = (ConstantScoreQuery) query;
        return new org.apache.lucene.search.ConstantScoreQuery(toQuery(constantScoreQuery.query(), analyzer));
-      case BoxedQuery:
+      }
+      case BoxedQuery -> {
        return toQuery(((BoxedQuery) query).query(), analyzer);
-      case FuzzyQuery:
+      }
+      case FuzzyQuery -> {
        var fuzzyQuery = (it.cavallium.dbengine.client.query.current.data.FuzzyQuery) query;
        return new FuzzyQuery(toTerm(fuzzyQuery.term()),
            fuzzyQuery.maxEdits(),
@@ -243,56 +254,67 @@
            fuzzyQuery.maxExpansions(),
            fuzzyQuery.transpositions()
        );
-      case IntPointRangeQuery:
+      }
+      case IntPointRangeQuery -> {
        var intPointRangeQuery = (IntPointRangeQuery) query;
        return IntPoint.newRangeQuery(intPointRangeQuery.field(), intPointRangeQuery.min(), intPointRangeQuery.max());
-      case IntNDPointRangeQuery:
+      }
+      case IntNDPointRangeQuery -> {
        var intndPointRangeQuery = (IntNDPointRangeQuery) query;
        return IntPoint.newRangeQuery(intndPointRangeQuery.field(),
            intndPointRangeQuery.min().toIntArray(),
            intndPointRangeQuery.max().toIntArray()
        );
-      case LongPointRangeQuery:
+      }
+      case LongPointRangeQuery -> {
        var longPointRangeQuery = (LongPointRangeQuery) query;
        return LongPoint.newRangeQuery(longPointRangeQuery.field(),
            longPointRangeQuery.min(),
            longPointRangeQuery.max()
        );
-      case FloatPointRangeQuery:
+      }
+      case FloatPointRangeQuery -> {
        var floatPointRangeQuery = (FloatPointRangeQuery) query;
        return FloatPoint.newRangeQuery(floatPointRangeQuery.field(),
            floatPointRangeQuery.min(),
            floatPointRangeQuery.max()
        );
-      case DoublePointRangeQuery:
+      }
+      case DoublePointRangeQuery -> {
        var doublePointRangeQuery = (DoublePointRangeQuery) query;
        return DoublePoint.newRangeQuery(doublePointRangeQuery.field(),
            doublePointRangeQuery.min(),
            doublePointRangeQuery.max()
        );
-      case LongNDPointRangeQuery:
+      }
+      case LongNDPointRangeQuery -> {
        var longndPointRangeQuery = (LongNDPointRangeQuery) query;
        return LongPoint.newRangeQuery(longndPointRangeQuery.field(),
            longndPointRangeQuery.min().toLongArray(),
            longndPointRangeQuery.max().toLongArray()
        );
-      case FloatNDPointRangeQuery:
+      }
+      case FloatNDPointRangeQuery -> {
        var floatndPointRangeQuery = (FloatNDPointRangeQuery) query;
        return FloatPoint.newRangeQuery(floatndPointRangeQuery.field(),
            floatndPointRangeQuery.min().toFloatArray(),
            floatndPointRangeQuery.max().toFloatArray()
        );
-      case DoubleNDPointRangeQuery:
+      }
+      case DoubleNDPointRangeQuery -> {
        var doublendPointRangeQuery = (DoubleNDPointRangeQuery) query;
        return DoublePoint.newRangeQuery(doublendPointRangeQuery.field(),
            doublendPointRangeQuery.min().toDoubleArray(),
            doublendPointRangeQuery.max().toDoubleArray()
        );
-      case MatchAllDocsQuery:
+      }
+      case MatchAllDocsQuery -> {
        return new MatchAllDocsQuery();
-      case MatchNoDocsQuery:
+      }
+      case MatchNoDocsQuery -> {
        return new MatchNoDocsQuery();
-      case PhraseQuery:
+      }
+      case PhraseQuery -> {
        var phraseQuery = (PhraseQuery) query;
        var pqb = new org.apache.lucene.search.PhraseQuery.Builder();
        for (TermPosition phrase : phraseQuery.phrase()) {
@@ -300,27 +322,31 @@
        }
        pqb.setSlop(phraseQuery.slop());
        return pqb.build();
-      case SortedDocFieldExistsQuery:
+      }
+      case SortedDocFieldExistsQuery -> {
        var sortedDocFieldExistsQuery = (SortedDocFieldExistsQuery) query;
        return new DocValuesFieldExistsQuery(sortedDocFieldExistsQuery.field());
-      case SynonymQuery:
+      }
+      case SynonymQuery -> {
        var synonymQuery = (SynonymQuery) query;
        var sqb = new org.apache.lucene.search.SynonymQuery.Builder(synonymQuery.field());
        for (TermAndBoost part : synonymQuery.parts()) {
          sqb.addTerm(toTerm(part.term()), part.boost());
        }
        return sqb.build();
-      case SortedNumericDocValuesFieldSlowRangeQuery:
+      }
+      case SortedNumericDocValuesFieldSlowRangeQuery -> {
        var sortedNumericDocValuesFieldSlowRangeQuery = (SortedNumericDocValuesFieldSlowRangeQuery) query;
        return SortedNumericDocValuesField.newSlowRangeQuery(sortedNumericDocValuesFieldSlowRangeQuery.field(),
            sortedNumericDocValuesFieldSlowRangeQuery.min(),
            sortedNumericDocValuesFieldSlowRangeQuery.max()
        );
-      case WildcardQuery:
+      }
+      case WildcardQuery -> {
        var wildcardQuery = (WildcardQuery) query;
        return new org.apache.lucene.search.WildcardQuery(new Term(wildcardQuery.field(), wildcardQuery.pattern()));
-      default:
-        throw new IllegalStateException("Unexpected value: " + query.getBaseType$());
+      }
+      default -> throw new IllegalStateException("Unexpected value: " + query.getBaseType$());
    }
  }
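The whole `toQuery` switch above moves from colon labels to arrow labels. Arrow labels never fall through, which removes the need for per-case `break` discipline and the fall-through bugs it guards against; the braces after `->` are ordinary blocks. A self-contained illustration of the same language feature (not project code; `clause` is a hypothetical enum value):

    int weight = switch (clause) {
      case MUST -> 2;               // expression form
      case SHOULD -> {              // block form: produce a value with yield
        int w = 1;
        yield w;
      }
      default -> 0;                 // no fall-through anywhere
    };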
diff --git a/src/main/java/it/cavallium/dbengine/client/query/QueryUtils.java b/src/main/java/it/cavallium/dbengine/client/query/QueryUtils.java
index b55725c..be7a47b 100644
--- a/src/main/java/it/cavallium/dbengine/client/query/QueryUtils.java
+++ b/src/main/java/it/cavallium/dbengine/client/query/QueryUtils.java
@@ -17,7 +17,6 @@ import it.cavallium.dbengine.lucene.LuceneUtils;
 import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.stream.Collectors;
 import org.apache.lucene.search.BooleanClause;
 import org.apache.lucene.util.QueryBuilder;
 import org.jetbrains.annotations.NotNull;
@@ -63,29 +62,17 @@
    for (BooleanClause booleanClause : booleanQuery) {
      org.apache.lucene.search.Query queryPartQuery = booleanClause.getQuery();
-      Occur occur;
-      switch (booleanClause.getOccur()) {
-        case MUST:
-          occur = OccurMust.of();
-          break;
-        case FILTER:
-          occur = OccurFilter.of();
-          break;
-        case SHOULD:
-          occur = OccurShould.of();
-          break;
-        case MUST_NOT:
-          occur = OccurMustNot.of();
-          break;
-        default:
-          throw new IllegalArgumentException();
-      }
+      Occur occur = switch (booleanClause.getOccur()) {
+        case MUST -> OccurMust.of();
+        case FILTER -> OccurFilter.of();
+        case SHOULD -> OccurShould.of();
+        case MUST_NOT -> OccurMustNot.of();
+      };
      queryParts.add(BooleanQueryPart.of(transformQuery(field, queryPartQuery), occur));
    }
    return BooleanQuery.of(List.copyOf(queryParts), booleanQuery.getMinimumNumberShouldMatch());
  }
-  if (luceneQuery instanceof org.apache.lucene.search.PhraseQuery) {
-    var phraseQuery = (org.apache.lucene.search.PhraseQuery) luceneQuery;
+  if (luceneQuery instanceof org.apache.lucene.search.PhraseQuery phraseQuery) {
    int slop = phraseQuery.getSlop();
    var terms = phraseQuery.getTerms();
    var positions = phraseQuery.getPositions();
diff --git a/src/main/java/it/cavallium/dbengine/database/BufSupplier.java b/src/main/java/it/cavallium/dbengine/database/BufSupplier.java
deleted file mode 100644
index 06f212c..0000000
--- a/src/main/java/it/cavallium/dbengine/database/BufSupplier.java
+++ /dev/null
@@ -1,62 +0,0 @@
-package it.cavallium.dbengine.database;
-
-import io.netty5.buffer.Buffer;
-import io.netty5.util.Send;
-import java.util.function.Supplier;
-
-public abstract class BufSupplier implements SafeCloseable, DiscardingCloseable, Supplier<Buffer> {
-
-  public static BufSupplier of(Supplier<Buffer> supplier) {
-    return new SimpleBufSupplier(supplier);
-  }
-
-  public static BufSupplier of(Send<Buffer> supplier) {
-    return new CopyBufSupplier(supplier.receive());
-  }
-
-  public static BufSupplier ofOwned(Buffer supplier) {
-    return new CopyBufSupplier(supplier);
-  }
-
-  public static BufSupplier ofShared(Buffer supplier) {
-    return new SimpleBufSupplier(() -> supplier.copy());
-  }
-
-  private static final class SimpleBufSupplier extends BufSupplier {
-
-    private final Supplier<Buffer> supplier;
-
-    public SimpleBufSupplier(Supplier<Buffer> supplier) {
-      this.supplier = supplier;
-    }
-
-    @Override
-    public Buffer get() {
-      return supplier.get();
-    }
-
-    @Override
-    public void close() {
-
-    }
-  }
-
-  private static final class CopyBufSupplier extends BufSupplier {
-
-    private final Buffer supplier;
-
-    public CopyBufSupplier(Buffer supplier) {
-      this.supplier = supplier;
-    }
-
-    @Override
-    public Buffer get() {
-      return supplier.copy();
-    }
-
-    @Override
-    public void close() {
-      supplier.close();
-    }
-  }
-}
diff --git a/src/main/java/it/cavallium/dbengine/database/DatabaseOperations.java b/src/main/java/it/cavallium/dbengine/database/DatabaseOperations.java
index 0347bd5..04d59a8 100644
--- a/src/main/java/it/cavallium/dbengine/database/DatabaseOperations.java
+++ b/src/main/java/it/cavallium/dbengine/database/DatabaseOperations.java
@@ -2,10 +2,9 @@ package it.cavallium.dbengine.database;
 
 import it.cavallium.dbengine.rpc.current.data.Column;
 import java.nio.file.Path;
-import org.reactivestreams.Publisher;
-import reactor.core.publisher.Mono;
+import java.util.stream.Stream;
 
 public interface DatabaseOperations {
 
-  Mono<Void> ingestSST(Column column, Publisher<Path> files, boolean replaceExisting);
+  void ingestSST(Column column, Stream<Path> files, boolean replaceExisting);
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/DatabaseProperties.java b/src/main/java/it/cavallium/dbengine/database/DatabaseProperties.java
index 8c79293..58939ca 100644
--- a/src/main/java/it/cavallium/dbengine/database/DatabaseProperties.java
+++ b/src/main/java/it/cavallium/dbengine/database/DatabaseProperties.java
@@ -2,30 +2,30 @@ package it.cavallium.dbengine.database;
 
 import it.cavallium.dbengine.client.MemoryStats;
 import it.cavallium.dbengine.rpc.current.data.Column;
+import java.io.IOException;
 import java.util.Map;
+import java.util.stream.Stream;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
 
 public interface DatabaseProperties {
 
-  Mono<MemoryStats> getMemoryStats();
+  MemoryStats getMemoryStats();
 
-  Mono<String> getRocksDBStats();
+  String getRocksDBStats();
 
-  Mono<Map<String, String>> getMapProperty(@Nullable Column column, RocksDBMapProperty property);
+  Map<String, String> getMapProperty(@Nullable Column column, RocksDBMapProperty property);
 
-  Flux<ColumnProperty<Map<String, String>>> getMapColumnProperties(RocksDBMapProperty property);
+  Stream<ColumnProperty<Map<String, String>>> getMapColumnProperties(RocksDBMapProperty property);
 
-  Mono<String> getStringProperty(@Nullable Column column, RocksDBStringProperty property);
+  String getStringProperty(@Nullable Column column, RocksDBStringProperty property);
 
-  Flux<ColumnProperty<String>> getStringColumnProperties(RocksDBStringProperty property);
+  Stream<ColumnProperty<String>> getStringColumnProperties(RocksDBStringProperty property);
 
-  Mono<Long> getLongProperty(@Nullable Column column, RocksDBLongProperty property);
+  Long getLongProperty(@Nullable Column column, RocksDBLongProperty property);
 
-  Flux<ColumnProperty<Long>> getLongColumnProperties(RocksDBLongProperty property);
+  Stream<ColumnProperty<Long>> getLongColumnProperties(RocksDBLongProperty property);
 
-  Mono<Long> getAggregatedLongProperty(RocksDBLongProperty property);
+  Long getAggregatedLongProperty(RocksDBLongProperty property);
 
-  Flux<TableWithProperties> getTableProperties();
+  Stream<TableWithProperties> getTableProperties();
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java b/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java
index 1c68a0c..7c762a3 100644
--- a/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java
+++ b/src/main/java/it/cavallium/dbengine/database/LLDatabaseConnection.java
@@ -1,39 +1,34 @@
 package it.cavallium.dbengine.database;
 
 import io.micrometer.core.instrument.MeterRegistry;
-import io.netty5.buffer.BufferAllocator;
 import it.cavallium.dbengine.lucene.LuceneHacks;
-import it.cavallium.dbengine.lucene.LuceneRocksDBManager;
 import it.cavallium.dbengine.rpc.current.data.Column;
 import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;
 import it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers;
 import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities;
 import it.cavallium.dbengine.rpc.current.data.LuceneIndexStructure;
 import it.cavallium.dbengine.rpc.current.data.LuceneOptions;
+import java.io.IOException;
 import java.util.List;
-import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Mono;
 
 @SuppressWarnings("UnusedReturnValue")
 public interface LLDatabaseConnection {
 
-  BufferAllocator getAllocator();
-
   MeterRegistry getMeterRegistry();
 
-  Mono<? extends LLDatabaseConnection> connect();
+  LLDatabaseConnection connect();
 
-  Mono<? extends LLKeyValueDatabase> getDatabase(String name,
+  LLKeyValueDatabase getDatabase(String name,
      List<Column> columns,
      DatabaseOptions databaseOptions);
 
-  Mono<? extends LLLuceneIndex> getLuceneIndex(String clusterName,
+  LLLuceneIndex getLuceneIndex(String clusterName,
      LuceneIndexStructure indexStructure,
      IndicizerAnalyzers indicizerAnalyzers,
      IndicizerSimilarities indicizerSimilarities,
      LuceneOptions luceneOptions,
      @Nullable LuceneHacks luceneHacks);
 
-  Mono<Void> disconnect();
+  void disconnect();
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/LLDelta.java b/src/main/java/it/cavallium/dbengine/database/LLDelta.java
index 25001c5..f661cb6 100644
--- a/src/main/java/it/cavallium/dbengine/database/LLDelta.java
+++ b/src/main/java/it/cavallium/dbengine/database/LLDelta.java
@@ -1,71 +1,37 @@
 package it.cavallium.dbengine.database;
 
-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.Drop;
-import io.netty5.buffer.Owned;
-import io.netty5.util.Send;
-import io.netty5.buffer.internal.ResourceSupport;
-import it.cavallium.dbengine.utils.SimpleResource;
+import static it.cavallium.dbengine.database.LLUtils.unmodifiableBytes;
+
+import it.cavallium.dbengine.buffers.Buf;
 import java.util.StringJoiner;
-import org.apache.logging.log4j.LogManager;
-import org.apache.logging.log4j.Logger;
 import org.jetbrains.annotations.Nullable;
 
-public class LLDelta extends SimpleResource implements DiscardingCloseable {
+public class LLDelta {
 
   @Nullable
-  private final Buffer previous;
+  private final Buf previous;
   @Nullable
-  private final Buffer current;
+  private final Buf current;
 
-  private LLDelta(@Nullable Buffer previous, @Nullable Buffer current) {
+  private LLDelta(@Nullable Buf previous, @Nullable Buf current) {
    super();
-    this.previous = previous != null ? previous.makeReadOnly() : null;
-    this.current = current != null ? current.makeReadOnly() : null;
+    this.previous = unmodifiableBytes(previous);
+    this.current = unmodifiableBytes(current);
   }
 
-  @Override
-  protected void ensureOpen() {
-    super.ensureOpen();
-    assert previous == null || previous.isAccessible();
-    assert current == null || current.isAccessible();
-  }
-
-  @Override
-  protected void onClose() {
-    if (previous != null && previous.isAccessible()) {
-      previous.close();
-    }
-    if (current != null && current.isAccessible()) {
-      current.close();
-    }
-  }
-
-  public static LLDelta of(Buffer previous, Buffer current) {
+  public static LLDelta of(Buf previous, Buf current) {
    assert (previous == null && current == null) || (previous != current);
    return new LLDelta(previous, current);
   }
 
-  public Send<Buffer> previous() {
-    ensureOpen();
-    return previous != null ? previous.copy().send() : null;
-  }
-
-  public Send<Buffer> current() {
-    ensureOpen();
-    return current != null ? current.copy().send() : null;
-  }
-
-  public Buffer currentUnsafe() {
-    ensureOpen();
-    return current;
-  }
-
-  public Buffer previousUnsafe() {
-    ensureOpen();
+  public Buf previous() {
    return previous;
   }
 
+  public Buf current() {
+    return current;
+  }
+
   public boolean isModified() {
    return !LLUtils.equals(previous, current);
   }
diff --git a/src/main/java/it/cavallium/dbengine/database/LLDictionary.java b/src/main/java/it/cavallium/dbengine/database/LLDictionary.java
index 3e8b9cf..0fe70d5 100644
--- a/src/main/java/it/cavallium/dbengine/database/LLDictionary.java
+++ b/src/main/java/it/cavallium/dbengine/database/LLDictionary.java
@@ -1,105 +1,93 @@
 package it.cavallium.dbengine.database;
 
-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.BufferAllocator;
-import io.netty5.util.Send;
+import it.cavallium.dbengine.buffers.Buf;
 import it.cavallium.dbengine.client.BadBlock;
 import it.cavallium.dbengine.database.disk.BinarySerializationFunction;
 import it.cavallium.dbengine.database.serialization.KVSerializationFunction;
-import it.cavallium.dbengine.database.serialization.SerializationFunction;
+import java.io.IOException;
 import java.util.List;
-import java.util.Optional;
 import java.util.function.Function;
+import java.util.stream.Stream;
+import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
+import org.rocksdb.RocksDBException;
 
 @SuppressWarnings("unused")
 public interface LLDictionary extends LLKeyValueDatabaseStructure {
 
   String getColumnName();
 
-  BufferAllocator getAllocator();
+  Buf get(@Nullable LLSnapshot snapshot, Buf key);
 
-  Mono<Buffer> get(@Nullable LLSnapshot snapshot, Mono<Buffer> key);
-
-  Mono<Buffer> put(Mono<Buffer> key, Mono<Buffer> value, LLDictionaryResultType resultType);
+  Buf put(Buf key, Buf value, LLDictionaryResultType resultType);
 
   UpdateMode getUpdateMode();
 
-  default Mono<Buffer> update(Mono<Buffer> key,
-      BinarySerializationFunction updater,
-      UpdateReturnMode updateReturnMode) {
-    return this
-        .updateAndGetDelta(key, updater)
-        .transform(prev -> LLUtils.resolveLLDelta(prev, updateReturnMode));
+  default Buf update(Buf key, BinarySerializationFunction updater, UpdateReturnMode updateReturnMode) {
+    LLDelta prev = this.updateAndGetDelta(key, updater);
+    return LLUtils.resolveLLDelta(prev, updateReturnMode);
   }
 
-  Mono<LLDelta> updateAndGetDelta(Mono<Buffer> key, BinarySerializationFunction updater);
+  LLDelta updateAndGetDelta(Buf key, BinarySerializationFunction updater);
 
-  Mono<Void> clear();
+  void clear();
 
-  Mono<Buffer> remove(Mono<Buffer> key, LLDictionaryResultType resultType);
+  Buf remove(Buf key, LLDictionaryResultType resultType);
 
-  Flux<OptionalBuf> getMulti(@Nullable LLSnapshot snapshot, Flux<Buffer> keys);
+  Stream<OptionalBuf> getMulti(@Nullable LLSnapshot snapshot, Stream<Buf> keys);
 
-  Mono<Void> putMulti(Flux<LLEntry> entries);
+  void putMulti(Stream<LLEntry> entries);
 
-  <K> Flux<Boolean> updateMulti(Flux<K> keys, Flux<Buffer> serializedKeys,
-      KVSerializationFunction<K, @Nullable Buffer, @Nullable Buffer> updateFunction);
+  <K> Stream<Boolean> updateMulti(Stream<K> keys, Stream<Buf> serializedKeys,
+      KVSerializationFunction<K, @Nullable Buf, @Nullable Buf> updateFunction);
 
-  Flux<LLEntry> getRange(@Nullable LLSnapshot snapshot,
-      Mono<LLRange> range,
+  Stream<LLEntry> getRange(@Nullable LLSnapshot snapshot,
+      LLRange range,
      boolean reverse,
      boolean smallRange);
 
-  Flux<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
-      Mono<LLRange> range,
+  Stream<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
+      LLRange range,
      int prefixLength,
      boolean smallRange);
 
-  Flux<Buffer> getRangeKeys(@Nullable LLSnapshot snapshot,
-      Mono<LLRange> range,
+  Stream<Buf> getRangeKeys(@Nullable LLSnapshot snapshot,
+      LLRange range,
      boolean reverse,
-      boolean smallRange);
+      boolean smallRange) throws RocksDBException, IOException;
 
-  Flux<List<Buffer>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot,
-      Mono<LLRange> range,
+  Stream<List<Buf>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot,
+      LLRange range,
      int prefixLength,
      boolean smallRange);
 
-  Flux<Buffer> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot,
-      Mono<LLRange> range,
+  Stream<Buf> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot,
+      LLRange range,
      int prefixLength,
      boolean smallRange);
 
-  Flux<BadBlock> badBlocks(Mono<LLRange> range);
+  Stream<BadBlock> badBlocks(LLRange range);
 
-  Mono<Void> setRange(Mono<LLRange> range, Flux<LLEntry> entries, boolean smallRange);
+  void setRange(LLRange range, Stream<LLEntry> entries, boolean smallRange);
 
-  default Mono<Void> replaceRange(Mono<LLRange> range,
+  default void replaceRange(LLRange range,
      boolean canKeysChange,
-      Function<LLEntry, Mono<LLEntry>> entriesReplacer,
+      Function<@NotNull LLEntry, @NotNull LLEntry> entriesReplacer,
      boolean smallRange) {
-    return Mono.defer(() -> {
-      if (canKeysChange) {
-        return this
-            .setRange(range, this
-                .getRange(null, range, false, smallRange)
-                .flatMap(entriesReplacer), smallRange);
-      } else {
-        return this.putMulti(this.getRange(null, range, false, smallRange).flatMap(entriesReplacer));
-      }
-    });
+    if (canKeysChange) {
+      this.setRange(range, this.getRange(null, range, false, smallRange).map(entriesReplacer), smallRange);
+    } else {
+      this.putMulti(this.getRange(null, range, false, smallRange).map(entriesReplacer));
+    }
   }
 
-  Mono<Boolean> isRangeEmpty(@Nullable LLSnapshot snapshot, Mono<LLRange> range, boolean fillCache);
+  boolean isRangeEmpty(@Nullable LLSnapshot snapshot, LLRange range, boolean fillCache);
 
-  Mono<Long> sizeRange(@Nullable LLSnapshot snapshot, Mono<LLRange> range, boolean fast);
+  long sizeRange(@Nullable LLSnapshot snapshot, LLRange range, boolean fast);
 
-  Mono<LLEntry> getOne(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
+  LLEntry getOne(@Nullable LLSnapshot snapshot, LLRange range);
 
-  Mono<Buffer> getOneKey(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
+  Buf getOneKey(@Nullable LLSnapshot snapshot, LLRange range);
 
-  Mono<LLEntry> removeOne(Mono<LLRange> range);
+  LLEntry removeOne(LLRange range);
 }
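The default `update` above now runs its read-modify-write eagerly: `updateAndGetDelta` produces an `LLDelta` and `LLUtils.resolveLLDelta` picks the side requested by the `UpdateReturnMode`. A hypothetical caller, assuming a dictionary `dict`, a key `keyBuf`, an `UpdateReturnMode` constant along the lines of `GET_NEW_VALUE`, and illustrative `decodeLong`/`encodeLong` helpers (none of these names are confirmed by this commit):

    // The updater receives the previous value (or null) and returns the replacement:
    Buf newValue = dict.update(keyBuf, previous -> {
      long counter = previous == null ? 0L : decodeLong(previous); // hypothetical helper
      return encodeLong(counter + 1);                              // hypothetical helper
    }, UpdateReturnMode.GET_NEW_VALUE);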
diff --git a/src/main/java/it/cavallium/dbengine/database/LLEntry.java b/src/main/java/it/cavallium/dbengine/database/LLEntry.java
index ae65a71..469732e 100644
--- a/src/main/java/it/cavallium/dbengine/database/LLEntry.java
+++ b/src/main/java/it/cavallium/dbengine/database/LLEntry.java
@@ -1,67 +1,37 @@
 package it.cavallium.dbengine.database;
 
-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.Drop;
-import io.netty5.buffer.Owned;
-import io.netty5.util.Resource;
-import io.netty5.util.Send;
-import io.netty5.buffer.internal.ResourceSupport;
-import it.cavallium.dbengine.utils.SimpleResource;
+import it.cavallium.dbengine.buffers.Buf;
 import java.util.Objects;
 import java.util.StringJoiner;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
 
-public class LLEntry extends SimpleResource implements DiscardingCloseable {
+public class LLEntry {
 
   private static final Logger logger = LogManager.getLogger(LLEntry.class);
 
-  private Buffer key;
-  private Buffer value;
+  private final Buf key;
+  private final Buf value;
 
-  private LLEntry(@NotNull Send<Buffer> key, @NotNull Send<Buffer> value) {
-    this.key = key.receive();
-    this.value = value.receive();
-    assert isAllAccessible();
-  }
-
-  private LLEntry(@NotNull Buffer key, @NotNull Buffer value) {
+  private LLEntry(@NotNull Buf key, @NotNull Buf value) {
    this.key = key;
    this.value = value;
-    assert isAllAccessible();
   }
 
-  private boolean isAllAccessible() {
-    assert key != null && key.isAccessible();
-    assert value != null && value.isAccessible();
-    return true;
-  }
-
-  public static LLEntry of(@NotNull Buffer key, @NotNull Buffer value) {
+  public static LLEntry of(@NotNull Buf key, @NotNull Buf value) {
    return new LLEntry(key, value);
   }
 
-  public Send<Buffer> getKey() {
-    ensureOwned();
-    return Objects.requireNonNull(key).copy().send();
+  public static LLEntry copyOf(Buf keyView, Buf valueView) {
+    return new LLEntry(keyView.copy(), valueView.copy());
   }
 
-  public Buffer getKeyUnsafe() {
-    return key;
+  public Buf getKey() {
+    return Objects.requireNonNull(key);
   }
 
-  public Send<Buffer> getValue() {
-    ensureOwned();
-    return Objects.requireNonNull(value).copy().send();
-  }
-
-
-  public Buffer getValueUnsafe() {
-    return value;
-  }
-
-  private void ensureOwned() {
-    assert isAllAccessible();
+  public Buf getValue() {
+    return Objects.requireNonNull(value);
   }
 
   @Override
@@ -90,24 +60,4 @@ public class LLEntry extends SimpleResource implements DiscardingCloseable {
        .add("value=" + LLUtils.toString(value))
        .toString();
   }
-
-  @Override
-  protected void onClose() {
-    try {
-      if (key != null && key.isAccessible()) {
-        key.close();
-      }
-    } catch (Throwable ex) {
-      logger.error("Failed to close key", ex);
-    }
-    try {
-      if (value != null && value.isAccessible()) {
-        value.close();
-      }
-    } catch (Throwable ex) {
-      logger.error("Failed to close value", ex);
-    }
-    key = null;
-    value = null;
-  }
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/LLItem.java b/src/main/java/it/cavallium/dbengine/database/LLItem.java
index 5ea4367..7ed2787 100644
--- a/src/main/java/it/cavallium/dbengine/database/LLItem.java
+++ b/src/main/java/it/cavallium/dbengine/database/LLItem.java
@@ -1,13 +1,6 @@
 package it.cavallium.dbengine.database;
 
-import com.google.common.primitives.Floats;
-import com.google.common.primitives.Ints;
-import com.google.common.primitives.Longs;
-import java.nio.Buffer;
 import java.nio.ByteBuffer;
-import java.nio.FloatBuffer;
-import java.nio.charset.StandardCharsets;
-import java.util.Arrays;
 import java.util.Objects;
 import java.util.StringJoiner;
 import org.apache.lucene.document.Field;
diff --git a/src/main/java/it/cavallium/dbengine/database/LLKeyScore.java b/src/main/java/it/cavallium/dbengine/database/LLKeyScore.java
index a669543..147e02f 100644
--- a/src/main/java/it/cavallium/dbengine/database/LLKeyScore.java
+++ b/src/main/java/it/cavallium/dbengine/database/LLKeyScore.java
@@ -1,10 +1,6 @@
 package it.cavallium.dbengine.database;
 
-import java.util.Objects;
-import java.util.StringJoiner;
 import org.apache.lucene.index.IndexableField;
-import org.apache.lucene.util.BytesRef;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Mono;
 
 public record LLKeyScore(int docId, int shardId, float score, @Nullable IndexableField key) {}
diff --git a/src/main/java/it/cavallium/dbengine/database/LLKeyValueDatabase.java b/src/main/java/it/cavallium/dbengine/database/LLKeyValueDatabase.java
index f9e5b18..472c043 100644
--- a/src/main/java/it/cavallium/dbengine/database/LLKeyValueDatabase.java
+++ b/src/main/java/it/cavallium/dbengine/database/LLKeyValueDatabase.java
@@ -3,69 +3,67 @@ package it.cavallium.dbengine.database;
 import com.google.common.primitives.Ints;
 import com.google.common.primitives.Longs;
 import io.micrometer.core.instrument.MeterRegistry;
-import io.netty5.buffer.BufferAllocator;
 import it.cavallium.dbengine.client.IBackuppable;
-import it.cavallium.dbengine.client.MemoryStats;
 import it.cavallium.dbengine.database.collections.DatabaseInt;
 import it.cavallium.dbengine.database.collections.DatabaseLong;
-import it.cavallium.dbengine.rpc.current.data.Column;
+import java.io.IOException;
 import java.nio.charset.StandardCharsets;
-import java.util.Map;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
+import org.rocksdb.RocksDBException;
 
 public interface LLKeyValueDatabase extends LLSnapshottable, LLKeyValueDatabaseStructure, DatabaseProperties,
    IBackuppable, DatabaseOperations {
 
-  Mono<LLSingleton> getSingleton(byte[] singletonListColumnName, byte[] name, byte @Nullable[] defaultValue);
+  LLSingleton getSingleton(byte[] singletonListColumnName, byte[] name, byte @Nullable [] defaultValue)
+      throws IOException;
 
-  Mono<LLDictionary> getDictionary(byte[] columnName, UpdateMode updateMode);
+  LLDictionary getDictionary(byte[] columnName, UpdateMode updateMode);
 
   @Deprecated
-  default Mono<LLDictionary> getDeprecatedSet(String name, UpdateMode updateMode) {
+  default LLDictionary getDeprecatedSet(String name, UpdateMode updateMode) {
    return getDictionary(ColumnUtils.deprecatedSet(name).name().getBytes(StandardCharsets.US_ASCII), updateMode);
   }
 
-  default Mono<LLDictionary> getDictionary(String name, UpdateMode updateMode) {
+  default LLDictionary getDictionary(String name, UpdateMode updateMode) {
    return getDictionary(ColumnUtils.dictionary(name).name().getBytes(StandardCharsets.US_ASCII), updateMode);
   }
 
-  default Mono<LLSingleton> getSingleton(String singletonListName, String name) {
+  default LLSingleton getSingleton(String singletonListName, String name) {
    return getSingleton(ColumnUtils.special(singletonListName).name().getBytes(StandardCharsets.US_ASCII),
        name.getBytes(StandardCharsets.US_ASCII),
        null
    );
   }
 
-  default Mono<DatabaseInt> getInteger(String singletonListName, String name, int defaultValue) {
-    return this
-        .getSingleton(ColumnUtils.special(singletonListName).name().getBytes(StandardCharsets.US_ASCII),
-            name.getBytes(StandardCharsets.US_ASCII),
-            Ints.toByteArray(defaultValue)
-        )
-        .map(DatabaseInt::new);
+  default DatabaseInt getInteger(String singletonListName, String name, int defaultValue) {
+    return new DatabaseInt(this.getSingleton(ColumnUtils
+        .special(singletonListName)
+        .name()
+        .getBytes(StandardCharsets.US_ASCII),
+        name.getBytes(StandardCharsets.US_ASCII),
+        Ints.toByteArray(defaultValue)
+    ));
   }
 
-  default Mono<DatabaseLong> getLong(String singletonListName, String name, long defaultValue) {
-    return this
-        .getSingleton(ColumnUtils.special(singletonListName).name().getBytes(StandardCharsets.US_ASCII),
-            name.getBytes(StandardCharsets.US_ASCII),
-            Longs.toByteArray(defaultValue)
-        )
-        .map(DatabaseLong::new);
+  default DatabaseLong getLong(String singletonListName, String name, long defaultValue) {
+    return new DatabaseLong(this.getSingleton(ColumnUtils
+        .special(singletonListName)
+        .name()
+        .getBytes(StandardCharsets.US_ASCII),
+        name.getBytes(StandardCharsets.US_ASCII),
+        Longs.toByteArray(defaultValue)
+    ));
   }
 
-  Mono<Void> verifyChecksum();
+  void verifyChecksum();
 
-  Mono<Void> compact();
+  void compact() throws RocksDBException;
 
-  Mono<Void> flush();
-
-  BufferAllocator getAllocator();
+  void flush();
 
   MeterRegistry getMeterRegistry();
 
-  Mono<Void> preClose();
-  Mono<Void> close();
+  void preClose();
+
+  void close();
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/LLLuceneIndex.java b/src/main/java/it/cavallium/dbengine/database/LLLuceneIndex.java
index f49d4fa..1e62bb6 100644
--- a/src/main/java/it/cavallium/dbengine/database/LLLuceneIndex.java
+++ b/src/main/java/it/cavallium/dbengine/database/LLLuceneIndex.java
@@ -8,30 +8,29 @@ import it.cavallium.dbengine.client.query.current.data.QueryParams;
 import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
 import it.cavallium.dbengine.lucene.collector.Buckets;
 import it.cavallium.dbengine.lucene.searcher.BucketParams;
+import java.io.IOException;
 import java.time.Duration;
 import java.util.List;
-import java.util.Map;
 import java.util.Map.Entry;
+import java.util.stream.Stream;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
 
 public interface LLLuceneIndex extends LLSnapshottable, IBackuppable, SafeCloseable {
 
   String getLuceneIndexName();
 
-  Mono<Void> addDocument(LLTerm id, LLUpdateDocument doc);
+  void addDocument(LLTerm id, LLUpdateDocument doc);
 
-  Mono<Long> addDocuments(boolean atomic, Flux<Entry<LLTerm, LLUpdateDocument>> documents);
+  long addDocuments(boolean atomic, Stream<Entry<LLTerm, LLUpdateDocument>> documents);
 
-  Mono<Void> deleteDocument(LLTerm id);
+  void deleteDocument(LLTerm id);
 
-  Mono<Void> update(LLTerm id, LLIndexRequest request);
+  void update(LLTerm id, LLIndexRequest request);
 
-  Mono<Long> updateDocuments(Flux<Entry<LLTerm, LLUpdateDocument>> documents);
+  long updateDocuments(Stream<Entry<LLTerm, LLUpdateDocument>> documents);
 
-  Mono<Void> deleteAll();
+  void deleteAll();
 
   /**
    * @param queryParams the limit is valid for each lucene instance. If you have 15 instances, the number of elements
@@ -40,7 +39,7 @@ public interface LLLuceneIndex extends LLSnapshottable, IBackuppable, SafeCloseable {
    *                    The additional query will be used with the moreLikeThis query: "mltQuery AND additionalQuery"
    * @return the collection has one or more flux
    */
-  Flux<LLSearchResultShard> moreLikeThis(@Nullable LLSnapshot snapshot,
+  Stream<LLSearchResultShard> moreLikeThis(@Nullable LLSnapshot snapshot,
      QueryParams queryParams,
      @Nullable String keyFieldName,
      Multimap<String, String> mltDocumentFields);
@@ -50,19 +49,19 @@ public interface LLLuceneIndex extends LLSnapshottable, IBackuppable, SafeCloseable {
    *                    returned can be at most limit * 15
    * @return the collection has one or more flux
    */
-  Flux<LLSearchResultShard> search(@Nullable LLSnapshot snapshot,
+  Stream<LLSearchResultShard> search(@Nullable LLSnapshot snapshot,
      QueryParams queryParams,
      @Nullable String keyFieldName);
 
   /**
    * @return buckets with each value collected into one of the buckets
    */
-  Mono<Buckets> computeBuckets(@Nullable LLSnapshot snapshot,
+  Buckets computeBuckets(@Nullable LLSnapshot snapshot,
      @NotNull List<Query> queries,
      @Nullable Query normalizationQuery,
      BucketParams bucketParams);
 
-  default Mono<TotalHitsCount> count(@Nullable LLSnapshot snapshot, Query query, @Nullable Duration timeout) {
+  default TotalHitsCount count(@Nullable LLSnapshot snapshot, Query query, @Nullable Duration timeout) {
    QueryParams params = QueryParams.of(query,
        0,
        0,
@@ -70,12 +69,11 @@ public interface LLLuceneIndex extends LLSnapshottable, IBackuppable, SafeCloseable {
        false,
        timeout == null ? Long.MAX_VALUE : timeout.toMillis()
    );
-    return Mono
-        .usingWhen(this.search(snapshot, params, null).singleOrEmpty(),
-            llSearchResultShard -> Mono.just(llSearchResultShard.totalHitsCount()),
-            LLUtils::finalizeResource
-        )
-        .defaultIfEmpty(TotalHitsCount.of(0, true));
+    return this
+        .search(snapshot, params, null)
+        .parallel()
+        .map(LLSearchResultShard::totalHitsCount)
+        .reduce(TotalHitsCount.of(0, true), (a, b) -> TotalHitsCount.of(a.value() + b.value(), a.exact() && b.exact()));
   }
 
   boolean isLowMemoryMode();
 
@@ -84,18 +82,18 @@ public interface LLLuceneIndex extends LLSnapshottable, IBackuppable, SafeCloseable {
    * Flush writes to disk.
    * This does not commit, it syncs the data to the disk
    */
-  Mono<Void> flush();
+  void flush();
 
-  Mono<Void> waitForMerges();
+  void waitForMerges();
 
   /**
    * Wait for the latest pending merge
    * This disables future merges until shutdown!
    */
-  Mono<Void> waitForLastMerges();
+  void waitForLastMerges();
 
   /**
    * Refresh index searcher
    */
-  Mono<Void> refresh(boolean force);
+  void refresh(boolean force);
 }
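The rewritten `count` default above folds every shard's `TotalHitsCount` with an add-and-AND reducer, so the merged count stays exact only while every shard reports an exact count. The same fold in isolation, using only types and accessors visible in this diff:

    TotalHitsCount a = TotalHitsCount.of(10, true);
    TotalHitsCount b = TotalHitsCount.of(5, false);
    TotalHitsCount merged = TotalHitsCount.of(a.value() + b.value(), a.exact() && b.exact());
    // merged.value() == 15; merged.exact() == false, because one shard was inexact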
* This does not commit, it syncs the data to the disk */ - Mono flush(); + void flush(); - Mono waitForMerges(); + void waitForMerges(); /** * Wait for the latest pending merge * This disables future merges until shutdown! */ - Mono waitForLastMerges(); + void waitForLastMerges(); /** * Refresh index searcher */ - Mono refresh(boolean force); + void refresh(boolean force); } diff --git a/src/main/java/it/cavallium/dbengine/database/LLMultiDatabaseConnection.java b/src/main/java/it/cavallium/dbengine/database/LLMultiDatabaseConnection.java index 17083c6..a12669e 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLMultiDatabaseConnection.java +++ b/src/main/java/it/cavallium/dbengine/database/LLMultiDatabaseConnection.java @@ -2,14 +2,10 @@ package it.cavallium.dbengine.database; import com.google.common.collect.Multimap; import io.micrometer.core.instrument.MeterRegistry; -import io.netty5.buffer.BufferAllocator; import it.cavallium.dbengine.client.ConnectionSettings.ConnectionPart; import it.cavallium.dbengine.client.ConnectionSettings.ConnectionPart.ConnectionPartLucene; import it.cavallium.dbengine.client.ConnectionSettings.ConnectionPart.ConnectionPartRocksDB; -import it.cavallium.dbengine.client.IndicizerAnalyzers; -import it.cavallium.dbengine.client.IndicizerSimilarities; import it.cavallium.dbengine.lucene.LuceneHacks; -import it.cavallium.dbengine.lucene.LuceneRocksDBManager; import it.cavallium.dbengine.lucene.LuceneUtils; import it.cavallium.dbengine.rpc.current.data.Column; import it.cavallium.dbengine.rpc.current.data.DatabaseOptions; @@ -18,6 +14,7 @@ import it.cavallium.dbengine.rpc.current.data.LuceneOptions; import it.unimi.dsi.fastutil.ints.IntArrayList; import it.unimi.dsi.fastutil.ints.IntOpenHashSet; import it.unimi.dsi.fastutil.ints.IntSet; +import java.io.IOException; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -25,12 +22,10 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; +import java.util.concurrent.CompletionException; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.util.function.Tuple2; public class LLMultiDatabaseConnection implements LLDatabaseConnection { @@ -83,30 +78,26 @@ public class LLMultiDatabaseConnection implements LLDatabaseConnection { allConnections.addAll(databaseShardConnections.values()); } - @Override - public BufferAllocator getAllocator() { - return anyConnection.getAllocator(); - } - @Override public MeterRegistry getMeterRegistry() { return anyConnection.getMeterRegistry(); } @Override - public Mono connect() { - return Flux - .fromIterable(allConnections) - .flatMap((LLDatabaseConnection databaseConnection) -> databaseConnection - .connect() - .doOnError(ex -> LOG.error("Failed to open connection", ex)) - ) - .then() - .thenReturn(this); + public LLDatabaseConnection connect() { + // todo: parallelize? 
+ for (LLDatabaseConnection connection : allConnections) { + try { + connection.connect(); + } catch (Exception ex) { + LOG.error("Failed to open connection", ex); + } + } + return this; } @Override - public Mono getDatabase(String name, + public LLKeyValueDatabase getDatabase(String name, List columns, DatabaseOptions databaseOptions) { var conn = databaseShardConnections.getOrDefault(name, defaultDatabaseConnection); @@ -115,7 +106,7 @@ public class LLMultiDatabaseConnection implements LLDatabaseConnection { } @Override - public Mono getLuceneIndex(String clusterName, + public LLLuceneIndex getLuceneIndex(String clusterName, LuceneIndexStructure indexStructure, it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers indicizerAnalyzers, it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities indicizerSimilarities, @@ -150,51 +141,44 @@ public class LLMultiDatabaseConnection implements LLDatabaseConnection { luceneHacks ); } else { - return Flux - .fromIterable(connectionToShardMap.entrySet()) - .flatMap(entry -> { - var connectionIndexStructure = indexStructure - .setActiveShards(new IntArrayList(entry.getValue())); + record ShardToIndex(int shard, LLLuceneIndex connIndex) {} + var indices = connectionToShardMap.entrySet().stream().flatMap(entry -> { + var connectionIndexStructure = indexStructure.setActiveShards(new IntArrayList(entry.getValue())); - Flux connIndex = entry.getKey() - .getLuceneIndex(clusterName, - connectionIndexStructure, - indicizerAnalyzers, - indicizerSimilarities, - luceneOptions, - luceneHacks - ).cast(LLLuceneIndex.class).cache().repeat(); - return Flux - .fromIterable(entry.getValue()) - .zipWith(connIndex); - }) - .collectList() - .map(indices -> { - var luceneIndices = new LLLuceneIndex[indexStructure.totalShards()]; - for (var index : indices) { - luceneIndices[index.getT1()] = index.getT2(); - } - return new LLMultiLuceneIndex(clusterName, - indexStructure, - indicizerAnalyzers, - indicizerSimilarities, - luceneOptions, - luceneHacks, - luceneIndices - ); - }); + LLLuceneIndex connIndex; + try { + connIndex = entry.getKey().getLuceneIndex(clusterName, connectionIndexStructure, + indicizerAnalyzers, indicizerSimilarities, luceneOptions, luceneHacks); + } catch (IOException e) { + throw new CompletionException(e); + } + + return entry.getValue().intStream().mapToObj(shard -> new ShardToIndex(shard, connIndex)); + }).toList(); + var luceneIndices = new LLLuceneIndex[indexStructure.totalShards()]; + for (var index : indices) { + luceneIndices[index.shard] = index.connIndex; + } + return new LLMultiLuceneIndex(clusterName, + indexStructure, + indicizerAnalyzers, + indicizerSimilarities, + luceneOptions, + luceneHacks, + luceneIndices + ); } } @Override - public Mono disconnect() { - return Flux - .fromIterable(allConnections) - .flatMap(databaseConnection -> databaseConnection - .disconnect() - .doOnError(ex -> LOG.error("Failed to close connection", ex)) - .onErrorResume(ex -> Mono.empty()) - ) - .then(); + public void disconnect() { + // todo: parallelize? 
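// The parallelStream() sketch shown in connect() above would plausibly apply here
// unchanged, calling disconnect() instead of connect() and logging
// "Failed to close connection" on failure.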
+ for (LLDatabaseConnection connection : allConnections) { + try { + connection.disconnect(); + } catch (Exception ex) { + LOG.error("Failed to close connection", ex); + } + } } } diff --git a/src/main/java/it/cavallium/dbengine/database/LLMultiLuceneIndex.java b/src/main/java/it/cavallium/dbengine/database/LLMultiLuceneIndex.java index 0378b38..0669498 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLMultiLuceneIndex.java +++ b/src/main/java/it/cavallium/dbengine/database/LLMultiLuceneIndex.java @@ -1,35 +1,31 @@ package it.cavallium.dbengine.database; -import com.google.common.collect.Iterables; +import static it.cavallium.dbengine.lucene.LuceneUtils.getLuceneIndexId; +import static java.util.stream.Collectors.groupingBy; + import com.google.common.collect.Multimap; import it.cavallium.dbengine.client.IBackuppable; -import it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers; -import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities; import it.cavallium.dbengine.client.query.current.data.Query; import it.cavallium.dbengine.client.query.current.data.QueryParams; -import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; import it.cavallium.dbengine.lucene.LuceneHacks; -import it.cavallium.dbengine.lucene.LuceneUtils; import it.cavallium.dbengine.lucene.collector.Buckets; import it.cavallium.dbengine.lucene.searcher.BucketParams; +import it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers; +import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities; import it.cavallium.dbengine.rpc.current.data.LuceneIndexStructure; import it.cavallium.dbengine.rpc.current.data.LuceneOptions; import it.unimi.dsi.fastutil.doubles.DoubleArrayList; -import it.unimi.dsi.fastutil.objects.ObjectArrayList; +import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Map.Entry; -import java.util.Objects; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; -import java.util.logging.Level; +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import org.reactivestreams.Publisher; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.publisher.SignalType; public class LLMultiLuceneIndex implements LLLuceneIndex { @@ -46,7 +42,6 @@ public class LLMultiLuceneIndex implements LLLuceneIndex { private final LLLuceneIndex[] luceneIndicesById; private final List luceneIndicesSet; private final int totalShards; - private final Flux luceneIndicesFlux; public LLMultiLuceneIndex(String clusterName, LuceneIndexStructure indexStructure, @@ -70,7 +65,6 @@ public class LLMultiLuceneIndex implements LLLuceneIndex { } } this.luceneIndicesSet = new ArrayList<>(luceneIndicesSet); - this.luceneIndicesFlux = Flux.fromIterable(luceneIndicesSet); } @Override @@ -79,108 +73,115 @@ public class LLMultiLuceneIndex implements LLLuceneIndex { } private LLLuceneIndex getLuceneIndex(LLTerm id) { - return luceneIndicesById[LuceneUtils.getLuceneIndexId(id, totalShards)]; + return luceneIndicesById[getLuceneIndexId(id, totalShards)]; } @Override - public Mono addDocument(LLTerm id, LLUpdateDocument doc) { - return getLuceneIndex(id).addDocument(id, doc); + public void addDocument(LLTerm id, LLUpdateDocument doc) { + getLuceneIndex(id).addDocument(id, doc); } @Override - public Mono addDocuments(boolean atomic, Flux> 
documents) { - return documents - .groupBy(term -> LuceneUtils.getLuceneIndexId(term.getKey(), totalShards)) - .flatMap(group -> { - var index = luceneIndicesById[group.key()]; - return index.addDocuments(atomic, group); - }) + public long addDocuments(boolean atomic, Stream> documents) { + var groupedRequests = documents + .collect(groupingBy(term -> getLuceneIndexId(term.getKey(), totalShards), + Int2ObjectOpenHashMap::new, + Collectors.toList() + )); + + return groupedRequests + .int2ObjectEntrySet() + .stream() + .map(entry -> luceneIndicesById[entry.getIntKey()].addDocuments(atomic, entry.getValue().stream())) .reduce(0L, Long::sum); } @Override - public Mono deleteDocument(LLTerm id) { - return getLuceneIndex(id).deleteDocument(id); + public void deleteDocument(LLTerm id) { + getLuceneIndex(id).deleteDocument(id); } @Override - public Mono update(LLTerm id, LLIndexRequest request) { - return getLuceneIndex(id).update(id, request); + public void update(LLTerm id, LLIndexRequest request) { + getLuceneIndex(id).update(id, request); } @Override - public Mono updateDocuments(Flux> documents) { - return documents - .log("multi-update-documents", Level.FINEST, false, SignalType.ON_NEXT, SignalType.ON_COMPLETE) - .groupBy(term -> getLuceneIndex(term.getKey())) - .flatMap(groupFlux -> groupFlux.key().updateDocuments(groupFlux)) + public long updateDocuments(Stream> documents) { + var groupedRequests = documents + .collect(groupingBy(term -> getLuceneIndexId(term.getKey(), totalShards), + Int2ObjectOpenHashMap::new, + Collectors.toList() + )); + + return groupedRequests + .int2ObjectEntrySet() + .stream() + .map(entry -> luceneIndicesById[entry.getIntKey()].updateDocuments(entry.getValue().stream())) .reduce(0L, Long::sum); } @Override - public Mono deleteAll() { - Iterable> it = () -> luceneIndicesSet.stream().map(llLuceneIndex -> llLuceneIndex.deleteAll()).iterator(); - return Mono.whenDelayError(it); + public void deleteAll() { + luceneIndicesSet.forEach(LLLuceneIndex::deleteAll); } @Override - public Flux moreLikeThis(@Nullable LLSnapshot snapshot, + public Stream moreLikeThis(@Nullable LLSnapshot snapshot, QueryParams queryParams, @Nullable String keyFieldName, Multimap mltDocumentFields) { - return luceneIndicesFlux.flatMap(luceneIndex -> luceneIndex.moreLikeThis(snapshot, + return luceneIndicesSet.parallelStream().flatMap(luceneIndex -> luceneIndex.moreLikeThis(snapshot, queryParams, keyFieldName, mltDocumentFields - )).doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard); + )); } - private Mono mergeShards(List shards) { - return Mono.fromCallable(() -> { - List seriesValues = new ArrayList<>(); - DoubleArrayList totals = new DoubleArrayList(shards.get(0).totals()); + private Buckets mergeShards(List shards) { + List seriesValues = new ArrayList<>(); + DoubleArrayList totals = new DoubleArrayList(shards.get(0).totals()); - for (Buckets shard : shards) { - if (seriesValues.isEmpty()) { - seriesValues.addAll(shard.seriesValues()); - } else { - for (int serieIndex = 0; serieIndex < seriesValues.size(); serieIndex++) { - DoubleArrayList mergedSerieValues = seriesValues.get(serieIndex); - for (int dataIndex = 0; dataIndex < mergedSerieValues.size(); dataIndex++) { - mergedSerieValues.set(dataIndex, mergedSerieValues.getDouble(dataIndex) - + shard.seriesValues().get(serieIndex).getDouble(dataIndex) - ); - } + for (Buckets shard : shards) { + if (seriesValues.isEmpty()) { + seriesValues.addAll(shard.seriesValues()); + } else { + for (int serieIndex = 0; serieIndex < seriesValues.size(); 
serieIndex++) { + DoubleArrayList mergedSerieValues = seriesValues.get(serieIndex); + for (int dataIndex = 0; dataIndex < mergedSerieValues.size(); dataIndex++) { + mergedSerieValues.set(dataIndex, mergedSerieValues.getDouble(dataIndex) + + shard.seriesValues().get(serieIndex).getDouble(dataIndex) + ); } } - for (int i = 0; i < totals.size(); i++) { - totals.set(i, totals.getDouble(i) + shard.totals().getDouble(i)); - } } - return new Buckets(seriesValues, totals); - }); + for (int i = 0; i < totals.size(); i++) { + totals.set(i, totals.getDouble(i) + shard.totals().getDouble(i)); + } + } + return new Buckets(seriesValues, totals); } @Override - public Flux search(@Nullable LLSnapshot snapshot, + public Stream search(@Nullable LLSnapshot snapshot, QueryParams queryParams, @Nullable String keyFieldName) { - return luceneIndicesFlux.flatMap(luceneIndex -> luceneIndex.search(snapshot, + return luceneIndicesSet.parallelStream().flatMap(luceneIndex -> luceneIndex.search(snapshot, queryParams, keyFieldName - )).doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard); + )); } @Override - public Mono computeBuckets(@Nullable LLSnapshot snapshot, + public Buckets computeBuckets(@Nullable LLSnapshot snapshot, @NotNull List queries, @Nullable Query normalizationQuery, BucketParams bucketParams) { - return luceneIndicesFlux.flatMap(luceneIndex -> luceneIndex.computeBuckets(snapshot, + return mergeShards(luceneIndicesSet.parallelStream().map(luceneIndex -> luceneIndex.computeBuckets(snapshot, queries, normalizationQuery, bucketParams - )).collectList().flatMap(this::mergeShards).doOnDiscard(DiscardingCloseable.class, LLUtils::onDiscard); + )).toList()); } @Override @@ -190,78 +191,60 @@ public class LLMultiLuceneIndex implements LLLuceneIndex { @Override public void close() { - Iterable> it = () -> luceneIndicesSet.stream().map(e -> Mono.fromRunnable(e::close)).iterator(); - Mono.whenDelayError(it).transform(LLUtils::handleDiscard).block(); + luceneIndicesSet.parallelStream().forEach(SafeCloseable::close); } @Override - public Mono flush() { - Iterable> it = () -> luceneIndicesSet.stream().map(LLLuceneIndex::flush).iterator(); - return Mono.whenDelayError(it); + public void flush() { + luceneIndicesSet.parallelStream().forEach(LLLuceneIndex::flush); } @Override - public Mono waitForMerges() { - Iterable> it = () -> luceneIndicesSet.stream().map(LLLuceneIndex::waitForMerges).iterator(); - return Mono.whenDelayError(it); + public void waitForMerges() { + luceneIndicesSet.parallelStream().forEach(LLLuceneIndex::waitForMerges); } @Override - public Mono waitForLastMerges() { - Iterable> it = () -> luceneIndicesSet.stream().map(LLLuceneIndex::waitForLastMerges).iterator(); - return Mono.whenDelayError(it); + public void waitForLastMerges() { + luceneIndicesSet.parallelStream().forEach(LLLuceneIndex::waitForLastMerges); } @Override - public Mono refresh(boolean force) { - Iterable> it = () -> luceneIndicesSet.stream().map(index -> index.refresh(force)).iterator(); - return Mono.whenDelayError(it); + public void refresh(boolean force) { + luceneIndicesSet.parallelStream().forEach(index -> index.refresh(force)); } @Override - public Mono takeSnapshot() { - return Mono - // Generate next snapshot index - .fromCallable(nextSnapshotNumber::getAndIncrement) - .flatMap(snapshotIndex -> luceneIndicesFlux - .flatMapSequential(llLuceneIndex -> llLuceneIndex.takeSnapshot()) - .collectList() - .doOnNext(instancesSnapshotsArray -> registeredSnapshots.put(snapshotIndex, instancesSnapshotsArray)) - .thenReturn(new 
LLSnapshot(snapshotIndex)) - ); + public LLSnapshot takeSnapshot() { + // Generate next snapshot index + var snapshotIndex = nextSnapshotNumber.getAndIncrement(); + var snapshot = luceneIndicesSet.parallelStream().map(LLSnapshottable::takeSnapshot).toList(); + registeredSnapshots.put(snapshotIndex, snapshot); + return new LLSnapshot(snapshotIndex); } @Override - public Mono releaseSnapshot(LLSnapshot snapshot) { - return Mono - .fromCallable(() -> registeredSnapshots.remove(snapshot.getSequenceNumber())) - .flatMapIterable(list -> list) - .index() - .flatMap(tuple -> { - int index = (int) (long) tuple.getT1(); - LLSnapshot instanceSnapshot = tuple.getT2(); - return luceneIndicesSet.get(index).releaseSnapshot(instanceSnapshot); - }) - .then(); + public void releaseSnapshot(LLSnapshot snapshot) { + var list = registeredSnapshots.remove(snapshot.getSequenceNumber()); + for (int shardIndex = 0; shardIndex < list.size(); shardIndex++) { + var luceneIndex = luceneIndicesSet.get(shardIndex); + LLSnapshot instanceSnapshot = list.get(shardIndex); + luceneIndex.releaseSnapshot(instanceSnapshot); + } } @Override - public Mono pauseForBackup() { - return Mono.whenDelayError(Iterables.transform(this.luceneIndicesSet, IBackuppable::pauseForBackup)); + public void pauseForBackup() { + this.luceneIndicesSet.forEach(IBackuppable::pauseForBackup); } @Override - public Mono resumeAfterBackup() { - return Mono.whenDelayError(Iterables.transform(this.luceneIndicesSet, IBackuppable::resumeAfterBackup)); + public void resumeAfterBackup() { + this.luceneIndicesSet.forEach(IBackuppable::resumeAfterBackup); } @Override public boolean isPaused() { - for (LLLuceneIndex llLuceneIndex : this.luceneIndicesSet) { - if (llLuceneIndex.isPaused()) { - return true; - } - } - return false; + return this.luceneIndicesSet.stream().anyMatch(IBackuppable::isPaused); } } diff --git a/src/main/java/it/cavallium/dbengine/database/LLRange.java b/src/main/java/it/cavallium/dbengine/database/LLRange.java index 8de0c8e..0f23f4b 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLRange.java +++ b/src/main/java/it/cavallium/dbengine/database/LLRange.java @@ -1,195 +1,92 @@ package it.cavallium.dbengine.database; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; -import io.netty5.util.Send; -import io.netty5.buffer.internal.ResourceSupport; -import it.cavallium.dbengine.utils.SimpleResource; +import it.cavallium.dbengine.buffers.Buf; import java.util.StringJoiner; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; /** * Range of data, from min (inclusive), to max (exclusive) */ -public class LLRange extends SimpleResource { +public class LLRange { - private static final LLRange RANGE_ALL = new LLRange( null, null, (Buffer) null, false); + private static final LLRange RANGE_ALL = new LLRange( null, null, (Buf) null); @Nullable - private final Buffer min; + private final Buf min; @Nullable - private final Buffer max; + private final Buf max; @Nullable - private final Buffer single; + private final Buf single; - private LLRange(Send min, Send max, Send single, boolean closeable) { - super(closeable); + private LLRange(@Nullable Buf min, @Nullable Buf max, @Nullable Buf single) { assert single == null || (min == null && max == null); - this.min = min != null ? min.receive().makeReadOnly() : null; - this.max = max != null ? max.receive().makeReadOnly() : null; - this.single = single != null ? 
single.receive().makeReadOnly() : null; - } - - private LLRange(Buffer min, Buffer max, Buffer single, boolean closeable) { - super(closeable); - assert single == null || (min == null && max == null); - this.min = min != null ? min.makeReadOnly() : null; - this.max = max != null ? max.makeReadOnly() : null; - this.single = single != null ? single.makeReadOnly() : null; + this.min = min; + this.max = max; + this.single = single; } public static LLRange all() { return RANGE_ALL; } - public static LLRange from(Send min) { - return new LLRange(min, null, null, true); + public static LLRange from(Buf min) { + return new LLRange(min, null, null); } - public static LLRange to(Send max) { - return new LLRange(null, max, null, true); + public static LLRange to(Buf max) { + return new LLRange(null, max, null); } - public static LLRange single(Send single) { - return new LLRange(null, null, single, true); + public static LLRange single(Buf single) { + return new LLRange(null, null, single); } - public static LLRange singleUnsafe(Buffer single) { - return new LLRange(null, null, single, true); - } - - public static LLRange of(Send min, Send max) { - return new LLRange(min, max, null, true); - } - - public static LLRange ofUnsafe(Buffer min, Buffer max) { - return new LLRange(min, max, null, true); + public static LLRange of(Buf min, Buf max) { + return new LLRange(min, max, null); } public boolean isAll() { - ensureOpen(); return min == null && max == null && single == null; } public boolean isSingle() { - ensureOpen(); return single != null; } public boolean hasMin() { - ensureOpen(); return min != null || single != null; } - public Send getMin() { - ensureOpen(); - if (min != null) { - // todo: use a read-only copy - return min.copy().send(); - } else if (single != null) { - // todo: use a read-only copy - return single.copy().send(); - } else { - return null; - } - } - - public Buffer getMinUnsafe() { - ensureOpen(); + public Buf getMin() { + // todo: use a read-only copy if (min != null) { return min; - } else if (single != null) { + } else { return single; - } else { - return null; - } - } - - public Buffer getMinCopy() { - ensureOpen(); - if (min != null) { - return min.copy(); - } else if (single != null) { - return single.copy(); - } else { - return null; } } public boolean hasMax() { - ensureOpen(); return max != null || single != null; } - public Send getMax() { - ensureOpen(); - if (max != null) { - // todo: use a read-only copy - return max.copy().send(); - } else if (single != null) { - // todo: use a read-only copy - return single.copy().send(); - } else { - return null; - } - } - - public Buffer getMaxUnsafe() { - ensureOpen(); + public Buf getMax() { + // todo: use a read-only copy if (max != null) { return max; - } else if (single != null) { + } else { return single; - } else { - return null; } } - public Buffer getMaxCopy() { - ensureOpen(); - if (max != null) { - return max.copy(); - } else if (single != null) { - return single.copy(); - } else { - return null; - } - } - - public Send getSingle() { - ensureOpen(); + public Buf getSingle() { assert isSingle(); // todo: use a read-only copy - return single != null ? 
single.copy().send() : null; - } - - public Buffer getSingleUnsafe() { - ensureOpen(); - assert isSingle(); return single; } - @Override - protected void ensureOpen() { - super.ensureOpen(); - assert min == null || min.isAccessible() : "Range min not owned"; - assert max == null || max.isAccessible() : "Range max not owned"; - assert single == null || single.isAccessible() : "Range single not owned"; - } - - @Override - protected void onClose() { - if (min != null && min.isAccessible()) { - min.close(); - } - if (max != null && max.isAccessible()) { - max.close(); - } - if (single != null && single.isAccessible()) { - single.close(); - } + public Buf getSingleUnsafe() { + assert isSingle(); + return single; } @Override @@ -220,12 +117,7 @@ public class LLRange extends SimpleResource { } public LLRange copy() { - ensureOpen(); // todo: use a read-only copy - return new LLRange(min != null ? min.copy().send() : null, - max != null ? max.copy().send() : null, - single != null ? single.copy().send() : null, - true - ); + return new LLRange(min, max, single); } } diff --git a/src/main/java/it/cavallium/dbengine/database/LLSearchResult.java b/src/main/java/it/cavallium/dbengine/database/LLSearchResult.java index 4ed7d9f..d206cc5 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLSearchResult.java +++ b/src/main/java/it/cavallium/dbengine/database/LLSearchResult.java @@ -1,13 +1,13 @@ package it.cavallium.dbengine.database; import java.util.function.BiFunction; +import java.util.stream.Stream; import org.jetbrains.annotations.NotNull; -import reactor.core.publisher.Flux; -public record LLSearchResult(Flux results) { +public record LLSearchResult(Stream results) { @NotNull public static BiFunction accumulator() { - return (a, b) -> new LLSearchResult(Flux.merge(a.results, b.results)); + return (a, b) -> new LLSearchResult(Stream.concat(a.results, b.results)); } } diff --git a/src/main/java/it/cavallium/dbengine/database/LLSearchResultShard.java b/src/main/java/it/cavallium/dbengine/database/LLSearchResultShard.java index e5ab01a..3a9d083 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLSearchResultShard.java +++ b/src/main/java/it/cavallium/dbengine/database/LLSearchResultShard.java @@ -1,32 +1,27 @@ package it.cavallium.dbengine.database; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; -import io.netty5.buffer.internal.ResourceSupport; -import it.cavallium.dbengine.client.LuceneIndexImpl; import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; import it.cavallium.dbengine.lucene.LuceneCloseable; import it.cavallium.dbengine.utils.SimpleResource; -import it.unimi.dsi.fastutil.objects.ObjectArrayList; import java.util.List; import java.util.Objects; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import reactor.core.publisher.Flux; public class LLSearchResultShard extends SimpleResource implements DiscardingCloseable { private static final Logger LOG = LogManager.getLogger(LLSearchResultShard.class); - private final Flux results; + private final Stream results; private final TotalHitsCount totalHitsCount; - public LLSearchResultShard(Flux results, TotalHitsCount totalHitsCount) { + public LLSearchResultShard(Stream results, TotalHitsCount totalHitsCount) { this.results = results; this.totalHitsCount = totalHitsCount; } - public static LLSearchResultShard withResource(Flux results, + public static LLSearchResultShard withResource(Stream results, TotalHitsCount totalHitsCount, 
SafeCloseable closeableResource) { if (closeableResource instanceof LuceneCloseable luceneCloseable) { @@ -36,7 +31,7 @@ public class LLSearchResultShard extends SimpleResource implements DiscardingClo } } - public Flux results() { + public Stream results() { ensureOpen(); return results; } @@ -74,7 +69,7 @@ public class LLSearchResultShard extends SimpleResource implements DiscardingClo private final List resources; - public ResourcesLLSearchResultShard(Flux resultsFlux, + public ResourcesLLSearchResultShard(Stream resultsFlux, TotalHitsCount count, List resources) { super(resultsFlux, count); @@ -102,7 +97,7 @@ public class LLSearchResultShard extends SimpleResource implements DiscardingClo private final List resources; - public LuceneLLSearchResultShard(Flux resultsFlux, + public LuceneLLSearchResultShard(Stream resultsFlux, TotalHitsCount count, List resources) { super(resultsFlux, count); diff --git a/src/main/java/it/cavallium/dbengine/database/LLSingleton.java b/src/main/java/it/cavallium/dbengine/database/LLSingleton.java index 6606366..3883a5a 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLSingleton.java +++ b/src/main/java/it/cavallium/dbengine/database/LLSingleton.java @@ -1,32 +1,22 @@ package it.cavallium.dbengine.database; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.disk.BinarySerializationFunction; -import it.cavallium.dbengine.database.serialization.SerializationFunction; -import it.unimi.dsi.fastutil.bytes.ByteList; -import java.util.function.Function; +import java.io.IOException; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public interface LLSingleton extends LLKeyValueDatabaseStructure { + Buf get(@Nullable LLSnapshot snapshot); - BufferAllocator getAllocator(); + void set(Buf value); - Mono get(@Nullable LLSnapshot snapshot); - - Mono set(Mono value); - - default Mono update(BinarySerializationFunction updater, - UpdateReturnMode updateReturnMode) { - return this - .updateAndGetDelta(updater) - .transform(prev -> LLUtils.resolveLLDelta(prev, updateReturnMode)); + default Buf update(BinarySerializationFunction updater, UpdateReturnMode updateReturnMode) { + var prev = this.updateAndGetDelta(updater); + return LLUtils.resolveLLDelta(prev, updateReturnMode); } - Mono updateAndGetDelta(BinarySerializationFunction updater); + LLDelta updateAndGetDelta(BinarySerializationFunction updater); String getColumnName(); diff --git a/src/main/java/it/cavallium/dbengine/database/LLSnapshottable.java b/src/main/java/it/cavallium/dbengine/database/LLSnapshottable.java index 9639116..942fbeb 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLSnapshottable.java +++ b/src/main/java/it/cavallium/dbengine/database/LLSnapshottable.java @@ -1,10 +1,10 @@ package it.cavallium.dbengine.database; -import reactor.core.publisher.Mono; +import java.io.IOException; public interface LLSnapshottable { - Mono takeSnapshot(); + LLSnapshot takeSnapshot(); - Mono releaseSnapshot(LLSnapshot snapshot); + void releaseSnapshot(LLSnapshot snapshot); } diff --git a/src/main/java/it/cavallium/dbengine/database/LLTerm.java b/src/main/java/it/cavallium/dbengine/database/LLTerm.java index f007d82..e171449 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLTerm.java +++ b/src/main/java/it/cavallium/dbengine/database/LLTerm.java @@ -1,7 +1,6 @@ package it.cavallium.dbengine.database; import java.util.Objects; 
-import org.apache.lucene.index.Term; import org.apache.lucene.util.BytesRef; public class LLTerm { diff --git a/src/main/java/it/cavallium/dbengine/database/LLUtils.java b/src/main/java/it/cavallium/dbengine/database/LLUtils.java index fe7f7ea..db4b6f0 100644 --- a/src/main/java/it/cavallium/dbengine/database/LLUtils.java +++ b/src/main/java/it/cavallium/dbengine/database/LLUtils.java @@ -1,22 +1,11 @@ package it.cavallium.dbengine.database; -import static io.netty5.buffer.StandardAllocationTypes.OFF_HEAP; -import static io.netty5.buffer.internal.InternalBufferUtils.NO_OP_DROP; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; import static org.apache.commons.lang3.ArrayUtils.EMPTY_BYTE_ARRAY; import com.google.common.primitives.Ints; import com.google.common.primitives.Longs; -import io.netty5.buffer.AllocatorControl; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.buffer.BufferComponent; -import io.netty5.buffer.CompositeBuffer; -import io.netty5.buffer.Drop; -import io.netty5.util.Resource; -import io.netty5.util.Send; -import io.netty5.util.IllegalReferenceCountException; -import it.cavallium.dbengine.database.serialization.SerializationException; +import io.netty.util.IllegalReferenceCountException; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.serialization.SerializationFunction; import it.cavallium.dbengine.lucene.LuceneCloseable; import it.cavallium.dbengine.lucene.LuceneUtils; @@ -26,8 +15,6 @@ import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodHandles.Lookup; import java.lang.invoke.MethodType; import java.nio.ByteBuffer; -import java.nio.charset.Charset; -import java.time.Duration; import java.util.ArrayList; import java.util.Collection; import java.util.List; @@ -36,11 +23,8 @@ import java.util.Map.Entry; import java.util.Objects; import java.util.Optional; import java.util.Set; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; -import java.util.function.ToIntFunction; +import java.util.function.Consumer; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.Marker; @@ -69,16 +53,7 @@ import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import org.rocksdb.AbstractImmutableNativeReference; import org.rocksdb.AbstractNativeReference; -import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.ReadOptions; -import org.rocksdb.RocksDB; -import reactor.core.Disposable; -import reactor.core.Fuseable.QueueSubscription; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Hooks; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Scheduler; -import reactor.core.scheduler.Schedulers; @SuppressWarnings("unused") public class LLUtils { @@ -89,13 +64,11 @@ public class LLUtils { public static final int INITIAL_DIRECT_READ_BYTE_BUF_SIZE_BYTES = 4096; public static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.allocateDirect(0).asReadOnlyBuffer(); - private static final AllocatorControl NO_OP_ALLOCATION_CONTROL = (AllocatorControl) BufferAllocator.offHeapUnpooled(); private static final byte[] RESPONSE_TRUE = new byte[]{1}; private static final byte[] RESPONSE_FALSE = new byte[]{0}; private static final byte[] RESPONSE_TRUE_BUF = new byte[]{1}; private static final byte[] 
RESPONSE_FALSE_BUF = new byte[]{0}; public static final byte[][] LEXICONOGRAPHIC_ITERATION_SEEKS = new byte[256][1]; - public static final AtomicBoolean hookRegistered = new AtomicBoolean(); public static final boolean MANUAL_READAHEAD = false; public static final boolean ALLOW_STATIC_OPTIONS = false; @@ -111,29 +84,38 @@ public class LLUtils { private static final MethodHandle IS_ACCESSIBLE_METHOD_HANDLE; + private static final MethodHandle IS_IN_NON_BLOCKING_THREAD_MH; + private static final Consumer NULL_CONSUMER = ignored -> {}; + static { for (int i1 = 0; i1 < 256; i1++) { var b = LEXICONOGRAPHIC_ITERATION_SEEKS[i1]; b[0] = (byte) i1; } - var methodType = MethodType.methodType(boolean.class); - MethodHandle isAccessibleMethodHandle = null; - try { - isAccessibleMethodHandle = PUBLIC_LOOKUP.findVirtual(AbstractNativeReference.class, "isAccessible", methodType); - } catch (NoSuchMethodException e) { - logger.debug("Failed to find isAccessible(): no such method"); - } catch (IllegalAccessException e) { - logger.debug("Failed to find isAccessible()", e); + { + var methodType = MethodType.methodType(boolean.class); + MethodHandle isAccessibleMethodHandle = null; + try { + isAccessibleMethodHandle = PUBLIC_LOOKUP.findVirtual(AbstractNativeReference.class, "isAccessible", methodType); + } catch (NoSuchMethodException e) { + logger.debug("Failed to find isAccessible(): no such method"); + } catch (IllegalAccessException e) { + logger.debug("Failed to find isAccessible()", e); + } + IS_ACCESSIBLE_METHOD_HANDLE = isAccessibleMethodHandle; } - IS_ACCESSIBLE_METHOD_HANDLE = isAccessibleMethodHandle; - initHooks(); - } + { + MethodHandle isInNonBlockingThreadMethodHandle = null; + try { + var clz = Objects.requireNonNull(PUBLIC_LOOKUP.findClass("reactor.core.scheduler.Schedulers"), + "reactor.core.scheduler.Schedulers not found"); - public static void initHooks() { - if (hookRegistered.compareAndSet(false, true)) { - Hooks.onNextDropped(LLUtils::onNextDropped); - //todo: add Hooks.onDiscard when it will be implemented - // Hooks.onDiscard(LLUtils::onDiscard); + var methodType = MethodType.methodType(boolean.class); + isInNonBlockingThreadMethodHandle = PUBLIC_LOOKUP.findStatic(clz, "isInNonBlockingThread", methodType); + } catch (NoSuchMethodException | ClassNotFoundException | IllegalAccessException | NullPointerException e) { + logger.debug("Failed to obtain access to reactor core schedulers"); + } + IS_IN_NON_BLOCKING_THREAD_MH = isInNonBlockingThreadMethodHandle; } } @@ -141,26 +123,17 @@ public class LLUtils { return response[0] == 1; } - public static boolean responseToBoolean(Send responseToReceive) { - try (var response = responseToReceive.receive()) { - assert response.readableBytes() == 1; - return response.getByte(response.readerOffset()) == 1; - } - } - - public static boolean responseToBoolean(Buffer response) { - try (response) { - assert response.readableBytes() == 1; - return response.getByte(response.readerOffset()) == 1; - } + public static boolean responseToBoolean(Buf response) { + assert response.size() == 1; + return response.getBoolean(0); } public static byte[] booleanToResponse(boolean bool) { return bool ? RESPONSE_TRUE : RESPONSE_FALSE; } - public static Buffer booleanToResponseByteBuffer(BufferAllocator alloc, boolean bool) { - return alloc.allocate(1).writeByte(bool ? (byte) 1 : 0); + public static Buf booleanToResponseByteBuffer(boolean bool) { + return Buf.wrap(new byte[] {bool ? 
(byte) 1 : 0}); } @Nullable @@ -307,9 +280,9 @@ return new it.cavallium.dbengine.database.LLKeyScore(hit.docId(), hit.shardId(), hit.score(), hit.key()); } - public static String toStringSafe(@Nullable Buffer key) { + public static String toStringSafe(byte @Nullable[] key) { try { - if (key == null || key.isAccessible()) { + if (key == null) { return toString(key); } else { - return "(released)"; + // a plain byte array can never be released: print it in any case + return toString(key); @@ -319,7 +292,7 @@ } } - public static String toStringSafe(byte @Nullable[] key) { + public static String toStringSafe(@Nullable Buf key) { try { if (key == null) { return toString(key); @@ -333,7 +306,7 @@ public static String toStringSafe(@Nullable LLRange range) { try { - if (range == null || !range.isClosed()) { + if (range == null) { return toString(range); } else { - return "(released)"; + // LLRange is a plain value now and can no longer be closed: print it in any case + return toString(range); @@ -349,60 +322,21 @@ } else if (range.isAll()) { return "ξ"; } else if (range.hasMin() && range.hasMax()) { - return "[" + toStringSafe(range.getMinUnsafe()) + "," + toStringSafe(range.getMaxUnsafe()) + ")"; + return "[" + toStringSafe(range.getMin()) + "," + toStringSafe(range.getMax()) + ")"; } else if (range.hasMin()) { - return "[" + toStringSafe(range.getMinUnsafe()) + ",*)"; + return "[" + toStringSafe(range.getMin()) + ",*)"; } else if (range.hasMax()) { - return "[*," + toStringSafe(range.getMaxUnsafe()) + ")"; + return "[*," + toStringSafe(range.getMax()) + ")"; } else { return "∅"; } } - public static String toString(@Nullable Buffer key) { + public static String toString(@Nullable Buf key) { if (key == null) { return "null"; } else { - int startIndex = key.readerOffset(); - int iMax = key.readableBytes() - 1; - int iLimit = 128; - if (iMax <= -1) { - return "[]"; - } else { - StringBuilder arraySB = new StringBuilder(); - StringBuilder asciiSB = new StringBuilder(); - boolean isAscii = true; - arraySB.append('['); - int i = 0; - - while (true) { - var byteVal = key.getUnsignedByte(startIndex + i); - arraySB.append(byteVal); - if (isAscii) { - if (byteVal >= 32 && byteVal < 127) { - asciiSB.append((char) byteVal); - } else if (byteVal == 0) { - asciiSB.append('␀'); - } else { - isAscii = false; - asciiSB = null; - } - } - if (i == iLimit) { - arraySB.append("…"); - } - if (i == iMax || i == iLimit) { - if (isAscii) { - return asciiSB.insert(0, "\"").append("\"").toString(); - } else { - return arraySB.append(']').toString(); - } - } - - arraySB.append(", "); - ++i; - } - } + return toString(key.asArray()); } } @@ -453,21 +387,11 @@ } } - public static boolean equals(Buffer a, Buffer b) { + public static boolean equals(Buf a, Buf b) { if (a == null && b == null) { return true; } else if (a != null && b != null) { - var aCur = a.openCursor(); - var bCur = b.openCursor(); - if (aCur.bytesLeft() != bCur.bytesLeft()) { - return false; - } - while (aCur.readByte() && bCur.readByte()) { - if (aCur.getByte() != bCur.getByte()) { - return false; - } - } - return true; + return a.equals(b); } else { return false; } } @@ -481,123 +405,27 @@ *

* {@code a[aStartIndex : aStartIndex + length] == b[bStartIndex : bStartIndex + length]} */ - public static boolean equals(Buffer a, int aStartIndex, Buffer b, int bStartIndex, int length) { - var aCur = a.openCursor(aStartIndex, length); - var bCur = b.openCursor(bStartIndex, length); - if (aCur.bytesLeft() != bCur.bytesLeft()) { - return false; - } - while (aCur.readByte() && bCur.readByte()) { - if (aCur.getByte() != bCur.getByte()) { - return false; - } - } - return true; + public static boolean equals(Buf a, int aStartIndex, Buf b, int bStartIndex, int length) { + return a.equals(aStartIndex, b, bStartIndex, length); } - public static byte[] toArray(@Nullable Buffer key) { + /** + * + * @return the inner array, DO NOT MODIFY IT + */ + public static byte[] asArray(@Nullable Buf key) { if (key == null) { return EMPTY_BYTE_ARRAY; } - byte[] array = new byte[key.readableBytes()]; - key.copyInto(key.readerOffset(), array, 0, key.readableBytes()); - return array; + return key.asArray(); } - public static List toArray(List input) { - List result = new ArrayList<>(input.size()); - for (Buffer byteBuf : input) { - result.add(toArray(byteBuf)); - } - return result; - } - - public static int hashCode(Buffer buf) { + public static int hashCode(Buf buf) { if (buf == null) { return 0; } - int result = 1; - var cur = buf.openCursor(); - while (cur.readByte()) { - var element = cur.getByte(); - result = 31 * result + element; - } - - return result; - } - - /** - * @return null if size is equal to RocksDB.NOT_FOUND - */ - @Nullable - public static Buffer readNullableDirectNioBuffer(BufferAllocator alloc, ToIntFunction reader) { - if (alloc.getAllocationType() != OFF_HEAP) { - throw new UnsupportedOperationException("Allocator type is not direct: " + alloc); - } - var directBuffer = alloc.allocate(INITIAL_DIRECT_READ_BYTE_BUF_SIZE_BYTES); - try { - assert directBuffer.readerOffset() == 0; - assert directBuffer.writerOffset() == 0; - var directBufferWriter = ((BufferComponent) directBuffer).writableBuffer(); - assert directBufferWriter.position() == 0; - assert directBufferWriter.capacity() >= directBuffer.capacity(); - assert directBufferWriter.isDirect(); - int trueSize = reader.applyAsInt(directBufferWriter); - if (trueSize == RocksDB.NOT_FOUND) { - directBuffer.close(); - return null; - } - int readSize = directBufferWriter.limit(); - if (trueSize < readSize) { - throw new IllegalStateException(); - } else if (trueSize == readSize) { - return directBuffer.writerOffset(directBufferWriter.limit()); - } else { - assert directBuffer.readerOffset() == 0; - directBuffer.ensureWritable(trueSize); - assert directBuffer.writerOffset() == 0; - directBufferWriter = ((BufferComponent) directBuffer).writableBuffer(); - assert directBufferWriter.position() == 0; - assert directBufferWriter.isDirect(); - reader.applyAsInt(directBufferWriter.position(0)); - return directBuffer.writerOffset(trueSize); - } - } catch (Throwable t) { - directBuffer.close(); - throw t; - } - } - - public static void ensureBlocking() { - if (Schedulers.isInNonBlockingThread()) { - throw new UnsupportedOperationException("Called collect in a nonblocking thread"); - } - } - - // todo: remove this ugly method - /** - * cleanup resource - * @param cleanupOnSuccess if true the resource will be cleaned up if the function is successful - */ - public static > Mono usingSendResource(Mono> resourceSupplier, - Function> resourceClosure, - boolean cleanupOnSuccess) { - return Mono.usingWhen(resourceSupplier.map(Send::receive), resourceClosure, r -> 
{ - if (cleanupOnSuccess) { - return Mono.fromRunnable(() -> r.close()); - } else { - return Mono.empty(); - } - }, (r, ex) -> Mono.fromRunnable(() -> { - if (r.isAccessible()) { - r.close(); - } - }), r -> Mono.fromRunnable(() -> { - if (r.isAccessible()) { - r.close(); - } - })); + return buf.hashCode(); } public static boolean isSet(ScoreDoc[] scoreDocs) { @@ -609,26 +437,6 @@ public class LLUtils { return true; } - public static Send empty(BufferAllocator allocator) { - try { - return allocator.allocate(0).send(); - } catch (Exception ex) { - try (var empty = CompositeBuffer.compose(allocator)) { - assert empty.readableBytes() == 0; - assert empty.capacity() == 0; - return empty.send(); - } - } - } - - public static Send copy(BufferAllocator allocator, Buffer buf) { - if (CompositeBuffer.isComposite(buf) && buf.capacity() == 0) { - return empty(allocator); - } else { - return buf.copy().send(); - } - } - public static boolean isBoundedRange(LLRange rangeShared) { return rangeShared.hasMin() && rangeShared.hasMax(); } @@ -649,124 +457,26 @@ public class LLUtils { //noinspection resource readOptions = new ReadOptions(); } - if (boundedRange || smallRange) { - readOptions.setFillCache(canFillCache); - } else { + var hugeRange = !boundedRange && !smallRange; + if (hugeRange) { if (readOptions.readaheadSize() <= 0) { readOptions.setReadaheadSize(4 * 1024 * 1024); // 4MiB } - readOptions.setFillCache(false); - readOptions.setVerifyChecksums(false); - } - - if (FORCE_DISABLE_CHECKSUM_VERIFICATION) { - readOptions.setVerifyChecksums(false); } + readOptions.setFillCache(canFillCache && !hugeRange); + readOptions.setVerifyChecksums(!FORCE_DISABLE_CHECKSUM_VERIFICATION && !hugeRange); return readOptions; } - public static Mono finalizeResource(Resource resource) { - Mono runnable = Mono.fromRunnable(() -> LLUtils.finalizeResourceNow(resource)); - if (resource instanceof LuceneCloseable) { - return runnable.transform(LuceneUtils::scheduleLucene); - } else { - return runnable; - } - } - - public static Mono finalizeResource(SafeCloseable resource) { - Mono runnable = Mono.fromRunnable(resource::close); - if (resource instanceof LuceneCloseable) { - return runnable.transform(LuceneUtils::scheduleLucene); - } else { - return runnable; - } - } - - public static void finalizeResourceNow(Resource resource) { - if (resource.isAccessible()) { - resource.close(); - } + public static void finalizeResource(SafeCloseable resource) { + resource.close(); } public static void finalizeResourceNow(SafeCloseable resource) { resource.close(); } - public static Flux handleDiscard(Flux flux) { - return flux.doOnDiscard(Object.class, LLUtils::onDiscard); - } - - public static Mono handleDiscard(Mono flux) { - return flux.doOnDiscard(Object.class, LLUtils::onDiscard); - } - - /** - * Obtain the resource, then run the closure. - * If the closure publisher returns a single element, then the resource is kept open, - * otherwise it is closed. 
- */ - public static Mono singleOrClose(Mono resourceMono, - Function> closure) { - return Mono.usingWhen(resourceMono, resource -> { - if (resource instanceof LuceneCloseable) { - return closure.apply(resource).publishOn(luceneScheduler()).doOnSuccess(s -> { - if (s == null) { - try { - resource.close(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }).publishOn(Schedulers.parallel()); - } else { - return closure.apply(resource).doOnSuccess(s -> { - if (s == null) { - try { - resource.close(); - } catch (Exception e) { - throw new RuntimeException(e); - } - } - }); - } - }, resource -> Mono.empty(), (resource, ex) -> Mono.fromCallable(() -> { - resource.close(); - return null; - }), r -> (r instanceof SafeCloseable s) ? LLUtils.finalizeResource(s) : Mono.fromCallable(() -> { - r.close(); - return null; - })); - } - - public static Disposable scheduleRepeated(Scheduler scheduler, Runnable action, Duration delay) { - var currentDisposable = new AtomicReference(); - var disposed = new AtomicBoolean(false); - scheduleRepeatedInternal(scheduler, action, delay, currentDisposable, disposed); - return () -> { - disposed.set(true); - currentDisposable.get().dispose(); - }; - } - - private static void scheduleRepeatedInternal(Scheduler scheduler, - Runnable action, - Duration delay, - AtomicReference currentDisposable, - AtomicBoolean disposed) { - if (disposed.get()) return; - currentDisposable.set(scheduler.schedule(() -> { - if (disposed.get()) return; - try { - action.run(); - } catch (Throwable ex) { - logger.error(ex); - } - scheduleRepeatedInternal(scheduler, action, delay, currentDisposable, disposed); - }, delay.toMillis(), TimeUnit.MILLISECONDS)); - } - public static boolean isAccessible(AbstractNativeReference abstractNativeReference) { if (IS_ACCESSIBLE_METHOD_HANDLE != null) { try { @@ -778,220 +488,116 @@ public class LLUtils { return true; } - @Deprecated - public record DirectBuffer(@NotNull Buffer buffer, @NotNull ByteBuffer byteBuffer) {} - - @NotNull - public static ByteBuffer newDirect(int size) { - return ByteBuffer.allocateDirect(size); + public static Buf unmodifiableBytes(Buf previous) { + previous.freeze(); + return previous; } - private static Drop drop() { - // We cannot reliably drop unsafe memory. We have to rely on the cleaner to do that. 
- return NO_OP_DROP; - } - - public static boolean isReadOnlyDirect(Buffer inputBuffer) { - return inputBuffer instanceof BufferComponent component && component.readableNativeAddress() != 0; - } - - public static ByteBuffer getReadOnlyDirect(Buffer inputBuffer) { - assert isReadOnlyDirect(inputBuffer); - return ((BufferComponent) inputBuffer).readableBuffer(); - } - - public static Buffer fromByteArray(BufferAllocator alloc, byte[] array) { - Buffer result = alloc.allocate(array.length); - result.writeBytes(array); - return result; - } - - @NotNull - public static Buffer readDirectNioBuffer(BufferAllocator alloc, ToIntFunction reader) { - var nullable = readNullableDirectNioBuffer(alloc, reader); - if (nullable == null) { - throw new IllegalStateException("A non-nullable buffer read operation tried to return a \"not found\" element"); - } - return nullable; - } - - public static Buffer compositeBuffer(BufferAllocator alloc, Send buffer) { - return buffer.receive(); - } - - @NotNull - public static Buffer compositeBuffer(BufferAllocator alloc, - @NotNull Send buffer1, - @NotNull Send buffer2) { - var b1 = buffer1.receive(); - try (var b2 = buffer2.receive()) { - if (b1.writerOffset() < b1.capacity() || b2.writerOffset() < b2.capacity()) { - b1.ensureWritable(b2.readableBytes(), b2.readableBytes(), true); - b2.copyInto(b2.readerOffset(), b1, b1.writerOffset(), b2.readableBytes()); - b1.writerOffset(b1.writerOffset() + b2.readableBytes()); - return b1; - } else { - return alloc.compose(List.of(b1.send(), b2.send())); - } - } - } - - @NotNull - public static Buffer compositeBuffer(BufferAllocator alloc, - @NotNull Send buffer1, - @NotNull Send buffer2, - @NotNull Send buffer3) { - var b1 = buffer1.receive(); - try (var b2 = buffer2.receive()) { - try (var b3 = buffer3.receive()) { - if (b1.writerOffset() < b1.capacity() - || b2.writerOffset() < b2.capacity() - || b3.writerOffset() < b3.capacity()) { - b1.ensureWritable(b2.readableBytes(), b2.readableBytes(), true); - b2.copyInto(b2.readerOffset(), b1, b1.writerOffset(), b2.readableBytes()); - b1.writerOffset(b1.writerOffset() + b2.readableBytes()); - - b1.ensureWritable(b3.readableBytes(), b3.readableBytes(), true); - b3.copyInto(b3.readerOffset(), b1, b1.writerOffset(), b3.readableBytes()); - b1.writerOffset(b1.writerOffset() + b3.readableBytes()); - return b1; - } else { - return alloc.compose(List.of(b1.send(), b2.send(), b3.send())); - } - } - } - } - - public static Mono resolveDelta(Mono> prev, UpdateReturnMode updateReturnMode) { - return prev.handle((delta, sink) -> { - switch (updateReturnMode) { - case GET_NEW_VALUE -> { - var current = delta.current(); - if (current != null) { - sink.next(current); - } else { - sink.complete(); - } - } - case GET_OLD_VALUE -> { - var previous = delta.previous(); - if (previous != null) { - sink.next(previous); - } else { - sink.complete(); - } - } - case NOTHING -> sink.complete(); - default -> sink.error(new IllegalStateException()); - } - }); - } - - public static Mono resolveLLDelta(Mono prev, UpdateReturnMode updateReturnMode) { - return prev.mapNotNull(delta -> { - final Buffer previous = delta.previousUnsafe(); - final Buffer current = delta.currentUnsafe(); - return switch (updateReturnMode) { - case GET_NEW_VALUE -> { - if (previous != null && previous.isAccessible()) { - previous.close(); - } - yield current; - } - case GET_OLD_VALUE -> { - if (current != null && current.isAccessible()) { - current.close(); - } - yield previous; - } - case NOTHING -> { - if (previous != null && 
previous.isAccessible()) { - previous.close(); - } - if (current != null && current.isAccessible()) { - current.close(); - } - yield null; - } - }; - }); - } - - public static <T, U> Mono<Delta<U>> mapDelta(Mono<Delta<T>> mono, - SerializationFunction<@NotNull T, @Nullable U> mapper) { - return mono.handle((delta, sink) -> { + public static boolean isInNonBlockingThread() { + if (IS_IN_NON_BLOCKING_THREAD_MH != null) { try { - T prev = delta.previous(); - T curr = delta.current(); - U newPrev; - U newCurr; - if (prev != null) { - newPrev = mapper.apply(prev); - } else { - newPrev = null; - } - if (curr != null) { - newCurr = mapper.apply(curr); - } else { - newCurr = null; - } - sink.next(new Delta<>(newPrev, newCurr)); - } catch (SerializationException ex) { - sink.error(ex); + return (boolean) IS_IN_NON_BLOCKING_THREAD_MH.invokeExact(); + } catch (Throwable e) { + throw new RuntimeException(e); } - }); + } + return false; } - public static <U> Mono<Delta<U>> mapLLDelta(Mono<LLDelta> mono, - SerializationFunction<@NotNull Buffer, @Nullable U> mapper) { - return Mono.usingWhen(mono, delta -> Mono.fromCallable(() -> { - Buffer prev = delta.previousUnsafe(); - Buffer curr = delta.currentUnsafe(); - U newPrev; - U newCurr; - if (prev != null) { - newPrev = mapper.apply(prev); - } else { - newPrev = null; - } - if (curr != null) { - newCurr = mapper.apply(curr); - } else { - newCurr = null; - } - return new Delta<>(newPrev, newCurr); - }), LLUtils::finalizeResource); + public static Buf copy(Buf buf) { + return buf.copy(); + } + + public static Buf asByteList(byte[] array) { + return Buf.wrap(array); + } + + public static Buf toByteList(byte[] array) { + return Buf.copyOf(array); + } + + + public static Buf compositeBuffer(Buf buffer) { + return buffer; + } + + @NotNull + public static Buf compositeBuffer(Buf buffer1, Buf buffer2) { + // todo: create a composite buffer without allocating a new array + var out = Buf.create(buffer1.size() + buffer2.size()); + out.addAll(buffer1); + out.addAll(buffer2); + return out; + } + + @NotNull + public static Buf compositeBuffer(Buf buffer1, Buf buffer2, Buf buffer3) { + // todo: create a composite buffer without allocating a new array + var out = Buf.create(buffer1.size() + buffer2.size() + buffer3.size()); + out.addAll(buffer1); + out.addAll(buffer2); + out.addAll(buffer3); + return out; + } + + public static <T> T resolveDelta(Delta<T> delta, UpdateReturnMode updateReturnMode) { + return switch (updateReturnMode) { + case GET_NEW_VALUE -> delta.current(); + case GET_OLD_VALUE -> delta.previous(); + case NOTHING -> null; + }; + } + + public static Buf resolveLLDelta(LLDelta delta, UpdateReturnMode updateReturnMode) { + final Buf previous = delta.previous(); + final Buf current = delta.current(); + return switch (updateReturnMode) { + case GET_NEW_VALUE -> current; + case GET_OLD_VALUE -> previous; + case NOTHING -> null; + }; + } + + public static <T, U> Delta<U> mapDelta(Delta<T> delta, SerializationFunction<@NotNull T, @Nullable U> mapper) { + T prev = delta.previous(); + T curr = delta.current(); + U newPrev; + U newCurr; + if (prev != null) { + newPrev = mapper.apply(prev); + } else { + newPrev = null; + } + if (curr != null) { + newCurr = mapper.apply(curr); + } else { + newCurr = null; + } + return new Delta<>(newPrev, newCurr); + } + + public static <U> Delta<U> mapLLDelta(LLDelta delta, SerializationFunction<@NotNull Buf, @Nullable U> mapper) { + var prev = delta.previous(); + var curr = delta.current(); + U newPrev; + U newCurr; + if (prev != null) { + newPrev = mapper.apply(prev); + } else { + newPrev = null; + } + if (curr != null)
{ + newCurr = mapper.apply(curr); + } else { + newCurr = null; + } + return new Delta<>(newPrev, newCurr); } public static boolean isDeltaChanged(Delta delta) { return !Objects.equals(delta.previous(), delta.current()); } - public static boolean isDirect(Buffer key) { - var readableComponents = key.countReadableComponents(); - if (readableComponents == 0) { - return true; - } else if (readableComponents == 1) { - return key.isDirect(); - } else { - return false; - } - } - - public static String deserializeString(Send bufferSend, int readerOffset, int length, Charset charset) { - try (var buffer = bufferSend.receive()) { - byte[] bytes = new byte[Math.min(length, buffer.readableBytes())]; - buffer.copyInto(readerOffset, bytes, 0, length); - return new String(bytes, charset); - } - } - - public static String deserializeString(@NotNull Buffer buffer, int readerOffset, int length, Charset charset) { - byte[] bytes = new byte[Math.min(length, buffer.readableBytes())]; - buffer.copyInto(readerOffset, bytes, 0, length); - return new String(bytes, charset); - } - public static int utf8MaxBytes(String deserialized) { return deserialized.length() * 3; } @@ -1015,18 +621,14 @@ public class LLUtils { } private static void closeResource(Object next, boolean manual) { - if (next instanceof Send send) { - send.close(); - } if (next instanceof SafeCloseable closeable) { + if (next instanceof SafeCloseable closeable) { if (manual || closeable instanceof DiscardingCloseable) { if (!manual && !LuceneUtils.isLuceneThread() && closeable instanceof LuceneCloseable luceneCloseable) { - luceneScheduler().schedule(() -> luceneCloseable.close()); + luceneCloseable.close(); } else { closeable.close(); } } - } else if (next instanceof Resource resource && resource.isAccessible()) { - resource.close(); } else if (next instanceof List iterable) { iterable.forEach(obj -> closeResource(obj, manual)); } else if (next instanceof Set iterable) { @@ -1080,4 +682,10 @@ public class LLUtils { return term.getValueBytesRef(); } } + + public static void consume(Stream stream) { + try (stream) { + stream.forEach(NULL_CONSUMER); + } + } } diff --git a/src/main/java/it/cavallium/dbengine/database/OptionalBuf.java b/src/main/java/it/cavallium/dbengine/database/OptionalBuf.java index 35f145f..c8fca38 100644 --- a/src/main/java/it/cavallium/dbengine/database/OptionalBuf.java +++ b/src/main/java/it/cavallium/dbengine/database/OptionalBuf.java @@ -1,6 +1,6 @@ package it.cavallium.dbengine.database; -import io.netty5.buffer.Buffer; +import it.cavallium.dbengine.buffers.Buf; import java.util.NoSuchElementException; import java.util.Objects; import java.util.Optional; @@ -9,20 +9,20 @@ import java.util.function.Function; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -public final class OptionalBuf implements DiscardingCloseable { +public final class OptionalBuf { private static final OptionalBuf EMPTY = new OptionalBuf(null); - private final Buffer buffer; + private final Buf buffer; - private OptionalBuf(@Nullable Buffer buffer) { + private OptionalBuf(@Nullable Buf buffer) { this.buffer = buffer; } - public static OptionalBuf ofNullable(@Nullable Buffer buffer) { + public static OptionalBuf ofNullable(@Nullable Buf buffer) { return new OptionalBuf(buffer); } - public static OptionalBuf of(@NotNull Buffer buffer) { + public static OptionalBuf of(@NotNull Buf buffer) { Objects.requireNonNull(buffer); return new OptionalBuf(buffer); } @@ -31,13 +31,6 @@ public final class OptionalBuf implements 
DiscardingCloseable { return EMPTY; } - @Override - public void close() { - if (buffer != null && buffer.isAccessible()) { - buffer.close(); - } - } - @Override public String toString() { if (buffer != null) { @@ -66,21 +59,21 @@ public final class OptionalBuf implements DiscardingCloseable { return buffer != null ? buffer.hashCode() : 0; } - public Buffer get() { + public Buf get() { if (buffer == null) { throw new NoSuchElementException(); } return buffer; } - public Buffer orElse(Buffer alternative) { + public Buf orElse(Buf alternative) { if (buffer == null) { return alternative; } return buffer; } - public void ifPresent(Consumer consumer) { + public void ifPresent(Consumer consumer) { if (buffer != null) { consumer.accept(buffer); } @@ -94,7 +87,7 @@ public final class OptionalBuf implements DiscardingCloseable { return buffer == null; } - public Optional map(Function mapper) { + public Optional map(Function mapper) { if (buffer != null) { return Optional.of(mapper.apply(buffer)); } else { diff --git a/src/main/java/it/cavallium/dbengine/database/RangeSupplier.java b/src/main/java/it/cavallium/dbengine/database/RangeSupplier.java deleted file mode 100644 index b800c01..0000000 --- a/src/main/java/it/cavallium/dbengine/database/RangeSupplier.java +++ /dev/null @@ -1,57 +0,0 @@ -package it.cavallium.dbengine.database; - -import io.netty5.util.Send; -import java.util.function.Supplier; - -public abstract class RangeSupplier implements DiscardingCloseable, Supplier { - - public static RangeSupplier of(Supplier supplier) { - return new SimpleSupplier(supplier); - } - - public static RangeSupplier ofOwned(LLRange supplier) { - return new CopySupplier(supplier); - } - - public static RangeSupplier ofShared(LLRange supplier) { - return new SimpleSupplier(supplier::copy); - } - - private static final class SimpleSupplier extends RangeSupplier { - - private final Supplier supplier; - - public SimpleSupplier(Supplier supplier) { - this.supplier = supplier; - } - - @Override - public LLRange get() { - return supplier.get(); - } - - @Override - public void close() { - - } - } - - private static final class CopySupplier extends RangeSupplier { - - private final LLRange supplier; - - public CopySupplier(LLRange supplier) { - this.supplier = supplier; - } - - @Override - public LLRange get() { - return supplier.copy(); - } - - @Override - public void close() { - supplier.close(); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/database/SafeCloseable.java b/src/main/java/it/cavallium/dbengine/database/SafeCloseable.java index 4372651..7bafe05 100644 --- a/src/main/java/it/cavallium/dbengine/database/SafeCloseable.java +++ b/src/main/java/it/cavallium/dbengine/database/SafeCloseable.java @@ -1,7 +1,6 @@ package it.cavallium.dbengine.database; -public interface SafeCloseable extends io.netty5.util.SafeCloseable { +public interface SafeCloseable extends AutoCloseable { - @Override void close(); } diff --git a/src/main/java/it/cavallium/dbengine/database/SubStageEntry.java b/src/main/java/it/cavallium/dbengine/database/SubStageEntry.java index ad76b8c..c2d46b4 100644 --- a/src/main/java/it/cavallium/dbengine/database/SubStageEntry.java +++ b/src/main/java/it/cavallium/dbengine/database/SubStageEntry.java @@ -4,7 +4,7 @@ import it.cavallium.dbengine.database.collections.DatabaseStage; import java.util.Map.Entry; import java.util.Objects; -public final class SubStageEntry> implements DiscardingCloseable, Entry { +public final class SubStageEntry> implements Entry { private final T key; private 
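
With the close() override gone, OptionalBuf reads like a plain Optional specialized for Buf; a short sketch with hypothetical contents:

    import it.cavallium.dbengine.buffers.Buf;
    import it.cavallium.dbengine.database.OptionalBuf;
    import java.util.Optional;

    class OptionalBufExample {
        static void example() {
            OptionalBuf maybe = OptionalBuf.ofNullable(Buf.wrap(new byte[] {1, 2, 3}));
            Optional<Integer> size = maybe.map(Buf::size); // Optional[3]
            Buf value = maybe.orElse(Buf.create(0));       // no close() needed afterwards
        }
    }
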
final U value; @@ -14,13 +14,6 @@ public final class SubStageEntry> implements Disca this.value = value; } - @Override - public void close() { - if (value != null) { - value.close(); - } - } - @Override public T getKey() { return key; diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java index 931db22..cf935b9 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseEmpty.java @@ -1,11 +1,11 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import it.cavallium.dbengine.database.BufSupplier; +import it.cavallium.dbengine.buffers.Buf; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import it.cavallium.dbengine.database.LLDictionary; +import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.Serializer; -import java.util.function.Supplier; import org.jetbrains.annotations.NotNull; public class DatabaseEmpty { @@ -13,16 +13,16 @@ public class DatabaseEmpty { @SuppressWarnings({"unused", "InstantiationOfUtilityClass"}) public static final Nothing NOTHING = new Nothing(); - public static Serializer nothingSerializer(BufferAllocator bufferAllocator) { + public static Serializer nothingSerializer() { return new Serializer<>() { @Override - public @NotNull Nothing deserialize(@NotNull Buffer serialized) { + public @NotNull Nothing deserialize(@NotNull BufDataInput in) throws SerializationException { return NOTHING; } @Override - public void serialize(@NotNull Nothing deserialized, Buffer output) { + public void serialize(@NotNull Nothing deserialized, BufDataOutput out) throws SerializationException { } @@ -36,8 +36,8 @@ public class DatabaseEmpty { private DatabaseEmpty() { } - public static DatabaseStageEntry create(LLDictionary dictionary, BufSupplier key) { - return new DatabaseMapSingle<>(dictionary, key, nothingSerializer(dictionary.getAllocator())); + public static DatabaseStageEntry create(LLDictionary dictionary, Buf key) { + return new DatabaseMapSingle<>(dictionary, key, nothingSerializer()); } public static final class Nothing { diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseInt.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseInt.java index ef1be2a..54a3eae 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseInt.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseInt.java @@ -1,14 +1,12 @@ package it.cavallium.dbengine.database.collections; -import com.google.common.primitives.Ints; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import it.cavallium.dbengine.database.LLKeyValueDatabaseStructure; import it.cavallium.dbengine.database.LLSingleton; import it.cavallium.dbengine.database.LLSnapshot; -import it.cavallium.dbengine.database.LLUtils; -import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public class DatabaseInt implements LLKeyValueDatabaseStructure { @@ -17,28 +15,18 @@ public class DatabaseInt implements 
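
The Nothing serializer moves zero bytes in either direction; a round-trip sketch using the BufDataInput/BufDataOutput factories that appear elsewhere in this patch (createLimited(0) is assumed to accept a zero limit):

    import it.cavallium.dbengine.buffers.BufDataInput;
    import it.cavallium.dbengine.buffers.BufDataOutput;
    import it.cavallium.dbengine.database.collections.DatabaseEmpty;
    import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
    import it.cavallium.dbengine.database.serialization.SerializationException;
    import it.cavallium.dbengine.database.serialization.Serializer;

    class NothingExample {
        static void roundTrip() throws SerializationException {
            Serializer<Nothing> serializer = DatabaseEmpty.nothingSerializer();
            BufDataOutput out = BufDataOutput.createLimited(0);
            serializer.serialize(DatabaseEmpty.NOTHING, out); // writes nothing
            Nothing nothing = serializer.deserialize(BufDataInput.create(out.asList()));
        }
    }
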
LLKeyValueDatabaseStructure { public DatabaseInt(LLSingleton singleton) { this.singleton = singleton; - this.serializer = SerializerFixedBinaryLength.intSerializer(singleton.getAllocator()); + this.serializer = SerializerFixedBinaryLength.intSerializer(); } - public Mono get(@Nullable LLSnapshot snapshot) { - var resultMono = singleton.get(snapshot); - return Mono.usingWhen(resultMono, - result -> Mono.fromSupplier(() -> serializer.deserialize(result)), - LLUtils::finalizeResource - ); + public Integer get(@Nullable LLSnapshot snapshot) { + var result = singleton.get(snapshot); + return serializer.deserialize(BufDataInput.create(result)); } - public Mono set(int value) { - return singleton.set(Mono.fromCallable(() -> { - var buf = singleton.getAllocator().allocate(Integer.BYTES); - try { - serializer.serialize(value, buf); - return buf; - } catch (Throwable ex) { - buf.close(); - throw ex; - } - })); + public void set(int value) { + var buf = BufDataOutput.createLimited(Integer.BYTES); + serializer.serialize(value, buf); + singleton.set(buf.asList()); } @Override diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseLong.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseLong.java index a26aeb2..a66e8fa 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseLong.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseLong.java @@ -1,16 +1,14 @@ package it.cavallium.dbengine.database.collections; -import com.google.common.primitives.Ints; -import com.google.common.primitives.Longs; +import it.cavallium.dbengine.buffers.Buf; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import it.cavallium.dbengine.database.LLKeyValueDatabaseStructure; import it.cavallium.dbengine.database.LLSingleton; import it.cavallium.dbengine.database.LLSnapshot; -import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.UpdateReturnMode; -import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public class DatabaseLong implements LLKeyValueDatabaseStructure { @@ -20,82 +18,63 @@ public class DatabaseLong implements LLKeyValueDatabaseStructure { public DatabaseLong(LLSingleton singleton) { this.singleton = singleton; - this.serializer = SerializerFixedBinaryLength.longSerializer(singleton.getAllocator()); - this.bugSerializer = SerializerFixedBinaryLength.intSerializer(singleton.getAllocator()); + this.serializer = SerializerFixedBinaryLength.longSerializer(); + this.bugSerializer = SerializerFixedBinaryLength.intSerializer(); } - public Mono get(@Nullable LLSnapshot snapshot) { - var resultMono = singleton.get(snapshot); - return Mono.usingWhen(resultMono, - result -> Mono.fromSupplier(() -> { - if (result.readableBytes() == 4) { - return (long) (int) bugSerializer.deserialize(result); - } else { - return serializer.deserialize(result); - } - }), - LLUtils::finalizeResource - ); + public Long get(@Nullable LLSnapshot snapshot) { + var result = BufDataInput.create(singleton.get(snapshot)); + if (result.available() == 4) { + return (long) (int) bugSerializer.deserialize(result); + } else { + return serializer.deserialize(result); + } } - public Mono incrementAndGet() { + public Long incrementAndGet() { return addAnd(1, UpdateReturnMode.GET_NEW_VALUE); } - public Mono 
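
Call sites lose the Mono wrapper and the block() calls; a sketch against an already-constructed DatabaseInt:

    import it.cavallium.dbengine.database.collections.DatabaseInt;

    class DatabaseIntExample {
        static void example(DatabaseInt dbInt) {
            dbInt.set(7);                    // was: dbInt.set(7).block()
            Integer value = dbInt.get(null); // a null snapshot reads the latest value
        }
    }
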
getAndIncrement() { + public Long getAndIncrement() { return addAnd(1, UpdateReturnMode.GET_OLD_VALUE); } - public Mono decrementAndGet() { + public Long decrementAndGet() { return addAnd(-1, UpdateReturnMode.GET_NEW_VALUE); } - public Mono getAndDecrement() { + public Long getAndDecrement() { return addAnd(-1, UpdateReturnMode.GET_OLD_VALUE); } - public Mono addAndGet(long count) { + public Long addAndGet(long count) { return addAnd(count, UpdateReturnMode.GET_NEW_VALUE); } - public Mono getAndAdd(long count) { + public Long getAndAdd(long count) { return addAnd(count, UpdateReturnMode.GET_OLD_VALUE); } - private Mono addAnd(long count, UpdateReturnMode updateReturnMode) { - var resultMono = singleton.update(prev -> { - try (prev) { - if (prev != null) { - var prevLong = prev.readLong(); - var alloc = singleton.getAllocator(); - var buf = alloc.allocate(Long.BYTES); - buf.writeLong(prevLong + count); - return buf; - } else { - var alloc = singleton.getAllocator(); - var buf = alloc.allocate(Long.BYTES); - buf.writeLong(count); - return buf; - } + private Long addAnd(long count, UpdateReturnMode updateReturnMode) { + var result = singleton.update(prev -> { + if (prev != null) { + var prevLong = prev.getLong(0); + var buf = Buf.createZeroes(Long.BYTES); + buf.setLong(0, prevLong + count); + return buf; + } else { + var buf = Buf.createZeroes(Long.BYTES); + buf.setLong(0, count); + return buf; } }, updateReturnMode); - return Mono.usingWhen(resultMono, - result -> Mono.fromSupplier(result::readLong), - LLUtils::finalizeResource - ).single(); + return result.getLong(0); } - public Mono set(long value) { - return singleton.set(Mono.fromCallable(() -> { - var buf = singleton.getAllocator().allocate(Long.BYTES); - try { - serializer.serialize(value, buf); - } catch (Throwable ex) { - buf.close(); - throw ex; - } - return buf; - })); + public void set(long value) { + var buf = BufDataOutput.createLimited(Long.BYTES); + serializer.serialize(value, buf); + singleton.set(buf.asList()); } @Override diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java index e9c7156..c53da06 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionary.java @@ -1,13 +1,11 @@ package it.cavallium.dbengine.database.collections; -import static java.util.Objects.requireNonNullElseGet; - -import io.netty5.buffer.Buffer; -import io.netty5.util.Resource; -import io.netty5.buffer.internal.ResourceSupport; +import it.cavallium.dbengine.buffers.Buf; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.Delta; +import it.cavallium.dbengine.database.LLDelta; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.LLDictionaryResultType; import it.cavallium.dbengine.database.LLEntry; @@ -22,22 +20,21 @@ import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.SerializationFunction; import it.cavallium.dbengine.database.serialization.Serializer; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; -import it.cavallium.dbengine.utils.InternalMonoUtils; import 
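
The counter helpers keep their single-update semantics but now return plain boxed values; a sketch against a hypothetical counter:

    import it.cavallium.dbengine.database.collections.DatabaseLong;

    class DatabaseLongExample {
        static void example(DatabaseLong counter) {
            counter.set(10);
            Long incremented = counter.incrementAndGet(); // 11
            Long before = counter.getAndAdd(5);           // returns 11, stored value becomes 16
        }
    }
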
it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap; import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap; import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMaps; -import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Optional; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; /** * Optimized implementation of "DatabaseMapDictionary with SubStageGetterSingle" @@ -50,11 +47,11 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep valueSerializer; protected DatabaseMapDictionary(LLDictionary dictionary, - @Nullable BufSupplier prefixKeySupplier, + @Nullable Buf prefixKey, SerializerFixedBinaryLength keySuffixSerializer, Serializer valueSerializer) { // Do not retain or release or use the prefixKey here - super(dictionary, prefixKeySupplier, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0); + super(dictionary, prefixKey, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0); this.valueSerializer = valueSerializer; } @@ -65,65 +62,48 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep DatabaseMapDictionary tail(LLDictionary dictionary, - @Nullable BufSupplier prefixKeySupplier, + @Nullable Buf prefixKey, SerializerFixedBinaryLength keySuffixSerializer, Serializer valueSerializer) { - return new DatabaseMapDictionary<>(dictionary, prefixKeySupplier, keySuffixSerializer, valueSerializer); + return new DatabaseMapDictionary<>(dictionary, prefixKey, keySuffixSerializer, valueSerializer); } - public static Flux> getLeavesFrom(DatabaseMapDictionary databaseMapDictionary, + public static Stream> getLeavesFrom(DatabaseMapDictionary databaseMapDictionary, CompositeSnapshot snapshot, - Mono keyMin, - Mono keyMax, - boolean reverse, boolean smallRange) { - Mono> keyMinOptMono = keyMin.map(Optional::of).defaultIfEmpty(Optional.empty()); - Mono> keyMaxOptMono = keyMax.map(Optional::of).defaultIfEmpty(Optional.empty()); + @Nullable K keyMin, + @Nullable K keyMax, + boolean reverse, + boolean smallRange) { - return Mono.zip(keyMinOptMono, keyMaxOptMono).flatMapMany(entry -> { - var keyMinOpt = entry.getT1(); - var keyMaxOpt = entry.getT2(); - if (keyMinOpt.isPresent() || keyMaxOpt.isPresent()) { - return databaseMapDictionary.getAllValues(snapshot, - keyMinOpt.orElse(null), - keyMaxOpt.orElse(null), - reverse, - smallRange - ); - } else { - return databaseMapDictionary.getAllValues(snapshot, smallRange); - } - }); + if (keyMin != null || keyMax != null) { + return databaseMapDictionary.getAllValues(snapshot, + keyMin, + keyMax, + reverse, + smallRange + ); + } else { + return databaseMapDictionary.getAllValues(snapshot, smallRange); + } } - public static Flux getKeyLeavesFrom(DatabaseMapDictionary databaseMapDictionary, + public static Stream getKeyLeavesFrom(DatabaseMapDictionary databaseMapDictionary, CompositeSnapshot snapshot, - Mono keyMin, - Mono keyMax, - boolean reverse, boolean smallRange) { - Mono> keyMinOptMono = keyMin.map(Optional::of).defaultIfEmpty(Optional.empty()); - Mono> keyMaxOptMono = keyMax.map(Optional::of).defaultIfEmpty(Optional.empty()); + @Nullable K keyMin, + @Nullable K keyMax, + boolean 
reverse, + boolean smallRange) { - return Mono.zip(keyMinOptMono, keyMaxOptMono).flatMapMany(keys -> { - var keyMinOpt = keys.getT1(); - var keyMaxOpt = keys.getT2(); - Flux>> stagesFlux; - if (keyMinOpt.isPresent() || keyMaxOpt.isPresent()) { - stagesFlux = databaseMapDictionary - .getAllStages(snapshot, keyMinOpt.orElse(null), keyMaxOpt.orElse(null), reverse, smallRange); - } else { - stagesFlux = databaseMapDictionary.getAllStages(snapshot, smallRange); - } - return stagesFlux.doOnNext(e -> e.getValue().close()) - .doOnDiscard(Entry.class, e -> { - if (e.getValue() instanceof DatabaseStageEntry resource) { - LLUtils.onDiscard(resource); - } - }) - .map(Entry::getKey); - }); + Stream>> stagesFlux; + if (keyMin != null || keyMax != null) { + stagesFlux = databaseMapDictionary.getAllStages(snapshot, keyMin, keyMax, reverse, smallRange); + } else { + stagesFlux = databaseMapDictionary.getAllStages(snapshot, smallRange); + } + return stagesFlux.map(Entry::getKey); } - private @Nullable U deserializeValue(T keySuffix, Buffer value) { + private @Nullable U deserializeValue(T keySuffix, BufDataInput value) { try { return valueSerializer.deserialize(value); } catch (IndexOutOfBoundsException ex) { @@ -131,19 +111,16 @@ public class DatabaseMapDictionary extends DatabaseMapDictionaryDeep> extend } public static > DatabaseMapDictionaryDeep deepIntermediate( - LLDictionary dictionary, BufSupplier prefixKey, SerializerFixedBinaryLength keySuffixSerializer, + LLDictionary dictionary, Buf prefixKey, SerializerFixedBinaryLength keySuffixSerializer, SubStageGetter subStageGetter, int keyExtLength) { return new DatabaseMapDictionaryDeep<>(dictionary, prefixKey, keySuffixSerializer, subStageGetter, keyExtLength); } - @SuppressWarnings({"unchecked", "rawtypes"}) - protected DatabaseMapDictionaryDeep(LLDictionary dictionary, @Nullable BufSupplier prefixKeySupplier, + protected DatabaseMapDictionaryDeep(LLDictionary dictionary, @Nullable Buf prefixKey, SerializerFixedBinaryLength keySuffixSerializer, SubStageGetter subStageGetter, int keyExtLength) { - try (var prefixKey = prefixKeySupplier != null ? prefixKeySupplier.get() : null) { - this.dictionary = dictionary; - this.alloc = dictionary.getAllocator(); - this.subStageGetter = subStageGetter; - this.keySuffixSerializer = keySuffixSerializer; - this.keyPrefixLength = prefixKey != null ? prefixKey.readableBytes() : 0; - this.keySuffixLength = keySuffixSerializer.getSerializedBinaryLength(); - this.keyExtLength = keyExtLength; - try (var keySuffixAndExtZeroBuffer = alloc - .allocate(keySuffixLength + keyExtLength) - .fill((byte) 0) - .writerOffset(keySuffixLength + keyExtLength) - .makeReadOnly()) { - assert keySuffixAndExtZeroBuffer.readableBytes() == keySuffixLength + keyExtLength : - "Key suffix and ext zero buffer readable length is not equal" - + " to the key suffix length + key ext length. keySuffixAndExtZeroBuffer=" - + keySuffixAndExtZeroBuffer.readableBytes() + " keySuffixLength=" + keySuffixLength + " keyExtLength=" - + keyExtLength; - assert keySuffixAndExtZeroBuffer.readableBytes() > 0; - var firstKey = prefixKey != null ? prefixKeySupplier.get() - : alloc.allocate(keyPrefixLength + keySuffixLength + keyExtLength); - try { - firstRangeKey(firstKey, keyPrefixLength, keySuffixAndExtZeroBuffer); - var nextRangeKey = prefixKey != null ? 
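
The bounds are plain nullable keys now instead of Mono values; a sketch of a range scan (the type parameters are assumed from context, and closing the returned stream is left to the caller, matching the LLUtils.consume pattern):

    import it.cavallium.dbengine.client.CompositeSnapshot;
    import it.cavallium.dbengine.database.collections.DatabaseMapDictionary;
    import java.util.Map.Entry;
    import java.util.stream.Stream;

    class LeavesExample {
        static <K, V> void scan(DatabaseMapDictionary<K, V> map, CompositeSnapshot snapshot, K keyMin) {
            // keyMax == null means "no upper bound"; both bounds null would scan everything.
            try (Stream<Entry<K, V>> leaves =
                    DatabaseMapDictionary.getLeavesFrom(map, snapshot, keyMin, null, false, false)) {
                leaves.forEach(entry -> System.out.println(entry.getKey()));
            }
        }
    }
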
prefixKeySupplier.get() - : alloc.allocate(keyPrefixLength + keySuffixLength + keyExtLength); - try { - nextRangeKey(nextRangeKey, keyPrefixLength, keySuffixAndExtZeroBuffer); - assert prefixKey == null || prefixKey.isAccessible(); - assert keyPrefixLength == 0 || !LLUtils.equals(firstKey, nextRangeKey); - if (keyPrefixLength == 0) { - this.rangeSupplier = RangeSupplier.ofOwned(LLRange.all()); - firstKey.close(); - nextRangeKey.close(); - } else { - this.rangeSupplier = RangeSupplier.ofOwned(LLRange.ofUnsafe(firstKey, nextRangeKey)); - } - this.rangeMono = Mono.fromSupplier(rangeSupplier); - assert subStageKeysConsistency(keyPrefixLength + keySuffixLength + keyExtLength); - } catch (Throwable t) { - nextRangeKey.close(); - throw t; - } - } catch (Throwable t) { - firstKey.close(); - throw t; - } - - this.keyPrefixSupplier = prefixKeySupplier; - } - } catch (Throwable t) { - if (prefixKeySupplier != null) { - prefixKeySupplier.close(); - } - throw t; + this.dictionary = dictionary; + this.subStageGetter = subStageGetter; + this.keySuffixSerializer = keySuffixSerializer; + this.keyPrefixLength = prefixKey != null ? prefixKey.size() : 0; + this.keySuffixLength = keySuffixSerializer.getSerializedBinaryLength(); + this.keyExtLength = keyExtLength; + var keySuffixAndExtZeroBuffer = Buf.createZeroes(keySuffixLength + keyExtLength); + assert keySuffixAndExtZeroBuffer.size() == keySuffixLength + keyExtLength : + "Key suffix and ext zero buffer readable length is not equal" + + " to the key suffix length + key ext length. keySuffixAndExtZeroBuffer=" + + keySuffixAndExtZeroBuffer.size() + " keySuffixLength=" + keySuffixLength + " keyExtLength=" + + keyExtLength; + assert keySuffixAndExtZeroBuffer.size() > 0; + var firstKey = firstRangeKey(prefixKey, keyPrefixLength, keySuffixAndExtZeroBuffer); + var nextRangeKey = nextRangeKey(prefixKey, keyPrefixLength, keySuffixAndExtZeroBuffer); + assert keyPrefixLength == 0 || !LLUtils.equals(firstKey, nextRangeKey); + if (keyPrefixLength == 0) { + this.range = LLRange.all(); + } else { + this.range = LLRange.of(firstKey, nextRangeKey); } - } + assert subStageKeysConsistency(keyPrefixLength + keySuffixLength + keyExtLength); - @SuppressWarnings({"unchecked", "rawtypes"}) + this.keyPrefix = prefixKey; + } private DatabaseMapDictionaryDeep(LLDictionary dictionary, - BufferAllocator alloc, SubStageGetter subStageGetter, SerializerFixedBinaryLength keySuffixSerializer, int keyPrefixLength, int keySuffixLength, int keyExtLength, - Mono rangeMono, - RangeSupplier rangeSupplier, - BufSupplier keyPrefixSupplier, - Runnable onClose) { + LLRange range, + Buf keyPrefix) { this.dictionary = dictionary; - this.alloc = alloc; this.subStageGetter = subStageGetter; this.keySuffixSerializer = keySuffixSerializer; this.keyPrefixLength = keyPrefixLength; this.keySuffixLength = keySuffixLength; this.keyExtLength = keyExtLength; - this.rangeMono = rangeMono; + this.range = range; - this.rangeSupplier = rangeSupplier; - this.keyPrefixSupplier = keyPrefixSupplier; + this.keyPrefix = keyPrefix; } @SuppressWarnings("unused") @@ -256,16 +186,39 @@ public class DatabaseMapDictionaryDeep> extend } /** - * Removes the prefix from the key * @return the prefix */ - protected Buffer splitPrefix(Buffer key) { - assert key.readableBytes() == keyPrefixLength + keySuffixLength + keyExtLength - || key.readableBytes() == keyPrefixLength + keySuffixLength; - var prefix = key.readSplit(this.keyPrefixLength); - assert key.readableBytes() == keySuffixLength + keyExtLength - || key.readableBytes() == 
keySuffixLength; - return prefix; + protected Buf prefixSubList(Buf key) { + assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength + || key.size() == keyPrefixLength + keySuffixLength; + return key.subList(0, this.keyPrefixLength); + } + + /** + * @return the suffix + */ + protected Buf suffixSubList(Buf key) { + assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength + || key.size() == keyPrefixLength + keySuffixLength; + return key.subList(this.keyPrefixLength, keyPrefixLength + keySuffixLength); + } + + /** + * @return the suffix + */ + protected Buf suffixAndExtSubList(Buf key) { + assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength + || key.size() == keyPrefixLength + keySuffixLength; + return key.subList(this.keyPrefixLength, key.size()); + } + + /** + * @return the ext + */ + protected Buf extSubList(Buf key) { + assert key.size() == keyPrefixLength + keySuffixLength + keyExtLength + || key.size() == keyPrefixLength + keySuffixLength; + return key.subList(this.keyPrefixLength + this.keySuffixLength, key.size()); } protected LLSnapshot resolveSnapshot(@Nullable CompositeSnapshot snapshot) { @@ -277,30 +230,23 @@ public class DatabaseMapDictionaryDeep> extend } @Override - public Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { - return dictionary.sizeRange(resolveSnapshot(snapshot), rangeMono, fast); + public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { + return dictionary.sizeRange(resolveSnapshot(snapshot), range, fast); } @Override - public Mono isEmpty(@Nullable CompositeSnapshot snapshot) { - return dictionary.isRangeEmpty(resolveSnapshot(snapshot), rangeMono, false); + public boolean isEmpty(@Nullable CompositeSnapshot snapshot) { + return dictionary.isRangeEmpty(resolveSnapshot(snapshot), range, false); } @Override - public Mono at(@Nullable CompositeSnapshot snapshot, T keySuffix) { - var suffixKeyWithoutExt = Mono.fromCallable(() -> { - var keyWithoutExtBuf = keyPrefixSupplier == null - ? 
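
A deep-map key is laid out as [prefix][suffix][ext], and the subList helpers above only slice those regions; an isolated sketch with hypothetical sizes (prefix 2, suffix 4, ext 2):

    import it.cavallium.dbengine.buffers.Buf;

    class KeyLayoutExample {
        static void example() {
            Buf key = Buf.wrap(new byte[] {0, 1, 10, 11, 12, 13, 20, 21});
            Buf prefix = key.subList(0, 2); // {0, 1}
            Buf suffix = key.subList(2, 6); // {10, 11, 12, 13}
            Buf ext = key.subList(6, 8);    // {20, 21}
        }
    }
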
alloc.allocate(keySuffixLength + keyExtLength) : keyPrefixSupplier.get(); - try { - keyWithoutExtBuf.ensureWritable(keySuffixLength + keyExtLength); - serializeSuffix(keySuffix, keyWithoutExtBuf); - } catch (Throwable ex) { - keyWithoutExtBuf.close(); - throw ex; - } - return keyWithoutExtBuf; - }); - return this.subStageGetter.subStage(dictionary, snapshot, suffixKeyWithoutExt); + public @NotNull US at(@Nullable CompositeSnapshot snapshot, T keySuffix) { + BufDataOutput bufOutput = BufDataOutput.createLimited(keyPrefixLength + keySuffixLength + keyExtLength); + if (keyPrefix != null) { + bufOutput.writeBytes(keyPrefix); + } + serializeSuffixTo(keySuffix, bufOutput); + return this.subStageGetter.subStage(dictionary, snapshot, bufOutput.asList()); } @Override @@ -309,39 +255,22 @@ public class DatabaseMapDictionaryDeep> extend } @Override - public Flux badBlocks() { - return dictionary.badBlocks(rangeMono); + public Stream badBlocks() { + return dictionary.badBlocks(range); } @Override - public Flux> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange) { + public Stream> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange) { return dictionary - .getRangeKeyPrefixes(resolveSnapshot(snapshot), rangeMono, keyPrefixLength + keySuffixLength, smallRange) - .flatMapSequential(groupKeyWithoutExt -> this.subStageGetter - .subStage(dictionary, snapshot, Mono.fromCallable(() -> groupKeyWithoutExt.copy())) - .map(us -> { - T deserializedSuffix; - try (var splittedGroupSuffix = splitGroupSuffix(groupKeyWithoutExt)) { - deserializedSuffix = this.deserializeSuffix(splittedGroupSuffix); - return new SubStageEntry<>(deserializedSuffix, us); - } - }) - .doFinally(s -> groupKeyWithoutExt.close()) - ); - } - - /** - * Split the input. 
The input will become the ext, the returned data will be the group suffix - * @param groupKey group key, will become ext - * @return group suffix - */ - private Buffer splitGroupSuffix(@NotNull Buffer groupKey) { - assert subStageKeysConsistency(groupKey.readableBytes()) - || subStageKeysConsistency(groupKey.readableBytes() + keyExtLength); - this.splitPrefix(groupKey).close(); - assert subStageKeysConsistency(keyPrefixLength + groupKey.readableBytes()) - || subStageKeysConsistency(keyPrefixLength + groupKey.readableBytes() + keyExtLength); - return groupKey.readSplit(keySuffixLength); + .getRangeKeyPrefixes(resolveSnapshot(snapshot), range, keyPrefixLength + keySuffixLength, smallRange) + .parallel() + .map(groupKeyWithoutExt -> { + T deserializedSuffix; + var splittedGroupSuffix = suffixSubList(groupKeyWithoutExt); + deserializedSuffix = this.deserializeSuffix(BufDataInput.create(splittedGroupSuffix)); + return new SubStageEntry<>(deserializedSuffix, + this.subStageGetter.subStage(dictionary, snapshot, groupKeyWithoutExt)); + }); } private boolean subStageKeysConsistency(int totalKeyLength) { @@ -357,51 +286,45 @@ public class DatabaseMapDictionaryDeep> extend } @Override - public Flux> setAllValuesAndGetPrevious(Flux> entries) { - return this - .getAllValues(null, false) - .concatWith(this - .clear() - .then(this.putMulti(entries)) - .as(InternalMonoUtils::toAny) - ); + public void setAllValues(Stream> entries) { + this.clear(); + this.putMulti(entries); } @Override - public Mono clear() { - return Mono.using(() -> rangeSupplier.get(), range -> { - if (range.isAll()) { - return dictionary.clear(); - } else if (range.isSingle()) { - return dictionary - .remove(Mono.fromCallable(() -> range.getSingleUnsafe()), LLDictionaryResultType.VOID) - .doOnNext(resource -> LLUtils.finalizeResourceNow(resource)) - .then(); - } else { - return dictionary.setRange(rangeMono, Flux.empty(), false); - } - }, resource -> LLUtils.finalizeResourceNow(resource)); + public Stream> setAllValuesAndGetPrevious(Stream> entries) { + return this.getAllValues(null, false).onClose(() -> setAllValues(entries)); } - protected T deserializeSuffix(@NotNull Buffer keySuffix) throws SerializationException { - assert suffixKeyLengthConsistency(keySuffix.readableBytes()); - var result = keySuffixSerializer.deserialize(keySuffix); - return result; + @Override + public void clear() { + if (range.isAll()) { + dictionary.clear(); + } else if (range.isSingle()) { + dictionary.remove(range.getSingleUnsafe(), LLDictionaryResultType.VOID); + } else { + dictionary.setRange(range, Stream.empty(), false); + } } - protected void serializeSuffix(T keySuffix, Buffer output) throws SerializationException { - output.ensureWritable(keySuffixLength); - var beforeWriterOffset = output.writerOffset(); + protected T deserializeSuffix(@NotNull BufDataInput keySuffix) throws SerializationException { + assert suffixKeyLengthConsistency(keySuffix.available()); + return keySuffixSerializer.deserialize(keySuffix); + } + + protected void serializeSuffixTo(T keySuffix, BufDataOutput output) throws SerializationException { + assert suffixKeyLengthConsistency(output.size()); + var beforeWriterOffset = output.size(); keySuffixSerializer.serialize(keySuffix, output); - var afterWriterOffset = output.writerOffset(); + var afterWriterOffset = output.size(); assert suffixKeyLengthConsistency(afterWriterOffset - beforeWriterOffset) : "Invalid key suffix length: " + (afterWriterOffset - beforeWriterOffset) + ". 
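
at(...) builds a sub-stage key by writing the prefix bytes followed by the fixed-length serialized suffix; the same pattern in isolation (the serializer and sizes here are hypothetical):

    import it.cavallium.dbengine.buffers.Buf;
    import it.cavallium.dbengine.buffers.BufDataOutput;
    import it.cavallium.dbengine.database.serialization.SerializationException;
    import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;

    class SubStageKeyExample {
        static Buf buildKey(Buf prefix, SerializerFixedBinaryLength<Integer> suffixSerializer, int suffix)
                throws SerializationException {
            BufDataOutput out = BufDataOutput.createLimited(prefix.size() + suffixSerializer.getSerializedBinaryLength());
            out.writeBytes(prefix);                  // [prefix]
            suffixSerializer.serialize(suffix, out); // [prefix][suffix]
            return out.asList();
        }
    }
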
Expected: " + keySuffixLength; } - public static Flux getAllLeaves2(DatabaseMapDictionaryDeep, ? extends DatabaseStageMap>> deepMap, + public static Stream getAllLeaves2(DatabaseMapDictionaryDeep, ? extends DatabaseStageMap>> deepMap, CompositeSnapshot snapshot, TriFunction merger, - @NotNull Mono savedProgressKey1) { + @Nullable K1 savedProgressKey1) { var keySuffix1Serializer = deepMap.keySuffixSerializer; SerializerFixedBinaryLength keySuffix2Serializer; Serializer valueSerializer; @@ -434,64 +357,47 @@ public class DatabaseMapDictionaryDeep> extend throw new IllegalArgumentException(); } - var savedProgressKey1Opt = savedProgressKey1.map(value1 -> Optional.of(value1)).defaultIfEmpty(Optional.empty()); + var firstKey = Optional.ofNullable(savedProgressKey1); + var fullRange = deepMap.range; - return deepMap - .dictionary - .getRange(deepMap.resolveSnapshot(snapshot), Mono.zip(savedProgressKey1Opt, deepMap.rangeMono).handle((tuple, sink) -> { - var firstKey = tuple.getT1(); - var fullRange = tuple.getT2(); - try { - if (firstKey.isPresent()) { - try (fullRange) { - try (var key1Buf = deepMap.alloc.allocate(keySuffix1Serializer.getSerializedBinaryLength())) { - keySuffix1Serializer.serialize(firstKey.get(), key1Buf); - sink.next(LLRange.of(key1Buf.send(), fullRange.getMax())); - } catch (SerializationException e) { - sink.error(e); - } - } - } else { - sink.next(fullRange); - } - } catch (Throwable ex) { - try { - fullRange.close(); - } catch (Throwable ex2) { - LOG.error(ex2); - } - sink.error(ex); - } - }), false, false) - .concatMapIterable(entry -> { + + LLRange range; + if (firstKey.isPresent()) { + var key1Buf = BufDataOutput.create(keySuffix1Serializer.getSerializedBinaryLength()); + keySuffix1Serializer.serialize(firstKey.get(), key1Buf); + range = LLRange.of(key1Buf.asList(), fullRange.getMax()); + } else { + range = fullRange; + } + + return deepMap.dictionary.getRange(deepMap.resolveSnapshot(snapshot), range, false, false) + .flatMap(entry -> { K1 key1 = null; Object key2 = null; - try (entry) { - var keyBuf = entry.getKeyUnsafe(); - var valueBuf = entry.getValueUnsafe(); + try { + var keyBuf = entry.getKey(); + var valueBuf = entry.getValue(); try { assert keyBuf != null; - keyBuf.skipReadableBytes(deepMap.keyPrefixLength); - try (var key1Buf = keyBuf.split(deepMap.keySuffixLength)) { - key1 = keySuffix1Serializer.deserialize(key1Buf); - } - key2 = keySuffix2Serializer.deserialize(keyBuf); + var suffix1And2 = BufDataInput.create(keyBuf.subList(deepMap.keyPrefixLength, deepMap.keyPrefixLength + deepMap.keySuffixLength + deepMap.keyExtLength)); + key1 = keySuffix1Serializer.deserialize(suffix1And2); + key2 = keySuffix2Serializer.deserialize(suffix1And2); assert valueBuf != null; - Object value = valueSerializer.deserialize(valueBuf); + Object value = valueSerializer.deserialize(BufDataInput.create(valueBuf)); if (isHashedSet) { //noinspection unchecked Set set = (Set) value; K1 finalKey1 = key1; //noinspection unchecked - return set.stream().map(e -> merger.apply(finalKey1, e, (V) Nothing.INSTANCE)).toList(); + return set.stream().map(e -> merger.apply(finalKey1, e, (V) Nothing.INSTANCE)); } else if (isHashed) { //noinspection unchecked Set> set = (Set>) value; K1 finalKey1 = key1; - return set.stream().map(e -> merger.apply(finalKey1, e.getKey(), e.getValue())).toList(); + return set.stream().map(e -> merger.apply(finalKey1, e.getKey(), e.getValue())); } else { //noinspection unchecked - return List.of(merger.apply(key1, (K2) key2, (V) value)); + return 
Stream.of(merger.apply(key1, (K2) key2, (V) value)); } } catch (IndexOutOfBoundsException ex) { var exMessage = ex.getMessage(); @@ -504,7 +410,7 @@ public class DatabaseMapDictionaryDeep> extend + ":" + key2 + "](" + LLUtils.toStringSafe(keyBuf) + ") total=" + totalZeroBytesErrors); } - return List.of(); + return Stream.empty(); } else { throw ex; } @@ -514,22 +420,4 @@ public class DatabaseMapDictionaryDeep> extend } }); } - - @Override - protected void onClose() { - try { - if (rangeSupplier != null) { - rangeSupplier.close(); - } - } catch (Throwable ex) { - LOG.error("Failed to close range", ex); - } - try { - if (keyPrefixSupplier != null) { - keyPrefixSupplier.close(); - } - } catch (Throwable ex) { - LOG.error("Failed to close keyPrefix", ex); - } - } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java index f8477f5..69ebd7c 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapDictionaryHashed.java @@ -1,49 +1,40 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.BufferAllocator; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.LLDictionary; -import it.cavallium.dbengine.database.LLUtils; -import io.netty5.buffer.internal.ResourceSupport; import it.cavallium.dbengine.database.SubStageEntry; import it.cavallium.dbengine.database.UpdateMode; import it.cavallium.dbengine.database.serialization.Serializer; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; -import it.cavallium.dbengine.utils.SimpleResource; import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap; import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap; import it.unimi.dsi.fastutil.objects.ObjectArraySet; +import java.util.Collection; import java.util.Collections; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; import java.util.function.Function; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; @SuppressWarnings("unused") -public class DatabaseMapDictionaryHashed extends SimpleResource implements - DatabaseStageMap> { +public class DatabaseMapDictionaryHashed implements DatabaseStageMap> { private static final Logger logger = LogManager.getLogger(DatabaseMapDictionaryHashed.class); - private final BufferAllocator alloc; private final Function keySuffixHashFunction; private final DatabaseMapDictionary>> subDictionary; protected DatabaseMapDictionaryHashed(LLDictionary dictionary, - @Nullable BufSupplier prefixKeySupplier, + @Nullable Buf prefixKeySupplier, Serializer keySuffixSerializer, Serializer valueSerializer, Function keySuffixHashFunction, @@ -52,7 +43,6 @@ public class DatabaseMapDictionaryHashed extends SimpleResource implem if (updateMode != UpdateMode.ALLOW) { throw new IllegalArgumentException("Hashed maps only works when 
UpdateMode is ALLOW"); } - this.alloc = dictionary.getAllocator(); ValueWithHashSerializer valueWithHashSerializer = new ValueWithHashSerializer<>(keySuffixSerializer, valueSerializer); ValuesSetSerializer> valuesSetSerializer @@ -62,11 +52,8 @@ public class DatabaseMapDictionaryHashed extends SimpleResource implem this.keySuffixHashFunction = keySuffixHashFunction; } - private DatabaseMapDictionaryHashed(BufferAllocator alloc, - Function keySuffixHashFunction, - DatabaseStage>>> subDictionary, - Drop> drop) { - this.alloc = alloc; + private DatabaseMapDictionaryHashed(Function keySuffixHashFunction, + DatabaseStage>>> subDictionary) { this.keySuffixHashFunction = keySuffixHashFunction; this.subDictionary = (DatabaseMapDictionary>>) subDictionary; @@ -88,7 +75,7 @@ public class DatabaseMapDictionaryHashed extends SimpleResource implem } public static DatabaseMapDictionaryHashed tail(LLDictionary dictionary, - @Nullable BufSupplier prefixKeySupplier, + @Nullable Buf prefixKeySupplier, Serializer keySuffixSerializer, Serializer valueSerializer, Function keySuffixHashFunction, @@ -121,36 +108,35 @@ public class DatabaseMapDictionaryHashed extends SimpleResource implem } @Override - public Mono> get(@Nullable CompositeSnapshot snapshot) { - return subDictionary.get(snapshot).map(map -> deserializeMap(map)); + public Object2ObjectSortedMap get(@Nullable CompositeSnapshot snapshot) { + var v = subDictionary.get(snapshot); + return v != null ? deserializeMap(v) : null; } @Override - public Mono> getOrDefault(@Nullable CompositeSnapshot snapshot, - Mono> defaultValue) { - return this.get(snapshot).switchIfEmpty(defaultValue); + public Object2ObjectSortedMap getOrDefault(@Nullable CompositeSnapshot snapshot, + Object2ObjectSortedMap defaultValue) { + return Objects.requireNonNullElse(this.get(snapshot), defaultValue); } @Override - public Mono set(Object2ObjectSortedMap map) { - return Mono.fromSupplier(() -> this.serializeMap(map)).flatMap(value -> subDictionary.set(value)); + public void set(Object2ObjectSortedMap map) { + var value = this.serializeMap(map); + subDictionary.set(value); } @Override - public Mono setAndGetChanged(Object2ObjectSortedMap map) { - return Mono - .fromSupplier(() -> this.serializeMap(map)) - .flatMap(value -> subDictionary.setAndGetChanged(value)) - .single(); + public boolean setAndGetChanged(Object2ObjectSortedMap map) { + return subDictionary.setAndGetChanged(this.serializeMap(map)); } @Override - public Mono clearAndGetStatus() { + public boolean clearAndGetStatus() { return subDictionary.clearAndGetStatus(); } @Override - public Mono isEmpty(@Nullable CompositeSnapshot snapshot) { + public boolean isEmpty(@Nullable CompositeSnapshot snapshot) { return subDictionary.isEmpty(snapshot); } @@ -160,20 +146,17 @@ public class DatabaseMapDictionaryHashed extends SimpleResource implem } @Override - public Flux badBlocks() { + public Stream badBlocks() { return this.subDictionary.badBlocks(); } @Override - public Mono> at(@Nullable CompositeSnapshot snapshot, T key) { - return this - .atPrivate(snapshot, key, keySuffixHashFunction.apply(key)) - .map(cast -> cast); + public @NotNull DatabaseStageEntry at(@Nullable CompositeSnapshot snapshot, T key) { + return this.atPrivate(snapshot, key, keySuffixHashFunction.apply(key)); } - private Mono> atPrivate(@Nullable CompositeSnapshot snapshot, T key, TH hash) { - return subDictionary.at(snapshot, hash) - .map(entry -> new DatabaseSingleBucket(entry, key)); + private DatabaseSingleBucket atPrivate(@Nullable CompositeSnapshot 
snapshot, T key, TH hash) { + return new DatabaseSingleBucket(subDictionary.at(snapshot, hash), key); } @Override @@ -182,57 +165,55 @@ public class DatabaseMapDictionaryHashed extends SimpleResource implem } @Override - public Flux>> getAllStages(@Nullable CompositeSnapshot snapshot, + public Stream>> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange) { return subDictionary .getAllValues(snapshot, smallRange) .map(Entry::getValue) .map(Collections::unmodifiableSet) - .flatMap(bucket -> Flux - .fromIterable(bucket) + .flatMap(bucket -> bucket.stream() .map(Entry::getKey) - .flatMap(key -> this.at(snapshot, key).map(stage -> new SubStageEntry<>(key, stage)))); + .map(key -> new SubStageEntry<>(key, this.at(snapshot, key)))); } @Override - public Flux> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) { + public Stream> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) { return subDictionary .getAllValues(snapshot, smallRange) .map(Entry::getValue) .map(Collections::unmodifiableSet) - .concatMapIterable(list -> list); + .flatMap(Collection::stream); } @Override - public Flux> setAllValuesAndGetPrevious(Flux> entries) { - return entries.flatMap(entry -> Mono.usingWhen(this.at(null, entry.getKey()), - stage -> stage.setAndGetPrevious(entry.getValue()).map(prev -> Map.entry(entry.getKey(), prev)), - LLUtils::finalizeResource - )); + public Stream> setAllValuesAndGetPrevious(Stream> entries) { + return entries.mapMulti((entry, sink) -> { + var prev = this.at(null, entry.getKey()).setAndGetPrevious(entry.getValue()); + if (prev != null) { + sink.accept(Map.entry(entry.getKey(), prev)); + } + }); } @Override - public Mono clear() { - return subDictionary.clear(); + public void clear() { + subDictionary.clear(); } @Override - public Mono> setAndGetPrevious(Object2ObjectSortedMap value) { - return Mono - .fromSupplier(() -> this.serializeMap(value)) - .flatMap(value1 -> subDictionary.setAndGetPrevious(value1)) - .map(map -> deserializeMap(map)); + public Object2ObjectSortedMap setAndGetPrevious(Object2ObjectSortedMap value) { + var v = subDictionary.setAndGetPrevious(this.serializeMap(value)); + return v != null ? deserializeMap(v) : null; } @Override - public Mono> clearAndGetPrevious() { - return subDictionary - .clearAndGetPrevious() - .map(map -> deserializeMap(map)); + public Object2ObjectSortedMap clearAndGetPrevious() { + var v = subDictionary.clearAndGetPrevious(); + return v != null ? 
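
setAllValuesAndGetPrevious leans on Stream.mapMulti (Java 16+), a flatMap variant that can emit zero or one element per input without allocating intermediate streams; the null-skipping idiom in isolation:

    import java.util.stream.Stream;

    class MapMultiExample {
        static Stream<String> nonNullUppercase(Stream<String> input) {
            // Emits at most one element per input element, silently dropping nulls.
            return input.<String>mapMulti((value, sink) -> {
                if (value != null) {
                    sink.accept(value.toUpperCase());
                }
            });
        }
    }
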
deserializeMap(v) : null; } @Override - public Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { + public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { return subDictionary.leavesCount(snapshot, fast); } @@ -245,13 +226,14 @@ public class DatabaseMapDictionaryHashed extends SimpleResource implem @Override public ValueGetter getAsyncDbValueGetter(@Nullable CompositeSnapshot snapshot) { ValueGetter>> getter = subDictionary.getAsyncDbValueGetter(snapshot); - return key -> getter - .get(keySuffixHashFunction.apply(key)) - .flatMap(set -> this.extractValueTransformation(set, key)); - } - - private Mono extractValueTransformation(ObjectArraySet> entries, T key) { - return Mono.fromCallable(() -> extractValue(entries, key)); + return key -> { + ObjectArraySet> set = getter.get(keySuffixHashFunction.apply(key)); + if (set != null) { + return this.extractValue(set, key); + } else { + return null; + } + }; } @Nullable @@ -299,15 +281,4 @@ public class DatabaseMapDictionaryHashed extends SimpleResource implem return null; } } - - @Override - protected void onClose() { - try { - if (subDictionary != null) { - subDictionary.close(); - } - } catch (Throwable ex) { - logger.error("Failed to close subDictionary", ex); - } - } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapSingle.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapSingle.java index 79e3b77..0df6ffb 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapSingle.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseMapSingle.java @@ -1,12 +1,10 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; -import io.netty5.buffer.internal.ResourceSupport; +import it.cavallium.dbengine.buffers.Buf; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.Delta; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.LLDictionaryResultType; @@ -14,32 +12,26 @@ import it.cavallium.dbengine.database.LLRange; import it.cavallium.dbengine.database.LLSnapshot; import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.UpdateReturnMode; +import it.cavallium.dbengine.database.disk.BinarySerializationFunction; import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.SerializationFunction; import it.cavallium.dbengine.database.serialization.Serializer; -import it.cavallium.dbengine.utils.SimpleResource; -import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -public class DatabaseMapSingle extends SimpleResource implements DatabaseStageEntry { +public final class DatabaseMapSingle implements DatabaseStageEntry { private static final Logger LOG = LogManager.getLogger(DatabaseMapSingle.class); - private final AtomicLong totalZeroBytesErrors = new AtomicLong(); - private final LLDictionary dictionary; - private final Mono keyMono; + 
private final Buf key; private final Serializer serializer; - private final BufSupplier keySupplier; - public DatabaseMapSingle(LLDictionary dictionary, BufSupplier keySupplier, Serializer serializer) { + public DatabaseMapSingle(LLDictionary dictionary, Buf key, Serializer serializer) { this.dictionary = dictionary; - this.keySupplier = keySupplier; - this.keyMono = Mono.fromSupplier(() -> keySupplier.get()); + this.key = key; this.serializer = serializer; } @@ -51,127 +43,98 @@ public class DatabaseMapSingle extends SimpleResource implements DatabaseStag } } - private U deserializeValue(Buffer value) { + private U deserializeValue(Buf value) { try { - return serializer.deserialize(value); + return serializer.deserialize(BufDataInput.create(value)); } catch (IndexOutOfBoundsException ex) { var exMessage = ex.getMessage(); if (exMessage != null && exMessage.contains("read 0 to 0, write 0 to ")) { - try (var key = keySupplier.get()) { - LOG.error("Unexpected zero-bytes value at " - + dictionary.getDatabaseName() + ":" + dictionary.getColumnName() + ":" + LLUtils.toStringSafe(key)); - } + LOG.error("Unexpected zero-bytes value at %s:%s:%s".formatted(dictionary.getDatabaseName(), + dictionary.getColumnName(), + LLUtils.toStringSafe(key) + )); return null; } else { throw ex; } - } catch (SerializationException ex) { - throw ex; } } - private Buffer serializeValue(U value) throws SerializationException { - var valSizeHint = serializer.getSerializedSizeHint(); - if (valSizeHint == -1) valSizeHint = 128; - var valBuf = dictionary.getAllocator().allocate(valSizeHint); - try { - serializer.serialize(value, valBuf); - return valBuf; - } catch (Throwable ex) { - valBuf.close(); - throw ex; + private Buf serializeValue(U value) throws SerializationException { + BufDataOutput valBuf = BufDataOutput.create(serializer.getSerializedSizeHint()); + serializer.serialize(value, valBuf); + return valBuf.asList(); + } + + @Override + public U get(@Nullable CompositeSnapshot snapshot) { + var result = dictionary.get(resolveSnapshot(snapshot), key); + if (result != null) { + return deserializeValue(result); + } else { + return null; } } @Override - public Mono get(@Nullable CompositeSnapshot snapshot) { - return Mono.usingWhen(dictionary.get(resolveSnapshot(snapshot), keyMono), - buf -> Mono.fromSupplier(() -> deserializeValue(buf)), - LLUtils::finalizeResource - ); + public U setAndGetPrevious(U value) { + var serializedKey = value != null ? 
serializeValue(value) : null; + var result = dictionary.put(key, serializedKey, LLDictionaryResultType.PREVIOUS_VALUE); + if (result != null) { + return deserializeValue(result); + } else { + return null; + } } @Override - public Mono setAndGetPrevious(U value) { - return Mono.usingWhen(dictionary - .put(keyMono, Mono.fromCallable(() -> serializeValue(value)), LLDictionaryResultType.PREVIOUS_VALUE), - buf -> Mono.fromSupplier(() -> deserializeValue(buf)), - LLUtils::finalizeResource); - } - - @Override - public Mono update(SerializationFunction<@Nullable U, @Nullable U> updater, + public U update(SerializationFunction<@Nullable U, @Nullable U> updater, UpdateReturnMode updateReturnMode) { - var resultMono = dictionary - .update(keyMono, (oldValueSer) -> { - try (oldValueSer) { - U result; - if (oldValueSer == null) { - result = updater.apply(null); - } else { - U deserializedValue = serializer.deserialize(oldValueSer); - result = updater.apply(deserializedValue); - } - if (result == null) { - return null; - } else { - return serializeValue(result); - } - } - }, updateReturnMode); - return Mono.usingWhen(resultMono, - result -> Mono.fromSupplier(() -> deserializeValue(result)), - LLUtils::finalizeResource - ); + Buf resultBytes = dictionary.update(key, this.createUpdater(updater), updateReturnMode); + return deserializeValue(resultBytes); } @Override - public Mono> updateAndGetDelta(SerializationFunction<@Nullable U, @Nullable U> updater) { - return dictionary - .updateAndGetDelta(keyMono, (oldValueSer) -> { - U result; - if (oldValueSer == null) { - result = updater.apply(null); - } else { - U deserializedValue = serializer.deserialize(oldValueSer); - result = updater.apply(deserializedValue); - } - if (result == null) { - return null; - } else { - return serializeValue(result); - } - }).transform(mono -> LLUtils.mapLLDelta(mono, serialized -> serializer.deserialize(serialized))); + public Delta updateAndGetDelta(SerializationFunction<@Nullable U, @Nullable U> updater) { + var delta = dictionary.updateAndGetDelta(key, this.createUpdater(updater)); + return LLUtils.mapLLDelta(delta, bytes -> serializer.deserialize(BufDataInput.create(bytes))); + } + + private BinarySerializationFunction createUpdater(SerializationFunction updater) { + return oldBytes -> { + U result; + if (oldBytes == null) { + result = updater.apply(null); + } else { + U deserializedValue = serializer.deserialize(BufDataInput.create(oldBytes)); + result = updater.apply(deserializedValue); + } + if (result == null) { + return null; + } else { + return serializeValue(result); + } + }; } @Override - public Mono clearAndGetPrevious() { - return Mono.usingWhen(dictionary.remove(keyMono, LLDictionaryResultType.PREVIOUS_VALUE), - result -> Mono.fromSupplier(() -> deserializeValue(result)), - LLUtils::finalizeResource - ); + public U clearAndGetPrevious() { + return deserializeValue(dictionary.remove(key, LLDictionaryResultType.PREVIOUS_VALUE)); } @Override - public Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { - return dictionary - .isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(single -> LLRange.singleUnsafe(single)), false) - .map(empty -> empty ? 0L : 1L); + public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { + return dictionary.isRangeEmpty(resolveSnapshot(snapshot), LLRange.single(key), false) ? 
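
update(...) now takes a plain updater over the deserialized value; a sketch for an optional counter (the DatabaseMapSingle<Long> type parameter is assumed from context):

    import it.cavallium.dbengine.database.UpdateReturnMode;
    import it.cavallium.dbengine.database.collections.DatabaseMapSingle;

    class UpdateExample {
        static Long increment(DatabaseMapSingle<Long> dbSingle) {
            // The updater sees the previous value (null when absent) and returns the
            // new value; returning null would delete the entry.
            return dbSingle.update(prev -> prev == null ? 1L : prev + 1L, UpdateReturnMode.GET_NEW_VALUE);
        }
    }
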
0L : 1L; } @Override - public Mono isEmpty(@Nullable CompositeSnapshot snapshot) { - return dictionary - .isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(single -> LLRange.singleUnsafe(single)), true); + public boolean isEmpty(@Nullable CompositeSnapshot snapshot) { + return dictionary.isRangeEmpty(resolveSnapshot(snapshot), LLRange.single(key), true); } @Override - public Flux badBlocks() { - return dictionary.badBlocks(keyMono.map(single -> LLRange.singleUnsafe(single))); + public Stream badBlocks() { + return dictionary.badBlocks(LLRange.single(key)); } - @Override - protected void onClose() { - keySupplier.close(); - } } \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java index f101944..0f2e4dc 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionary.java @@ -1,32 +1,21 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.Drop; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.LLDictionary; -import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap; -import java.util.HashMap; -import java.util.Map; import java.util.Set; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; @SuppressWarnings("unused") public class DatabaseSetDictionary extends DatabaseMapDictionary { protected DatabaseSetDictionary(LLDictionary dictionary, - BufSupplier prefixKeySupplier, + Buf prefixKeySupplier, SerializerFixedBinaryLength keySuffixSerializer) { - super(dictionary, - prefixKeySupplier, - keySuffixSerializer, - DatabaseEmpty.nothingSerializer(dictionary.getAllocator()) - ); + super(dictionary, prefixKeySupplier, keySuffixSerializer, DatabaseEmpty.nothingSerializer()); } public static DatabaseSetDictionary simple(LLDictionary dictionary, @@ -35,24 +24,27 @@ public class DatabaseSetDictionary extends DatabaseMapDictionary } public static DatabaseSetDictionary tail(LLDictionary dictionary, - BufSupplier prefixKeySupplier, + Buf prefixKeySupplier, SerializerFixedBinaryLength keySuffixSerializer) { return new DatabaseSetDictionary<>(dictionary, prefixKeySupplier, keySuffixSerializer); } - public Mono> getKeySet(@Nullable CompositeSnapshot snapshot) { - return get(snapshot).map(Map::keySet); + public Set getKeySet(@Nullable CompositeSnapshot snapshot) { + var v = get(snapshot); + return v != null ? v.keySet() : null; } - public Mono> setAndGetPreviousKeySet(Set value) { + public Set setAndGetPreviousKeySet(Set value) { var hm = new Object2ObjectLinkedOpenHashMap(); for (T t : value) { hm.put(t, DatabaseEmpty.NOTHING); } - return setAndGetPrevious(hm).map(Map::keySet); + var v = setAndGetPrevious(hm); + return v != null ? v.keySet() : null; } - public Mono> clearAndGetPreviousKeySet() { - return clearAndGetPrevious().map(Map::keySet); + public Set clearAndGetPreviousKeySet() { + var v = clearAndGetPrevious(); + return v != null ? 
v.keySet() : null; } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java index f77be36..51b7745 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSetDictionaryHashed.java @@ -1,36 +1,28 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.Drop; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.LLDictionary; -import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; import it.cavallium.dbengine.database.serialization.Serializer; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap; -import java.util.HashMap; -import java.util.Map; import java.util.Set; import java.util.function.Function; -import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; @SuppressWarnings("unused") public class DatabaseSetDictionaryHashed extends DatabaseMapDictionaryHashed { protected DatabaseSetDictionaryHashed(LLDictionary dictionary, - @Nullable BufSupplier prefixKeySupplier, + @Nullable Buf prefixKeySupplier, Serializer keySuffixSerializer, Function keySuffixHashFunction, SerializerFixedBinaryLength keySuffixHashSerializer) { super(dictionary, prefixKeySupplier, keySuffixSerializer, - DatabaseEmpty.nothingSerializer(dictionary.getAllocator()), + DatabaseEmpty.nothingSerializer(), keySuffixHashFunction, keySuffixHashSerializer ); @@ -49,7 +41,7 @@ public class DatabaseSetDictionaryHashed extends DatabaseMapDictionaryHas } public static DatabaseSetDictionaryHashed tail(LLDictionary dictionary, - @Nullable BufSupplier prefixKeySupplier, + @Nullable Buf prefixKeySupplier, Serializer keySuffixSerializer, Function keyHashFunction, SerializerFixedBinaryLength keyHashSerializer) { @@ -61,19 +53,22 @@ public class DatabaseSetDictionaryHashed extends DatabaseMapDictionaryHas ); } - public Mono> getKeySet(@Nullable CompositeSnapshot snapshot) { - return get(snapshot).map(Map::keySet); + public Set getKeySet(@Nullable CompositeSnapshot snapshot) { + var v = get(snapshot); + return v != null ? v.keySet() : null; } - public Mono> setAndGetPreviousKeySet(Set value) { + public Set setAndGetPreviousKeySet(Set value) { var hm = new Object2ObjectLinkedOpenHashMap(); for (T t : value) { hm.put(t, DatabaseEmpty.NOTHING); } - return setAndGetPrevious(hm).map(Map::keySet); + var v = setAndGetPrevious(hm); + return v != null ? v.keySet() : null; } - public Mono> clearAndGetPreviousKeySet() { - return clearAndGetPrevious().map(Map::keySet); + public Set clearAndGetPreviousKeySet() { + var v = clearAndGetPrevious(); + return v != null ? 
v.keySet() : null; } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleBucket.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleBucket.java index ceb9389..280a2de 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleBucket.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleBucket.java @@ -1,30 +1,24 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; -import io.netty5.util.Send; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.Delta; import it.cavallium.dbengine.database.LLUtils; -import io.netty5.buffer.internal.ResourceSupport; import it.cavallium.dbengine.database.UpdateReturnMode; import it.cavallium.dbengine.database.serialization.SerializationFunction; -import it.cavallium.dbengine.utils.SimpleResource; import it.unimi.dsi.fastutil.objects.ObjectArraySet; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Set; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; @SuppressWarnings("unused") -public class DatabaseSingleBucket extends SimpleResource implements DatabaseStageEntry { +public class DatabaseSingleBucket implements DatabaseStageEntry { private static final Logger logger = LogManager.getLogger(DatabaseSingleBucket.class); @@ -43,33 +37,35 @@ public class DatabaseSingleBucket extends SimpleResource implements Da } @Override - public Mono get(@Nullable CompositeSnapshot snapshot) { - return bucketStage.get(snapshot).flatMap(entries -> extractValueTransformation(entries)); + public V get(@Nullable CompositeSnapshot snapshot) { + var entries = bucketStage.get(snapshot); + return entries != null ? extractValue(entries) : null; } @Override - public Mono getOrDefault(@Nullable CompositeSnapshot snapshot, Mono defaultValue) { - return bucketStage.get(snapshot).flatMap(entries -> extractValueTransformation(entries)).switchIfEmpty(defaultValue); + public V getOrDefault(@Nullable CompositeSnapshot snapshot, V defaultValue) { + var entries = bucketStage.get(snapshot); + return entries != null ? 
extractValue(entries) : defaultValue; } @Override - public Mono set(V value) { - return this.update(prev -> value, UpdateReturnMode.NOTHING).then(); + public void set(V value) { + this.update(prev -> value, UpdateReturnMode.NOTHING); } @Override - public Mono setAndGetPrevious(V value) { + public V setAndGetPrevious(V value) { return this.update(prev -> value, UpdateReturnMode.GET_OLD_VALUE); } @Override - public Mono setAndGetChanged(V value) { - return this.updateAndGetDelta(prev -> value).map(delta -> LLUtils.isDeltaChanged(delta)); + public boolean setAndGetChanged(V value) { + return LLUtils.isDeltaChanged(this.updateAndGetDelta(prev -> value)); } @Override - public Mono update(SerializationFunction<@Nullable V, @Nullable V> updater, UpdateReturnMode updateReturnMode) { - return bucketStage + public V update(SerializationFunction<@Nullable V, @Nullable V> updater, UpdateReturnMode updateReturnMode) { + var result = bucketStage .update(oldBucket -> { V oldValue = extractValue(oldBucket); V newValue = updater.apply(oldValue); @@ -79,13 +75,13 @@ public class DatabaseSingleBucket extends SimpleResource implements Da } else { return this.insertValueOrCreate(oldBucket, newValue); } - }, updateReturnMode) - .flatMap(entries -> extractValueTransformation(entries)); + }, updateReturnMode); + return result != null ? extractValue(result) : null; } @Override - public Mono> updateAndGetDelta(SerializationFunction<@Nullable V, @Nullable V> updater) { - return bucketStage.updateAndGetDelta(oldBucket -> { + public Delta updateAndGetDelta(SerializationFunction<@Nullable V, @Nullable V> updater) { + var delta = bucketStage.updateAndGetDelta(oldBucket -> { V oldValue = extractValue(oldBucket); var result = updater.apply(oldValue); if (result == null) { @@ -93,32 +89,33 @@ public class DatabaseSingleBucket extends SimpleResource implements Da } else { return this.insertValueOrCreate(oldBucket, result); } - }).transform(mono -> LLUtils.mapDelta(mono, entries -> extractValue(entries))); + }); + return LLUtils.mapDelta(delta, this::extractValue); } @Override - public Mono clear() { - return this.update(prev -> null, UpdateReturnMode.NOTHING).then(); + public void clear() { + this.update(prev -> null, UpdateReturnMode.NOTHING); } @Override - public Mono clearAndGetPrevious() { + public V clearAndGetPrevious() { return this.update(prev -> null, UpdateReturnMode.GET_OLD_VALUE); } @Override - public Mono clearAndGetStatus() { - return this.updateAndGetDelta(prev -> null).map(delta -> LLUtils.isDeltaChanged(delta)); + public boolean clearAndGetStatus() { + return LLUtils.isDeltaChanged(this.updateAndGetDelta(prev -> null)); } @Override - public Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { - return this.get(snapshot).map(prev -> 1L).defaultIfEmpty(0L); + public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { + return this.get(snapshot) != null ? 
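
DatabaseSingleBucket's synchronous methods above all funnel through extractValue(...), whose body is only partially visible in this excerpt. As an illustrative guess at what such a bucket lookup does (the real implementation may differ): a bucket is a small Set of entries whose keys collide on a hash, so the exact key is found by linear scan.

    import java.util.Map;
    import java.util.Map.Entry;
    import java.util.Objects;
    import java.util.Set;

    public final class BucketLookupSketch {

        // Scan a hash bucket for the entry matching the exact key.
        static <K, V> V extractValue(Set<Entry<K, V>> bucket, K key) {
            if (bucket == null) return null;
            for (Entry<K, V> entry : bucket) {
                if (Objects.equals(key, entry.getKey())) {
                    return entry.getValue();
                }
            }
            return null;
        }

        public static void main(String[] args) {
            Set<Entry<String, Integer>> bucket = Set.of(Map.entry("a", 1), Map.entry("b", 2));
            System.out.println(extractValue(bucket, "b")); // 2
            System.out.println(extractValue(bucket, "c")); // null
        }
    }
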
1L : 0L; } @Override - public Mono isEmpty(@Nullable CompositeSnapshot snapshot) { - return this.get(snapshot).map(prev -> true).defaultIfEmpty(true); + public boolean isEmpty(@Nullable CompositeSnapshot snapshot) { + return this.get(snapshot) == null; } @Override @@ -127,14 +124,10 @@ public class DatabaseSingleBucket extends SimpleResource implements Da } @Override - public Flux badBlocks() { + public Stream badBlocks() { return bucketStage.badBlocks(); } - private Mono extractValueTransformation(Set> entries) { - return Mono.fromCallable(() -> extractValue(entries)); - } - @Nullable private V extractValue(Set> entries) { if (entries == null) return null; @@ -188,15 +181,4 @@ public class DatabaseSingleBucket extends SimpleResource implements Da return null; } } - - @Override - protected void onClose() { - try { - if (bucketStage != null) { - bucketStage.close(); - } - } catch (Throwable ex) { - logger.error("Failed to close bucketStage", ex); - } - } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java index b100c6f..fd5adaa 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleMapped.java @@ -1,9 +1,5 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; -import io.netty5.util.Send; -import io.netty5.buffer.internal.ResourceSupport; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.client.Mapper; @@ -12,126 +8,108 @@ import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.UpdateReturnMode; import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.SerializationFunction; -import it.cavallium.dbengine.utils.SimpleResource; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import java.util.stream.Stream; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.publisher.SynchronousSink; -@SuppressWarnings("unused") -public class DatabaseSingleMapped extends SimpleResource implements DatabaseStageEntry { - - private static final Logger logger = LogManager.getLogger(DatabaseSingleMapped.class); +public class DatabaseSingleMapped implements DatabaseStageEntry { private final Mapper mapper; private final DatabaseStageEntry serializedSingle; - @SuppressWarnings({"unchecked", "rawtypes"}) - public DatabaseSingleMapped(DatabaseStageEntry serializedSingle, Mapper mapper, - Drop> drop) { + public DatabaseSingleMapped(DatabaseStageEntry serializedSingle, Mapper mapper) { this.serializedSingle = serializedSingle; this.mapper = mapper; } - @SuppressWarnings({"unchecked", "rawtypes"}) - private DatabaseSingleMapped(DatabaseStage serializedSingle, Mapper mapper, - Drop> drop) { + private DatabaseSingleMapped(DatabaseStage serializedSingle, Mapper mapper) { this.mapper = mapper; this.serializedSingle = (DatabaseStageEntry) serializedSingle; } - private void deserializeSink(B value, SynchronousSink sink) { - try { - sink.next(this.unMap(value)); - } catch (SerializationException ex) { - sink.error(ex); - } + @Override + public A get(@Nullable CompositeSnapshot snapshot) { + var data = 
serializedSingle.get(snapshot); + if (data == null) return null; + return this.unMap(data); } @Override - public Mono get(@Nullable CompositeSnapshot snapshot) { - return serializedSingle.get(snapshot).handle((value, sink) -> deserializeSink(value, sink)); + public A getOrDefault(@Nullable CompositeSnapshot snapshot, A defaultValue) { + var value = serializedSingle.get(snapshot); + if (value == null) return defaultValue; + return this.unMap(value); } @Override - public Mono getOrDefault(@Nullable CompositeSnapshot snapshot, Mono defaultValue) { - return serializedSingle.get(snapshot).handle((B value, SynchronousSink sink) -> deserializeSink(value, sink)).switchIfEmpty(defaultValue); + public void set(A value) { + B mappedValue = value != null ? map(value) : null; + serializedSingle.set(mappedValue); } @Override - public Mono set(A value) { - return Mono - .fromCallable(() -> map(value)) - .flatMap(value1 -> serializedSingle.set(value1)); + public A setAndGetPrevious(A value) { + var mappedValue = value != null ? map(value) : null; + var prev = serializedSingle.setAndGetPrevious(mappedValue); + return prev != null ? unMap(prev) : null; } @Override - public Mono setAndGetPrevious(A value) { - return Mono - .fromCallable(() -> map(value)) - .flatMap(value2 -> serializedSingle.setAndGetPrevious(value2)) - .handle((value1, sink) -> deserializeSink(value1, sink)); + public boolean setAndGetChanged(A value) { + var mappedValue = value != null ? map(value) : null; + return serializedSingle.setAndGetChanged(mappedValue); } @Override - public Mono setAndGetChanged(A value) { - return Mono - .fromCallable(() -> map(value)) - .flatMap(value1 -> serializedSingle.setAndGetChanged(value1)) - .single(); - } - - @Override - public Mono update(SerializationFunction<@Nullable A, @Nullable A> updater, - UpdateReturnMode updateReturnMode) { - return serializedSingle.update(oldValue -> { + public A update(SerializationFunction<@Nullable A, @Nullable A> updater, UpdateReturnMode updateReturnMode) { + B prev = serializedSingle.update(oldValue -> { var result = updater.apply(oldValue == null ? null : this.unMap(oldValue)); if (result == null) { return null; } else { return this.map(result); } - }, updateReturnMode).handle((value, sink) -> deserializeSink(value, sink)); + }, updateReturnMode); + return prev != null ? unMap(prev) : null; } @Override - public Mono> updateAndGetDelta(SerializationFunction<@Nullable A, @Nullable A> updater) { - return serializedSingle.updateAndGetDelta(oldValue -> { + public Delta updateAndGetDelta(SerializationFunction<@Nullable A, @Nullable A> updater) { + var delta = serializedSingle.updateAndGetDelta(oldValue -> { var result = updater.apply(oldValue == null ? null : this.unMap(oldValue)); if (result == null) { return null; } else { return this.map(result); } - }).transform(mono -> LLUtils.mapDelta(mono, bytes -> unMap(bytes))); + }); + return LLUtils.mapDelta(delta, this::unMap); } @Override - public Mono clear() { - return serializedSingle.clear(); + public void clear() { + serializedSingle.clear(); } @Override - public Mono clearAndGetPrevious() { - return serializedSingle.clearAndGetPrevious().handle((value, sink) -> deserializeSink(value, sink)); + public A clearAndGetPrevious() { + var prev = serializedSingle.clearAndGetPrevious(); + return prev != null ? 
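
DatabaseSingleMapped now applies map/unMap inline around its delegate instead of chaining Mono operators, with the null checks made explicit at every call site. A self-contained sketch of that wrapper shape, using a hypothetical Mapper record and a plain Holder in place of the delegate DatabaseStageEntry:

    import java.util.function.Function;

    public final class MappedValueSketch {

        // Hypothetical bidirectional mapper: A is the outer type, B the stored type.
        record Mapper<A, B>(Function<A, B> map, Function<B, A> unMap) {}

        // Stand-in delegate holding a single stored value.
        static final class Holder<B> { B value; }

        static <A, B> A get(Holder<B> delegate, Mapper<A, B> mapper) {
            B data = delegate.value;
            return data == null ? null : mapper.unMap().apply(data); // unmap only when present
        }

        static <A, B> void set(Holder<B> delegate, Mapper<A, B> mapper, A value) {
            delegate.value = value == null ? null : mapper.map().apply(value); // map only when present
        }

        public static void main(String[] args) {
            var mapper = new Mapper<Integer, String>(String::valueOf, Integer::parseInt);
            var holder = new Holder<String>();
            set(holder, mapper, 42);
            System.out.println(get(holder, mapper)); // 42
        }
    }
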
unMap(prev) : null; } @Override - public Mono clearAndGetStatus() { + public boolean clearAndGetStatus() { return serializedSingle.clearAndGetStatus(); } @Override - public Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { + public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { return serializedSingle.leavesCount(snapshot, fast); } @Override - public Mono isEmpty(@Nullable CompositeSnapshot snapshot) { + public boolean isEmpty(@Nullable CompositeSnapshot snapshot) { return serializedSingle.isEmpty(snapshot); } @@ -141,7 +119,7 @@ public class DatabaseSingleMapped extends SimpleResource implements Databa } @Override - public Flux badBlocks() { + public Stream badBlocks() { return this.serializedSingle.badBlocks(); } @@ -152,9 +130,4 @@ public class DatabaseSingleMapped extends SimpleResource implements Databa private B map(A bytes) throws SerializationException { return mapper.map(bytes); } - - @Override - protected void onClose() { - serializedSingle.close(); - } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleton.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleton.java index 1eda5cc..ddfc449 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleton.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseSingleton.java @@ -1,9 +1,8 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; -import io.netty5.buffer.internal.ResourceSupport; +import it.cavallium.dbengine.buffers.Buf; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.Delta; @@ -14,23 +13,18 @@ import it.cavallium.dbengine.database.UpdateReturnMode; import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.SerializationFunction; import it.cavallium.dbengine.database.serialization.Serializer; -import it.cavallium.dbengine.utils.InternalMonoUtils; -import it.cavallium.dbengine.utils.SimpleResource; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.publisher.SynchronousSink; -public class DatabaseSingleton extends SimpleResource implements DatabaseStageEntry { +public class DatabaseSingleton implements DatabaseStageEntry { private static final Logger LOG = LogManager.getLogger(DatabaseSingleton.class); private final LLSingleton singleton; private final Serializer serializer; - @SuppressWarnings({"unchecked", "rawtypes"}) public DatabaseSingleton(LLSingleton singleton, Serializer serializer) { this.singleton = singleton; this.serializer = serializer; @@ -44,13 +38,9 @@ public class DatabaseSingleton extends SimpleResource implements DatabaseStag } } - private U deserializeValue(Buffer value) { + private U deserializeValue(Buf value) { try { - U deserializedValue; - try (value) { - deserializedValue = serializer.deserialize(value); - } - return deserializedValue; + return serializer.deserialize(BufDataInput.create(value)); } catch (IndexOutOfBoundsException ex) { var exMessage = ex.getMessage(); if (exMessage != null && 
exMessage.contains("read 0 to 0, write 0 to ")) { @@ -63,124 +53,96 @@ public class DatabaseSingleton extends SimpleResource implements DatabaseStag } } - private Buffer serializeValue(U value) throws SerializationException { + private Buf serializeValue(U value) throws SerializationException { var valSizeHint = serializer.getSerializedSizeHint(); if (valSizeHint == -1) valSizeHint = 128; - var valBuf = singleton.getAllocator().allocate(valSizeHint); - try { - serializer.serialize(value, valBuf); - return valBuf; - } catch (Throwable ex) { - valBuf.close(); - throw ex; - } + var valBuf = BufDataOutput.create(valSizeHint); + serializer.serialize(value, valBuf); + return valBuf.asList(); } @Override - public Mono get(@Nullable CompositeSnapshot snapshot) { - var resultMono = singleton.get(resolveSnapshot(snapshot)); - return Mono.usingWhen(resultMono, - result -> Mono.fromSupplier(() -> this.deserializeValue(result)), - LLUtils::finalizeResource - ); + public U get(@Nullable CompositeSnapshot snapshot) { + Buf result = singleton.get(resolveSnapshot(snapshot)); + return this.deserializeValue(result); } @Override - public Mono set(U value) { - return singleton.set(Mono.fromCallable(() -> serializeValue(value))); + public void set(U value) { + singleton.set(serializeValue(value)); } @Override - public Mono setAndGetPrevious(U value) { - var resultMono = Flux - .concat(singleton.get(null), - singleton.set(Mono.fromCallable(() -> serializeValue(value))).as(InternalMonoUtils::toAny) - ) - .last(); - return Mono.usingWhen(resultMono, - result -> Mono.fromSupplier(() -> this.deserializeValue(result)), - LLUtils::finalizeResource - ); + public U setAndGetPrevious(U value) { + var prev = singleton.get(null); + singleton.set(serializeValue(value)); + return this.deserializeValue(prev); } @Override - public Mono update(SerializationFunction<@Nullable U, @Nullable U> updater, + public U update(SerializationFunction<@Nullable U, @Nullable U> updater, UpdateReturnMode updateReturnMode) { - var resultMono = singleton + Buf resultBuf = singleton .update((oldValueSer) -> { - try (oldValueSer) { - U result; - if (oldValueSer == null) { - result = updater.apply(null); - } else { - U deserializedValue = serializer.deserialize(oldValueSer); - result = updater.apply(deserializedValue); - } - if (result == null) { - return null; - } else { - return serializeValue(result); - } + U result; + if (oldValueSer == null) { + result = updater.apply(null); + } else { + U deserializedValue = serializer.deserialize(BufDataInput.create(oldValueSer)); + result = updater.apply(deserializedValue); + } + if (result == null) { + return null; + } else { + return serializeValue(result); } }, updateReturnMode); - return Mono.usingWhen(resultMono, - result -> Mono.fromSupplier(() -> this.deserializeValue(result)), - LLUtils::finalizeResource - ); + return this.deserializeValue(resultBuf); } @Override - public Mono> updateAndGetDelta(SerializationFunction<@Nullable U, @Nullable U> updater) { - return singleton - .updateAndGetDelta((oldValueSer) -> { - try (oldValueSer) { - U result; - if (oldValueSer == null) { - result = updater.apply(null); - } else { - U deserializedValue = serializer.deserialize(oldValueSer); - result = updater.apply(deserializedValue); - } - if (result == null) { - return null; - } else { - return serializeValue(result); - } - } - }).transform(mono -> LLUtils.mapLLDelta(mono, serialized -> serializer.deserialize(serialized))); + public Delta updateAndGetDelta(SerializationFunction<@Nullable U, @Nullable U> updater) 
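
The new serializeValue above pre-sizes its BufDataOutput from the serializer's size hint and falls back to 128 bytes when the hint is -1 (unknown). The same policy over standard-library types, with ByteArrayOutputStream standing in for BufDataOutput and writeUTF for the real serializer:

    import java.io.ByteArrayOutputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.io.UncheckedIOException;

    public final class SizeHintSketch {

        // Pre-size the output buffer from a hint; default when the hint is unknown (-1).
        static byte[] serialize(int sizeHint, String value) {
            int capacity = sizeHint == -1 ? 128 : sizeHint;
            var bytes = new ByteArrayOutputStream(capacity);
            try (var out = new DataOutputStream(bytes)) {
                out.writeUTF(value); // stand-in for serializer.serialize(value, out)
            } catch (IOException e) {
                throw new UncheckedIOException(e);
            }
            return bytes.toByteArray();
        }

        public static void main(String[] args) {
            System.out.println(serialize(-1, "hello").length); // 7: 2-byte length + 5 bytes of UTF-8
        }
    }
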
{ + var mono = singleton.updateAndGetDelta((oldValueSer) -> { + U result; + if (oldValueSer == null) { + result = updater.apply(null); + } else { + U deserializedValue = serializer.deserialize(BufDataInput.create(oldValueSer)); + result = updater.apply(deserializedValue); + } + if (result == null) { + return null; + } else { + return serializeValue(result); + } + }); + return LLUtils.mapLLDelta(mono, serialized -> serializer.deserialize(BufDataInput.create(serialized))); } @Override - public Mono clear() { - return singleton.set(Mono.empty()); + public void clear() { + singleton.set(null); } @Override - public Mono clearAndGetPrevious() { - var resultMono = Flux.concat(singleton.get(null), singleton.set(Mono.empty()).as(InternalMonoUtils::toAny)).last(); - return Mono.usingWhen(resultMono, - result -> Mono.fromSupplier(() -> this.deserializeValue(result)), - LLUtils::finalizeResource - ); + public U clearAndGetPrevious() { + var result = singleton.get(null); + singleton.set(null); + return this.deserializeValue(result); } @Override - public Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { - return singleton.get(null).map(unused -> 1L).defaultIfEmpty(0L); + public long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { + return singleton.get(null) != null ? 1L : 0L; } @Override - public Mono isEmpty(@Nullable CompositeSnapshot snapshot) { - return singleton.get(null).map(t -> false).defaultIfEmpty(true); + public boolean isEmpty(@Nullable CompositeSnapshot snapshot) { + return singleton.get(null) == null; } @Override - public Flux badBlocks() { - return Flux.empty(); - } - - @Override - protected void onClose() { - + public Stream badBlocks() { + return Stream.empty(); } } \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStage.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStage.java index 02d6f22..dafa877 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStage.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStage.java @@ -1,64 +1,56 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.util.Resource; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.Delta; import it.cavallium.dbengine.database.LLUtils; -import it.cavallium.dbengine.database.SafeCloseable; import it.cavallium.dbengine.database.UpdateReturnMode; import it.cavallium.dbengine.database.serialization.SerializationFunction; import java.util.Objects; +import java.util.stream.Stream; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -public interface DatabaseStage extends DatabaseStageWithEntry, SafeCloseable { +public interface DatabaseStage extends DatabaseStageWithEntry { - Mono get(@Nullable CompositeSnapshot snapshot); + @Nullable T get(@Nullable CompositeSnapshot snapshot); - default Mono getOrDefault(@Nullable CompositeSnapshot snapshot, - Mono defaultValue, - boolean existsAlmostCertainly) { - return get(snapshot).switchIfEmpty(defaultValue).single(); + default T getOrDefault(@Nullable CompositeSnapshot snapshot, T defaultValue, boolean existsAlmostCertainly) { + return Objects.requireNonNullElse(get(snapshot), defaultValue); } - default Mono getOrDefault(@Nullable CompositeSnapshot snapshot, Mono defaultValue) { + default T getOrDefault(@Nullable CompositeSnapshot snapshot, T 
defaultValue) { return getOrDefault(snapshot, defaultValue, false); } - default Mono set(T value) { - return this - .setAndGetChanged(value) - .then(); + default void set(@Nullable T value) { + this.setAndGetChanged(value); } - Mono setAndGetPrevious(T value); + @Nullable T setAndGetPrevious(@Nullable T value); - default Mono setAndGetChanged(T value) { - return this - .setAndGetPrevious(value) - .map(oldValue -> !Objects.equals(oldValue, value)) - .switchIfEmpty(Mono.fromSupplier(() -> value != null)); + default boolean setAndGetChanged(@Nullable T value) { + T oldValue = this.setAndGetPrevious(value); + if (oldValue != null) { + return !Objects.equals(oldValue, value); + } else { + return value != null; + } } - default Mono update(SerializationFunction<@Nullable T, @Nullable T> updater, - UpdateReturnMode updateReturnMode) { - return this - .updateAndGetDelta(updater) - .transform(prev -> LLUtils.resolveDelta(prev, updateReturnMode)); + default @Nullable T update(SerializationFunction<@Nullable T, @Nullable T> updater, UpdateReturnMode updateReturnMode) { + return LLUtils.resolveDelta(this.updateAndGetDelta(updater), updateReturnMode); } - Mono> updateAndGetDelta(SerializationFunction<@Nullable T, @Nullable T> updater); + Delta updateAndGetDelta(SerializationFunction<@Nullable T, @Nullable T> updater); - default Mono clear() { - return clearAndGetStatus().then(); + default void clear() { + clearAndGetStatus(); } - Mono clearAndGetPrevious(); + @Nullable T clearAndGetPrevious(); - default Mono clearAndGetStatus() { - return clearAndGetPrevious().map(Objects::nonNull).defaultIfEmpty(false); + default boolean clearAndGetStatus() { + return clearAndGetPrevious() != null; } /** @@ -66,11 +58,11 @@ public interface DatabaseStage extends DatabaseStageWithEntry, SafeCloseab * If it's a nested collection the count will include all the children recursively * @param fast true to return an approximate value */ - Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast); + long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast); - default Mono isEmpty(@Nullable CompositeSnapshot snapshot) { - return leavesCount(snapshot, false).map(size -> size <= 0); + default boolean isEmpty(@Nullable CompositeSnapshot snapshot) { + return leavesCount(snapshot, false) <= 0; } - Flux badBlocks(); + Stream badBlocks(); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageEntry.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageEntry.java index 2756408..b343f71 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageEntry.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageEntry.java @@ -1,9 +1,5 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.util.Resource; -import it.cavallium.dbengine.client.BadBlock; -import reactor.core.publisher.Flux; - public interface DatabaseStageEntry extends DatabaseStage { @Override diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageMap.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageMap.java index c236c8e..5f6a901 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageMap.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageMap.java @@ -1,5 +1,7 @@ package it.cavallium.dbengine.database.collections; +import static it.cavallium.dbengine.database.LLUtils.consume; + import 
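
The default setAndGetChanged above compresses the old map/switchIfEmpty chain into two branches, and the (old == null, new == null) case deliberately reports "unchanged". A worked table of its outcomes over plain values (hypothetical helper with the same logic as the interface default):

    import java.util.Objects;

    public final class SetAndGetChangedSketch {

        // Mirror of DatabaseStage#setAndGetChanged's decision logic.
        static <T> boolean changed(T oldValue, T newValue) {
            if (oldValue != null) {
                return !Objects.equals(oldValue, newValue);
            } else {
                return newValue != null;
            }
        }

        public static void main(String[] args) {
            System.out.println(changed(null, null)); // false: was absent, still absent
            System.out.println(changed(null, "a"));  // true:  value created
            System.out.println(changed("a", "a"));   // false: same value rewritten
            System.out.println(changed("a", "b"));   // true:  value replaced
        }
    }
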
it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.Delta; import it.cavallium.dbengine.database.LLUtils; @@ -7,7 +9,6 @@ import it.cavallium.dbengine.database.SubStageEntry; import it.cavallium.dbengine.database.UpdateMode; import it.cavallium.dbengine.database.UpdateReturnMode; import it.cavallium.dbengine.database.serialization.KVSerializationFunction; -import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.SerializationFunction; import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap; import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap; @@ -16,261 +17,211 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import java.util.Optional; +import java.util.function.Consumer; import java.util.function.Function; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.util.function.Tuple2; -import reactor.util.function.Tuples; @SuppressWarnings("unused") -public interface DatabaseStageMap> extends - DatabaseStageEntry> { +public interface DatabaseStageMap> extends DatabaseStageEntry> { - Mono at(@Nullable CompositeSnapshot snapshot, T key); + @NotNull US at(@Nullable CompositeSnapshot snapshot, T key); - default Mono containsKey(@Nullable CompositeSnapshot snapshot, T key) { - return Mono.usingWhen(this.at(snapshot, key), - stage -> stage.isEmpty(snapshot).map(empty -> !empty), - LLUtils::finalizeResource - ); + default boolean containsKey(@Nullable CompositeSnapshot snapshot, T key) { + return !this.at(snapshot, key).isEmpty(snapshot); } - default Mono getValue(@Nullable CompositeSnapshot snapshot, T key) { - return Mono.usingWhen(this.at(snapshot, key), - stage -> stage.get(snapshot), - LLUtils::finalizeResource - ); + default @Nullable U getValue(@Nullable CompositeSnapshot snapshot, T key) { + return this.at(snapshot, key).get(snapshot); } - default Mono getValueOrDefault(@Nullable CompositeSnapshot snapshot, T key, Mono defaultValue) { - return getValue(snapshot, key).switchIfEmpty(defaultValue).single(); + default U getValueOrDefault(@Nullable CompositeSnapshot snapshot, T key, U defaultValue) { + return Objects.requireNonNullElse(getValue(snapshot, key), defaultValue); } - default Mono putValue(T key, U value) { - return Mono.usingWhen(at(null, key).single(), stage -> stage.set(value), LLUtils::finalizeResource); + default U getValueOrDefault(@Nullable CompositeSnapshot snapshot, T key, Supplier defaultValue) { + return Objects.requireNonNullElseGet(getValue(snapshot, key), defaultValue); + } + + default void putValue(T key, U value) { + at(null, key).set(value); } UpdateMode getUpdateMode(); - default Mono updateValue(T key, + default U updateValue(T key, UpdateReturnMode updateReturnMode, SerializationFunction<@Nullable U, @Nullable U> updater) { - return Mono.usingWhen(at(null, key).single(), - stage -> stage.update(updater, updateReturnMode), - LLUtils::finalizeResource - ); + return at(null, key).update(updater, updateReturnMode); } - default Flux updateMulti(Flux keys, KVSerializationFunction updater) { - return keys.flatMapSequential(key -> this.updateValue(key, prevValue -> updater.apply(key, prevValue))); + default Stream updateMulti(Stream keys, KVSerializationFunction updater) { + return 
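
updateMulti and getMulti below switch from flatMapSequential to parallel streams, which appears to rely on a standard Stream property: map() on an ordered stream preserves encounter order even when execution is parallel, so results still line up with the input keys. A quick demonstration:

    import java.util.List;
    import java.util.stream.Stream;

    public final class OrderedParallelSketch {

        public static void main(String[] args) {
            List<Integer> results = Stream.of("a", "bb", "ccc")
                    .parallel()
                    .map(String::length) // per-element work runs concurrently...
                    .toList();           // ...but toList() respects encounter order
            System.out.println(results); // [1, 2, 3] every time
        }
    }
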
keys.parallel().map(key -> this.updateValue(key, prevValue -> updater.apply(key, prevValue))); } - default Mono updateValue(T key, SerializationFunction<@Nullable U, @Nullable U> updater) { - return updateValueAndGetDelta(key, updater).map(delta -> LLUtils.isDeltaChanged(delta)).single(); + default boolean updateValue(T key, SerializationFunction<@Nullable U, @Nullable U> updater) { + return LLUtils.isDeltaChanged(updateValueAndGetDelta(key, updater)); } - default Mono> updateValueAndGetDelta(T key, - SerializationFunction<@Nullable U, @Nullable U> updater) { - var stageMono = this.at(null, key).single(); - return stageMono.flatMap(stage -> stage - .updateAndGetDelta(updater) - .doFinally(s -> stage.close())); + default Delta updateValueAndGetDelta(T key, SerializationFunction<@Nullable U, @Nullable U> updater) { + return this.at(null, key).updateAndGetDelta(updater); } - default Mono putValueAndGetPrevious(T key, U value) { - return Mono.usingWhen(at(null, key).single(), - stage -> stage.setAndGetPrevious(value), - LLUtils::finalizeResource - ); + default @Nullable U putValueAndGetPrevious(T key, @Nullable U value) { + return at(null, key).setAndGetPrevious(value); } /** * @return true if the key was associated with any value, false if the key didn't exist. */ - default Mono putValueAndGetChanged(T key, U value) { - return Mono - .usingWhen(at(null, key).single(), stage -> stage.setAndGetChanged(value), LLUtils::finalizeResource) - .single(); + default boolean putValueAndGetChanged(T key, @Nullable U value) { + return at(null, key).setAndGetChanged(value); } - default Mono remove(T key) { - return removeAndGetStatus(key).then(); + default void remove(T key) { + removeAndGetStatus(key); } - default Mono removeAndGetPrevious(T key) { - return Mono.usingWhen(at(null, key), us -> us.clearAndGetPrevious(), LLUtils::finalizeResource); + default @Nullable U removeAndGetPrevious(T key) { + return at(null, key).clearAndGetPrevious(); } - default Mono removeAndGetStatus(T key) { - return removeAndGetPrevious(key).map(o -> true).defaultIfEmpty(false); + default boolean removeAndGetStatus(T key) { + return removeAndGetPrevious(key) != null; } /** * GetMulti must return the elements in sequence! 
*/ - default Flux> getMulti(@Nullable CompositeSnapshot snapshot, Flux keys) { - return keys.flatMapSequential(key -> this - .getValue(snapshot, key) - .map(Optional::of) - .defaultIfEmpty(Optional.empty()) - ); + default Stream> getMulti(@Nullable CompositeSnapshot snapshot, Stream keys) { + return keys.parallel().map(key -> Optional.ofNullable(this.getValue(snapshot, key))); } - default Mono putMulti(Flux> entries) { - return entries.flatMap(entry -> this.putValue(entry.getKey(), entry.getValue())).then(); + default void putMulti(Stream> entries) { + try (var stream = entries.parallel()) { + stream.forEach(entry -> this.putValue(entry.getKey(), entry.getValue())); + } } - Flux> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange); + Stream> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange); - default Flux> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) { - return this - .getAllStages(snapshot, smallRange) - .flatMapSequential(stage -> stage - .getValue() - .get(snapshot) - .map(value -> Map.entry(stage.getKey(), value)) - .doFinally(s -> stage.getValue().close()) - ); + default Stream> getAllValues(@Nullable CompositeSnapshot snapshot, boolean smallRange) { + return this.getAllStages(snapshot, smallRange).parallel().mapMulti((stage, mapper) -> { + var val = stage.getValue().get(snapshot); + if (val != null) { + mapper.accept(Map.entry(stage.getKey(), val)); + } + }); } - default Mono setAllValues(Flux> entries) { - return setAllValuesAndGetPrevious(entries).then(); + default void setAllValues(Stream> entries) { + consume(setAllValuesAndGetPrevious(entries)); } - Flux> setAllValuesAndGetPrevious(Flux> entries); + Stream> setAllValuesAndGetPrevious(Stream> entries); - default Mono clear() { - return setAllValues(Flux.empty()); + default void clear() { + setAllValues(Stream.empty()); } - default Mono replaceAllValues(boolean canKeysChange, - Function, Mono>> entriesReplacer, + default void replaceAllValues(boolean canKeysChange, + Function, @NotNull Entry> entriesReplacer, boolean smallRange) { if (canKeysChange) { - return this.setAllValues(this.getAllValues(null, smallRange).flatMap(entriesReplacer)).then(); + this.setAllValues(this.getAllValues(null, smallRange).map(entriesReplacer)); } else { - return this - .getAllValues(null, smallRange) - .flatMap(entriesReplacer) - .flatMap(replacedEntry -> this - .at(null, replacedEntry.getKey()) - .flatMap(stage -> stage - .set(replacedEntry.getValue()) - .doFinally(s -> stage.close()) - ) - ) - .then(); + this.getAllValues(null, smallRange).map(entriesReplacer) + .forEach(replacedEntry -> this.at(null, replacedEntry.getKey()).set(replacedEntry.getValue())); } } - default Mono replaceAll(Function, Mono> entriesReplacer) { - return this - .getAllStages(null, false) - .flatMap(stage -> entriesReplacer.apply(stage) - .doFinally(s -> stage.getValue().close()) - ) - .then(); + default void replaceAll(Consumer> entriesReplacer) { + this.getAllStages(null, false).forEach(entriesReplacer); } @Override - default Mono> setAndGetPrevious(Object2ObjectSortedMap value) { - return this - .setAllValuesAndGetPrevious(Flux.fromIterable(value.entrySet())) - .collectMap(Entry::getKey, Entry::getValue, Object2ObjectLinkedOpenHashMap::new) - .map(map -> (Object2ObjectSortedMap) map) - .filter(map -> !map.isEmpty()); + default Object2ObjectSortedMap setAndGetPrevious(Object2ObjectSortedMap value) { + Object2ObjectSortedMap map; + if (value == null) { + map = this.clearAndGetPrevious(); + } else { + map = 
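
getAllValues above uses Stream.mapMulti as a fused map-plus-filter: each stage is looked up once, and an entry is emitted only when the value is non-null (i.e. present). The same idiom against a plain map:

    import java.util.Map;
    import java.util.stream.Stream;

    public final class MapMultiSketch {

        public static void main(String[] args) {
            Map<String, Integer> db = Map.of("a", 1, "c", 3);
            Stream.of("a", "b", "c")
                    .<Map.Entry<String, Integer>>mapMulti((key, downstream) -> {
                        Integer val = db.get(key); // null when absent
                        if (val != null) {
                            downstream.accept(Map.entry(key, val));
                        }
                    })
                    .forEach(System.out::println); // a=1, c=3 ("b" is skipped)
        }
    }
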
this + .setAllValuesAndGetPrevious(value.entrySet().stream()) + .collect(Collectors.toMap(Entry::getKey, Entry::getValue, (a, b) -> a, Object2ObjectLinkedOpenHashMap::new)); + } + return map; } @Override - default Mono setAndGetChanged(Object2ObjectSortedMap value) { - return this - .setAndGetPrevious(value) - .map(oldValue -> !Objects.equals(oldValue, value.isEmpty() ? null : value)) - .switchIfEmpty(Mono.fromSupplier(() -> !value.isEmpty())); + default boolean setAndGetChanged(@Nullable Object2ObjectSortedMap value) { + if (value != null && value.isEmpty()) { + value = null; + } + var prev = this.setAndGetPrevious(value); + if (prev == null) { + return value != null; + } else { + return !Objects.equals(prev, value); + } } @Override - default Mono>> updateAndGetDelta(SerializationFunction<@Nullable Object2ObjectSortedMap, @Nullable Object2ObjectSortedMap> updater) { + default Delta> updateAndGetDelta( + SerializationFunction<@Nullable Object2ObjectSortedMap, @Nullable Object2ObjectSortedMap> updater) { var updateMode = this.getUpdateMode(); if (updateMode == UpdateMode.ALLOW_UNSAFE) { - return this + Object2ObjectSortedMap v = this .getAllValues(null, true) - .collectMap(Entry::getKey, Entry::getValue, Object2ObjectLinkedOpenHashMap::new) - .map(map -> (Object2ObjectSortedMap) map) - .single() - .>, Optional>>>handle((v, sink) -> { - if (v.isEmpty()) { - v = null; - } - try { - var result = updater.apply(v); - if (result != null && result.isEmpty()) { - result = null; - } - sink.next(Tuples.of(Optional.ofNullable(v), Optional.ofNullable(result))); - } catch (SerializationException ex) { - sink.error(ex); - } - }) - .flatMap(result -> Mono - .justOrEmpty(result.getT2()) - .flatMap(values -> this.setAllValues(Flux.fromIterable(values.entrySet()))) - .thenReturn(new Delta<>(result.getT1().orElse(null), result.getT2().orElse(null))) - ); + .collect(Collectors.toMap(Entry::getKey, Entry::getValue, (a, b) -> a, Object2ObjectLinkedOpenHashMap::new)); + + if (v.isEmpty()) { + v = null; + } + + var result = updater.apply(v); + if (result != null && result.isEmpty()) { + result = null; + } + this.setAllValues(result != null ? 
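
The ALLOW_UNSAFE branch of updateAndGetDelta here is a read-modify-write in three separate steps: materialize the whole map, apply the updater to that snapshot, then write the result back. Nothing makes the sequence atomic, which is presumably why the mode is called unsafe and why plain ALLOW throws instead. The same strategy over an in-memory map, with a hypothetical Delta record mirroring the library's:

    import java.util.HashMap;
    import java.util.Map;
    import java.util.function.UnaryOperator;

    public final class UnsafeMapUpdateSketch {

        record Delta<T>(T previous, T current) {}

        static Delta<Map<String, Integer>> updateAndGetDelta(Map<String, Integer> store,
                UnaryOperator<Map<String, Integer>> updater) {
            Map<String, Integer> previous = store.isEmpty() ? null : new HashMap<>(store); // read
            Map<String, Integer> result = updater.apply(previous);                         // modify
            if (result != null && result.isEmpty()) result = null; // empty normalizes to null
            store.clear();                                         // write back
            if (result != null) store.putAll(result);
            return new Delta<>(previous, result);
        }

        public static void main(String[] args) {
            var store = new HashMap<String, Integer>();
            var delta = updateAndGetDelta(store, prev -> Map.of("hits", prev == null ? 1 : prev.get("hits") + 1));
            System.out.println(delta); // Delta[previous=null, current={hits=1}]
        }
    }
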
result.entrySet().stream() : null); + return new Delta<>(v, result); } else if (updateMode == UpdateMode.ALLOW) { - return Mono.fromCallable(() -> { - throw new UnsupportedOperationException("Maps can't be updated atomically"); - }); + throw new UnsupportedOperationException("Maps can't be updated atomically"); } else if (updateMode == UpdateMode.DISALLOW) { - return Mono.fromCallable(() -> { - throw new UnsupportedOperationException("Map can't be updated because updates are disabled"); - }); + throw new UnsupportedOperationException("Map can't be updated because updates are disabled"); } else { - return Mono.fromCallable(() -> { - throw new UnsupportedOperationException("Unknown update mode: " + updateMode); - }); + throw new UnsupportedOperationException("Unknown update mode: " + updateMode); } } @Override - default Mono> clearAndGetPrevious() { + default Object2ObjectSortedMap clearAndGetPrevious() { return this.setAndGetPrevious(Object2ObjectSortedMaps.emptyMap()); } @Override - default Mono> get(@Nullable CompositeSnapshot snapshot) { - return this + default Object2ObjectSortedMap get(@Nullable CompositeSnapshot snapshot) { + Object2ObjectSortedMap map = this .getAllValues(snapshot, true) - .collectMap(Entry::getKey, Entry::getValue, Object2ObjectLinkedOpenHashMap::new) - .map(map -> (Object2ObjectSortedMap) map) - .filter(map -> !map.isEmpty()); + .collect(Collectors.toMap(Entry::getKey, Entry::getValue, (a, b) -> a, Object2ObjectLinkedOpenHashMap::new)); + return map.isEmpty() ? null : map; } @Override - default Mono leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { - return this - .getAllStages(snapshot, false) - .doOnNext(stage -> stage.getValue().close()) - .count(); + default long leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) { + return this.getAllStages(snapshot, false).count(); } /** * Value getter doesn't lock data. Please make sure to lock before getting data. 
*/ default ValueGetterBlocking getDbValueGetter(@Nullable CompositeSnapshot snapshot) { - return k -> getValue(snapshot, k).transform(LLUtils::handleDiscard).block(); + return k -> getValue(snapshot, k); } default ValueGetter getAsyncDbValueGetter(@Nullable CompositeSnapshot snapshot) { return k -> getValue(snapshot, k); } - - default ValueTransformer getAsyncDbValueTransformer(@Nullable CompositeSnapshot snapshot) { - return keys -> { - var sharedKeys = keys.publish().refCount(2); - var values = DatabaseStageMap.this.getMulti(snapshot, sharedKeys); - return Flux.zip(sharedKeys, values, Map::entry); - }; - } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageWithEntry.java b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageWithEntry.java index 4503ae4..9ffdb89 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageWithEntry.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/DatabaseStageWithEntry.java @@ -1,9 +1,5 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.util.Resource; -import it.cavallium.dbengine.client.BadBlock; -import reactor.core.publisher.Mono; - public interface DatabaseStageWithEntry { DatabaseStageEntry entry(); diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java index de4e312..fe44243 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetter.java @@ -1,16 +1,12 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.CompositeSnapshot; import it.cavallium.dbengine.database.LLDictionary; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public interface SubStageGetter> { - Mono subStage(LLDictionary dictionary, - @Nullable CompositeSnapshot snapshot, - Mono prefixKey); + US subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, Buf prefixKey); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java index e6b07b8..721b615 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashMap.java @@ -1,19 +1,13 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.util.Resource; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.LLDictionary; -import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.serialization.Serializer; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap; -import java.util.Map; import java.util.function.Function; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; @SuppressWarnings("unused") public class SubStageGetterHashMap implements @@ -35,16 +29,16 @@ public class SubStageGetterHashMap implements } @Override - public Mono> 
subStage(LLDictionary dictionary, + public DatabaseMapDictionaryHashed subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKeyMono) { - return prefixKeyMono.map(prefixKey -> DatabaseMapDictionaryHashed.tail(dictionary, - BufSupplier.ofOwned(prefixKey), + Buf prefixKey) { + return DatabaseMapDictionaryHashed.tail(dictionary, + prefixKey, keySerializer, valueSerializer, keyHashFunction, keyHashSerializer - )); + ); } public int getKeyHashBinaryLength() { diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java index 8ae917c..f860ea2 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterHashSet.java @@ -1,22 +1,16 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.util.Resource; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.LLDictionary; -import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; import it.cavallium.dbengine.database.serialization.Serializer; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap; -import java.util.Map; import java.util.function.Function; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; -@SuppressWarnings({"unused", "ClassCanBeRecord"}) +@SuppressWarnings({"unused"}) public class SubStageGetterHashSet implements SubStageGetter, DatabaseSetDictionaryHashed> { @@ -33,15 +27,15 @@ public class SubStageGetterHashSet implements } @Override - public Mono> subStage(LLDictionary dictionary, + public DatabaseSetDictionaryHashed subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKeyMono) { - return prefixKeyMono.map(prefixKey -> DatabaseSetDictionaryHashed.tail(dictionary, - BufSupplier.ofOwned(prefixKey), + Buf prefixKey) { + return DatabaseSetDictionaryHashed.tail(dictionary, + prefixKey, keySerializer, keyHashFunction, keyHashSerializer - )); + ); } public int getKeyHashBinaryLength() { diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java index 578381d..17ae587 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMap.java @@ -1,14 +1,12 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.serialization.Serializer; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public class SubStageGetterMap implements SubStageGetter, DatabaseMapDictionary> { @@ -23,14 +21,10 @@ public class 
SubStageGetterMap implements } @Override - public Mono> subStage(LLDictionary dictionary, + public DatabaseMapDictionary subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKeyMono) { - return prefixKeyMono.map(prefixKey -> DatabaseMapDictionary.tail(dictionary, - BufSupplier.ofOwned(prefixKey), - keySerializer, - valueSerializer - )); + Buf prefixKey) { + return DatabaseMapDictionary.tail(dictionary, prefixKey, keySerializer, valueSerializer); } public int getKeyBinaryLength() { diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java index 7d9c187..e0b6894 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterMapDeep.java @@ -1,17 +1,11 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.util.Resource; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.LLDictionary; -import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap; -import java.util.Map; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public class SubStageGetterMapDeep> implements SubStageGetter, DatabaseMapDictionaryDeep> { @@ -41,15 +35,15 @@ public class SubStageGetterMapDeep> implements } @Override - public Mono> subStage(LLDictionary dictionary, + public DatabaseMapDictionaryDeep subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKeyMono) { - return prefixKeyMono.map(prefixKey -> DatabaseMapDictionaryDeep.deepIntermediate(dictionary, - BufSupplier.ofOwned(prefixKey), + Buf prefixKey) { + return DatabaseMapDictionaryDeep.deepIntermediate(dictionary, + prefixKey, keySerializer, subStageGetter, keyExtLength - )); + ); } public int getKeyBinaryLength() { diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java index 7ae81be..4b4b315 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSet.java @@ -1,17 +1,12 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.util.Resource; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing; import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength; import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap; -import java.util.Map; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public class SubStageGetterSet implements SubStageGetter, DatabaseSetDictionary> { @@ -23,13 +18,10 @@ public class SubStageGetterSet implements } @Override - public Mono> subStage(LLDictionary dictionary, + public DatabaseSetDictionary 
subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono prefixKeyMono) { - return prefixKeyMono.map(prefixKey -> DatabaseSetDictionary.tail(dictionary, - BufSupplier.ofOwned(prefixKey), - keySerializer - )); + Buf prefixKey) { + return DatabaseSetDictionary.tail(dictionary, prefixKey, keySerializer); } public int getKeyBinaryLength() { diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java index 0b90a57..ee5079a 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingle.java @@ -1,13 +1,10 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.CompositeSnapshot; -import it.cavallium.dbengine.database.BufSupplier; import it.cavallium.dbengine.database.LLDictionary; import it.cavallium.dbengine.database.serialization.Serializer; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public class SubStageGetterSingle implements SubStageGetter> { @@ -18,13 +15,10 @@ public class SubStageGetterSingle implements SubStageGetter> subStage(LLDictionary dictionary, + public DatabaseStageEntry subStage(LLDictionary dictionary, @Nullable CompositeSnapshot snapshot, - Mono keyPrefixMono) { - return keyPrefixMono.map(keyPrefix -> new DatabaseMapSingle<>(dictionary, - BufSupplier.ofOwned(keyPrefix), - serializer - )); + Buf keyPrefix) { + return new DatabaseMapSingle<>(dictionary, keyPrefix, serializer); } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java index e599713..e7122e6 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/SubStageGetterSingleBytes.java @@ -1,12 +1,11 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.serialization.Serializer; -public class SubStageGetterSingleBytes extends SubStageGetterSingle> { +public class SubStageGetterSingleBytes extends SubStageGetterSingle { public SubStageGetterSingleBytes() { - super(Serializer.NOOP_SEND_SERIALIZER); + super(Serializer.NOOP_SERIALIZER); } } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/ValueGetter.java b/src/main/java/it/cavallium/dbengine/database/collections/ValueGetter.java index e7376b9..db10df8 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/ValueGetter.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/ValueGetter.java @@ -1,11 +1,11 @@ package it.cavallium.dbengine.database.collections; -import reactor.core.publisher.Mono; +import org.jetbrains.annotations.Nullable; public interface ValueGetter { /** * Can return Mono error IOException */ - Mono get(KEY key); + @Nullable VALUE get(KEY key); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/ValueGetterBlocking.java b/src/main/java/it/cavallium/dbengine/database/collections/ValueGetterBlocking.java index 8720b98..b2aedbe 100644 --- 
a/src/main/java/it/cavallium/dbengine/database/collections/ValueGetterBlocking.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/ValueGetterBlocking.java @@ -4,5 +4,5 @@ import java.io.IOException; public interface ValueGetterBlocking { - VALUE get(KEY key) throws IOException; + VALUE get(KEY key); } diff --git a/src/main/java/it/cavallium/dbengine/database/collections/ValueTransformer.java b/src/main/java/it/cavallium/dbengine/database/collections/ValueTransformer.java deleted file mode 100644 index ea2a074..0000000 --- a/src/main/java/it/cavallium/dbengine/database/collections/ValueTransformer.java +++ /dev/null @@ -1,16 +0,0 @@ -package it.cavallium.dbengine.database.collections; - -import java.util.Map.Entry; -import java.util.Optional; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.util.function.Tuple2; -import reactor.util.function.Tuple3; - -public interface ValueTransformer { - - /** - * Can return Flux error IOException - */ - Flux>> transform(Flux keys); -} diff --git a/src/main/java/it/cavallium/dbengine/database/collections/ValueWithHashSerializer.java b/src/main/java/it/cavallium/dbengine/database/collections/ValueWithHashSerializer.java index 6fe90f2..05d0373 100644 --- a/src/main/java/it/cavallium/dbengine/database/collections/ValueWithHashSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/ValueWithHashSerializer.java @@ -1,17 +1,13 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.buffer.CompositeBuffer; -import io.netty5.util.Send; -import it.cavallium.dbengine.database.LLUtils; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.Serializer; import java.util.Map; import java.util.Map.Entry; import java.util.Objects; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; class ValueWithHashSerializer implements Serializer> { @@ -26,17 +22,17 @@ class ValueWithHashSerializer implements Serializer> { } @Override - public @NotNull Entry deserialize(@NotNull Buffer serialized) throws SerializationException { - Objects.requireNonNull(serialized); - X deserializedKey = keySuffixSerializer.deserialize(serialized); - Y deserializedValue = valueSerializer.deserialize(serialized); + public @NotNull Entry deserialize(@NotNull BufDataInput in) throws SerializationException { + Objects.requireNonNull(in); + X deserializedKey = keySuffixSerializer.deserialize(in); + Y deserializedValue = valueSerializer.deserialize(in); return Map.entry(deserializedKey, deserializedValue); } @Override - public void serialize(@NotNull Entry deserialized, Buffer output) throws SerializationException { - keySuffixSerializer.serialize(deserialized.getKey(), output); - valueSerializer.serialize(deserialized.getValue(), output); + public void serialize(@NotNull Entry deserialized, BufDataOutput out) throws SerializationException { + keySuffixSerializer.serialize(deserialized.getKey(), out); + valueSerializer.serialize(deserialized.getValue(), out); } @Override diff --git a/src/main/java/it/cavallium/dbengine/database/collections/ValuesSetSerializer.java b/src/main/java/it/cavallium/dbengine/database/collections/ValuesSetSerializer.java index 9fc4443..2f3f28b 100644 --- 
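
ValueWithHashSerializer above now composes its key and value serializers over BufDataInput/BufDataOutput, which expose DataInput/DataOutput-style primitives (readInt, writeInt and available appear in this diff). The same composite layout, sketched with plain java.io streams so it compiles standalone; the Integer/String element types are illustrative only:

import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.AbstractMap.SimpleImmutableEntry;
import java.util.Map.Entry;

final class EntryCodec {
    // Key bytes immediately followed by value bytes; no separator is needed
    // because each side's codec knows its own length.
    static void serialize(Entry<Integer, String> entry, DataOutputStream out) throws IOException {
        out.writeInt(entry.getKey());   // stands in for keySuffixSerializer.serialize(key, out)
        out.writeUTF(entry.getValue()); // stands in for valueSerializer.serialize(value, out)
    }

    static Entry<Integer, String> deserialize(DataInputStream in) throws IOException {
        int key = in.readInt();         // stands in for keySuffixSerializer.deserialize(in)
        String value = in.readUTF();    // stands in for valueSerializer.deserialize(in)
        return new SimpleImmutableEntry<>(key, value);
    }
}
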
a/src/main/java/it/cavallium/dbengine/database/collections/ValuesSetSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/collections/ValuesSetSerializer.java @@ -1,8 +1,7 @@ package it.cavallium.dbengine.database.collections; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import it.cavallium.dbengine.database.serialization.SerializationException; import it.cavallium.dbengine.database.serialization.Serializer; import it.unimi.dsi.fastutil.objects.ObjectArraySet; @@ -11,7 +10,6 @@ import java.util.Objects; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; class ValuesSetSerializer implements Serializer> { @@ -24,17 +22,17 @@ class ValuesSetSerializer implements Serializer> { } @Override - public @NotNull ObjectArraySet deserialize(@NotNull Buffer serialized) throws SerializationException { + public @NotNull ObjectArraySet deserialize(@NotNull BufDataInput in) throws SerializationException { try { - Objects.requireNonNull(serialized); - if (serialized.readableBytes() == 0) { + Objects.requireNonNull(in); + if (in.available() <= 0) { logger.error("Can't deserialize, 0 bytes are readable"); return new ObjectArraySet<>(); } - int entriesLength = serialized.readInt(); + int entriesLength = in.readInt(); ArrayList deserializedElements = new ArrayList<>(entriesLength); for (int i = 0; i < entriesLength; i++) { - var deserializationResult = entrySerializer.deserialize(serialized); + var deserializationResult = entrySerializer.deserialize(in); deserializedElements.add(deserializationResult); } return new ObjectArraySet<>(deserializedElements); @@ -45,10 +43,10 @@ class ValuesSetSerializer implements Serializer> { } @Override - public void serialize(@NotNull ObjectArraySet deserialized, Buffer output) throws SerializationException { - output.writeInt(deserialized.size()); + public void serialize(@NotNull ObjectArraySet deserialized, BufDataOutput out) throws SerializationException { + out.writeInt(deserialized.size()); for (X entry : deserialized) { - entrySerializer.serialize(entry, output); + entrySerializer.serialize(entry, out); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/AbstractRocksDBColumn.java b/src/main/java/it/cavallium/dbengine/database/disk/AbstractRocksDBColumn.java index 2603342..9e594d1 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/AbstractRocksDBColumn.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/AbstractRocksDBColumn.java @@ -1,27 +1,16 @@ package it.cavallium.dbengine.database.disk; -import static io.netty5.buffer.StandardAllocationTypes.OFF_HEAP; -import static it.cavallium.dbengine.database.LLUtils.INITIAL_DIRECT_READ_BYTE_BUF_SIZE_BYTES; -import static it.cavallium.dbengine.database.LLUtils.isReadOnlyDirect; -import static java.lang.Boolean.parseBoolean; -import static java.lang.System.getProperty; import static java.util.Objects.requireNonNull; -import static org.rocksdb.KeyMayExist.KeyMayExistEnum.kExistsWithValue; -import static org.rocksdb.KeyMayExist.KeyMayExistEnum.kExistsWithoutValue; import io.micrometer.core.instrument.Counter; import io.micrometer.core.instrument.DistributionSummary; import io.micrometer.core.instrument.MeterRegistry; import io.micrometer.core.instrument.Timer; -import io.netty5.buffer.Buffer; -import 
io.netty5.buffer.BufferAllocator; -import io.netty5.buffer.BufferComponent; -import io.netty5.buffer.DefaultBufferAllocators; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLRange; import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.RepeatedElementList; import it.cavallium.dbengine.database.disk.rocksdb.RocksIteratorObj; -import it.cavallium.dbengine.database.serialization.SerializationException; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; @@ -37,12 +26,9 @@ import org.rocksdb.AbstractImmutableNativeReference; import org.rocksdb.AbstractSlice; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.CompactRangeOptions; -import org.rocksdb.DirectSlice; import org.rocksdb.FlushOptions; import org.rocksdb.Holder; import org.rocksdb.KeyMayExist; -import org.rocksdb.KeyMayExist.KeyMayExistEnum; -import org.rocksdb.Range; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; @@ -53,23 +39,15 @@ import org.rocksdb.Transaction; import org.rocksdb.TransactionOptions; import org.rocksdb.WriteBatch; import org.rocksdb.WriteOptions; -import reactor.core.scheduler.Schedulers; public sealed abstract class AbstractRocksDBColumn implements RocksDBColumn permits StandardRocksDBColumn, OptimisticRocksDBColumn, PessimisticRocksDBColumn { - - /** - * Default: true - */ - private static final boolean USE_DIRECT_BUFFER_BOUNDS = true; private static final byte[] NO_DATA = new byte[0]; protected static final UpdateAtomicResult RESULT_NOTHING = new UpdateAtomicResultNothing(); protected final Logger logger = LogManager.getLogger(this.getClass()); private final T db; - private final boolean nettyDirect; - private final BufferAllocator alloc; private final ColumnFamilyHandle cfh; protected final MeterRegistry meterRegistry; @@ -103,15 +81,11 @@ public sealed abstract class AbstractRocksDBColumn implements private final DBColumnKeyMayExistGetter keyMayExistGetter; public AbstractRocksDBColumn(T db, - boolean nettyDirect, - BufferAllocator alloc, String databaseName, ColumnFamilyHandle cfh, MeterRegistry meterRegistry, StampedLock closeLock) { this.db = db; - this.nettyDirect = nettyDirect && alloc.getAllocationType() == OFF_HEAP; - this.alloc = alloc; this.cfh = cfh; String columnName; try { @@ -253,18 +227,10 @@ public sealed abstract class AbstractRocksDBColumn implements /** * This method should not modify or move the writerIndex/readerIndex of the key */ - static AbstractSlice setIterateBound(boolean allowNettyDirect, - ReadOptions readOpts, IterateBound boundType, Buffer key) { + static AbstractSlice setIterateBound(ReadOptions readOpts, IterateBound boundType, Buf key) { requireNonNull(key); AbstractSlice slice; - if (allowNettyDirect && USE_DIRECT_BUFFER_BOUNDS && isReadOnlyDirect(key)) { - ByteBuffer keyInternalByteBuffer = ((BufferComponent) key).readableBuffer(); - assert keyInternalByteBuffer.position() == 0; - slice = new DirectSlice(keyInternalByteBuffer, key.readableBytes()); - assert slice.size() == key.readableBytes(); - } else { - slice = new Slice(requireNonNull(LLUtils.toArray(key))); - } + slice = new Slice(requireNonNull(LLUtils.asArray(key))); if (boundType == IterateBound.LOWER) { readOpts.setIterateLowerBound(slice); } else { @@ -282,23 +248,22 @@ public sealed abstract class AbstractRocksDBColumn implements /** * This method should not modify or move the writerIndex/readerIndex of the buffers inside the range */ + @Override 
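
With the netty-direct path gone, setIterateBound above always materializes a bound as a heap byte array wrapped in a Slice; the old DirectSlice fast path no longer exists. A sketch of the underlying RocksDB calls, assuming the caller keeps the returned Slice alive (and eventually closes it) for as long as the ReadOptions is in use:

import org.rocksdb.ReadOptions;
import org.rocksdb.Slice;

final class IterateBounds {
    // Equivalent of setIterateBound(readOpts, IterateBound.LOWER, key) after this change.
    static Slice setLowerBound(ReadOptions readOpts, byte[] lowerBoundKey) {
        Slice slice = new Slice(lowerBoundKey); // heap copy, never a DirectSlice
        readOpts.setIterateLowerBound(slice);
        return slice; // must outlive every read performed through readOpts
    }
}
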
@NotNull - public RocksIteratorObj newRocksIterator(boolean allowNettyDirect, - ReadOptions readOptions, - LLRange range, - boolean reverse) throws RocksDBException { - assert !Schedulers.isInNonBlockingThread() : "Called getRocksIterator in a nonblocking thread"; - var rocksIterator = this.newIterator(readOptions, range.getMinUnsafe(), range.getMaxUnsafe()); + public RocksIteratorObj newRocksIterator(ReadOptions readOptions, LLRange range, boolean reverse) + throws RocksDBException { + assert !LLUtils.isInNonBlockingThread() : "Called getRocksIterator in a nonblocking thread"; + var rocksIterator = this.newIterator(readOptions, range.getMin(), range.getMax()); try { if (reverse) { if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMax()) { - rocksIterator.seekFrom(range.getMaxUnsafe()); + rocksIterator.seekFrom(range.getMax()); } else { rocksIterator.seekToLast(); } } else { if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { - rocksIterator.seekTo(range.getMinUnsafe()); + rocksIterator.seekTo(range.getMin()); } else { rocksIterator.seekToFirst(); } @@ -331,14 +296,8 @@ public sealed abstract class AbstractRocksDBColumn implements RocksDBUtils.ensureOwned(rocksObject); } - protected void ensureOwned(Buffer buffer) { - if (buffer != null && !buffer.isAccessible()) { - throw new IllegalStateException("Buffer is not accessible"); - } - } - @Override - public @Nullable Buffer get(@NotNull ReadOptions readOptions, Buffer key) throws RocksDBException { + public @Nullable Buf get(@NotNull ReadOptions readOptions, Buf key) throws RocksDBException { var closeReadLock = closeLock.readLock(); try { ensureOpen(); @@ -350,221 +309,74 @@ public sealed abstract class AbstractRocksDBColumn implements } @Override - public void put(@NotNull WriteOptions writeOptions, Buffer key, Buffer value) throws RocksDBException { + public void put(@NotNull WriteOptions writeOptions, Buf key, Buf value) throws RocksDBException { var closeReadLock = closeLock.readLock(); try { ensureOpen(); ensureOwned(writeOptions); - assert key.isAccessible(); - assert value.isAccessible(); - this.keyBufferSize.record(key.readableBytes()); - this.writeValueBufferSize.record(value.readableBytes()); - if (nettyDirect) { - // Get the key nio buffer to pass to RocksDB - ByteBuffer keyNioBuffer; - boolean mustCloseKey; - { - if (!LLUtils.isReadOnlyDirect(key)) { - // If the nio buffer is not available, copy the netty buffer into a new direct buffer - mustCloseKey = true; - var directKey = DefaultBufferAllocators.offHeapAllocator().allocate(key.readableBytes()); - key.copyInto(key.readerOffset(), directKey, 0, key.readableBytes()); - key = directKey; - } else { - mustCloseKey = false; - } - keyNioBuffer = ((BufferComponent) key).readableBuffer(); - assert keyNioBuffer.isDirect(); - assert keyNioBuffer.limit() == key.readableBytes(); - } - try { - // Get the value nio buffer to pass to RocksDB - ByteBuffer valueNioBuffer; - boolean mustCloseValue; - { - if (!LLUtils.isReadOnlyDirect(value)) { - // If the nio buffer is not available, copy the netty buffer into a new direct buffer - mustCloseValue = true; - var directValue = DefaultBufferAllocators.offHeapAllocator().allocate(value.readableBytes()); - value.copyInto(value.readerOffset(), directValue, 0, value.readableBytes()); - value = directValue; - } else { - mustCloseValue = false; - } - valueNioBuffer = ((BufferComponent) value).readableBuffer(); - assert valueNioBuffer.isDirect(); - assert valueNioBuffer.limit() == value.readableBytes(); - } - - try { - db.put(cfh, 
writeOptions, keyNioBuffer, valueNioBuffer); - } finally { - if (mustCloseValue) { - value.close(); - } - } - } finally { - if (mustCloseKey) { - key.close(); - } - } - } else { - db.put(cfh, writeOptions, LLUtils.toArray(key), LLUtils.toArray(value)); - } + this.keyBufferSize.record(key.size()); + this.writeValueBufferSize.record(value.size()); + db.put(cfh, writeOptions, LLUtils.asArray(key), LLUtils.asArray(value)); } finally { closeLock.unlockRead(closeReadLock); } } @Override - public boolean exists(@NotNull ReadOptions readOptions, Buffer key) throws RocksDBException { + public boolean exists(@NotNull ReadOptions readOptions, Buf key) throws RocksDBException { var closeReadLock = closeLock.readLock(); try { ensureOpen(); ensureOwned(readOptions); - if (nettyDirect) { - // Get the key nio buffer to pass to RocksDB - ByteBuffer keyNioBuffer; - boolean mustCloseKey; - { - if (!LLUtils.isReadOnlyDirect(key)) { - // If the nio buffer is not available, copy the netty buffer into a new direct buffer - mustCloseKey = true; - var directKey = DefaultBufferAllocators.offHeapAllocator().allocate(key.readableBytes()); - key.copyInto(key.readerOffset(), directKey, 0, key.readableBytes()); - key = directKey; - } else { - mustCloseKey = false; - } - keyNioBuffer = ((BufferComponent) key).readableBuffer(); - assert keyNioBuffer.isDirect(); - assert keyNioBuffer.limit() == key.readableBytes(); - } - try { - if (db.keyMayExist(cfh, keyNioBuffer)) { - int size = db.get(cfh, readOptions, keyNioBuffer.position(0), LLUtils.EMPTY_BYTE_BUFFER); - boolean found = size != RocksDB.NOT_FOUND; - if (found) { - readValueFoundWithBloomSimpleBufferSize.record(size); - return true; - } else { - readValueNotFoundWithMayExistBloomBufferSize.record(0); - return false; - } - } else { - readValueNotFoundWithBloomBufferSize.record(0); - return false; - } - } finally { - if (mustCloseKey) { - key.close(); - } - } - } else { - int size = RocksDB.NOT_FOUND; - byte[] keyBytes = LLUtils.toArray(key); - Holder data = new Holder<>(); - boolean mayExistHit = false; - if (db.keyMayExist(cfh, readOptions, keyBytes, data)) { - mayExistHit = true; - if (data.getValue() != null) { - size = data.getValue().length; - } else { - size = db.get(cfh, readOptions, keyBytes, NO_DATA); - } - } - boolean found = size != RocksDB.NOT_FOUND; - if (found) { - readValueFoundWithBloomSimpleBufferSize.record(size); + int size = RocksDB.NOT_FOUND; + byte[] keyBytes = LLUtils.asArray(key); + Holder data = new Holder<>(); + boolean mayExistHit = false; + if (db.keyMayExist(cfh, readOptions, keyBytes, data)) { + mayExistHit = true; + if (data.getValue() != null) { + size = data.getValue().length; } else { - if (mayExistHit) { - readValueNotFoundWithMayExistBloomBufferSize.record(0); - } else { - readValueNotFoundWithBloomBufferSize.record(0); - } + size = db.get(cfh, readOptions, keyBytes, NO_DATA); } - return found; } + boolean found = size != RocksDB.NOT_FOUND; + if (found) { + readValueFoundWithBloomSimpleBufferSize.record(size); + } else { + if (mayExistHit) { + readValueNotFoundWithMayExistBloomBufferSize.record(0); + } else { + readValueNotFoundWithBloomBufferSize.record(0); + } + } + return found; } finally { closeLock.unlockRead(closeReadLock); } } @Override - public boolean mayExists(@NotNull ReadOptions readOptions, Buffer key) throws RocksDBException { + public boolean mayExists(@NotNull ReadOptions readOptions, Buf key) throws RocksDBException { var closeReadLock = closeLock.readLock(); try { ensureOpen(); ensureOwned(readOptions); - if 
(nettyDirect) { - // Get the key nio buffer to pass to RocksDB - ByteBuffer keyNioBuffer; - boolean mustCloseKey; - { - if (!LLUtils.isReadOnlyDirect(key)) { - // If the nio buffer is not available, copy the netty buffer into a new direct buffer - mustCloseKey = true; - var directKey = DefaultBufferAllocators.offHeapAllocator().allocate(key.readableBytes()); - key.copyInto(key.readerOffset(), directKey, 0, key.readableBytes()); - key = directKey; - } else { - mustCloseKey = false; - } - keyNioBuffer = ((BufferComponent) key).readableBuffer(); - assert keyNioBuffer.isDirect(); - assert keyNioBuffer.limit() == key.readableBytes(); - } - try { - return db.keyMayExist(cfh, readOptions, keyNioBuffer); - } finally { - if (mustCloseKey) { - key.close(); - } - } - } else { - byte[] keyBytes = LLUtils.toArray(key); - return db.keyMayExist(cfh, readOptions, keyBytes, null); - } + byte[] keyBytes = LLUtils.asArray(key); + return db.keyMayExist(cfh, readOptions, keyBytes, null); } finally { closeLock.unlockRead(closeReadLock); } } @Override - public void delete(WriteOptions writeOptions, Buffer key) throws RocksDBException { + public void delete(WriteOptions writeOptions, Buf key) throws RocksDBException { var closeReadLock = closeLock.readLock(); try { ensureOpen(); ensureOwned(writeOptions); - keyBufferSize.record(key.readableBytes()); - if (nettyDirect) { - // Get the key nio buffer to pass to RocksDB - ByteBuffer keyNioBuffer; - boolean mustCloseKey; - { - if (!LLUtils.isReadOnlyDirect(key)) { - // If the nio buffer is not available, copy the netty buffer into a new direct buffer - mustCloseKey = true; - var directKey = DefaultBufferAllocators.offHeapAllocator().allocate(key.readableBytes()); - key.copyInto(key.readerOffset(), directKey, 0, key.readableBytes()); - key = directKey; - } else { - mustCloseKey = false; - } - keyNioBuffer = ((BufferComponent) key).readableBuffer(); - assert keyNioBuffer.isDirect(); - assert keyNioBuffer.limit() == key.readableBytes(); - } - try { - db.delete(cfh, writeOptions, keyNioBuffer); - } finally { - if (mustCloseKey) { - key.close(); - } - } - } else { - db.delete(cfh, writeOptions, LLUtils.toArray(key)); - } + keyBufferSize.record(key.size()); + db.delete(cfh, writeOptions, LLUtils.asArray(key)); } finally { closeLock.unlockRead(closeReadLock); } @@ -690,21 +502,17 @@ public sealed abstract class AbstractRocksDBColumn implements @Override public final @NotNull UpdateAtomicResult updateAtomic(@NotNull ReadOptions readOptions, @NotNull WriteOptions writeOptions, - Buffer key, + Buf key, BinarySerializationFunction updater, - UpdateAtomicResultMode returnMode) throws IOException { + UpdateAtomicResultMode returnMode) throws RocksDBException { var closeReadLock = closeLock.readLock(); try { ensureOpen(); ensureOwned(readOptions); try { - keyBufferSize.record(key.readableBytes()); + keyBufferSize.record(key.size()); startedUpdate.increment(); return updateAtomicImpl(readOptions, writeOptions, key, updater, returnMode); - } catch (IOException e) { - throw e; - } catch (Exception e) { - throw new IOException(e); } finally { endedUpdate.increment(); } @@ -733,31 +541,29 @@ public sealed abstract class AbstractRocksDBColumn implements protected abstract @NotNull UpdateAtomicResult updateAtomicImpl(@NotNull ReadOptions readOptions, @NotNull WriteOptions writeOptions, - Buffer key, + Buf key, BinarySerializationFunction updater, - UpdateAtomicResultMode returnMode) throws IOException; + UpdateAtomicResultMode returnMode) throws RocksDBException; @Override @NotNull public 
RocksIteratorObj newIterator(@NotNull ReadOptions readOptions, - @Nullable Buffer min, - @Nullable Buffer max) { + @Nullable Buf min, + @Nullable Buf max) { var closeReadLock = closeLock.readLock(); try { ensureOpen(); ensureOwned(readOptions); - ensureOwned(min); - ensureOwned(max); AbstractSlice sliceMin; AbstractSlice sliceMax; if (min != null) { - sliceMin = setIterateBound(nettyDirect, readOptions, IterateBound.LOWER, min); + sliceMin = setIterateBound(readOptions, IterateBound.LOWER, min); } else { sliceMin = null; } try { if (max != null) { - sliceMax = setIterateBound(nettyDirect, readOptions, IterateBound.UPPER, max); + sliceMax = setIterateBound(readOptions, IterateBound.UPPER, max); } else { sliceMax = null; } @@ -769,7 +575,6 @@ public sealed abstract class AbstractRocksDBColumn implements sliceMax, min, max, - nettyDirect, this.startedIterSeek, this.endedIterSeek, this.iterSeekTime, @@ -814,6 +619,8 @@ public sealed abstract class AbstractRocksDBColumn implements try { ensureOpen(); RocksDBUtils.forceCompaction(db, db.getName(), cfh, volumeId, logger); + } catch (RocksDBException e) { + throw new RuntimeException(e); } finally { closeLock.unlockRead(closeReadLock); } @@ -824,11 +631,6 @@ public sealed abstract class AbstractRocksDBColumn implements return cfh; } - @Override - public BufferAllocator getAllocator() { - return alloc; - } - public MeterRegistry getMeterRegistry() { return meterRegistry; } @@ -847,10 +649,6 @@ public sealed abstract class AbstractRocksDBColumn implements private class DBColumnKeyMayExistGetter extends KeyMayExistGetter { - public DBColumnKeyMayExistGetter() { - super(alloc, nettyDirect); - } - @Override protected KeyMayExist keyMayExist(ReadOptions readOptions, ByteBuffer key, ByteBuffer value) { return db.keyMayExist(cfh, readOptions, key, value); diff --git a/src/main/java/it/cavallium/dbengine/database/disk/BinarySerializationFunction.java b/src/main/java/it/cavallium/dbengine/database/disk/BinarySerializationFunction.java index e90d4d1..1b67697 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/BinarySerializationFunction.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/BinarySerializationFunction.java @@ -1,7 +1,7 @@ package it.cavallium.dbengine.database.disk; -import io.netty5.buffer.Buffer; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.serialization.SerializationFunction; import org.jetbrains.annotations.Nullable; -public interface BinarySerializationFunction extends SerializationFunction<@Nullable Buffer, @Nullable Buffer> {} +public interface BinarySerializationFunction extends SerializationFunction<@Nullable Buf, @Nullable Buf> {} diff --git a/src/main/java/it/cavallium/dbengine/database/disk/CachedIndexSearcherManager.java b/src/main/java/it/cavallium/dbengine/database/disk/CachedIndexSearcherManager.java index f2b4ed4..1122615 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/CachedIndexSearcherManager.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/CachedIndexSearcherManager.java @@ -1,23 +1,19 @@ package it.cavallium.dbengine.database.disk; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; - import com.google.common.cache.CacheBuilder; import com.google.common.cache.CacheLoader; import com.google.common.cache.LoadingCache; import it.cavallium.dbengine.database.LLSnapshot; -import it.cavallium.dbengine.database.LLUtils; import 
it.cavallium.dbengine.lucene.LuceneCloseable; -import it.cavallium.dbengine.lucene.LuceneUtils; import it.cavallium.dbengine.utils.SimpleResource; import java.io.IOException; -import java.io.UncheckedIOException; +import it.cavallium.dbengine.utils.DBException; import java.time.Duration; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.LockSupport; import org.apache.logging.log4j.LogManager; @@ -30,11 +26,6 @@ import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.AlreadyClosedException; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import it.cavallium.dbengine.utils.ShortNamedThreadFactory; -import reactor.core.Disposable; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Scheduler; -import reactor.core.scheduler.Schedulers; // todo: deduplicate code between Cached and Simple searcher managers public class CachedIndexSearcherManager extends SimpleResource implements IndexSearcherManager, LuceneCloseable { @@ -49,7 +40,7 @@ public class CachedIndexSearcherManager extends SimpleResource implements IndexS @Nullable private final SnapshotsManager snapshotsManager; - private final Scheduler luceneHeavyTasksScheduler; + private final ScheduledExecutorService luceneHeavyTasksScheduler; private final Similarity similarity; private final SearcherManager searcherManager; private final Duration queryRefreshDebounceTime; @@ -57,17 +48,16 @@ public class CachedIndexSearcherManager extends SimpleResource implements IndexS private final AtomicLong activeSearchers = new AtomicLong(0); private final AtomicLong activeRefreshes = new AtomicLong(0); - private final LoadingCache> cachedSnapshotSearchers; - private final Mono cachedMainSearcher; - private final Disposable refreshSubscription; + private final LoadingCache cachedSnapshotSearchers; + private final ScheduledFuture refreshSubscription; public CachedIndexSearcherManager(IndexWriter indexWriter, @Nullable SnapshotsManager snapshotsManager, - Scheduler luceneHeavyTasksScheduler, + ScheduledExecutorService luceneHeavyTasksScheduler, Similarity similarity, boolean applyAllDeletes, boolean writeAllDeletes, - Duration queryRefreshDebounceTime) throws IOException { + Duration queryRefreshDebounceTime) { this.snapshotsManager = snapshotsManager; this.luceneHeavyTasksScheduler = luceneHeavyTasksScheduler; this.similarity = similarity; @@ -75,13 +65,17 @@ public class CachedIndexSearcherManager extends SimpleResource implements IndexS this.searcherManager = new SearcherManager(indexWriter, applyAllDeletes, writeAllDeletes, SEARCHER_FACTORY); - refreshSubscription = LLUtils.scheduleRepeated(luceneHeavyTasksScheduler, () -> { - try { - maybeRefresh(); - } catch (Exception ex) { - LOG.error("Failed to refresh the searcher manager", ex); - } - }, queryRefreshDebounceTime); + refreshSubscription = luceneHeavyTasksScheduler.scheduleAtFixedRate(() -> { + try { + maybeRefresh(); + } catch (Exception ex) { + LOG.error("Failed to refresh the searcher manager", ex); + } + }, + queryRefreshDebounceTime.toMillis(), + queryRefreshDebounceTime.toMillis(), + TimeUnit.MILLISECONDS + ); this.cachedSnapshotSearchers = CacheBuilder.newBuilder() .expireAfterWrite(queryRefreshDebounceTime) 
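
The refresh loop above swaps Reactor's repeated-task helper for a plain ScheduledExecutorService.scheduleAtFixedRate, and onClose later cancels the future cooperatively before forcing it. A standalone sketch of that lifecycle using only the JDK; the ten-second grace period is illustrative, the real onClose waits considerably longer:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

final class PeriodicRefresher implements AutoCloseable {
    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final ScheduledFuture<?> task;

    PeriodicRefresher(Runnable refresh, long periodMillis) {
        // Same shape as refreshSubscription above: fixed rate, errors logged and swallowed
        // so one failed refresh does not kill the schedule.
        this.task = scheduler.scheduleAtFixedRate(() -> {
            try {
                refresh.run();
            } catch (Exception ex) {
                ex.printStackTrace(); // the real code logs through log4j instead
            }
        }, periodMillis, periodMillis, TimeUnit.MILLISECONDS);
    }

    @Override
    public void close() {
        task.cancel(false); // let an in-flight refresh finish first...
        scheduler.shutdown();
        try {
            if (!scheduler.awaitTermination(10, TimeUnit.SECONDS)) {
                task.cancel(true); // ...then force-cancel, mirroring onClose() above
                scheduler.shutdownNow();
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }
}
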
@@ -89,44 +83,40 @@ public class CachedIndexSearcherManager extends SimpleResource implements IndexS .maximumSize(3) .build(new CacheLoader<>() { @Override - public Mono load(@NotNull LLSnapshot snapshot) { + public LLIndexSearcher load(@NotNull LLSnapshot snapshot) { return CachedIndexSearcherManager.this.generateCachedSearcher(snapshot); } }); - this.cachedMainSearcher = this.generateCachedSearcher(null); } - private Mono generateCachedSearcher(@Nullable LLSnapshot snapshot) { - return Mono.fromCallable(() -> { - if (isClosed()) { - return null; - } - activeSearchers.incrementAndGet(); - try { - IndexSearcher indexSearcher; - boolean fromSnapshot; - if (snapshotsManager == null || snapshot == null) { - indexSearcher = searcherManager.acquire(); - fromSnapshot = false; - } else { - indexSearcher = snapshotsManager.resolveSnapshot(snapshot).getIndexSearcher(SEARCH_EXECUTOR); - fromSnapshot = true; - } - indexSearcher.setSimilarity(similarity); - assert indexSearcher.getIndexReader().getRefCount() > 0; - LLIndexSearcher llIndexSearcher; - if (fromSnapshot) { - llIndexSearcher = new SnapshotIndexSearcher(indexSearcher); - } else { - llIndexSearcher = new MainIndexSearcher(indexSearcher, searcherManager); - } - return llIndexSearcher; - } catch (Throwable ex) { - activeSearchers.decrementAndGet(); - throw ex; - } - }) - .transform(LuceneUtils::scheduleLucene); + private LLIndexSearcher generateCachedSearcher(@Nullable LLSnapshot snapshot) { + if (isClosed()) { + return null; + } + activeSearchers.incrementAndGet(); + try { + IndexSearcher indexSearcher; + boolean fromSnapshot; + if (snapshotsManager == null || snapshot == null) { + indexSearcher = searcherManager.acquire(); + fromSnapshot = false; + } else { + indexSearcher = snapshotsManager.resolveSnapshot(snapshot).getIndexSearcher(SEARCH_EXECUTOR); + fromSnapshot = true; + } + indexSearcher.setSimilarity(similarity); + assert indexSearcher.getIndexReader().getRefCount() > 0; + LLIndexSearcher llIndexSearcher; + if (fromSnapshot) { + llIndexSearcher = new SnapshotIndexSearcher(indexSearcher); + } else { + llIndexSearcher = new MainIndexSearcher(indexSearcher, searcherManager); + } + return llIndexSearcher; + } catch (Throwable ex) { + activeSearchers.decrementAndGet(); + throw ex; + } } private void dropCachedIndexSearcher() { @@ -135,7 +125,7 @@ public class CachedIndexSearcherManager extends SimpleResource implements IndexS } @Override - public void maybeRefreshBlocking() throws IOException { + public void maybeRefreshBlocking() { try { activeRefreshes.incrementAndGet(); searcherManager.maybeRefreshBlocking(); @@ -147,7 +137,7 @@ public class CachedIndexSearcherManager extends SimpleResource implements IndexS } @Override - public void maybeRefresh() throws IOException { + public void maybeRefresh() { try { activeRefreshes.incrementAndGet(); searcherManager.maybeRefresh(); @@ -159,9 +149,9 @@ public class CachedIndexSearcherManager extends SimpleResource implements IndexS } @Override - public Mono retrieveSearcher(@Nullable LLSnapshot snapshot) { + public LLIndexSearcher retrieveSearcher(@Nullable LLSnapshot snapshot) { if (snapshot == null) { - return this.cachedMainSearcher; + return this.generateCachedSearcher(null); } else { return this.cachedSnapshotSearchers.getUnchecked(snapshot); } @@ -170,10 +160,15 @@ public class CachedIndexSearcherManager extends SimpleResource implements IndexS @Override protected void onClose() { LOG.debug("Closing IndexSearcherManager..."); - refreshSubscription.dispose(); + long initTime = 
System.nanoTime(); + refreshSubscription.cancel(false); + while (!refreshSubscription.isDone() && (System.nanoTime() - initTime) <= 240000000000L) { + LockSupport.parkNanos(50000000); + } + refreshSubscription.cancel(true); LOG.debug("Closed IndexSearcherManager"); LOG.debug("Closing refreshes..."); - long initTime = System.nanoTime(); + initTime = System.nanoTime(); while (activeRefreshes.get() > 0 && (System.nanoTime() - initTime) <= 15000000000L) { LockSupport.parkNanos(50000000); } @@ -227,7 +222,7 @@ public class CachedIndexSearcherManager extends SimpleResource implements IndexS try { searcherManager.release(indexSearcher); } catch (IOException ex) { - throw new UncheckedIOException(ex); + throw new DBException(ex); } } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/CappedWriteBatch.java b/src/main/java/it/cavallium/dbengine/database/disk/CappedWriteBatch.java index cc6c81e..16349b3 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/CappedWriteBatch.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/CappedWriteBatch.java @@ -1,19 +1,9 @@ package it.cavallium.dbengine.database.disk; -import static it.cavallium.dbengine.database.LLUtils.isDirect; -import static it.cavallium.dbengine.database.LLUtils.isReadOnlyDirect; +import static it.cavallium.dbengine.database.LLUtils.asArray; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.buffer.BufferComponent; -import io.netty5.util.Send; -import io.netty5.util.internal.PlatformDependent; -import it.cavallium.dbengine.database.LLUtils; -import it.cavallium.dbengine.database.disk.RocksDBColumn; -import java.io.Closeable; +import it.cavallium.dbengine.buffers.Buf; import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; import org.rocksdb.WriteBatch; @@ -21,61 +11,30 @@ import org.rocksdb.WriteOptions; public class CappedWriteBatch extends WriteBatch { - /** - * Default: true, Use false to debug problems with direct buffers - */ - private static final boolean USE_FAST_DIRECT_BUFFERS = true; private final RocksDBColumn db; - private final BufferAllocator alloc; private final int cap; private final WriteOptions writeOptions; - private final List buffersToRelease; - private final List byteBuffersToRelease; - /** * @param db * @param cap The limit of operations */ public CappedWriteBatch(RocksDBColumn db, - BufferAllocator alloc, int cap, int reservedWriteBatchSize, long maxWriteBatchSize, WriteOptions writeOptions) { super(reservedWriteBatchSize); this.db = db; - this.alloc = alloc; this.cap = cap; this.writeOptions = writeOptions; this.setMaxBytes(maxWriteBatchSize); - this.buffersToRelease = new ArrayList<>(); - this.byteBuffersToRelease = new ArrayList<>(); } private synchronized void flushIfNeeded(boolean force) throws RocksDBException { if (this.count() >= (force ? 
1 : cap)) { - try { - db.write(writeOptions, this.getWriteBatch()); - this.clear(); - } finally { - releaseAllBuffers(); - } - } - } - - public synchronized void releaseAllBuffers() { - if (!buffersToRelease.isEmpty()) { - for (Buffer byteBuffer : buffersToRelease) { - byteBuffer.close(); - } - buffersToRelease.clear(); - } - if (!byteBuffersToRelease.isEmpty()) { - for (var byteBuffer : byteBuffersToRelease) { - PlatformDependent.freeDirectBuffer(byteBuffer); - } - byteBuffersToRelease.clear(); + db.write(writeOptions, this.getWriteBatch()); + this.clear(); } } @@ -109,29 +68,9 @@ public class CappedWriteBatch extends WriteBatch { } public synchronized void put(ColumnFamilyHandle columnFamilyHandle, - Send keyToReceive, - Send valueToReceive) throws RocksDBException { - var key = keyToReceive.receive(); - var value = valueToReceive.receive(); - if (USE_FAST_DIRECT_BUFFERS - && (isReadOnlyDirect(key)) - && (isReadOnlyDirect(value))) { - ByteBuffer keyNioBuffer = ((BufferComponent) key).readableBuffer(); - ByteBuffer valueNioBuffer = ((BufferComponent) value).readableBuffer(); - buffersToRelease.add(value); - buffersToRelease.add(key); - - super.put(columnFamilyHandle, keyNioBuffer, valueNioBuffer); - } else { - try { - byte[] keyArray = LLUtils.toArray(key); - byte[] valueArray = LLUtils.toArray(value); - super.put(columnFamilyHandle, keyArray, valueArray); - } finally { - key.close(); - value.close(); - } - } + Buf keyToReceive, + Buf valueToReceive) throws RocksDBException { + super.put(columnFamilyHandle, asArray(keyToReceive), asArray(valueToReceive)); flushIfNeeded(false); } @@ -159,19 +98,8 @@ public class CappedWriteBatch extends WriteBatch { flushIfNeeded(false); } - public synchronized void delete(ColumnFamilyHandle columnFamilyHandle, Send keyToReceive) throws RocksDBException { - var key = keyToReceive.receive(); - if (USE_FAST_DIRECT_BUFFERS && isReadOnlyDirect(key)) { - ByteBuffer keyNioBuffer = ((BufferComponent) key).readableBuffer(); - buffersToRelease.add(key); - delete(columnFamilyHandle, keyNioBuffer); - } else { - try { - super.delete(columnFamilyHandle, LLUtils.toArray(key)); - } finally { - key.close(); - } - } + public synchronized void delete(ColumnFamilyHandle columnFamilyHandle, Buf keyToDelete) throws RocksDBException { + super.delete(columnFamilyHandle, asArray(keyToDelete)); flushIfNeeded(false); } @@ -221,7 +149,6 @@ public class CappedWriteBatch extends WriteBatch { @Override public synchronized void clear() { super.clear(); - releaseAllBuffers(); } @Override @@ -250,26 +177,11 @@ public class CappedWriteBatch extends WriteBatch { } public synchronized void writeToDbAndClose() throws RocksDBException { - try { - flushIfNeeded(true); - super.close(); - } finally { - releaseAllBuffers(); - } + flushIfNeeded(true); + super.close(); } public void flush() throws RocksDBException { - try { - flushIfNeeded(true); - } finally { - releaseAllBuffers(); - } + flushIfNeeded(true); } - - /* - protected void disposeInternal(boolean owningHandle) { - super.disposeInternal(owningHandle); - releaseAllBuffers(); - } - */ } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/HugePqEnv.java b/src/main/java/it/cavallium/dbengine/database/disk/HugePqEnv.java deleted file mode 100644 index bdbb806..0000000 --- a/src/main/java/it/cavallium/dbengine/database/disk/HugePqEnv.java +++ /dev/null @@ -1,84 +0,0 @@ -package it.cavallium.dbengine.database.disk; - -import static it.cavallium.dbengine.database.disk.LLTempHugePqEnv.getColumnOptions; - -import 
com.google.common.primitives.Ints; -import io.micrometer.core.instrument.composite.CompositeMeterRegistry; -import io.netty5.buffer.BufferAllocator; -import it.cavallium.dbengine.utils.SimpleResource; -import it.unimi.dsi.fastutil.ints.Int2ObjectMap; -import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap; -import java.io.Closeable; -import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.ArrayList; -import java.util.Objects; -import java.util.concurrent.locks.ReentrantLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.concurrent.locks.StampedLock; -import org.rocksdb.AbstractComparator; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; - -public class HugePqEnv extends SimpleResource { - - private final RocksDB db; - private final ArrayList defaultCfh; - private final Int2ObjectMap cfhs = new Int2ObjectOpenHashMap<>(); - - public HugePqEnv(RocksDB db, ArrayList defaultCfh) { - this.db = db; - this.defaultCfh = defaultCfh; - } - - @Override - protected void onClose() { - for (var cfh : defaultCfh) { - db.destroyColumnFamilyHandle(cfh); - } - try { - db.closeE(); - } catch (RocksDBException e) { - throw new IllegalStateException(e); - } - } - - public int createColumnFamily(int name, AbstractComparator comparator) throws RocksDBException { - var cfh = db.createColumnFamily(new ColumnFamilyDescriptor(Ints.toByteArray(name), getColumnOptions(comparator))); - synchronized (cfhs) { - var prev = cfhs.put(name, cfh); - if (prev != null) { - throw new UnsupportedOperationException("Db " + name + " already exists"); - } - return name; - } - } - - public void deleteColumnFamily(int db) throws RocksDBException { - ColumnFamilyHandle cfh; - synchronized (cfhs) { - cfh = cfhs.remove(db); - } - if (cfh != null) { - this.db.dropColumnFamily(cfh); - this.db.destroyColumnFamilyHandle(cfh); - } - } - - public StandardRocksDBColumn openDb(int hugePqId) { - ColumnFamilyHandle cfh; - synchronized (cfhs) { - cfh = Objects.requireNonNull(cfhs.get(hugePqId), () -> "column " + hugePqId + " does not exist"); - } - return new StandardRocksDBColumn(db, - true, - BufferAllocator.offHeapPooled(), - db.getName(), - cfh, - new CompositeMeterRegistry(), - new StampedLock() - ); - } -} diff --git a/src/main/java/it/cavallium/dbengine/database/disk/IndexSearcherManager.java b/src/main/java/it/cavallium/dbengine/database/disk/IndexSearcherManager.java index 9e5cd06..465bacb 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/IndexSearcherManager.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/IndexSearcherManager.java @@ -1,20 +1,16 @@ package it.cavallium.dbengine.database.disk; -import io.netty5.util.Send; import it.cavallium.dbengine.database.LLSnapshot; import it.cavallium.dbengine.database.SafeCloseable; import java.io.IOException; -import java.util.function.Function; -import org.apache.lucene.search.IndexSearcher; +import java.util.function.Supplier; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; public interface IndexSearcherManager extends SafeCloseable { - void maybeRefreshBlocking() throws IOException; + void maybeRefreshBlocking(); - void maybeRefresh() throws IOException; + void maybeRefresh(); - Mono retrieveSearcher(@Nullable LLSnapshot snapshot); + LLIndexSearcher retrieveSearcher(@Nullable LLSnapshot snapshot); } diff --git 
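
IndexSearcherManager.retrieveSearcher now hands back the LLIndexSearcher directly, so the Mono-based acquire/release collapses into an ordinary try/finally. A sketch of the new call shape; the project types are assumed to be on the classpath, and the body comment stands in for whatever accessor LLIndexSearcher exposes for the wrapped Lucene IndexSearcher, which this diff does not show:

import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.disk.IndexSearcherManager;
import it.cavallium.dbengine.database.disk.LLIndexSearcher;
import org.jetbrains.annotations.Nullable;

final class SearchExample {
    static void search(IndexSearcherManager manager, @Nullable LLSnapshot snapshot) {
        LLIndexSearcher searcher = manager.retrieveSearcher(snapshot); // direct, blocking call
        try {
            // query through the wrapped org.apache.lucene.search.IndexSearcher here
        } finally {
            searcher.close(); // SimpleResource close(); releases what acquisition took
        }
    }
}
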
a/src/main/java/it/cavallium/dbengine/database/disk/KeyMayExistGetter.java b/src/main/java/it/cavallium/dbengine/database/disk/KeyMayExistGetter.java index cb8ce3e..367183a 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/KeyMayExistGetter.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/KeyMayExistGetter.java @@ -1,31 +1,19 @@ package it.cavallium.dbengine.database.disk; -import static it.cavallium.dbengine.database.LLUtils.INITIAL_DIRECT_READ_BYTE_BUF_SIZE_BYTES; import static java.lang.Boolean.parseBoolean; import static java.lang.System.getProperty; import static java.util.Objects.requireNonNull; -import static org.rocksdb.KeyMayExist.KeyMayExistEnum.kExistsWithValue; -import static org.rocksdb.KeyMayExist.KeyMayExistEnum.kExistsWithoutValue; -import static org.rocksdb.KeyMayExist.KeyMayExistEnum.kNotExist; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.buffer.BufferComponent; -import io.netty5.buffer.DefaultBufferAllocators; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLUtils; import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.HexFormat; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.Holder; import org.rocksdb.KeyMayExist; -import org.rocksdb.KeyMayExist.KeyMayExistEnum; import org.rocksdb.ReadOptions; -import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; public abstract class KeyMayExistGetter { @@ -40,180 +28,19 @@ public abstract class KeyMayExistGetter { "it.cavallium.dbengine.mayexist.strict_no_value", "false" )); - private static final BufferAllocator OFF_HEAP_ALLOCATOR = DefaultBufferAllocators.offHeapAllocator(); - private final BufferAllocator bufferAllocator; - private final boolean nettyDirect; - - public KeyMayExistGetter(BufferAllocator bufferAllocator, boolean nettyDirect) { - this.bufferAllocator = bufferAllocator; - this.nettyDirect = nettyDirect; + public KeyMayExistGetter() { } - public final @Nullable Buffer get(@NotNull ReadOptions readOptions, Buffer key) throws RocksDBException { - recordKeyBufferSize(key.readableBytes()); - if (nettyDirect) { - return getDirect(readOptions, key); - } else { - return getHeap(readOptions, key); - } + public final @Nullable Buf get(@NotNull ReadOptions readOptions, Buf key) throws RocksDBException { + recordKeyBufferSize(key.size()); + return getHeap(readOptions, key); } - private Buffer getDirect(ReadOptions readOptions, Buffer key) throws RocksDBException { - int readAttemptsCount = 0; - // Get the key nio buffer to pass to RocksDB - ByteBuffer keyNioBuffer; - boolean mustCloseKey; - { - if (!LLUtils.isReadOnlyDirect(key)) { - // If the nio buffer is not available, copy the netty buffer into a new direct buffer - mustCloseKey = true; - var directKey = OFF_HEAP_ALLOCATOR.allocate(key.readableBytes()); - key.copyInto(key.readerOffset(), directKey, 0, key.readableBytes()); - key = directKey; - } else { - mustCloseKey = false; - } - keyNioBuffer = ((BufferComponent) key).readableBuffer(); - assert keyNioBuffer.isDirect(); - assert keyNioBuffer.limit() == key.readableBytes(); - } - - try { - // Create a direct result buffer because RocksDB works only with direct buffers - var resultBuffer = bufferAllocator.allocate(INITIAL_DIRECT_READ_BYTE_BUF_SIZE_BYTES); - try { - assert resultBuffer.readerOffset() == 0; - assert 
resultBuffer.writerOffset() == 0; - var resultWritable = ((BufferComponent) resultBuffer).writableBuffer(); - - var keyMayExist = keyMayExist(readOptions, keyNioBuffer.rewind(), resultWritable.clear()); - if (STRICT_MAYEXIST_NO_VALUE && keyMayExist.exists != kExistsWithValue && keyMayExist.valueLength != 0) { - // Create a direct result buffer because RocksDB works only with direct buffers - try (var realResultBuffer = bufferAllocator.allocate(INITIAL_DIRECT_READ_BYTE_BUF_SIZE_BYTES)) { - var resultWritableF = resultWritable; - var realResultWritable = ((BufferComponent) realResultBuffer).writableBuffer(); - var realSize = get(readOptions, keyNioBuffer.rewind(), realResultWritable); - var hexFormat = HexFormat.ofDelimiter(" "); - LOG.error( - "KeyMayExist is {}, but value length is non-zero: {}! Disk value size is {}\nBytes from bloom cache:\n{}\nBytes from db:\n{}", - () -> keyMayExist.exists, - () -> keyMayExist.valueLength, - () -> realSize, - () -> { - resultBuffer.writerOffset(resultWritableF.limit()); - return hexFormat.formatHex(LLUtils.toArray(resultBuffer)); - }, - () -> { - realResultBuffer.writerOffset(realResultWritable.limit()); - return hexFormat.formatHex(LLUtils.toArray(realResultBuffer)); - } - ); - var sliceKME = LLUtils.toArray(resultBuffer.copy(0, Math.min(resultWritableF.limit(), realSize))); - var sliceDB = LLUtils.toArray(realResultBuffer.copy(0, Math.min(realResultWritable.limit(), realSize))); - throw new RocksDBException( - "KeyMayExist is " + keyMayExist.exists + ", but value length is non-zero: " + keyMayExist.valueLength - + "! Disk value size is " + realSize + ". The bloom cache partial value is " - + (Arrays.equals(sliceKME, sliceDB) ? "correct" : "corrupted")); - } - } - KeyMayExistEnum keyMayExistState = keyMayExist.exists; - int keyMayExistValueLength = keyMayExist.valueLength; - // At the beginning, size reflects the expected size, then it becomes the real data size - //noinspection SwitchStatementWithTooFewBranches - int size = switch (keyMayExistState) { - case kExistsWithValue -> keyMayExistValueLength; - default -> -1; - }; - boolean isKExistsWithoutValue = false; - switch (keyMayExistState) { - case kNotExist: { - recordReadValueNotFoundWithBloomBufferSize(0); - resultBuffer.close(); - return null; - } - // todo: kExistsWithValue is not reliable (read below), - // in some cases it should be treated as kExistsWithoutValue - case kExistsWithValue: - case kExistsWithoutValue: { - if (keyMayExistState == kExistsWithoutValue) { - isKExistsWithoutValue = true; - } else if (WORKAROUND_MAY_EXIST_FAKE_ZERO) { - // todo: "size == 0 || resultWritable.limit() == 0" is checked because keyMayExist is broken, - // and sometimes it returns an empty array, as if it exists - if (size == 0 || resultWritable.limit() == 0) { - isKExistsWithoutValue = true; - } - } - if (isKExistsWithoutValue) { - assert - !STRICT_MAYEXIST_NO_VALUE || keyMayExistValueLength == 0 : - "keyMayExist value length is " + keyMayExistValueLength + " instead of 0"; - resultWritable.clear(); - readAttemptsCount++; - // real data size - size = get(readOptions, keyNioBuffer.rewind(), resultWritable.clear()); - if (keyMayExistState == kExistsWithValue && size != keyMayExistValueLength) { - throw new IllegalStateException("Bloom filter data is corrupted." 
- + " Bloom value size=" + keyMayExistState + ", Real value size=" + size); - } - if (size == RocksDB.NOT_FOUND) { - resultBuffer.close(); - recordReadValueNotFoundWithMayExistBloomBufferSize(0); - return null; - } - } - } - default: { - // real data size - assert size >= 0; - if (size <= resultWritable.limit()) { - if (isKExistsWithoutValue) { - recordReadValueFoundWithBloomUncachedBufferSize(size); - } else { - recordReadValueFoundWithBloomCacheBufferSize(size); - } - assert size == resultWritable.limit(); - return resultBuffer.writerOffset(resultWritable.limit()); - } else { - resultBuffer.ensureWritable(size); - resultWritable = ((BufferComponent) resultBuffer).writableBuffer(); - assert resultBuffer.readerOffset() == 0; - assert resultBuffer.writerOffset() == 0; - - readAttemptsCount++; - size = get(readOptions, keyNioBuffer.rewind(), resultWritable.clear()); - if (size == RocksDB.NOT_FOUND) { - recordReadValueNotFoundWithMayExistBloomBufferSize(0); - resultBuffer.close(); - return null; - } - assert size == resultWritable.limit(); - if (isKExistsWithoutValue) { - recordReadValueFoundWithBloomUncachedBufferSize(size); - } else { - recordReadValueFoundWithBloomCacheBufferSize(size); - } - return resultBuffer.writerOffset(resultWritable.limit()); - } - } - } - } catch (Throwable t) { - resultBuffer.close(); - throw t; - } - } finally { - if (mustCloseKey) { - key.close(); - } - recordReadAttempts(readAttemptsCount); - } - } - - private Buffer getHeap(ReadOptions readOptions, Buffer key) throws RocksDBException { + private Buf getHeap(ReadOptions readOptions, Buf key) throws RocksDBException { int readAttemptsCount = 0; try { - byte[] keyArray = LLUtils.toArray(key); + byte[] keyArray = LLUtils.asArray(key); requireNonNull(keyArray); Holder data = new Holder<>(); if (keyMayExist(readOptions, keyArray, data)) { @@ -221,7 +48,7 @@ public abstract class KeyMayExistGetter { // returns an empty array, as if it exists if (data.getValue() != null && (!WORKAROUND_MAY_EXIST_FAKE_ZERO || data.getValue().length > 0)) { recordReadValueFoundWithBloomCacheBufferSize(data.getValue().length); - return LLUtils.fromByteArray(bufferAllocator, data.getValue()); + return LLUtils.asByteList(data.getValue()); } else { readAttemptsCount++; byte[] result = get(readOptions, keyArray); @@ -234,7 +61,7 @@ public abstract class KeyMayExistGetter { return null; } else { recordReadValueFoundWithBloomUncachedBufferSize(0); - return LLUtils.fromByteArray(bufferAllocator, result); + return LLUtils.asByteList(result); } } } else { diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLIndexSearcher.java b/src/main/java/it/cavallium/dbengine/database/disk/LLIndexSearcher.java index 25afc3e..a07c8bf 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLIndexSearcher.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLIndexSearcher.java @@ -1,17 +1,10 @@ package it.cavallium.dbengine.database.disk; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; -import io.netty5.buffer.internal.ResourceSupport; import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; import it.cavallium.dbengine.utils.SimpleResource; -import java.io.Closeable; -import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; public abstract class LLIndexSearcher extends 
SimpleResource implements DiscardingCloseable { diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLIndexSearcherImpl.java b/src/main/java/it/cavallium/dbengine/database/disk/LLIndexSearcherImpl.java index f39024d..cd67831 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLIndexSearcherImpl.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLIndexSearcherImpl.java @@ -1,11 +1,8 @@ package it.cavallium.dbengine.database.disk; -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.utils.SimpleResource; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.index.IndexReader; import org.apache.lucene.search.IndexSearcher; public abstract class LLIndexSearcherImpl extends LLIndexSearcher { diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java index 8fec71f..96a9080 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDatabaseConnection.java @@ -1,56 +1,38 @@ package it.cavallium.dbengine.database.disk; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; - import io.micrometer.core.instrument.MeterRegistry; -import io.netty5.buffer.BufferAllocator; import it.cavallium.dbengine.database.LLDatabaseConnection; import it.cavallium.dbengine.database.LLLuceneIndex; import it.cavallium.dbengine.lucene.LuceneHacks; -import it.cavallium.dbengine.lucene.LuceneRocksDBManager; -import it.cavallium.dbengine.lucene.LuceneUtils; import it.cavallium.dbengine.rpc.current.data.Column; import it.cavallium.dbengine.rpc.current.data.DatabaseOptions; import it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers; import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities; import it.cavallium.dbengine.rpc.current.data.LuceneIndexStructure; import it.cavallium.dbengine.rpc.current.data.LuceneOptions; +import it.cavallium.dbengine.utils.DBException; +import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.util.LinkedList; import java.util.List; -import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; public class LLLocalDatabaseConnection implements LLDatabaseConnection { private final AtomicBoolean connected = new AtomicBoolean(); - private final BufferAllocator allocator; private final MeterRegistry meterRegistry; private final Path basePath; private final boolean inMemory; - private final LuceneRocksDBManager rocksDBManager; - private final AtomicReference env = new AtomicReference<>(); - public LLLocalDatabaseConnection(BufferAllocator allocator, + public LLLocalDatabaseConnection( MeterRegistry meterRegistry, Path basePath, - boolean inMemory, - LuceneRocksDBManager rocksDBManager) { - this.allocator = allocator; + boolean inMemory) { this.meterRegistry = meterRegistry; this.basePath = basePath; this.inMemory = inMemory; - this.rocksDBManager = rocksDBManager; - } - - @Override - public BufferAllocator getAllocator() { - return allocator; } public MeterRegistry getMeterRegistry() { @@ -58,94 +40,67 @@ public class LLLocalDatabaseConnection implements 
LLDatabaseConnection { } @Override - public Mono connect() { - return Mono - .fromCallable(() -> { - if (!connected.compareAndSet(false, true)) { - throw new IllegalStateException("Already connected"); - } - if (Files.notExists(basePath)) { - Files.createDirectories(basePath); - } - var prev = env.getAndSet(new LLTempHugePqEnv()); - if (prev != null) { - throw new IllegalStateException("Env was already set"); - } - return this; - }) - .subscribeOn(Schedulers.boundedElastic()); + public LLDatabaseConnection connect() { + if (!connected.compareAndSet(false, true)) { + throw new IllegalStateException("Already connected"); + } + if (Files.notExists(basePath)) { + try { + Files.createDirectories(basePath); + } catch (IOException e) { + throw new DBException(e); + } + } + return this; } @Override - public Mono getDatabase(String name, - List columns, - DatabaseOptions databaseOptions) { - return Mono - .fromCallable(() -> new LLLocalKeyValueDatabase( - allocator, - meterRegistry, - name, - inMemory, - basePath.resolve("database_" + name), - columns, - new LinkedList<>(), - databaseOptions - )) - .subscribeOn(Schedulers.boundedElastic()); + public LLLocalKeyValueDatabase getDatabase(String name, List columns, DatabaseOptions databaseOptions) { + return new LLLocalKeyValueDatabase(meterRegistry, + name, + inMemory, + basePath.resolve("database_" + name), + columns, + new LinkedList<>(), + databaseOptions + ); } @Override - public Mono getLuceneIndex(String clusterName, + public LLLuceneIndex getLuceneIndex(String clusterName, LuceneIndexStructure indexStructure, IndicizerAnalyzers indicizerAnalyzers, IndicizerSimilarities indicizerSimilarities, LuceneOptions luceneOptions, @Nullable LuceneHacks luceneHacks) { - return Mono - .fromCallable(() -> { - var env = this.env.get(); - if (clusterName == null) { - throw new IllegalArgumentException("Cluster name must be set"); - } - if (indexStructure.activeShards().size() != 1) { - Objects.requireNonNull(env, "Environment not set"); - return new LLLocalMultiLuceneIndex(env, - meterRegistry, - clusterName, - indexStructure.activeShards(), - indexStructure.totalShards(), - indicizerAnalyzers, - indicizerSimilarities, - luceneOptions, - luceneHacks, - rocksDBManager - ); - } else { - return new LLLocalLuceneIndex(env, - meterRegistry, - clusterName, - indexStructure.activeShards().getInt(0), - indicizerAnalyzers, - indicizerSimilarities, - luceneOptions, - luceneHacks, - rocksDBManager - ); - } - }) - .transform(LuceneUtils::scheduleLucene); + if (clusterName == null) { + throw new IllegalArgumentException("Cluster name must be set"); + } + if (indexStructure.activeShards().size() != 1) { + return new LLLocalMultiLuceneIndex(meterRegistry, + clusterName, + indexStructure.activeShards(), + indexStructure.totalShards(), + indicizerAnalyzers, + indicizerSimilarities, + luceneOptions, + luceneHacks + ); + } else { + return new LLLocalLuceneIndex(meterRegistry, + clusterName, + indexStructure.activeShards().getInt(0), + indicizerAnalyzers, + indicizerSimilarities, + luceneOptions, + luceneHacks + ); + } } @Override - public Mono disconnect() { - return Mono.fromCallable(() -> { - if (connected.compareAndSet(true, false)) { - var env = this.env.get(); - if (env != null) { - env.close(); - } - } - return null; - }).subscribeOn(Schedulers.boundedElastic()); + public void disconnect() { + if (connected.compareAndSet(true, false)) { + } } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java 
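
LLLocalDatabaseConnection is now fully synchronous: connect creates the base directory and throws DBException on I/O failure, getDatabase constructs the database inline, and disconnect only flips the connected flag. Hypothetical wiring under those assumptions; the column list and DatabaseOptions must come from the project's own factories, so they stay commented out here:

import io.micrometer.core.instrument.composite.CompositeMeterRegistry;
import java.nio.file.Path;

final class ConnectExample {
    static void open() {
        var conn = new LLLocalDatabaseConnection(new CompositeMeterRegistry(),
                Path.of("/tmp/test-db"), false);
        conn.connect(); // throws DBException instead of emitting a Mono error
        // var db = conn.getDatabase("main", columns, databaseOptions);
        // ... use db ...
        conn.disconnect(); // now a no-op beyond clearing the connected flag
    }
}
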
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java
index 7a8afa8..eab321d 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalDictionary.java
@@ -1,24 +1,20 @@
 package it.cavallium.dbengine.database.disk;

-import static io.netty5.buffer.StandardAllocationTypes.OFF_HEAP;
 import static it.cavallium.dbengine.database.LLUtils.ALLOW_STATIC_OPTIONS;
 import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
-import static it.cavallium.dbengine.database.LLUtils.fromByteArray;
 import static it.cavallium.dbengine.database.LLUtils.isBoundedRange;
-import static it.cavallium.dbengine.database.LLUtils.isReadOnlyDirect;
 import static it.cavallium.dbengine.database.LLUtils.toStringSafe;
 import static it.cavallium.dbengine.database.disk.UpdateAtomicResultMode.DELTA;
 import static java.util.Objects.requireNonNull;
+import static it.cavallium.dbengine.utils.StreamUtils.batches;

+import com.google.common.collect.Lists;
+import com.google.common.collect.Streams;
 import io.micrometer.core.instrument.Counter;
 import io.micrometer.core.instrument.Timer;
-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.BufferAllocator;
-import io.netty5.buffer.BufferComponent;
-import io.netty5.util.Resource;
+import it.cavallium.dbengine.buffers.Buf;
 import it.cavallium.dbengine.client.BadBlock;
 import it.cavallium.dbengine.database.ColumnUtils;
-import it.cavallium.dbengine.database.DiscardingCloseable;
 import it.cavallium.dbengine.database.LLDelta;
 import it.cavallium.dbengine.database.LLDictionary;
 import it.cavallium.dbengine.database.LLDictionaryResultType;
@@ -31,23 +27,26 @@ import it.cavallium.dbengine.database.UpdateMode;
 import it.cavallium.dbengine.database.UpdateReturnMode;
 import it.cavallium.dbengine.database.serialization.KVSerializationFunction;
 import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;
+import it.cavallium.dbengine.utils.DBException;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.time.Duration;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Objects;
 import java.util.concurrent.Callable;
+import java.util.concurrent.CompletionException;
 import java.util.concurrent.ForkJoinPool;
 import java.util.concurrent.ForkJoinTask;
 import java.util.function.Function;
 import java.util.stream.IntStream;
+import java.util.stream.Stream;
 import org.apache.commons.lang3.tuple.Pair;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.util.Supplier;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
-import org.rocksdb.AbstractNativeReference;
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.CompactRangeOptions;
 import org.rocksdb.FlushOptions;
@@ -57,13 +56,6 @@ import org.rocksdb.Slice;
 import org.rocksdb.Snapshot;
 import org.rocksdb.WriteBatch;
 import org.rocksdb.WriteOptions;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-import reactor.core.scheduler.Scheduler;
-import reactor.core.scheduler.Schedulers;
-import reactor.util.function.Tuple2;
-import reactor.util.function.Tuple3;
-import reactor.util.function.Tuples;

 public class LLLocalDictionary implements LLDictionary {

@@ -113,12 +105,8 @@ public class LLLocalDictionary implements LLDictionary {
 	private final ColumnFamilyHandle cfh;
 	private final String databaseName;
 	private final String columnName;
-	private final Scheduler dbWScheduler;
-
private final Scheduler dbRScheduler; private final Function snapshotResolver; private final UpdateMode updateMode; - private final boolean nettyDirect; - private final BufferAllocator alloc; private final Counter startedUpdates; private final Counter endedUpdates; @@ -136,12 +124,10 @@ public class LLLocalDictionary implements LLDictionary { private final Counter endedRemove; private final Timer removeTime; - public LLLocalDictionary(BufferAllocator allocator, + public LLLocalDictionary( @NotNull RocksDBColumn db, String databaseName, String columnName, - Scheduler dbWScheduler, - Scheduler dbRScheduler, Function snapshotResolver, UpdateMode updateMode, DatabaseOptions databaseOptions) { @@ -150,12 +136,8 @@ public class LLLocalDictionary implements LLDictionary { this.cfh = db.getColumnFamilyHandle(); this.databaseName = databaseName; this.columnName = columnName; - this.dbWScheduler = dbWScheduler; - this.dbRScheduler = dbRScheduler; this.snapshotResolver = snapshotResolver; this.updateMode = updateMode; - alloc = allocator; - this.nettyDirect = databaseOptions.allowNettyDirect() && alloc.getAllocationType() == OFF_HEAP; var meterRegistry = db.getMeterRegistry(); this.startedGet = meterRegistry.counter("db.read.map.get.started.counter", "db.name", databaseName, "db.column", columnName); @@ -245,27 +227,15 @@ public class LLLocalDictionary implements LLDictionary { } @Override - public BufferAllocator getAllocator() { - return alloc; + public Buf get(@Nullable LLSnapshot snapshot, Buf key) { + return this.getSync(snapshot, key); } - private @NotNull Mono runOnDb(boolean write, Callable<@Nullable T> callable) { - return Mono.fromCallable(callable).subscribeOn(write ? dbWScheduler : dbRScheduler); - } - - @Override - public Mono get(@Nullable LLSnapshot snapshot, Mono keyMono) { - return Mono.usingWhen(keyMono, - key -> runOnDb(false, () -> this.getSync(snapshot, key)), - LLUtils::finalizeResource - ); - } - - private Buffer getSync(LLSnapshot snapshot, Buffer key) throws IOException { + private Buf getSync(LLSnapshot snapshot, Buf key) { logger.trace(MARKER_ROCKSDB, "Reading {}", () -> toStringSafe(key)); try { var readOptions = generateReadOptionsOrStatic(snapshot); - Buffer result; + Buf result; startedGet.increment(); try { var initTime = System.nanoTime(); @@ -280,58 +250,53 @@ public class LLLocalDictionary implements LLDictionary { logger.trace(MARKER_ROCKSDB, "Read {}: {}", () -> toStringSafe(key), () -> toStringSafe(result)); return result; } catch (RocksDBException ex) { - throw new IOException("Failed to read " + toStringSafe(key) + ": " + ex.getMessage()); + throw new DBException("Failed to read " + toStringSafe(key) + ": " + ex.getMessage()); } } @Override - public Mono isRangeEmpty(@Nullable LLSnapshot snapshot, Mono rangeMono, boolean fillCache) { - return Mono.usingWhen(rangeMono, range -> runOnDb(false, () -> { - assert !Schedulers.isInNonBlockingThread() : "Called isRangeEmpty in a nonblocking thread"; - startedContains.increment(); - try { - Boolean isRangeEmpty = containsTime.recordCallable(() -> { - if (range.isSingle()) { - return !containsKey(snapshot, range.getSingleUnsafe()); - } else { - // Temporary resources to release after finished + public boolean isRangeEmpty(@Nullable LLSnapshot snapshot, LLRange range, boolean fillCache) { + assert !LLUtils.isInNonBlockingThread() : "Called isRangeEmpty in a nonblocking thread"; + startedContains.increment(); + try { + Boolean isRangeEmpty = containsTime.recordCallable(() -> { + if (range.isSingle()) { + return 
!containsKey(snapshot, range.getSingleUnsafe()); + } else { + // Temporary resources to release after finished - try (var readOpts = LLUtils.generateCustomReadOptions(generateReadOptionsOrNull(snapshot), - true, - isBoundedRange(range), - true - )) { - readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); - readOpts.setFillCache(fillCache); - try (var rocksIterator = db.newIterator(readOpts, range.getMinUnsafe(), range.getMaxUnsafe())) { - if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { - if (nettyDirect && isReadOnlyDirect(range.getMinUnsafe())) { - var seekBuf = ((BufferComponent) range.getMinUnsafe()).readableBuffer(); - rocksIterator.seek(seekBuf); - } else { - var seekArray = LLUtils.toArray(range.getMinUnsafe()); - rocksIterator.seek(seekArray); - } - } else { - rocksIterator.seekToFirst(); - } - return !rocksIterator.isValid(); + try (var readOpts = LLUtils.generateCustomReadOptions(generateReadOptionsOrNull(snapshot), + true, + isBoundedRange(range), + true + )) { + readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); + readOpts.setFillCache(fillCache); + var rocksIterator = db.newIterator(readOpts, range.getMin(), range.getMax()); + try { + if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { + var seekArray = LLUtils.asArray(range.getMin()); + rocksIterator.seek(seekArray); + } else { + rocksIterator.seekToFirst(); } + return !rocksIterator.isValid(); + } finally { + rocksIterator.close(); } } - }); - assert isRangeEmpty != null; - return isRangeEmpty; - } catch (RocksDBException ex) { - throw new RocksDBException("Failed to read range " + LLUtils.toStringSafe(range) - + ": " + ex.getMessage()); - } finally { - endedContains.increment(); - } - }), LLUtils::finalizeResource); + } + }); + assert isRangeEmpty != null; + return isRangeEmpty; + } catch (Exception ex) { + throw new DBException("Failed to read range " + LLUtils.toStringSafe(range), ex); + } finally { + endedContains.increment(); + } } - private boolean containsKey(@Nullable LLSnapshot snapshot, Buffer key) throws RocksDBException { + private boolean containsKey(@Nullable LLSnapshot snapshot, Buf key) throws RocksDBException { startedContains.increment(); try { var result = containsTime.recordCallable(() -> { @@ -356,33 +321,14 @@ public class LLLocalDictionary implements LLDictionary { } @Override - public Mono put(Mono keyMono, Mono valueMono, - LLDictionaryResultType resultType) { + public Buf put(Buf key, Buf value, LLDictionaryResultType resultType) { // Obtain the previous value from the database - var previousDataMono = this.getPreviousData(keyMono, resultType); - // Zip the entry to write to the database - var entryMono = Mono.zip(keyMono, valueMono, (k, v) -> LLEntry.of( - k.touch("put entry key"), - v.touch("put entry value") - )); - // Write the new entry to the database - Mono putMono = Mono.usingWhen(entryMono, entry -> runOnDb(true, () -> { - var key = entry.getKeyUnsafe(); - var value = entry.getValueUnsafe(); - assert key != null : "Key is null"; - assert value != null : "Value is null"; - key.touch("Dictionary put key"); - value.touch("Dictionary put value"); - put(key, value); - return null; - }), entry -> Mono.fromRunnable(entry::close)); - // Read the previous data, then write the new data, then return the previous data - return Flux.concatDelayError(Flux.just(previousDataMono, putMono), true, 1).singleOrEmpty(); + var previousData = this.getPreviousData(key, resultType); + putInternal(key, value); + return previousData; } - private void put(Buffer 
key, Buffer value) throws RocksDBException { - assert key.isAccessible(); - assert value.isAccessible(); + private void putInternal(Buf key, Buf value) { if (logger.isTraceEnabled(MARKER_ROCKSDB)) { var varargs = new Supplier[]{() -> toStringSafe(key), () -> toStringSafe(value)}; logger.trace(MARKER_ROCKSDB, "Writing {}: {}", varargs); @@ -390,13 +336,11 @@ public class LLLocalDictionary implements LLDictionary { startedPut.increment(); try (var writeOptions = new WriteOptions()) { putTime.recordCallable(() -> { - key.touch("low-level put key"); - value.touch("low-level put value"); db.put(writeOptions, key, value); return null; }); } catch (RocksDBException ex) { - throw new RocksDBException("Failed to write: " + ex.getMessage()); + throw new DBException("Failed to write: " + ex.getMessage()); } catch (RuntimeException ex) { throw ex; } catch (Exception ex) { @@ -413,230 +357,198 @@ public class LLLocalDictionary implements LLDictionary { @SuppressWarnings("DuplicatedCode") @Override - public Mono update(Mono keyMono, + public Buf update(Buf key, BinarySerializationFunction updater, UpdateReturnMode updateReturnMode) { - return Mono.usingWhen(keyMono, key -> runOnDb(true, () -> { - assert !Schedulers.isInNonBlockingThread() : "Called update in a nonblocking thread"; - if (updateMode == UpdateMode.DISALLOW) { - throw new UnsupportedOperationException("update() is disallowed"); + assert !LLUtils.isInNonBlockingThread() : "Called update in a nonblocking thread"; + if (updateMode == UpdateMode.DISALLOW) { + throw new UnsupportedOperationException("update() is disallowed"); + } + UpdateAtomicResultMode returnMode = switch (updateReturnMode) { + case NOTHING -> UpdateAtomicResultMode.NOTHING; + case GET_NEW_VALUE -> UpdateAtomicResultMode.CURRENT; + case GET_OLD_VALUE -> UpdateAtomicResultMode.PREVIOUS; + }; + UpdateAtomicResult result = null; + try { + var readOptions = generateReadOptionsOrStatic(null); + startedUpdates.increment(); + try (var writeOptions = new WriteOptions()) { + result = updateTime.recordCallable(() -> db.updateAtomic(readOptions, writeOptions, key, updater, returnMode)); + } finally { + endedUpdates.increment(); + if (readOptions != EMPTY_READ_OPTIONS) { + readOptions.close(); + } } - UpdateAtomicResultMode returnMode = switch (updateReturnMode) { - case NOTHING -> UpdateAtomicResultMode.NOTHING; - case GET_NEW_VALUE -> UpdateAtomicResultMode.CURRENT; - case GET_OLD_VALUE -> UpdateAtomicResultMode.PREVIOUS; + assert result != null; + return switch (updateReturnMode) { + case NOTHING -> { + yield null; + } + case GET_NEW_VALUE -> ((UpdateAtomicResultCurrent) result).current(); + case GET_OLD_VALUE -> ((UpdateAtomicResultPrevious) result).previous(); }; - UpdateAtomicResult result = null; - try { - var readOptions = generateReadOptionsOrStatic(null); - startedUpdates.increment(); - try (var writeOptions = new WriteOptions()) { - result = updateTime.recordCallable(() -> db.updateAtomic(readOptions, writeOptions, key, updater, returnMode)); - } finally { - endedUpdates.increment(); - if (readOptions != EMPTY_READ_OPTIONS) { - readOptions.close(); - } - } - assert result != null; - return switch (updateReturnMode) { - case NOTHING -> { - result.close(); - yield null; - } - case GET_NEW_VALUE -> ((UpdateAtomicResultCurrent) result).current(); - case GET_OLD_VALUE -> ((UpdateAtomicResultPrevious) result).previous(); - }; - } catch (Throwable ex) { - if (result != null) { - result.close(); - } - throw ex; - } - }), LLUtils::finalizeResource); + } catch (Exception ex) { + throw 
new DBException("Failed to update key-value pair", ex); + } } @SuppressWarnings("DuplicatedCode") @Override - public Mono updateAndGetDelta(Mono keyMono, BinarySerializationFunction updater) { - return Mono.usingWhen(keyMono, key -> runOnDb(true, () -> { - key.touch("low-level dictionary update"); - assert !Schedulers.isInNonBlockingThread() : "Called update in a nonblocking thread"; - if (updateMode == UpdateMode.DISALLOW) { - throw new UnsupportedOperationException("update() is disallowed"); - } - if (updateMode == UpdateMode.ALLOW && !db.supportsTransactions()) { - throw new UnsupportedOperationException("update() is disallowed because the database doesn't support" - + "safe atomic operations"); - } + public LLDelta updateAndGetDelta(Buf key, BinarySerializationFunction updater) { + assert !LLUtils.isInNonBlockingThread() : "Called update in a nonblocking thread"; + if (updateMode == UpdateMode.DISALLOW) { + throw new UnsupportedOperationException("update() is disallowed"); + } + if (updateMode == UpdateMode.ALLOW && !db.supportsTransactions()) { + throw new UnsupportedOperationException("update() is disallowed because the database doesn't support" + + "safe atomic operations"); + } - UpdateAtomicResultDelta result = null; - try { - var readOptions = generateReadOptionsOrStatic(null); - startedUpdates.increment(); - try (var writeOptions = new WriteOptions()) { - result = updateTime.recordCallable(() -> - (UpdateAtomicResultDelta) db.updateAtomic(readOptions, writeOptions, key, updater, DELTA)); - } finally { - endedUpdates.increment(); - if (readOptions != EMPTY_READ_OPTIONS) { - readOptions.close(); - } + UpdateAtomicResultDelta result = null; + try { + var readOptions = generateReadOptionsOrStatic(null); + startedUpdates.increment(); + try (var writeOptions = new WriteOptions()) { + result = updateTime.recordCallable(() -> + (UpdateAtomicResultDelta) db.updateAtomic(readOptions, writeOptions, key, updater, DELTA)); + } finally { + endedUpdates.increment(); + if (readOptions != EMPTY_READ_OPTIONS) { + readOptions.close(); } - assert result != null; - return result.delta(); - } catch (Throwable ex) { - if (result != null && !result.delta().isClosed()) { - result.close(); - } - throw ex; } - }), LLUtils::finalizeResource); + assert result != null; + return result.delta(); + } catch (Exception ex) { + throw new DBException("Failed to update key-value pair and/or return the delta", ex); + } } @Override - public Mono remove(Mono keyMono, LLDictionaryResultType resultType) { + public Buf remove(Buf key, LLDictionaryResultType resultType) { // Obtain the previous value from the database - Mono previousDataMono = this.getPreviousData(keyMono, resultType); + Buf previousData = this.getPreviousData(key, resultType); // Delete the value from the database - Mono removeMono = Mono.usingWhen(keyMono, key -> runOnDb(true, () -> { - try { - logger.trace(MARKER_ROCKSDB, "Deleting {}", () -> toStringSafe(key)); - startedRemove.increment(); - try (var writeOptions = new WriteOptions()) { - removeTime.recordCallable(() -> { - db.delete(writeOptions, key); - return null; - }); - } finally { - endedRemove.increment(); - } - return null; - } catch (RocksDBException ex) { - throw new RocksDBException("Failed to delete: " + ex.getMessage()); - } - }), LLUtils::finalizeResource); - // Read the previous data, then delete the data, then return the previous data - return Flux.concat(previousDataMono, removeMono).singleOrEmpty(); - } - - private Mono getPreviousData(Mono keyMono, LLDictionaryResultType resultType) 
-		return switch (resultType) {
-			case PREVIOUS_VALUE_EXISTENCE -> Mono.usingWhen(keyMono, key -> runOnDb(false, () -> {
-				var contained = containsKey(null, key);
-				return LLUtils.booleanToResponseByteBuffer(alloc, contained);
-			}), LLUtils::finalizeResource);
-			case PREVIOUS_VALUE -> Mono.usingWhen(keyMono, key -> runOnDb(false, () -> {
-				assert !Schedulers.isInNonBlockingThread() : "Called getPreviousData in a nonblocking thread";
-				Buffer result;
-				var readOptions = generateReadOptionsOrStatic(null);
-				try {
-					result = db.get(readOptions, key);
-				} finally {
-					if (readOptions != EMPTY_READ_OPTIONS) {
-						readOptions.close();
-					}
-				}
-				logger.trace(MARKER_ROCKSDB, "Read {}: {}", () -> toStringSafe(key), () -> toStringSafe(result));
-				return result;
-			}), LLUtils::finalizeResource);
-			case VOID -> Mono.empty();
-		};
-	}
-
-	@Override
-	public Flux getMulti(@Nullable LLSnapshot snapshot, Flux keys) {
-		return keys
-				.publishOn(dbRScheduler)
-				.handle((key, sink) -> {
-					try (key) {
-						sink.next(OptionalBuf.ofNullable(getSync(snapshot, key)));
-					} catch (IOException ex) {
-						sink.error(ex);
-					}
+		try {
+			logger.trace(MARKER_ROCKSDB, "Deleting {}", () -> toStringSafe(key));
+			startedRemove.increment();
+			try (var writeOptions = new WriteOptions()) {
+				removeTime.recordCallable(() -> {
+					db.delete(writeOptions, key);
+					return null;
 				});
+			} finally {
+				endedRemove.increment();
+			}
+			return previousData;
+		} catch (Exception ex) {
+			throw new DBException("Failed to delete", ex);
+		}
 	}

+	private Buf getPreviousData(Buf key, LLDictionaryResultType resultType) {
+		try {
+			return switch (resultType) {
+				case PREVIOUS_VALUE_EXISTENCE -> {
+					var contained = containsKey(null, key);
+					yield LLUtils.booleanToResponseByteBuffer(contained);
+				}
+				case PREVIOUS_VALUE -> {
+					assert !LLUtils.isInNonBlockingThread() : "Called getPreviousData in a nonblocking thread";
+					Buf result;
+					var readOptions = generateReadOptionsOrStatic(null);
+					try {
+						result = db.get(readOptions, key);
 					} finally {
+						if (readOptions != EMPTY_READ_OPTIONS) {
+							readOptions.close();
 						}
 					}
+					logger.trace(MARKER_ROCKSDB, "Read {}: {}", () -> toStringSafe(key), () -> toStringSafe(result));
+					yield result;
+				}
+				case VOID -> null;
+			};
+		} catch (RocksDBException ex) {
+			throw new DBException("Failed to read previous data", ex);
+		}
+	}
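The single-key operations above now return values directly instead of wrapping them in Mono. A rough caller-side sketch, assuming an LLDictionary instance `dict` and that BinarySerializationFunction maps the previous serialized value (or null) to the new one (those assumptions are not spelled out in this patch):

    // Hypothetical caller of the blocking LLDictionary API shown above.
    Buf key = Buf.wrap("k1".getBytes(StandardCharsets.UTF_8));
    Buf value = Buf.wrap("v1".getBytes(StandardCharsets.UTF_8));
    Buf previous = dict.put(key, value, LLDictionaryResultType.PREVIOUS_VALUE); // old value or null
    Buf updated = dict.update(key, old -> old == null ? value : old, UpdateReturnMode.GET_NEW_VALUE);
    dict.remove(key, LLDictionaryResultType.VOID); // failures surface as unchecked DBException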
 	@Override
-	public Flux updateMulti(Flux keys, Flux serializedKeys,
-			KVSerializationFunction updateFunction) {
-		return Flux.zip(keys, serializedKeys)
-				.buffer(Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP))
-				.flatMapSequential(entriesWindow -> this.>runOnDb(true, () -> {
+	public Stream getMulti(@Nullable LLSnapshot snapshot, Stream keys) {
+		return keys.map(key -> {
+			try {
+				return OptionalBuf.ofNullable(getSync(snapshot, key));
+			} catch (IOException e) {
+				throw new CompletionException(e);
+			}
+		});
+	}
+
+	@Override
+	public void putMulti(Stream entries) {
+		batches(entries, Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP)).parallel().forEach(entriesWindow -> {
+			try (var writeOptions = new WriteOptions()) {
+				assert !LLUtils.isInNonBlockingThread() : "Called putMulti in a nonblocking thread";
+				if (USE_WRITE_BATCHES_IN_PUT_MULTI) {
+					try (var batch = new CappedWriteBatch(db,
+							CAPPED_WRITE_BATCH_CAP,
+							RESERVED_WRITE_BATCH_SIZE,
+							MAX_WRITE_BATCH_SIZE,
+							writeOptions
+					)) {
+						for (LLEntry entry : entriesWindow) {
+							batch.put(cfh, entry.getKey(), entry.getValue());
+						}
+						batch.flush();
+					}
+				} else {
+					for (LLEntry entry : entriesWindow) {
+						db.put(writeOptions, entry.getKey(), entry.getValue());
+					}
+				}
+			} catch (RocksDBException ex) {
+				throw new CompletionException(new DBException("Failed to write: " + ex.getMessage()));
+			}
+		});
+	}
+
+	@Override
+	public Stream updateMulti(Stream keys, Stream serializedKeys,
+			KVSerializationFunction updateFunction) {
+		record Key(K key, Buf serializedKey) {}
+		record MappedInput(K key, Buf serializedKey, OptionalBuf mapped) {}
+		return batches(Streams.zip(keys, serializedKeys, Key::new), Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP))
+				.parallel()
+				.flatMap(entriesWindow -> {
 					try (var writeOptions = new WriteOptions()) {
-						if (Schedulers.isInNonBlockingThread()) {
+						if (LLUtils.isInNonBlockingThread()) {
 							throw new UnsupportedOperationException("Called updateMulti in a nonblocking thread");
 						}
-						List keyBufsWindow = new ArrayList<>(entriesWindow.size());
-						for (Tuple2 objects : entriesWindow) {
-							keyBufsWindow.add(objects.getT2());
+						List keyBufsWindow = new ArrayList<>(entriesWindow.size());
+						for (Key objects : entriesWindow) {
+							keyBufsWindow.add(objects.serializedKey());
 						}
-						ArrayList> mappedInputs;
+						ArrayList> mappedInputs;
 						{
 							var readOptions = generateReadOptionsOrStatic(null);
 							try {
-								var inputs = db.multiGetAsList(readOptions, LLUtils.toArray(keyBufsWindow));
+								var inputs = db.multiGetAsList(readOptions, Lists.transform(keyBufsWindow, Buf::asArray));
 								mappedInputs = new ArrayList<>(inputs.size());
 								for (int i = 0; i < inputs.size(); i++) {
 									var val = inputs.get(i);
 									if (val != null) {
 										inputs.set(i, null);
-										mappedInputs.add(Tuples.of(
-												entriesWindow.get(i).getT1(),
+										mappedInputs.add(new MappedInput<>(
+												entriesWindow.get(i).key(),
 												keyBufsWindow.get(i),
-												OptionalBuf.of(fromByteArray(alloc, val))
+												OptionalBuf.of(Buf.wrap(val))
 										));
 									} else {
-										mappedInputs.add(Tuples.of(
-												entriesWindow.get(i).getT1(),
+										mappedInputs.add(new MappedInput<>(
+												entriesWindow.get(i).key(),
 												keyBufsWindow.get(i),
 												OptionalBuf.empty()
 										));
@@ -648,603 +560,479 @@ public class LLLocalDictionary implements LLDictionary {
 									}
 								}
 							}
 						}
-						var updatedValuesToWrite = new ArrayList(mappedInputs.size());
+						var updatedValuesToWrite = new ArrayList(mappedInputs.size());
 						var valueChangedResult = new ArrayList(mappedInputs.size());
-						try {
-							for (var mappedInput : mappedInputs) {
-								var updatedValue = updateFunction.apply(mappedInput.getT1(), mappedInput.getT2());
-								try {
-									if (updatedValue != null) {
-										try (var t3 =
mappedInput.getT3().orElse(null)) { - valueChangedResult.add(!LLUtils.equals(t3, updatedValue)); - } - updatedValuesToWrite.add(updatedValue); - } else { - try (var t3 = mappedInput.getT3().orElse(null)) { - valueChangedResult.add(!LLUtils.equals(t3, null)); - } - updatedValuesToWrite.add(null); - } - } catch (Throwable t) { - if (updatedValue != null) { - updatedValue.close(); - } - throw t; - } - } - } finally { - for (var mappedInput : mappedInputs) { - mappedInput.getT3().ifPresent(LLUtils::finalizeResourceNow); - } + for (var mappedInput : mappedInputs) { + var updatedValue = updateFunction.apply(mappedInput.key(), mappedInput.serializedKey()); + var t3 = mappedInput.mapped().orElse(null); + valueChangedResult.add(!LLUtils.equals(t3, updatedValue)); + updatedValuesToWrite.add(updatedValue); } if (USE_WRITE_BATCHES_IN_PUT_MULTI) { - var batch = new CappedWriteBatch(db, - alloc, - CAPPED_WRITE_BATCH_CAP, - RESERVED_WRITE_BATCH_SIZE, - MAX_WRITE_BATCH_SIZE, - writeOptions - ); - try { - int i = 0; - for (Tuple2 entry : entriesWindow) { - try (var valueToWrite = updatedValuesToWrite.get(i)) { - if (valueToWrite == null) { - batch.delete(cfh, entry.getT2().send()); - } else { - batch.put(cfh, entry.getT2().send(), valueToWrite.send()); - } - } - i++; - } - batch.flush(); - } finally { - batch.releaseAllBuffers(); - batch.close(); - } - } else { - int i = 0; - for (Tuple2 entry : entriesWindow) { - db.put(writeOptions, entry.getT2(), updatedValuesToWrite.get(i)); - i++; - } - } - return valueChangedResult; - } finally { - for (Tuple2 tuple : entriesWindow) { - tuple.getT2().close(); - } - } - }).flatMapIterable(list -> list), /* Max concurrency is 2 to update data while preparing the next segment */ 2); - } - - @Override - public Flux getRange(@Nullable LLSnapshot snapshot, - Mono rangeMono, - boolean reverse, - boolean smallRange) { - return Flux.usingWhen(rangeMono, range -> { - if (range.isSingle()) { - var rangeSingleMono = rangeMono.map(llRange -> llRange.getSingleUnsafe()); - return getRangeSingle(snapshot, rangeSingleMono); - } else { - return getRangeMulti(snapshot, rangeMono, reverse, smallRange); - } - }, LLUtils::finalizeResource); - } - - @Override - public Flux> getRangeGrouped(@Nullable LLSnapshot snapshot, - Mono rangeMono, - int prefixLength, - boolean smallRange) { - return Flux.usingWhen(rangeMono, range -> { - if (range.isSingle()) { - var rangeSingleMono = rangeMono.map(llRange -> llRange.getSingleUnsafe()); - - return getRangeSingle(snapshot, rangeSingleMono).map(List::of); - } else { - return getRangeMultiGrouped(snapshot, rangeMono, prefixLength, smallRange); - } - }, LLUtils::finalizeResource); - } - - private Flux getRangeSingle(LLSnapshot snapshot, Mono keyMono) { - return Mono.zip(keyMono, this.get(snapshot, keyMono), LLEntry::of).flux(); - } - - private Flux getRangeMulti(LLSnapshot snapshot, - Mono rangeMono, - boolean reverse, - boolean smallRange) { - return new LLLocalEntryReactiveRocksIterator(db, - rangeMono, - nettyDirect, - () -> generateReadOptionsOrNull(snapshot), - reverse, - smallRange - ).flux().subscribeOn(dbRScheduler, false); - } - - private Flux> getRangeMultiGrouped(LLSnapshot snapshot, Mono rangeMono, - int prefixLength, boolean smallRange) { - return new LLLocalGroupedEntryReactiveRocksIterator(db, - prefixLength, - rangeMono, - nettyDirect, - () -> generateReadOptionsOrNull(snapshot), - smallRange - ).flux().subscribeOn(dbRScheduler, false); - } - - @Override - public Flux getRangeKeys(@Nullable LLSnapshot snapshot, - Mono rangeMono, - 
boolean reverse, - boolean smallRange) { - return rangeMono.flatMapMany(range -> { - try { - if (range.isSingle()) { - return this.getRangeKeysSingle(snapshot, rangeMono.map(llRange -> llRange.getSingleUnsafe())); - } else { - return this.getRangeKeysMulti(snapshot, rangeMono, reverse, smallRange); - } - } finally { - if (range != null && !range.isClosed()) { - range.close(); - } - } - }); - } - - @Override - public Flux> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, - Mono rangeMono, - int prefixLength, - boolean smallRange) { - return new LLLocalGroupedKeyReactiveRocksIterator(db, - prefixLength, - rangeMono, - nettyDirect, - () -> generateReadOptionsOrNull(snapshot), - smallRange - ).flux().subscribeOn(dbRScheduler, false); - } - - @Override - public Flux badBlocks(Mono rangeMono) { - return Flux.usingWhen(rangeMono, - range -> Flux - .create(sink -> { - try (var ro = LLUtils.generateCustomReadOptions(null, - false, - isBoundedRange(range), - false - )) { - ro.setFillCache(false); - if (!range.isSingle()) { - if (LLUtils.MANUAL_READAHEAD) { - ro.setReadaheadSize(32 * 1024); - } - } - ro.setVerifyChecksums(true); - try (var rocksIterator = db.newRocksIterator(nettyDirect, ro, range, false)) { - rocksIterator.seekToFirst(); - while (rocksIterator.isValid() && !sink.isCancelled()) { - try { - rocksIterator.key(DUMMY_WRITE_ONLY_BYTE_BUFFER); - rocksIterator.value(DUMMY_WRITE_ONLY_BYTE_BUFFER); - rocksIterator.next(); - } catch (RocksDBException ex) { - sink.next(new BadBlock(databaseName, ColumnUtils.special(columnName), null, ex)); - } - } - } - sink.complete(); - } catch (Throwable ex) { - sink.error(ex); - } - }) - .subscribeOn(dbRScheduler), - LLUtils::finalizeResource - ); - } - - @Override - public Flux getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono rangeMono, - int prefixLength, boolean smallRange) { - return new LLLocalKeyPrefixReactiveRocksIterator(db, - prefixLength, - rangeMono, - nettyDirect, - () -> generateReadOptionsOrNull(snapshot), - true, - smallRange - ).flux().subscribeOn(dbRScheduler); - } - - private Flux getRangeKeysSingle(LLSnapshot snapshot, Mono keyMono) { - return Mono.usingWhen(keyMono, key -> runOnDb(false, () -> { - if (containsKey(snapshot, key)) { - return key; - } else { - return null; - } - }), LLUtils::finalizeResource).flux(); - } - - private record RocksObjTuple>(T t1, U t2) implements - DiscardingCloseable { - - @Override - public void close() { - //noinspection EmptyTryBlock - try (t1; t2) {} - } - } - - private Flux getRangeKeysMulti(LLSnapshot snapshot, - Mono rangeMono, - boolean reverse, - boolean smallRange) { - return new LLLocalKeyReactiveRocksIterator(db, - rangeMono, - nettyDirect, - () -> generateReadOptionsOrNull(snapshot), - reverse, - smallRange - ).flux().subscribeOn(dbRScheduler, false); - } - - @Override - public Mono setRange(Mono rangeMono, Flux entries, boolean smallRange) { - if (USE_WINDOW_IN_SET_RANGE) { - return Mono - .usingWhen(rangeMono, range -> runOnDb(true, () -> { - try (var writeOptions = new WriteOptions()) { - assert !Schedulers.isInNonBlockingThread() : "Called setRange in a nonblocking thread"; - if (!USE_WRITE_BATCH_IN_SET_RANGE_DELETE || !USE_WRITE_BATCHES_IN_SET_RANGE) { - try (var opts = LLUtils.generateCustomReadOptions(null, true, isBoundedRange(range), smallRange)) { - try (var it = db.newIterator(opts, range.getMinUnsafe(), range.getMaxUnsafe())) { - if (!PREFER_AUTO_SEEK_BOUND && range.hasMin()) { - it.seekTo(range.getMinUnsafe()); - } else { - it.seekToFirst(); - } - while (it.isValid()) { - 
db.delete(writeOptions, it.key()); - it.next(); - } - } - } - } else if (USE_CAPPED_WRITE_BATCH_IN_SET_RANGE) { - var batch = new CappedWriteBatch(db, - alloc, + try (var batch = new CappedWriteBatch(db, CAPPED_WRITE_BATCH_CAP, RESERVED_WRITE_BATCH_SIZE, MAX_WRITE_BATCH_SIZE, writeOptions - ); - try { - if (range.isSingle()) { - batch.delete(cfh, range.getSingle()); + )) { + int i = 0; + for (Key entry : entriesWindow) { + var valueToWrite = updatedValuesToWrite.get(i); + if (valueToWrite == null) { + batch.delete(cfh, entry.serializedKey()); } else { - deleteSmallRangeWriteBatch(batch, range.copy()); + batch.put(cfh, entry.serializedKey(), valueToWrite); + } + i++; + } + batch.flush(); + } + } else { + int i = 0; + for (Key entry : entriesWindow) { + db.put(writeOptions, entry.serializedKey(), updatedValuesToWrite.get(i)); + i++; + } + } + return valueChangedResult.stream(); + } catch (RocksDBException e) { + throw new CompletionException(new DBException("Failed to update multiple key-value pairs", e)); + } + }); + } + + @Override + public Stream getRange(@Nullable LLSnapshot snapshot, + LLRange range, + boolean reverse, + boolean smallRange) { + if (range.isSingle()) { + var rangeSingle = range.getSingle(); + return getRangeSingle(snapshot, rangeSingle); + } else { + return getRangeMulti(snapshot, range, reverse, smallRange); + } + } + + @Override + public Stream> getRangeGrouped(@Nullable LLSnapshot snapshot, + LLRange range, + int prefixLength, + boolean smallRange) { + if (range.isSingle()) { + var rangeSingle = range.getSingle(); + + return Stream.of(getRangeSingle(snapshot, rangeSingle).toList()); + } else { + return getRangeMultiGrouped(snapshot, range, prefixLength, smallRange); + } + } + + private Stream getRangeSingle(LLSnapshot snapshot, Buf key) { + var val = this.get(snapshot, key); + if (val == null) return Stream.of(); + return Stream.of(LLEntry.of(key, val)); + } + + private Stream getRangeMulti(LLSnapshot snapshot, + LLRange range, + boolean reverse, + boolean smallRange) { + return new LLLocalEntryReactiveRocksIterator(db, + range, + () -> generateReadOptionsOrNull(snapshot), + reverse, + smallRange + ).stream(); + } + + private Stream> getRangeMultiGrouped(LLSnapshot snapshot, LLRange range, + int prefixLength, boolean smallRange) { + return new LLLocalGroupedEntryReactiveRocksIterator(db, + prefixLength, + range, + () -> generateReadOptionsOrNull(snapshot), + smallRange + ).stream(); + } + + @Override + public Stream getRangeKeys(@Nullable LLSnapshot snapshot, + LLRange range, + boolean reverse, + boolean smallRange) { + if (range.isSingle()) { + return this.getRangeKeysSingle(snapshot, range.getSingle()); + } else { + return this.getRangeKeysMulti(snapshot, range, reverse, smallRange); + } + } + + @Override + public Stream> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, + LLRange range, + int prefixLength, + boolean smallRange) { + return new LLLocalGroupedKeyReactiveRocksIterator(db, + prefixLength, + range, + () -> generateReadOptionsOrNull(snapshot), + smallRange + ).stream(); + } + + @Override + public Stream badBlocks(LLRange range) { + try { + var ro = LLUtils.generateCustomReadOptions(null, + false, + isBoundedRange(range), + false + ); + ro.setFillCache(false); + if (!range.isSingle()) { + if (LLUtils.MANUAL_READAHEAD) { + ro.setReadaheadSize(32 * 1024); + } + } + ro.setVerifyChecksums(true); + var rocksIterator = db.newRocksIterator(ro, range, false); + try { + rocksIterator.seekToFirst(); + } catch (Exception ex) { + rocksIterator.close(); + ro.close(); 
+ throw new DBException("Failed to open rocksdb iterator", ex); + } + return Stream.generate(() -> { + if (!rocksIterator.isValid()) return null; + Buf rawKey = null; + try { + rawKey = rocksIterator.keyBuf().copy(); + rocksIterator.next(); + } catch (RocksDBException ex) { + return new BadBlock(databaseName, ColumnUtils.special(columnName), rawKey, ex); + } + return null; + }).takeWhile(x -> rocksIterator.isValid()).filter(Objects::nonNull).onClose(() -> { + rocksIterator.close(); + ro.close(); + }); + } catch (RocksDBException e) { + throw new DBException("Failed to get bad blocks", e); + } + } + + @Override + public Stream getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, LLRange range, + int prefixLength, boolean smallRange) { + return new LLLocalKeyPrefixReactiveRocksIterator(db, + prefixLength, + range, + () -> generateReadOptionsOrNull(snapshot), + true, + smallRange + ).stream(); + } + + private Stream getRangeKeysSingle(LLSnapshot snapshot, Buf key) { + try { + if (containsKey(snapshot, key)) { + return Stream.of(key); + } else { + return Stream.empty(); + } + } catch (RocksDBException e) { + throw new DBException("Failed to get range keys", e); + } + } + + private Stream getRangeKeysMulti(LLSnapshot snapshot, + LLRange range, + boolean reverse, + boolean smallRange) { + return new LLLocalKeyReactiveRocksIterator(db, + range, + () -> generateReadOptionsOrNull(snapshot), + reverse, + smallRange + ).stream(); + } + + @Override + public void setRange(LLRange range, Stream entries, boolean smallRange) { + if (USE_WINDOW_IN_SET_RANGE) { + try (var writeOptions = new WriteOptions()) { + assert !LLUtils.isInNonBlockingThread() : "Called setRange in a nonblocking thread"; + if (!USE_WRITE_BATCH_IN_SET_RANGE_DELETE || !USE_WRITE_BATCHES_IN_SET_RANGE) { + try (var opts = LLUtils.generateCustomReadOptions(null, true, isBoundedRange(range), smallRange)) { + try (var it = db.newIterator(opts, range.getMin(), range.getMax())) { + if (!PREFER_AUTO_SEEK_BOUND && range.hasMin()) { + it.seekTo(range.getMin()); + } else { + it.seekToFirst(); + } + while (it.isValid()) { + db.delete(writeOptions, it.key()); + it.next(); + } + } + } + } else if (USE_CAPPED_WRITE_BATCH_IN_SET_RANGE) { + try (var batch = new CappedWriteBatch(db, + CAPPED_WRITE_BATCH_CAP, + RESERVED_WRITE_BATCH_SIZE, + MAX_WRITE_BATCH_SIZE, + writeOptions + )) { + if (range.isSingle()) { + batch.delete(cfh, range.getSingle()); + } else { + deleteSmallRangeWriteBatch(batch, range.copy()); + } + batch.flush(); + } + } else { + try (var batch = new WriteBatch(RESERVED_WRITE_BATCH_SIZE)) { + if (range.isSingle()) { + batch.delete(cfh, LLUtils.asArray(range.getSingleUnsafe())); + } else { + deleteSmallRangeWriteBatch(batch, range.copy()); + } + db.write(writeOptions, batch); + batch.clear(); + } + } + } catch (RocksDBException ex) { + throw new DBException("Failed to set a range: " + ex.getMessage()); + } + + batches(entries, MULTI_GET_WINDOW) + .forEach(entriesList -> { + try (var writeOptions = new WriteOptions()) { + if (!USE_WRITE_BATCHES_IN_SET_RANGE) { + for (LLEntry entry : entriesList) { + db.put(writeOptions, entry.getKey(), entry.getValue()); + } + } else if (USE_CAPPED_WRITE_BATCH_IN_SET_RANGE) { + + try (var batch = new CappedWriteBatch(db, + CAPPED_WRITE_BATCH_CAP, + RESERVED_WRITE_BATCH_SIZE, + MAX_WRITE_BATCH_SIZE, + writeOptions + )) { + for (LLEntry entry : entriesList) { + batch.put(cfh, entry.getKey(), entry.getValue()); } batch.flush(); - } finally { - batch.releaseAllBuffers(); - batch.close(); } } else { try (var batch 
= new WriteBatch(RESERVED_WRITE_BATCH_SIZE)) { - if (range.isSingle()) { - batch.delete(cfh, LLUtils.toArray(range.getSingleUnsafe())); - } else { - deleteSmallRangeWriteBatch(batch, range.copy()); + for (LLEntry entry : entriesList) { + batch.put(cfh, LLUtils.asArray(entry.getKey()), LLUtils.asArray(entry.getValue())); } db.write(writeOptions, batch); batch.clear(); } } - return true; } catch (RocksDBException ex) { - throw new RocksDBException("Failed to set a range: " + ex.getMessage()); + throw new CompletionException(new DBException("Failed to write range", ex)); } - }), LLUtils::finalizeResource) - .thenMany(entries.window(MULTI_GET_WINDOW)) - .flatMap(keysWindowFlux -> keysWindowFlux - .collectList() - .flatMap(entriesList -> this.runOnDb(true, () -> { - try (var writeOptions = new WriteOptions()) { - if (!USE_WRITE_BATCHES_IN_SET_RANGE) { - for (LLEntry entry : entriesList) { - db.put(writeOptions, entry.getKeyUnsafe(), entry.getValueUnsafe()); - } - } else if (USE_CAPPED_WRITE_BATCH_IN_SET_RANGE) { - var batch = new CappedWriteBatch(db, - alloc, - CAPPED_WRITE_BATCH_CAP, - RESERVED_WRITE_BATCH_SIZE, - MAX_WRITE_BATCH_SIZE, - writeOptions); - - try { - for (LLEntry entry : entriesList) { - if (nettyDirect) { - batch.put(cfh, entry.getKeyUnsafe().send(), entry.getValueUnsafe().send()); - } else { - batch.put(cfh, - LLUtils.toArray(entry.getKeyUnsafe()), - LLUtils.toArray(entry.getValueUnsafe()) - ); - } - } - batch.flush(); - } finally { - batch.releaseAllBuffers(); - batch.close(); - } - } else { - try (var batch = new WriteBatch(RESERVED_WRITE_BATCH_SIZE)) { - for (LLEntry entry : entriesList) { - batch.put(cfh, LLUtils.toArray(entry.getKeyUnsafe()), LLUtils.toArray(entry.getValueUnsafe())); - } - db.write(writeOptions, batch); - batch.clear(); - } - } - return null; - } finally { - for (LLEntry entry : entriesList) { - entry.close(); - } - } - }))) - .then() - .onErrorMap(cause -> new IOException("Failed to write range", cause)); + }); } else { if (USE_WRITE_BATCHES_IN_SET_RANGE) { - return Mono.error(() -> new UnsupportedOperationException( - "Can't use write batches in setRange without window. Please fix the parameters")); + throw new UnsupportedOperationException("Can't use write batches in setRange without window. 
Please fix the parameters"); } - var deleteMono = this - .getRange(null, rangeMono, false, smallRange) - .publishOn(dbWScheduler) - .handle((oldValue, sink) -> { - try (var writeOptions = new WriteOptions(); oldValue) { - db.delete(writeOptions, oldValue.getKeyUnsafe()); - sink.next(true); - } catch (RocksDBException ex) { - sink.error(new RocksDBException("Failed to write range: " + ex.getMessage())); - } - }) - .then(Mono.empty()); - - var putMono = entries.publishOn(dbWScheduler).handle((entry, sink) -> { - try (entry) { - if (entry.getKeyUnsafe() != null && entry.getValueUnsafe() != null) { - this.put(entry.getKeyUnsafe(), entry.getValueUnsafe()); - } - sink.next(true); + this.getRange(null, range, false, smallRange).forEach(oldValue -> { + try (var writeOptions = new WriteOptions()) { + db.delete(writeOptions, oldValue.getKey()); } catch (RocksDBException ex) { - sink.error(new RocksDBException("Failed to write range: " + ex.getMessage())); + throw new CompletionException(new DBException("Failed to write range", ex)); } - }).then(Mono.empty()); + }); - return deleteMono.then(putMono); + entries.forEach(entry -> { + try { + if (entry.getKey() != null && entry.getValue() != null) { + this.putInternal(entry.getKey(), entry.getValue()); + } + } catch (IOException ex) { + throw new CompletionException(new DBException("Failed to write range", ex)); + } + }); } } - //todo: this is broken, check why. (is this still true?) - private void deleteSmallRangeWriteBatch(CappedWriteBatch writeBatch, LLRange range) + private void deleteSmallRangeWriteBatch(WriteBatch writeBatch, LLRange range) throws RocksDBException { - try (range; var readOpts = generateReadOptionsOrNew(null)) { - readOpts.setFillCache(false); - try (var rocksIterator = db.newIterator(readOpts, range.getMinUnsafe(), range.getMaxUnsafe())) { + try (var readOpts = LLUtils.generateCustomReadOptions(null, false, isBoundedRange(range), true)) { + try (var rocksIterator = db.newIterator(readOpts, range.getMin(), range.getMax())) { if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { - rocksIterator.seekTo(range.getMinUnsafe()); + rocksIterator.seekTo(range.getMin()); } else { rocksIterator.seekToFirst(); } while (rocksIterator.isValid()) { - writeBatch.delete(cfh, LLUtils.readDirectNioBuffer(alloc, buffer -> rocksIterator.key(buffer)).send()); + writeBatch.delete(cfh, rocksIterator.key()); rocksIterator.next(); } } } } - private void deleteSmallRangeWriteBatch(WriteBatch writeBatch, LLRange range) - throws RocksDBException { - try (range) { - try (var readOpts = LLUtils.generateCustomReadOptions(null, true, isBoundedRange(range), true)) { - readOpts.setFillCache(false); - try (var rocksIterator = db.newIterator(readOpts, range.getMinUnsafe(), range.getMaxUnsafe())) { - if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { - rocksIterator.seekTo(range.getMinUnsafe()); - } else { + public void clear() { + assert !LLUtils.isInNonBlockingThread() : "Called clear in a nonblocking thread"; + boolean shouldCompactLater = false; + try (var writeOptions = new WriteOptions(); + var readOpts = LLUtils.generateCustomReadOptions(null, false, false, false)) { + if (VERIFY_CHECKSUMS_WHEN_NOT_NEEDED) { + readOpts.setVerifyChecksums(true); + } + // readOpts.setIgnoreRangeDeletions(true); + if (LLUtils.MANUAL_READAHEAD) { + readOpts.setReadaheadSize(32 * 1024); // 32KiB + } + try (CappedWriteBatch writeBatch = new CappedWriteBatch(db, + CAPPED_WRITE_BATCH_CAP, + RESERVED_WRITE_BATCH_SIZE, + MAX_WRITE_BATCH_SIZE, + writeOptions + 
)) { + + byte[] firstDeletedKey = null; + byte[] lastDeletedKey = null; + try (var rocksIterator = db.newIterator(readOpts, null, null)) { + // If the database supports transactions, delete each key one by one + if (db.supportsTransactions()) { rocksIterator.seekToFirst(); - } - while (rocksIterator.isValid()) { - writeBatch.delete(cfh, rocksIterator.key()); - rocksIterator.next(); + while (rocksIterator.isValid()) { + writeBatch.delete(cfh, rocksIterator.key()); + rocksIterator.next(); + } + } else { + rocksIterator.seekToLast(); + + if (rocksIterator.isValid()) { + firstDeletedKey = FIRST_KEY; + lastDeletedKey = rocksIterator.key().clone(); + writeBatch.deleteRange(cfh, FIRST_KEY, lastDeletedKey); + writeBatch.delete(cfh, lastDeletedKey); + shouldCompactLater = true; + } } } + + writeBatch.flush(); + + if (shouldCompactLater) { + // Compact range + db.suggestCompactRange(); + if (lastDeletedKey != null) { + try (var cro = new CompactRangeOptions() + .setAllowWriteStall(false) + .setExclusiveManualCompaction(false) + .setChangeLevel(false)) { + db.compactRange(firstDeletedKey, lastDeletedKey, cro); + } + } + } + + try (var fo = new FlushOptions().setWaitForFlush(true).setAllowWriteStall(true)) { + db.flush(fo); + } + db.flushWal(true); } + } catch (RocksDBException ex) { + throw new DBException("Failed to clear", ex); } } - public Mono clear() { - return Mono - .fromCallable(() -> { - assert !Schedulers.isInNonBlockingThread() : "Called clear in a nonblocking thread"; - boolean shouldCompactLater = false; - try (var writeOptions = new WriteOptions(); - var readOpts = LLUtils.generateCustomReadOptions(null, false, false, false)) { - readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); + @Override + public long sizeRange(@Nullable LLSnapshot snapshot, LLRange range, boolean fast) { + try { + assert !LLUtils.isInNonBlockingThread() : "Called sizeRange in a nonblocking thread"; + if (range.isAll()) { + return fast ? 
fastSizeAll(snapshot) : exactSizeAll(snapshot); + } else { + try (var readOpts = LLUtils.generateCustomReadOptions(generateReadOptionsOrNull(snapshot), + false, + isBoundedRange(range), + false + )) { + readOpts.setFillCache(false); + readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); + if (fast) { + readOpts.setIgnoreRangeDeletions(true); - // readOpts.setIgnoreRangeDeletions(true); - readOpts.setFillCache(false); - if (LLUtils.MANUAL_READAHEAD) { - readOpts.setReadaheadSize(32 * 1024); // 32KiB + } + try (var rocksIterator = db.newIterator(readOpts, range.getMin(), range.getMax())) { + if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { + rocksIterator.seekTo(range.getMin()); + } else { + rocksIterator.seekToFirst(); } - CappedWriteBatch writeBatch = new CappedWriteBatch(db, - alloc, - CAPPED_WRITE_BATCH_CAP, - RESERVED_WRITE_BATCH_SIZE, - MAX_WRITE_BATCH_SIZE, - writeOptions - ); - try { - - byte[] firstDeletedKey = null; - byte[] lastDeletedKey = null; - try (var rocksIterator = db.newIterator(readOpts, null, null)) { - // If the database supports transactions, delete each key one by one - if (db.supportsTransactions()) { - rocksIterator.seekToFirst(); - while (rocksIterator.isValid()) { - writeBatch.delete(cfh, rocksIterator.key()); - rocksIterator.next(); - } - } else { - rocksIterator.seekToLast(); - - if (rocksIterator.isValid()) { - firstDeletedKey = FIRST_KEY; - lastDeletedKey = rocksIterator.key(); - writeBatch.deleteRange(cfh, FIRST_KEY, rocksIterator.key()); - writeBatch.delete(cfh, rocksIterator.key()); - shouldCompactLater = true; - } - } - } - - writeBatch.flush(); - - if (shouldCompactLater) { - // Compact range - db.suggestCompactRange(); - if (lastDeletedKey != null) { - try (var cro = new CompactRangeOptions() - .setAllowWriteStall(false) - .setExclusiveManualCompaction(false) - .setChangeLevel(false)) { - db.compactRange(firstDeletedKey, lastDeletedKey, cro); - } - } - } - - try (var fo = new FlushOptions().setWaitForFlush(true).setAllowWriteStall(true)) { - db.flush(fo); - } - db.flushWal(true); - } finally { - writeBatch.releaseAllBuffers(); - writeBatch.close(); + long i = 0; + while (rocksIterator.isValid()) { + rocksIterator.next(); + i++; } + return i; + } + } + } + } catch (RocksDBException ex) { + throw new DBException("Failed to get size of range", ex); + } + } + + @Override + public LLEntry getOne(@Nullable LLSnapshot snapshot, LLRange range) { + try { + assert !LLUtils.isInNonBlockingThread() : "Called getOne in a nonblocking thread"; + try (var readOpts = LLUtils.generateCustomReadOptions(generateReadOptionsOrNull(snapshot), true, true, true)) { + try (var rocksIterator = db.newIterator(readOpts, range.getMin(), range.getMax())) { + if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { + rocksIterator.seekTo(range.getMin()); + } else { + rocksIterator.seekToFirst(); + } + if (rocksIterator.isValid()) { + var keyView = rocksIterator.keyBuf(); + var valueView = rocksIterator.valueBuf(); + return LLEntry.copyOf(keyView, valueView); + } else { return null; } - }) - .onErrorMap(cause -> new IOException("Failed to clear", cause)) - .subscribeOn(dbWScheduler); - + } + } + } catch (RocksDBException ex) { + throw new DBException("Failed to get one entry", ex); + } } @Override - public Mono sizeRange(@Nullable LLSnapshot snapshot, Mono rangeMono, boolean fast) { - return Mono.usingWhen(rangeMono, range -> runOnDb(false, () -> { - try { - assert !Schedulers.isInNonBlockingThread() : "Called sizeRange in a nonblocking thread"; - 
if (range.isAll()) { - return fast ? fastSizeAll(snapshot) : exactSizeAll(snapshot); - } else { - try (var readOpts = LLUtils.generateCustomReadOptions(generateReadOptionsOrNull(snapshot), - false, - isBoundedRange(range), - false - )) { - readOpts.setFillCache(false); - readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED); - if (fast) { - readOpts.setIgnoreRangeDeletions(true); - - } - try (var rocksIterator = db.newIterator(readOpts, range.getMinUnsafe(), range.getMaxUnsafe())) { - if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { - rocksIterator.seekTo(range.getMinUnsafe()); - } else { - rocksIterator.seekToFirst(); - } - long i = 0; - while (rocksIterator.isValid()) { - rocksIterator.next(); - i++; - } - return i; - } + public Buf getOneKey(@Nullable LLSnapshot snapshot, LLRange range) { + try { + assert !LLUtils.isInNonBlockingThread() : "Called getOneKey in a nonblocking thread"; + try (var readOpts = LLUtils.generateCustomReadOptions(generateReadOptionsOrNull(snapshot), true, true, true)) { + try (var rocksIterator = db.newIterator(readOpts, range.getMin(), range.getMax())) { + if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { + rocksIterator.seekTo(range.getMin()); + } else { + rocksIterator.seekToFirst(); + } + if (rocksIterator.isValid()) { + return rocksIterator.keyBuf(); + } else { + return null; } } - } catch (RocksDBException ex) { - throw new RocksDBException("Failed to get size of range: " + ex.getMessage()); } - }), LLUtils::finalizeResource); - } - - @Override - public Mono getOne(@Nullable LLSnapshot snapshot, Mono rangeMono) { - return Mono.usingWhen(rangeMono, range -> runOnDb(false, () -> { - try { - assert !Schedulers.isInNonBlockingThread() : "Called getOne in a nonblocking thread"; - try (var readOpts = LLUtils.generateCustomReadOptions(generateReadOptionsOrNull(snapshot), true, true, true)) { - try (var rocksIterator = db.newIterator(readOpts, range.getMinUnsafe(), range.getMaxUnsafe())) { - if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { - rocksIterator.seekTo(range.getMinUnsafe()); - } else { - rocksIterator.seekToFirst(); - } - if (rocksIterator.isValid()) { - try (var key = LLUtils.readDirectNioBuffer(alloc, buffer -> rocksIterator.key(buffer))) { - try (var value = LLUtils.readDirectNioBuffer(alloc, buffer -> rocksIterator.value(buffer))) { - return LLEntry.of(key.touch("get-one key"), value.touch("get-one value")); - } - } - } else { - return null; - } - } - } - } catch (RocksDBException ex) { - throw new RocksDBException("Failed to get one entry: " + ex.getMessage()); - } - }), LLUtils::finalizeResource); - } - - @Override - public Mono getOneKey(@Nullable LLSnapshot snapshot, Mono rangeMono) { - return Mono.usingWhen(rangeMono, range -> runOnDb(false, () -> { - try { - assert !Schedulers.isInNonBlockingThread() : "Called getOneKey in a nonblocking thread"; - try (var readOpts = LLUtils.generateCustomReadOptions(generateReadOptionsOrNull(snapshot), true, true, true)) { - try (var rocksIterator = db.newIterator(readOpts, range.getMinUnsafe(), range.getMaxUnsafe())) { - if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { - rocksIterator.seekTo(range.getMinUnsafe()); - } else { - rocksIterator.seekToFirst(); - } - if (rocksIterator.isValid()) { - return LLUtils.readDirectNioBuffer(alloc, buffer -> rocksIterator.key(buffer)); - } else { - return null; - } - } - } - } catch (RocksDBException ex) { - throw new RocksDBException("Failed to get one key: " + ex.getMessage()); - } - }), 
LLUtils::finalizeResource); + } catch (RocksDBException ex) { + throw new DBException("Failed to get one key", ex); + } } private long fastSizeAll(@Nullable LLSnapshot snapshot) throws RocksDBException { @@ -1280,7 +1068,7 @@ public class LLLocalDictionary implements LLDictionary { } private long exactSizeAll(@Nullable LLSnapshot snapshot) { - if (Schedulers.isInNonBlockingThread()) { + if (LLUtils.isInNonBlockingThread()) { throw new UnsupportedOperationException("Called exactSizeAll in a nonblocking thread"); } if (snapshot == null && USE_NUM_ENTRIES_PRECISE_COUNTER) { @@ -1368,27 +1156,27 @@ public class LLLocalDictionary implements LLDictionary { } @Override - public Mono removeOne(Mono rangeMono) { - return Mono.usingWhen(rangeMono, range -> runOnDb(true, () -> { - assert !Schedulers.isInNonBlockingThread() : "Called removeOne in a nonblocking thread"; - try (var readOpts = new ReadOptions(); - var writeOpts = new WriteOptions()) { - try (var rocksIterator = db.newIterator(readOpts, range.getMinUnsafe(), range.getMaxUnsafe())) { - if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { - rocksIterator.seekTo(range.getMinUnsafe()); - } else { - rocksIterator.seekToFirst(); - } - if (!rocksIterator.isValid()) { - return null; - } - Buffer key = LLUtils.readDirectNioBuffer(alloc, buffer -> rocksIterator.key(buffer)); - Buffer value = LLUtils.readDirectNioBuffer(alloc, buffer -> rocksIterator.value(buffer)); - db.delete(writeOpts, key); - return LLEntry.of(key, value); + public LLEntry removeOne(LLRange range) { + assert !LLUtils.isInNonBlockingThread() : "Called removeOne in a nonblocking thread"; + try (var readOpts = new ReadOptions(); + var writeOpts = new WriteOptions()) { + try (var rocksIterator = db.newIterator(readOpts, range.getMin(), range.getMax())) { + if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMin()) { + rocksIterator.seekTo(range.getMin()); + } else { + rocksIterator.seekToFirst(); } + if (!rocksIterator.isValid()) { + return null; + } + Buf key = rocksIterator.keyBuf().copy(); + Buf value = rocksIterator.valueBuf().copy(); + db.delete(writeOpts, key); + return LLEntry.of(key, value); + } catch (RocksDBException e) { + throw new DBException("Failed to remove key", e); } - }), LLUtils::finalizeResource); + } } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java index 3924a8c..7a16647 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalEntryReactiveRocksIterator.java @@ -1,31 +1,25 @@ package it.cavallium.dbengine.database.disk; -import io.netty5.buffer.Buffer; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLEntry; import it.cavallium.dbengine.database.LLRange; import java.util.function.Supplier; import org.rocksdb.ReadOptions; -import reactor.core.publisher.Mono; public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksIterator { public LLLocalEntryReactiveRocksIterator(RocksDBColumn db, - Mono rangeMono, - boolean allowNettyDirect, + LLRange range, Supplier readOptions, boolean reverse, boolean smallRange) { - super(db, rangeMono, allowNettyDirect, readOptions, true, reverse, smallRange); + super(db, range, readOptions, true, reverse, smallRange); } @Override - public LLEntry getEntry(Buffer key, Buffer value) { + 
public LLEntry getEntry(Buf key, Buf value) { assert key != null; assert value != null; - return LLEntry.of( - key.touch("iteration entry key"), - value.touch("iteration entry value") - ); + return LLEntry.of(key.copy(), value.copy()); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java index 79c2665..c1978e1 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedEntryReactiveRocksIterator.java @@ -1,32 +1,25 @@ package it.cavallium.dbengine.database.disk; -import io.netty5.buffer.Buffer; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLEntry; import it.cavallium.dbengine.database.LLRange; import java.util.function.Supplier; import org.rocksdb.ReadOptions; -import reactor.core.publisher.Mono; -public class LLLocalGroupedEntryReactiveRocksIterator extends - LLLocalGroupedReactiveRocksIterator { +public class LLLocalGroupedEntryReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator { public LLLocalGroupedEntryReactiveRocksIterator(RocksDBColumn db, int prefixLength, - Mono rangeMono, - boolean allowNettyDirect, + LLRange range, Supplier readOptions, boolean smallRange) { - super(db, prefixLength, rangeMono, allowNettyDirect, readOptions, false, true, smallRange); + super(db, prefixLength, range, readOptions, true, true, smallRange); } @Override - public LLEntry getEntry(Buffer key, Buffer value) { + public LLEntry getEntry(Buf key, Buf value) { assert key != null; assert value != null; - return LLEntry.of( - key.touch("iteration entry key"), - value.touch("iteration entry value") - ); + return LLEntry.of(key.copy(), value.copy()); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java index d7bc379..6742d96 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedKeyReactiveRocksIterator.java @@ -1,28 +1,23 @@ package it.cavallium.dbengine.database.disk; -import io.netty5.buffer.Buffer; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLRange; import java.util.function.Supplier; import org.rocksdb.ReadOptions; -import reactor.core.publisher.Mono; -public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator { +public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator { public LLLocalGroupedKeyReactiveRocksIterator(RocksDBColumn db, int prefixLength, - Mono rangeMono, - boolean allowNettyDirect, + LLRange range, Supplier readOptions, boolean smallRange) { - super(db, prefixLength, rangeMono, allowNettyDirect, readOptions, true, false, smallRange); + super(db, prefixLength, range, readOptions, true, false, smallRange); } @Override - public Buffer getEntry(Buffer key, Buffer value) { - if (value != null) { - value.close(); - } - return key; + public Buf getEntry(Buf key, Buf value) { + assert key != null; + return key.copy(); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java 
b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java index 3e3c171..8f78236 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalGroupedReactiveRocksIterator.java @@ -4,19 +4,23 @@ import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB; import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions; import static it.cavallium.dbengine.database.LLUtils.isBoundedRange; -import io.netty5.buffer.Buffer; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLRange; import it.cavallium.dbengine.database.LLUtils; +import it.cavallium.dbengine.database.disk.rocksdb.RocksIteratorObj; +import it.cavallium.dbengine.utils.DBException; import it.unimi.dsi.fastutil.objects.ObjectArrayList; +import java.io.IOException; import java.util.List; +import java.util.Objects; +import java.util.concurrent.CompletionException; import java.util.function.Supplier; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDBException; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; public abstract class LLLocalGroupedReactiveRocksIterator { @@ -24,8 +28,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator { private final RocksDBColumn db; private final int prefixLength; - private final Mono rangeMono; - private final boolean allowNettyDirect; + private final LLRange range; private final Supplier readOptions; private final boolean canFillCache; private final boolean readValues; @@ -33,97 +36,91 @@ public abstract class LLLocalGroupedReactiveRocksIterator { public LLLocalGroupedReactiveRocksIterator(RocksDBColumn db, int prefixLength, - Mono rangeMono, - boolean allowNettyDirect, + LLRange range, Supplier readOptions, boolean canFillCache, boolean readValues, boolean smallRange) { this.db = db; this.prefixLength = prefixLength; - this.rangeMono = rangeMono; - this.allowNettyDirect = allowNettyDirect; + this.range = range; this.readOptions = readOptions != null ? 
readOptions : ReadOptions::new; this.canFillCache = canFillCache; this.readValues = readValues; this.smallRange = smallRange; } - public final Flux> flux() { - return Flux.usingWhen(rangeMono, range -> Flux.generate(() -> { - var readOptions = generateCustomReadOptions(this.readOptions.get(), true, isBoundedRange(range), smallRange); - if (logger.isTraceEnabled()) { - logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range)); - } - return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(allowNettyDirect, readOptions, range, false)); - }, (tuple, sink) -> { + public final Stream> stream() { + var readOptions = generateCustomReadOptions(this.readOptions.get(), canFillCache, isBoundedRange(range), smallRange); + if (logger.isTraceEnabled()) { + logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range)); + } + + RocksIteratorObj rocksIterator; + try { + rocksIterator = db.newRocksIterator(readOptions, range, false); + } catch (RocksDBException e) { + readOptions.close(); + throw new DBException("Failed to iterate the range", e); + } + + return Stream.>generate(() -> { try { - var rocksIterator = tuple.iter(); ObjectArrayList values = new ObjectArrayList<>(); - Buffer firstGroupKey = null; - try { - while (rocksIterator.isValid()) { - try (Buffer key = LLUtils.readDirectNioBuffer(db.getAllocator(), rocksIterator::key)) { - if (firstGroupKey == null) { - firstGroupKey = key.copy(); - } else if (!LLUtils.equals(firstGroupKey, - firstGroupKey.readerOffset(), - key, - key.readerOffset(), - prefixLength - )) { - break; - } - @Nullable Buffer value; - if (readValues) { - value = LLUtils.readDirectNioBuffer(db.getAllocator(), rocksIterator::value); - } else { - value = null; - } - - if (logger.isTraceEnabled()) { - logger.trace(MARKER_ROCKSDB, - "Range {} is reading {}: {}", - LLUtils.toStringSafe(range), - LLUtils.toStringSafe(key), - LLUtils.toStringSafe(value) - ); - } - - try { - rocksIterator.next(); - T entry = getEntry(key, value); - values.add(entry); - } finally { - if (value != null) { - value.close(); - } - } - } + Buf firstGroupKey = null; + while (rocksIterator.isValid()) { + // Note that the underlying array is subject to changes! + Buf key = rocksIterator.keyBuf(); + if (firstGroupKey == null) { + firstGroupKey = key.copy(); + } else if (!LLUtils.equals(firstGroupKey, 0, key, 0, prefixLength)) { + break; } - } finally { - if (firstGroupKey != null) { - firstGroupKey.close(); + // Note that the underlying array is subject to changes! 
+ @Nullable Buf value; + if (readValues) { + value = rocksIterator.valueBuf(); + } else { + value = null; } + + if (logger.isTraceEnabled()) { + logger.trace(MARKER_ROCKSDB, + "Range {} is reading {}: {}", + LLUtils.toStringSafe(range), + LLUtils.toStringSafe(key), + LLUtils.toStringSafe(value) + ); + } + + rocksIterator.next(); + T entry = getEntry(key, value); + values.add(entry); } if (!values.isEmpty()) { - sink.next(values); + return values; } else { if (logger.isTraceEnabled()) { logger.trace(MARKER_ROCKSDB, "Range {} ended", LLUtils.toStringSafe(range)); } - sink.complete(); + return null; } } catch (RocksDBException ex) { if (logger.isTraceEnabled()) { logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range)); } - sink.error(ex); + throw new CompletionException(new DBException("Range failed", ex)); } - return tuple; - }, RocksIterWithReadOpts::close), LLUtils::finalizeResource); + }).takeWhile(Objects::nonNull).onClose(() -> { + rocksIterator.close(); + readOptions.close(); + }); } - public abstract T getEntry(@Nullable Buffer key, @Nullable Buffer value); + /** + * @param key this buffer content will be changed during the next iteration + * @param value this buffer content will be changed during the next iteration + */ + public abstract T getEntry(@Nullable Buf key, @Nullable Buf value); } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java index 61b2b07..9e14870 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyPrefixReactiveRocksIterator.java @@ -4,21 +4,18 @@ import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB; import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions; import static it.cavallium.dbengine.database.LLUtils.isBoundedRange; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; -import io.netty5.util.Send; -import io.netty5.buffer.internal.ResourceSupport; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLRange; import it.cavallium.dbengine.database.LLUtils; +import it.cavallium.dbengine.utils.DBException; +import java.io.IOException; +import java.util.concurrent.CompletionException; import java.util.function.Supplier; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDBException; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.util.function.Tuples; public class LLLocalKeyPrefixReactiveRocksIterator { @@ -26,75 +23,63 @@ public class LLLocalKeyPrefixReactiveRocksIterator { private final RocksDBColumn db; private final int prefixLength; - private final Mono rangeMono; - private final boolean allowNettyDirect; + private final LLRange range; private final Supplier readOptions; private final boolean canFillCache; private final boolean smallRange; public LLLocalKeyPrefixReactiveRocksIterator(RocksDBColumn db, int prefixLength, - Mono rangeMono, - boolean allowNettyDirect, + LLRange range, Supplier readOptions, boolean canFillCache, boolean smallRange) { this.db = db; this.prefixLength = prefixLength; - this.rangeMono = rangeMono; - this.allowNettyDirect = allowNettyDirect; + this.range = range; this.readOptions = 
readOptions != null ? readOptions : ReadOptions::new; this.canFillCache = canFillCache; this.smallRange = smallRange; } - public Flux flux() { - return Flux.usingWhen(rangeMono, range -> Flux.generate(() -> { + public Stream stream() { + try { var readOptions = generateCustomReadOptions(this.readOptions.get(), canFillCache, isBoundedRange(range), smallRange); if (logger.isTraceEnabled()) { logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range)); } - return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(allowNettyDirect, readOptions, range, false)); - }, (tuple, sink) -> { - try { - var rocksIterator = tuple.iter(); - Buffer firstGroupKey = null; + var rocksIterator = db.newRocksIterator(readOptions, range, false); + + return Stream.generate(() -> { try { + Buf firstGroupKey = null; while (rocksIterator.isValid()) { - Buffer key; - if (allowNettyDirect) { - key = LLUtils.readDirectNioBuffer(db.getAllocator(), buffer -> rocksIterator.key(buffer)); - } else { - key = LLUtils.fromByteArray(db.getAllocator(), rocksIterator.key()); - } - try (key) { - var keyLen = key.readableBytes(); - if (keyLen >= prefixLength) { - if (firstGroupKey == null) { - firstGroupKey = key.copy(); - assert firstGroupKey == null || firstGroupKey.readableBytes() >= prefixLength; - } else if (!LLUtils.equals(firstGroupKey, - firstGroupKey.readerOffset(), - key, - key.readerOffset(), - prefixLength - )) { - break; - } - } else { - logger.error("Skipped a key with length {}, the expected minimum prefix key length is {}!" - + " This key will be dropped", key.readableBytes(), prefixLength); + // Note that the underlying array is subject to changes! + Buf key = rocksIterator.keyBuf(); + var keyLen = key.size(); + if (keyLen >= prefixLength) { + if (firstGroupKey == null) { + firstGroupKey = key.copy(); + assert firstGroupKey == null || firstGroupKey.size() >= prefixLength; + } else if (!LLUtils.equals(firstGroupKey, + 0, + key, + 0, + prefixLength + )) { + break; } - rocksIterator.next(); + } else { + logger.error("Skipped a key with length {}, the expected minimum prefix key length is {}!" 
+ + " This key will be dropped", key.size(), prefixLength); } + rocksIterator.next(); } if (firstGroupKey != null) { - assert firstGroupKey.isAccessible(); - var groupKeyPrefix = firstGroupKey.copy(firstGroupKey.readerOffset(), prefixLength); - assert groupKeyPrefix.isAccessible(); + var groupKeyPrefix = firstGroupKey.subList(0, prefixLength); if (logger.isTraceEnabled()) { logger.trace(MARKER_ROCKSDB, @@ -104,26 +89,26 @@ public class LLLocalKeyPrefixReactiveRocksIterator { ); } - sink.next(groupKeyPrefix); + return groupKeyPrefix; } else { if (logger.isTraceEnabled()) { logger.trace(MARKER_ROCKSDB, "Range {} ended", LLUtils.toStringSafe(range)); } - sink.complete(); + return null; } - } finally { - if (firstGroupKey != null) { - firstGroupKey.close(); + } catch (RocksDBException ex) { + if (logger.isTraceEnabled()) { + logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range)); } + throw new CompletionException(new DBException("Range failed", ex)); } - } catch (RocksDBException ex) { - if (logger.isTraceEnabled()) { - logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range)); - } - sink.error(ex); - } - return tuple; - }, RocksIterWithReadOpts::close), LLUtils::finalizeResource); + }).onClose(() -> { + rocksIterator.close(); + readOptions.close(); + }); + } catch (RocksDBException e) { + throw new DBException("Failed to open stream", e); + } } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java index b0d75a2..66ff3ae 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyReactiveRocksIterator.java @@ -1,28 +1,22 @@ package it.cavallium.dbengine.database.disk; -import io.netty5.buffer.Buffer; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLRange; import java.util.function.Supplier; import org.rocksdb.ReadOptions; -import reactor.core.publisher.Mono; -public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterator { +public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterator { public LLLocalKeyReactiveRocksIterator(RocksDBColumn db, - Mono rangeMono, - boolean allowNettyDirect, + LLRange rangeMono, Supplier readOptions, boolean reverse, boolean smallRange) { - super(db, rangeMono, allowNettyDirect, readOptions, false, reverse, smallRange); + super(db, rangeMono, readOptions, false, reverse, smallRange); } @Override - public Buffer getEntry(Buffer key, Buffer value) { - if (value != null) { - value.close(); - } - return key; + public Buf getEntry(Buf key, Buf value) { + return key != null ? 
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java index cf986bf..6f56c62 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalKeyValueDatabase.java @@ -1,6 +1,5 @@ package it.cavallium.dbengine.database.disk; -import static io.netty5.buffer.StandardAllocationTypes.OFF_HEAP; import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB; import static java.lang.Boolean.parseBoolean; import static java.util.Objects.requireNonNull; @@ -9,8 +8,7 @@ import static org.rocksdb.ColumnFamilyOptionsInterface.DEFAULT_COMPACTION_MEMTAB import io.micrometer.core.instrument.MeterRegistry; import io.micrometer.core.instrument.Tag; import io.micrometer.core.instrument.Timer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.util.internal.PlatformDependent; +import io.netty.util.internal.PlatformDependent; import it.cavallium.data.generator.nativedata.NullableString; import it.cavallium.dbengine.client.Backuppable; import it.cavallium.dbengine.client.MemoryStats; @@ -33,6 +31,7 @@ import it.cavallium.dbengine.rpc.current.data.NamedColumnOptions; import it.cavallium.dbengine.rpc.current.data.NoFilter; import java.io.File; import java.io.IOException; +import it.cavallium.dbengine.utils.DBException; import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; @@ -44,6 +43,7 @@ import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.concurrent.CompletionException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; @@ -55,7 +55,6 @@ import org.apache.commons.lang3.time.StopWatch; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; -import org.reactivestreams.Publisher; import org.rocksdb.AbstractImmutableNativeReference; import org.rocksdb.BlockBasedTableConfig; import org.rocksdb.BloomFilter; @@ -91,11 +90,6 @@ import org.rocksdb.TxnDBWritePolicy; import org.rocksdb.WALRecoveryMode; import org.rocksdb.WriteBufferManager; import org.rocksdb.util.SizeUnit; -import it.cavallium.dbengine.utils.ShortNamedThreadFactory; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Scheduler; -import reactor.core.scheduler.Schedulers; public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDatabase { @@ -108,15 +102,11 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa static { RocksDB.loadLibrary(); - LLUtils.initHooks(); } protected static final Logger logger = LogManager.getLogger(LLLocalKeyValueDatabase.class); - private final BufferAllocator allocator; private final MeterRegistry meterRegistry; - private final Scheduler dbWScheduler; - private final Scheduler dbRScheduler; private final Timer snapshotTime; @@ -142,17 +132,14 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa private volatile boolean closed = false; @SuppressWarnings("SwitchStatementWithTooFewBranches") - public LLLocalKeyValueDatabase(BufferAllocator allocator, - MeterRegistry meterRegistry, + public LLLocalKeyValueDatabase(MeterRegistry meterRegistry, String name, boolean inMemory, @Nullable Path path, List
columns, List handles, - DatabaseOptions databaseOptions) throws IOException { + DatabaseOptions databaseOptions) { this.name = name; - this.allocator = allocator; - boolean nettyDirect = databaseOptions.allowNettyDirect() && allocator.getAllocationType() == OFF_HEAP; this.meterRegistry = meterRegistry; this.snapshotTime = Timer @@ -162,12 +149,10 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa .tags("db.name", name) .register(meterRegistry); - if (nettyDirect) { - if (!PlatformDependent.hasUnsafe()) { - throw new UnsupportedOperationException("Please enable unsafe support or disable netty direct buffers", - PlatformDependent.getUnsafeUnavailabilityCause() - ); - } + if (!PlatformDependent.hasUnsafe()) { + throw new UnsupportedOperationException("Please enable unsafe support or disable netty direct buffers", + PlatformDependent.getUnsafeUnavailabilityCause() + ); } this.enableColumnsBug = "true".equals(databaseOptions.extraFlags().getOrDefault("enableColumnBug", "false")); @@ -419,55 +404,11 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa requireNonNull(path); Path databasesDirPath = path.toAbsolutePath().getParent(); String dbPathString = databasesDirPath.toString() + File.separatorChar + path.getFileName(); - Path dbPath = Paths.get(dbPathString); - this.dbPath = dbPath; + this.dbPath = Paths.get(dbPathString); // Set options this.databaseOptions = databaseOptions; - int threadCap; - if (databaseOptions.lowMemory()) { - threadCap = Math.max(1, Runtime.getRuntime().availableProcessors()); - - this.dbWScheduler = Schedulers.boundedElastic(); - this.dbRScheduler = Schedulers.boundedElastic(); - } else { - // 8 or more - threadCap = Math.max(8, Runtime.getRuntime().availableProcessors()); - { - var threadCapProperty = Integer.parseInt(System.getProperty("it.cavallium.dbengine.scheduler.write.threads", "0")); - if (threadCapProperty > 1) { - threadCap = threadCapProperty; - } - } - if (parseBoolean(System.getProperty("it.cavallium.dbengine.scheduler.write.shared", "false"))) { - this.dbWScheduler = Schedulers.boundedElastic(); - } else { - this.dbWScheduler = Schedulers.newBoundedElastic(threadCap, - Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, - new ShortNamedThreadFactory("db-write-" + name).setDaemon(true).withGroup(new ThreadGroup("database-write")), - 60 - ); - } - // 8 or more - threadCap = Math.max(8, Runtime.getRuntime().availableProcessors()); - { - var threadCapProperty = Integer.parseInt(System.getProperty("it.cavallium.dbengine.scheduler.read.threads", "0")); - if (threadCapProperty > 1) { - threadCap = threadCapProperty; - } - } - if (parseBoolean(System.getProperty("it.cavallium.dbengine.scheduler.read.shared", "false"))) { - this.dbRScheduler = Schedulers.boundedElastic(); - } else { - this.dbRScheduler = Schedulers.newBoundedElastic(threadCap, - Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, - new ShortNamedThreadFactory("db-read-" + name).setDaemon(true).withGroup(new ThreadGroup("database-read")), - 60 - ); - } - } - var statsLevel = System.getProperty("it.cavallium.dbengine.stats.level"); if (statsLevel != null) { this.statistics = registerStatistics(name, rocksdbOptions, meterRegistry, StatsLevel.valueOf(statsLevel)); @@ -531,7 +472,7 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa // compactDb(db, handles); flushDb(db, handles); } catch (RocksDBException ex) { - throw new IOException(ex); + throw new DBException(ex); } try { @@ -677,40 +618,39 @@ public class 
LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa } @Override - public Mono preClose() { - return Mono.fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try { - ensureOpen(); - try (var fo = new FlushOptions().setWaitForFlush(true)) { - flush(fo); - } - db.cancelAllBackgroundWork(true); - return null; - } finally { - closeLock.unlockRead(closeReadLock); + public void preClose() { + var closeReadLock = closeLock.readLock(); + try { + ensureOpen(); + try (var fo = new FlushOptions().setWaitForFlush(true)) { + flush(fo); + } catch (RocksDBException ex) { + throw new DBException(ex); } - }).subscribeOn(dbWScheduler); + db.cancelAllBackgroundWork(true); + } finally { + closeLock.unlockRead(closeReadLock); + } } @Override - protected Mono onPauseForBackup() { - return pauseWrites(); + protected void onPauseForBackup() { + pauseWrites(); } @Override - protected Mono onResumeAfterBackup() { - return resumeWrites(); + protected void onResumeAfterBackup() { + resumeWrites(); } @Override - public Mono ingestSST(Column column, Publisher files, boolean replaceExisting) { + public void ingestSST(Column column, Stream files, boolean replaceExisting) { var columnHandle = handles.get(column); if (columnHandle == null) { logger.warn("Column {} doesn't exist", column); - return Mono.empty(); + return; } - return Flux.from(files).concatMap(sst -> Mono.fromCallable(() -> { + files.forEachOrdered(sst -> { try (var opts = new IngestExternalFileOptions()) { opts.setIngestBehind(!replaceExisting); opts.setSnapshotConsistency(false); @@ -718,10 +658,9 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa opts.setMoveFiles(true); db.ingestExternalFile(columnHandle, List.of(sst.toString()), opts); } catch (RocksDBException ex) { - throw new IOException("Failed to ingest SST file " + sst, ex); + throw new DBException("Failed to ingest SST file " + sst, ex); } - return null; - })).then(); + }); } private record RocksLevelOptions(CompressionType compressionType, CompressionOptions compressionOptions) {} @@ -860,7 +799,7 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa } private void flushDb(RocksDB db, List handles) throws RocksDBException { - if (Schedulers.isInNonBlockingThread()) { + if (LLUtils.isInNonBlockingThread()) { logger.error("Called flushDb in a nonblocking thread"); } // force flush the database @@ -877,7 +816,7 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa @SuppressWarnings("unused") private void compactDb(TransactionDB db, List handles) { - if (Schedulers.isInNonBlockingThread()) { + if (LLUtils.isInNonBlockingThread()) { logger.error("Called compactDb in a nonblocking thread"); } // force compact the database @@ -1185,53 +1124,47 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa } @Override - public Mono getSingleton(byte[] singletonListColumnName, + public LLLocalSingleton getSingleton(byte[] singletonListColumnName, byte[] name, byte @Nullable[] defaultValue) { - return Mono - .fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try { - ensureOpen(); - var cfh = getCfh(singletonListColumnName); - ensureOwned(cfh); - return new LLLocalSingleton( - getRocksDBColumn(db, cfh), - this::getSnapshotLambda, - LLLocalKeyValueDatabase.this.name, - name, - ColumnUtils.toString(singletonListColumnName), - dbWScheduler, dbRScheduler, defaultValue - ); - } finally { - closeLock.unlockRead(closeReadLock); - } - }) -
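
ingestSST above maps replaceExisting onto RocksDB's ingest-behind mode, so replacement ingests sit on top of existing data while non-replacing ones slide underneath it. A sketch of a single ingestion with the options this hunk sets (the column handle and file path are placeholders):

// Sketch: ingesting one external SST file (RocksDB JNI API).
try (IngestExternalFileOptions opts = new IngestExternalFileOptions()) {
	opts.setIngestBehind(true);        // place the file beneath existing data
	opts.setSnapshotConsistency(false);
	opts.setMoveFiles(true);           // move instead of copy when the filesystem allows
	db.ingestExternalFile(columnHandle, List.of("/path/to/file.sst"), opts);
} catch (RocksDBException ex) {
	throw new DBException("Failed to ingest SST file", ex);
}
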
.onErrorMap(cause -> new IOException("Failed to read " + Arrays.toString(name), cause)) - .subscribeOn(dbRScheduler); + var closeReadLock = closeLock.readLock(); + try { + ensureOpen(); + var cfh = getCfh(singletonListColumnName); + ensureOwned(cfh); + return new LLLocalSingleton(getRocksDBColumn(db, cfh), + this::getSnapshotLambda, + LLLocalKeyValueDatabase.this.name, + name, + ColumnUtils.toString(singletonListColumnName), + defaultValue + ); + } catch (RocksDBException ex) { + throw new DBException("Failed to read " + Arrays.toString(name), ex); + } finally { + closeLock.unlockRead(closeReadLock); + } } @Override - public Mono getDictionary(byte[] columnName, UpdateMode updateMode) { - return Mono.fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try { - ensureOpen(); - var cfh = getCfh(columnName); - ensureOwned(cfh); - return new LLLocalDictionary(allocator, - getRocksDBColumn(db, cfh), - name, - ColumnUtils.toString(columnName), - dbWScheduler, - dbRScheduler, snapshot -> getSnapshotLambda(snapshot), - updateMode, - databaseOptions - ); - } finally { - closeLock.unlockRead(closeReadLock); - } - }).subscribeOn(dbRScheduler); + public LLLocalDictionary getDictionary(byte[] columnName, UpdateMode updateMode) { + var closeReadLock = closeLock.readLock(); + try { + ensureOpen(); + var cfh = getCfh(columnName); + ensureOwned(cfh); + return new LLLocalDictionary(getRocksDBColumn(db, cfh), + name, + ColumnUtils.toString(columnName), + this::getSnapshotLambda, + updateMode, + databaseOptions + ); + } catch (RocksDBException e) { + throw new DBException(e); + } finally { + closeLock.unlockRead(closeReadLock); + } } public RocksDBColumn getRocksDBColumn(byte[] columnName) { @@ -1252,12 +1185,9 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa } private RocksDBColumn getRocksDBColumn(RocksDB db, ColumnFamilyHandle cfh) { - var nettyDirect = databaseOptions.allowNettyDirect(); var closeLock = getCloseLock(); if (db instanceof OptimisticTransactionDB optimisticTransactionDB) { return new OptimisticRocksDBColumn(optimisticTransactionDB, - nettyDirect, - allocator, name, cfh, meterRegistry, @@ -1265,15 +1195,13 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa ); } else if (db instanceof TransactionDB transactionDB) { return new PessimisticRocksDBColumn(transactionDB, - nettyDirect, - allocator, name, cfh, meterRegistry, closeLock ); } else { - return new StandardRocksDBColumn(db, nettyDirect, allocator, name, cfh, meterRegistry, closeLock); + return new StandardRocksDBColumn(db, name, cfh, meterRegistry, closeLock); } } @@ -1287,308 +1215,273 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa return databaseOptions; } - public Flux getSSTS() { + public Stream getSSTS() { var paths = convertPaths(dbPath.toAbsolutePath().getParent(), dbPath.getFileName(), databaseOptions.volumes()); - return Mono - .fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try { - ensureOpen(); - return db.getLiveFiles(); - } finally { - closeLock.unlockRead(closeReadLock); - } - }) - .flatMapIterable(liveFiles -> liveFiles.files) - .filter(file -> file.endsWith(".sst")) - .map(file -> file.substring(1)) - .flatMapSequential(file -> Mono.fromCallable(() -> { - { - var path = dbPath.resolve(file); - if (Files.exists(path)) { - return path; + + var closeReadLock = closeLock.readLock(); + try { + ensureOpen(); + return db.getLiveFiles().files.stream() + .filter(file -> file.endsWith(".sst")) + 
.map(file -> file.substring(1)) + .mapMulti((file, sink) -> { + { + var path = dbPath.resolve(file); + if (Files.exists(path)) { + sink.accept(path); + return; + } } - } - for (var volumePath : paths) { - var path = volumePath.path().resolve(file); - if (Files.exists(path)) { - return path; + for (var volumePath : paths) { + var path = volumePath.path().resolve(file); + if (Files.exists(path)) { + sink.accept(path); + return; + } } - } - return null; - }).subscribeOn(Schedulers.boundedElastic())); + }); + } catch (RocksDBException e) { + throw new DBException(e); + } finally { + closeLock.unlockRead(closeReadLock); + } } - public Mono ingestSSTS(Flux sstsFlux) { - return sstsFlux - .map(path -> path.toAbsolutePath().toString()) - .flatMap(sst -> Mono.fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try (var opts = new IngestExternalFileOptions()) { - try { - logger.info("Ingesting SST \"{}\"...", sst); - db.ingestExternalFile(List.of(sst), opts); - logger.info("Ingested SST \"{}\" successfully", sst); - } catch (RocksDBException e) { - logger.error("Can't ingest SST \"{}\"", sst, e); - } - } finally { - closeLock.unlockRead(closeReadLock); - } - return null; - }).subscribeOn(Schedulers.boundedElastic())) - .then(); - } - - @Override - public Mono getMemoryStats() { - return Mono - .fromCallable(() -> { - if (closeRequested || closed) return null; - long closeReadLock = 0; - try { - //noinspection BlockingMethodInNonBlockingContext - closeReadLock = closeLock.tryReadLock(1, TimeUnit.SECONDS); - } catch (InterruptedException ignored) {} - try { - if (closeRequested || closed || closeReadLock == 0) return null; - ensureOpen(); - return new MemoryStats(db.getAggregatedLongProperty("rocksdb.estimate-table-readers-mem"), - db.getAggregatedLongProperty("rocksdb.size-all-mem-tables"), - db.getAggregatedLongProperty("rocksdb.cur-size-all-mem-tables"), - db.getAggregatedLongProperty("rocksdb.estimate-num-keys"), - db.getAggregatedLongProperty("rocksdb.block-cache-usage") / this.handles.size(), - db.getAggregatedLongProperty("rocksdb.block-cache-pinned-usage") / this.handles.size() - ); - } finally { - closeLock.unlockRead(closeReadLock); - } - }) - .onErrorMap(cause -> new IOException("Failed to read memory stats", cause)) - .subscribeOn(dbRScheduler); - } - - @Override - public Mono> getMapProperty(@Nullable Column column, RocksDBMapProperty property) { - return Mono - .fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try { - ensureOpen(); - if (column == null) { - return db.getMapProperty(property.getName()); - } else { - var cfh = requireNonNull(handles.get(column)); - return db.getMapProperty(cfh, property.getName()); - } - } finally { - closeLock.unlockRead(closeReadLock); - } - }) - .transform(this::convertNotFoundToEmpty) - .onErrorMap(cause -> new IOException("Failed to read property " + property.name(), cause)) - .subscribeOn(dbRScheduler); - } - - @Override - public Flux>> getMapColumnProperties(RocksDBMapProperty property) { - return Flux - .fromIterable(getAllColumnFamilyHandles().keySet()) - .flatMapSequential(c -> this - .getMapProperty(c, property) - .map(result -> new ColumnProperty<>(c.name(), property.getName(), result))); - } - - @Override - public Mono getStringProperty(@Nullable Column column, RocksDBStringProperty property) { - return Mono - .fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try { - ensureOpen(); - if (column == null) { - return db.getProperty(property.getName()); - } else { - var cfh = 
requireNonNull(handles.get(column)); - return db.getProperty(cfh, property.getName()); - } - } finally { - closeLock.unlockRead(closeReadLock); - } - }) - .transform(this::convertNotFoundToEmpty) - .onErrorMap(cause -> new IOException("Failed to read property " + property.name(), cause)) - .subscribeOn(dbRScheduler); - } - - @Override - public Flux> getStringColumnProperties(RocksDBStringProperty property) { - return Flux - .fromIterable(getAllColumnFamilyHandles().keySet()) - .flatMapSequential(c -> this - .getStringProperty(c, property) - .map(result -> new ColumnProperty<>(c.name(), property.getName(), result))); - } - - @Override - public Mono getLongProperty(@Nullable Column column, RocksDBLongProperty property) { - return Mono - .fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try { - ensureOpen(); - if (column == null) { - return db.getLongProperty(property.getName()); - } else { - var cfh = requireNonNull(handles.get(column)); - return db.getLongProperty(cfh, property.getName()); - } - } finally { - closeLock.unlockRead(closeReadLock); - } - }) - .transform(this::convertNotFoundToEmpty) - .onErrorMap(cause -> new IOException("Failed to read property " + property.name(), cause)) - .subscribeOn(dbRScheduler); - } - - @Override - public Flux> getLongColumnProperties(RocksDBLongProperty property) { - return Flux - .fromIterable(getAllColumnFamilyHandles().keySet()) - .flatMapSequential(c -> this - .getLongProperty(c, property) - .map(result -> new ColumnProperty<>(c.name(), property.getName(), result))); - } - - @Override - public Mono getAggregatedLongProperty(RocksDBLongProperty property) { - return Mono - .fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try { - ensureOpen(); - return db.getAggregatedLongProperty(property.getName()); - } finally { - closeLock.unlockRead(closeReadLock); - } - }) - .transform(this::convertNotFoundToEmpty) - .onErrorMap(cause -> new IOException("Failed to read property " + property.name(), cause)) - .subscribeOn(dbRScheduler); - } - - private Mono convertNotFoundToEmpty(Mono mono) { - return mono.onErrorResume(RocksDBException.class, ex -> { - if (ex.getMessage().equals("NotFound")) { - return Mono.empty(); - } else { - return Mono.error(ex); + public void ingestSSTS(Stream sstsFlux) { + sstsFlux.map(path -> path.toAbsolutePath().toString()).forEachOrdered(sst -> { + var closeReadLock = closeLock.readLock(); + try (var opts = new IngestExternalFileOptions()) { + try { + logger.info("Ingesting SST \"{}\"...", sst); + db.ingestExternalFile(List.of(sst), opts); + logger.info("Ingested SST \"{}\" successfully", sst); + } catch (RocksDBException e) { + logger.error("Can't ingest SST \"{}\"", sst, e); + } + } finally { + closeLock.unlockRead(closeReadLock); } }); } @Override - public Mono getRocksDBStats() { - return Mono - .fromCallable(() -> { - if (closeRequested || closed) return null; - long closeReadLock = 0; - try { - //noinspection BlockingMethodInNonBlockingContext - closeReadLock = closeLock.tryReadLock(1, TimeUnit.SECONDS); - } catch (InterruptedException ignored) {} - try { - if (closeRequested || closed || closeReadLock == 0) return null; - ensureOpen(); - StringBuilder aggregatedStats = new StringBuilder(); - for (var entry : this.handles.entrySet()) { - aggregatedStats - .append(entry.getKey().name()) - .append("\n") - .append(db.getProperty(entry.getValue(), "rocksdb.stats")) - .append("\n"); - } - return aggregatedStats.toString(); - } finally { - closeLock.unlockRead(closeReadLock); - } - }) - 
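
The property getters in this file drop Reactor's convertNotFoundToEmpty in favor of a direct check: a RocksDBException whose message is "NotFound" becomes a null return, anything else is wrapped in DBException. A condensed sketch of that convention (the property name is just an example):

// Sketch of the NotFound-to-null convention used by the new property getters.
try {
	return db.getAggregatedLongProperty("rocksdb.estimate-num-keys");
} catch (RocksDBException e) {
	if ("NotFound".equals(e.getMessage())) {
		return null; // the property is absent, not an error
	}
	throw new DBException("Failed to read property", e);
}
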
.onErrorMap(cause -> new IOException("Failed to read stats", cause)) - .subscribeOn(dbRScheduler); + public MemoryStats getMemoryStats() { + if (closeRequested || closed) return null; + long closeReadLock = 0; + try { + //noinspection BlockingMethodInNonBlockingContext + closeReadLock = closeLock.tryReadLock(1, TimeUnit.SECONDS); + } catch (InterruptedException ignored) {} + try { + if (closeRequested || closed || closeReadLock == 0) return null; + ensureOpen(); + return new MemoryStats(db.getAggregatedLongProperty("rocksdb.estimate-table-readers-mem"), + db.getAggregatedLongProperty("rocksdb.size-all-mem-tables"), + db.getAggregatedLongProperty("rocksdb.cur-size-all-mem-tables"), + db.getAggregatedLongProperty("rocksdb.estimate-num-keys"), + db.getAggregatedLongProperty("rocksdb.block-cache-usage") / this.handles.size(), + db.getAggregatedLongProperty("rocksdb.block-cache-pinned-usage") / this.handles.size() + ); + } catch (RocksDBException e) { + throw new DBException("Failed to read memory stats", e); + } finally { + closeLock.unlockRead(closeReadLock); + } } @Override - public Flux getTableProperties() { - return Flux - .fromIterable(handles.entrySet()) - .flatMapSequential(handle -> Mono - .fromCallable(() -> { - if (closeRequested || closed) return null; - long closeReadLock = 0; - try { - //noinspection BlockingMethodInNonBlockingContext - closeReadLock = closeLock.tryReadLock(1, TimeUnit.SECONDS); - } catch (InterruptedException ignored) {} - try { - if (closeRequested || closed || closeReadLock == 0) return null; - ensureOpen(); - return db.getPropertiesOfAllTables(handle.getValue()); - } finally { - closeLock.unlockRead(closeReadLock); - } - }) - .subscribeOn(dbRScheduler) - .flatMapIterable(Map::entrySet) - .map(entry -> new TableWithProperties(handle.getKey().name(), entry.getKey(), entry.getValue())) - ) - .onErrorMap(cause -> new IOException("Failed to read stats", cause)); - } - - @Override - public Mono verifyChecksum() { - return Mono - .fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try { - ensureOpen(); - db.verifyChecksum(); - } finally { - closeLock.unlockRead(closeReadLock); - } - return null; - }) - .onErrorMap(cause -> new IOException("Failed to verify checksum of database \"" - + getDatabaseName() + "\"", cause)) - .subscribeOn(dbRScheduler); - } - - @Override - public Mono compact() { - return Mono.fromCallable(() -> { - this.forceCompaction(getLastVolumeId()); - return null; - }).subscribeOn(dbWScheduler); - } - - @Override - public Mono flush() { - return Mono.fromCallable(() -> { - try (var fo = new FlushOptions().setWaitForFlush(true)) { - this.flush(fo); - } catch (RocksDBException ex) { - if (!"ShutdownInProgress".equals(ex.getMessage())) { - throw ex; - } - logger.warn("Shutdown in progress. 
Flush cancelled", ex); + public Map getMapProperty(@Nullable Column column, RocksDBMapProperty property) { + var closeReadLock = closeLock.readLock(); + try { + ensureOpen(); + Map result; + if (column == null) { + result = db.getMapProperty(property.getName()); + } else { + var cfh = requireNonNull(handles.get(column)); + result = db.getMapProperty(cfh, property.getName()); } - return null; - }).subscribeOn(dbWScheduler); + return result; + } catch (RocksDBException e) { + if (isEmpty(e)) return null; + throw new DBException("Failed to read property " + property.name(), e); + } finally { + closeLock.unlockRead(closeReadLock); + } + } + + private boolean isEmpty(RocksDBException ex) { + return "NotFound".equals(ex.getMessage()); } @Override - public BufferAllocator getAllocator() { - return allocator; + public Stream>> getMapColumnProperties(RocksDBMapProperty property) { + return getAllColumnFamilyHandles().keySet().stream().map(c -> { + try { + return new ColumnProperty<>(c.name(), property.getName(), this.getMapProperty(c, property)); + } catch (IOException e) { + throw new CompletionException(e); + } + }); + } + + @Override + public String getStringProperty(@Nullable Column column, RocksDBStringProperty property) { + var closeReadLock = closeLock.readLock(); + try { + ensureOpen(); + if (column == null) { + return db.getProperty(property.getName()); + } else { + var cfh = requireNonNull(handles.get(column)); + return db.getProperty(cfh, property.getName()); + } + } catch (RocksDBException e) { + if (isEmpty(e)) return null; + throw new DBException("Failed to read property " + property.name(), e); + } finally { + closeLock.unlockRead(closeReadLock); + } + } + + @Override + public Stream> getStringColumnProperties(RocksDBStringProperty property) { + return getAllColumnFamilyHandles().keySet().stream().map(c -> { + try { + return new ColumnProperty<>(c.name(), property.getName(), this.getStringProperty(c, property)); + } catch (IOException e) { + throw new CompletionException(e); + } + }); + } + + @Override + public Long getLongProperty(@Nullable Column column, RocksDBLongProperty property) { + var closeReadLock = closeLock.readLock(); + try { + ensureOpen(); + if (column == null) { + return db.getLongProperty(property.getName()); + } else { + var cfh = requireNonNull(handles.get(column)); + return db.getLongProperty(cfh, property.getName()); + } + } catch (RocksDBException e) { + if (isEmpty(e)) return null; + throw new DBException("Failed to read property " + property.name(), e); + } finally { + closeLock.unlockRead(closeReadLock); + } + } + + @Override + public Stream> getLongColumnProperties(RocksDBLongProperty property) { + return getAllColumnFamilyHandles().keySet().stream().map(c -> { + try { + return new ColumnProperty<>(c.name(), property.getName(), this.getLongProperty(c, property)); + } catch (IOException e) { + throw new CompletionException(e); + } + }); + } + + @Override + public Long getAggregatedLongProperty(RocksDBLongProperty property) { + var closeReadLock = closeLock.readLock(); + try { + ensureOpen(); + return db.getAggregatedLongProperty(property.getName()); + } catch (RocksDBException e) { + if (isEmpty(e)) return null; + throw new DBException("Failed to read property " + property.name(), e); + } finally { + closeLock.unlockRead(closeReadLock); + } + } + + @Override + public String getRocksDBStats() { + if (closeRequested || closed) return null; + long closeReadLock = 0; + try { + closeReadLock = closeLock.tryReadLock(1, TimeUnit.SECONDS); + } catch 
(InterruptedException ignored) {} + try { + if (closeRequested || closed || closeReadLock == 0) return null; + ensureOpen(); + StringBuilder aggregatedStats = new StringBuilder(); + for (var entry : this.handles.entrySet()) { + aggregatedStats + .append(entry.getKey().name()) + .append("\n") + .append(db.getProperty(entry.getValue(), "rocksdb.stats")) + .append("\n"); + } + return aggregatedStats.toString(); + } catch (RocksDBException e) { + throw new DBException("Failed to read stats", e); + } finally { + closeLock.unlockRead(closeReadLock); + } + } + + @Override + public Stream getTableProperties() { + return handles.entrySet().stream().flatMap(handle -> { + if (closeRequested || closed) { + return null; + } + long closeReadLock = 0; + try { + closeReadLock = closeLock.tryReadLock(1, TimeUnit.SECONDS); + } catch (InterruptedException ignored) { + } + try { + if (closeRequested || closed || closeReadLock == 0) { + return null; + } + ensureOpen(); + return db + .getPropertiesOfAllTables(handle.getValue()) + .entrySet() + .stream() + .map(entry -> new TableWithProperties(handle.getKey().name(), entry.getKey(), entry.getValue())); + } catch (RocksDBException e) { + throw new CompletionException(new DBException("Failed to read stats", e)); + } finally { + closeLock.unlockRead(closeReadLock); + } + }); + } + + @Override + public void verifyChecksum() { + var closeReadLock = closeLock.readLock(); + try { + ensureOpen(); + db.verifyChecksum(); + } catch (RocksDBException e) { + throw new DBException("Failed to verify checksum of database \"" + getDatabaseName() + "\"", e); + } finally { + closeLock.unlockRead(closeReadLock); + } + } + + @Override + public void compact() throws RocksDBException { + this.forceCompaction(getLastVolumeId()); + } + + @Override + public void flush() { + try (var fo = new FlushOptions().setWaitForFlush(true)) { + this.flush(fo); + } catch (RocksDBException ex) { + if (!"ShutdownInProgress".equals(ex.getMessage())) { + throw new DBException(ex); + } + logger.warn("Shutdown in progress. 
Flush cancelled", ex); + } } @Override @@ -1597,89 +1490,78 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa } @Override - public Mono takeSnapshot() { - return Mono.fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try { - ensureOpen(); - return snapshotTime.recordCallable(() -> { - var snapshot = db.getSnapshot(); - long currentSnapshotSequenceNumber = nextSnapshotNumbers.getAndIncrement(); - this.snapshotsHandles.put(currentSnapshotSequenceNumber, snapshot); - return new LLSnapshot(currentSnapshotSequenceNumber); - }); - } finally { - closeLock.unlockRead(closeReadLock); + public LLSnapshot takeSnapshot() { + var closeReadLock = closeLock.readLock(); + try { + ensureOpen(); + return snapshotTime.record(() -> { + var snapshot = db.getSnapshot(); + long currentSnapshotSequenceNumber = nextSnapshotNumbers.getAndIncrement(); + this.snapshotsHandles.put(currentSnapshotSequenceNumber, snapshot); + return new LLSnapshot(currentSnapshotSequenceNumber); + }); + } finally { + closeLock.unlockRead(closeReadLock); + } + } + + @Override + public void releaseSnapshot(LLSnapshot snapshot) { + var closeReadLock = closeLock.readLock(); + try (var dbSnapshot = this.snapshotsHandles.remove(snapshot.getSequenceNumber())) { + if (dbSnapshot == null) { + throw new DBException("Snapshot " + snapshot.getSequenceNumber() + " not found!"); } - }).subscribeOn(dbRScheduler); + if (!db.isOwningHandle()) { + return; + } + db.releaseSnapshot(dbSnapshot); + } finally { + closeLock.unlockRead(closeReadLock); + } } @Override - public Mono releaseSnapshot(LLSnapshot snapshot) { - return Mono - .fromCallable(() -> { - var closeReadLock = closeLock.readLock(); - try (var dbSnapshot = this.snapshotsHandles.remove(snapshot.getSequenceNumber())) { - if (dbSnapshot == null) { - throw new IOException("Snapshot " + snapshot.getSequenceNumber() + " not found!"); - } - if (!db.isOwningHandle()) { - return null; - } - db.releaseSnapshot(dbSnapshot); - return null; - } finally { - closeLock.unlockRead(closeReadLock); - } - }) - .subscribeOn(dbRScheduler); + public void close() { + closeRequested = true; + if (statistics != null) { + statistics.close(); + statistics = null; + } + try { + flushAndCloseDb(db, + standardCache, + compressedCache, + new ArrayList<>(handles.values()) + ); + handles.values().forEach(columnFamilyHandleRocksObj -> { + if (LLUtils.isAccessible(columnFamilyHandleRocksObj)) { + columnFamilyHandleRocksObj.close(); + } + }); + handles.clear(); + deleteUnusedOldLogFiles(); + } catch (RocksDBException e) { + throw new DBException("Failed to close", e); + } } - @Override - public Mono close() { - return Mono - .fromCallable(() -> { - closeRequested = true; - if (statistics != null) { - statistics.close(); - statistics = null; - } - try { - flushAndCloseDb(db, - standardCache, - compressedCache, - new ArrayList<>(handles.values()) - ); - handles.values().forEach(columnFamilyHandleRocksObj -> { - if (LLUtils.isAccessible(columnFamilyHandleRocksObj)) { - columnFamilyHandleRocksObj.close(); - } - }); - handles.clear(); - deleteUnusedOldLogFiles(); - } catch (RocksDBException e) { - throw new IOException(e); - } - return null; - }) - .onErrorMap(cause -> new IOException("Failed to close", cause)) - .subscribeOn(dbWScheduler); - } - - private Mono pauseWrites() { - return Mono.fromCallable(() -> { + private void pauseWrites() { + try { db.pauseBackgroundWork(); db.disableFileDeletions(); - return null; - }).subscribeOn(dbWScheduler); + } catch (RocksDBException e) { + 
throw new DBException(e); + } } - private Mono resumeWrites() { - return Mono.fromCallable(() -> { + private void resumeWrites() { + try { db.continueBackgroundWork(); db.enableFileDeletions(false); - return null; - }).subscribeOn(dbWScheduler); + } catch (RocksDBException e) { + throw new DBException(e); + } } /** diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalLuceneIndex.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalLuceneIndex.java index fbb0ff2..0d897a0 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalLuceneIndex.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalLuceneIndex.java @@ -1,13 +1,10 @@ package it.cavallium.dbengine.database.disk; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; import static it.cavallium.dbengine.database.LLUtils.MARKER_LUCENE; import static it.cavallium.dbengine.database.LLUtils.toDocument; import static it.cavallium.dbengine.database.LLUtils.toFields; import static it.cavallium.dbengine.lucene.searcher.GlobalQueryRewrite.NO_REWRITE; import static java.util.Objects.requireNonNull; -import static reactor.core.scheduler.Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE; -import static reactor.core.scheduler.Schedulers.DEFAULT_BOUNDED_ELASTIC_SIZE; import com.google.common.collect.Multimap; import io.micrometer.core.instrument.Counter; @@ -32,7 +29,6 @@ import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.lucene.LuceneCloseable; import it.cavallium.dbengine.lucene.LuceneConcurrentMergeScheduler; import it.cavallium.dbengine.lucene.LuceneHacks; -import it.cavallium.dbengine.lucene.LuceneRocksDBManager; import it.cavallium.dbengine.lucene.LuceneUtils; import it.cavallium.dbengine.lucene.collector.Buckets; import it.cavallium.dbengine.lucene.directory.Lucene91CodecWithNoFieldCompression; @@ -48,18 +44,23 @@ import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities; import it.cavallium.dbengine.rpc.current.data.LuceneOptions; import it.cavallium.dbengine.utils.SimpleResource; import java.io.IOException; -import java.io.UncheckedIOException; +import it.cavallium.dbengine.utils.DBException; import java.time.Duration; import java.util.ArrayList; import java.util.List; import java.util.Map.Entry; import java.util.Objects; import java.util.concurrent.Callable; +import java.util.concurrent.CompletionException; +import java.util.concurrent.Executors; import java.util.concurrent.Phaser; +import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.LongAdder; import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Supplier; import java.util.logging.Level; +import java.util.stream.Stream; import org.apache.commons.lang3.time.StopWatch; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -75,14 +76,9 @@ import org.apache.lucene.index.SnapshotDeletionPolicy; import org.apache.lucene.search.similarities.Similarity; import org.apache.lucene.store.Directory; import org.apache.lucene.store.MMapDirectory; +import org.apache.lucene.util.IOSupplier; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import it.cavallium.dbengine.utils.ShortNamedThreadFactory; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.publisher.SignalType; -import reactor.core.scheduler.Scheduler; -import 
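
pauseWrites and resumeWrites now call the RocksDB handle directly instead of wrapping the calls in Mono.fromCallable. The pairing matters: disableFileDeletions is counted, and enableFileDeletions(false) only decrements that count rather than forcing deletions back on. A sketch of the paired calls, assuming the same db handle:

// Sketch: pausing and resuming RocksDB background activity around a backup.
db.pauseBackgroundWork();      // stop compactions and flushes
db.disableFileDeletions();     // keep SST files on disk while they are copied
// ... perform the backup ...
db.continueBackgroundWork();
db.enableFileDeletions(false); // false = decrement the disable count, do not force
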
reactor.core.scheduler.Schedulers; public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, LLLuceneIndex, LuceneCloseable { @@ -94,19 +90,13 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, * There is only a single thread globally to not overwhelm the disk with * concurrent commits or concurrent refreshes. */ - private static final Scheduler luceneHeavyTasksScheduler = uninterruptibleScheduler(Schedulers.newBoundedElastic( - DEFAULT_BOUNDED_ELASTIC_SIZE, - DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, - new LuceneThreadFactory("heavy-tasks").setDaemon(true).withGroup(new ThreadGroup("lucene-heavy-tasks")), - Math.toIntExact(Duration.ofHours(1).toSeconds()) - )); - private static final Scheduler luceneWriteScheduler = uninterruptibleScheduler(Schedulers.newBoundedElastic( - DEFAULT_BOUNDED_ELASTIC_SIZE, - DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, - new LuceneThreadFactory("lucene-write").setDaemon(true).withGroup(new ThreadGroup("lucene-write")), - Math.toIntExact(Duration.ofHours(1).toSeconds()) - )); - private static final Scheduler bulkScheduler = luceneWriteScheduler; + private static final ScheduledExecutorService luceneHeavyTasksScheduler = Executors.newScheduledThreadPool(4, + new LuceneThreadFactory("heavy-tasks").setDaemon(true).withGroup(new ThreadGroup("lucene-heavy-tasks")) + ); + private static final ScheduledExecutorService luceneWriteScheduler = Executors.newScheduledThreadPool(8, + new LuceneThreadFactory("lucene-write").setDaemon(true).withGroup(new ThreadGroup("lucene-write")) + ); + private static final ScheduledExecutorService bulkScheduler = luceneWriteScheduler; private static final boolean ENABLE_SNAPSHOTS = Boolean.parseBoolean(System.getProperty("it.cavallium.dbengine.lucene.snapshot.enable", "true")); @@ -116,10 +106,6 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, private static final LLSnapshot DUMMY_SNAPSHOT = new LLSnapshot(-1); - static { - LLUtils.initHooks(); - } - private final LocalSearcher localSearcher; private final DecimalBucketMultiSearcher decimalBucketMultiSearcher = new DecimalBucketMultiSearcher(); @@ -138,25 +124,22 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, private final IndexSearcherManager searcherManager; private final PerFieldAnalyzerWrapper luceneAnalyzer; private final Similarity luceneSimilarity; - private final LuceneRocksDBManager rocksDBManager; private final Directory directory; private final LuceneBackuppable backuppable; private final boolean lowMemory; private final Phaser activeTasks = new Phaser(1); - public LLLocalLuceneIndex(LLTempHugePqEnv env, - MeterRegistry meterRegistry, + public LLLocalLuceneIndex(MeterRegistry meterRegistry, @NotNull String clusterName, int shardIndex, IndicizerAnalyzers indicizerAnalyzers, IndicizerSimilarities indicizerSimilarities, LuceneOptions luceneOptions, - @Nullable LuceneHacks luceneHacks, - @Nullable LuceneRocksDBManager rocksDBManager) throws IOException { + @Nullable LuceneHacks luceneHacks) { if (clusterName.isBlank()) { - throw new IOException("Empty lucene database name"); + throw new DBException("Empty lucene database name"); } if (!MMapDirectory.UNMAP_SUPPORTED) { logger.error("Unmap is unsupported, lucene will run slower: {}", MMapDirectory.UNMAP_NOT_SUPPORTED_REASON); @@ -165,21 +148,21 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, } this.lowMemory = luceneOptions.lowMemory(); this.shardName = LuceneUtils.getStandardName(clusterName, 
shardIndex); - this.directory = LuceneUtils.createLuceneDirectory(luceneOptions.directoryOptions(), - shardName, - rocksDBManager); + try { + this.directory = LuceneUtils.createLuceneDirectory(luceneOptions.directoryOptions(), shardName); + } catch (IOException e) { + throw new DBException(e); + } boolean isFilesystemCompressed = LuceneUtils.getIsFilesystemCompressed(luceneOptions.directoryOptions()); this.luceneAnalyzer = LuceneUtils.toPerFieldAnalyzerWrapper(indicizerAnalyzers); this.luceneSimilarity = LuceneUtils.toPerFieldSimilarityWrapper(indicizerSimilarities); - this.rocksDBManager = rocksDBManager; - var useHugePq = luceneOptions.allowNonVolatileCollection(); var maxInMemoryResultEntries = luceneOptions.maxInMemoryResultEntries(); if (luceneHacks != null && luceneHacks.customLocalSearcher() != null) { localSearcher = luceneHacks.customLocalSearcher().get(); } else { - localSearcher = new AdaptiveLocalSearcher(env, useHugePq, maxInMemoryResultEntries); + localSearcher = new AdaptiveLocalSearcher(maxInMemoryResultEntries); } var indexWriterConfig = new IndexWriterConfig(luceneAnalyzer); @@ -225,7 +208,11 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, indexWriterConfig.setReaderPooling(luceneOptions.indexWriterReaderPooling().get()); } indexWriterConfig.setSimilarity(getLuceneSimilarity()); - this.indexWriter = new IndexWriter(directory, indexWriterConfig); + try { + this.indexWriter = new IndexWriter(directory, indexWriterConfig); + } catch (IOException e) { + throw new DBException(e); + } if (ENABLE_SNAPSHOTS) { this.snapshotsManager = new SnapshotsManager(indexWriter, (SnapshotDeletionPolicy) deletionPolicy); } else { @@ -281,7 +268,7 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, // Start scheduled tasks var commitMillis = luceneOptions.commitDebounceTime().toMillis(); - luceneHeavyTasksScheduler.schedulePeriodically(this::scheduledCommit, commitMillis, commitMillis, + luceneHeavyTasksScheduler.scheduleAtFixedRate(this::scheduledCommit, commitMillis, commitMillis, TimeUnit.MILLISECONDS); this.backuppable = new LuceneBackuppable(); @@ -297,205 +284,206 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, } @Override - public Mono<LLSnapshot> takeSnapshot() { - if (snapshotsManager == null) { - return Mono.just(DUMMY_SNAPSHOT); - } - return snapshotsManager.takeSnapshot().elapsed().map(elapsed -> { - snapshotTime.record(elapsed.getT1(), TimeUnit.MILLISECONDS); - return elapsed.getT2(); - }).transform(this::ensureOpen); - } - - private <T> Mono<T> ensureOpen(Mono<T> mono) { - return Mono.fromCallable(() -> { - if (isClosed()) { - throw new IllegalStateException("Lucene index is closed"); - } else { - return null; + public LLSnapshot takeSnapshot() { + return runTask(() -> { + if (snapshotsManager == null) { + return DUMMY_SNAPSHOT; } - }).then(mono).doFirst(activeTasks::register).doFinally(s -> activeTasks.arriveAndDeregister()); + try { + return snapshotTime.recordCallable(snapshotsManager::takeSnapshot); + } catch (Exception e) { + throw new DBException("Failed to take snapshot", e); + } + }); } - private <T> Mono<T> runSafe(Callable<T> callable) { - return Mono - .fromCallable(callable) - .subscribeOn(luceneWriteScheduler) - .publishOn(Schedulers.parallel()); + private <V> V runTask(Supplier<V> supplier) { + if (isClosed()) { + throw new IllegalStateException("Lucene index is closed"); + } else { + activeTasks.register(); + try { + return supplier.get(); + } finally { + activeTasks.arriveAndDeregister(); + } + } }
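Reviewer note on the pattern above, with an illustrative sketch that is not part of this patch: the bounded-elastic Reactor schedulers and the transform(this::ensureOpen) chain are replaced by daemon-thread executors plus a Phaser that counts in-flight tasks, so that closing the index can wait for them to drain. The names below (GatedIndex, heavyTasks, scheduledCommit) are hypothetical stand-ins for the real class:

import java.util.concurrent.Executors;
import java.util.concurrent.Phaser;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

final class GatedIndex implements AutoCloseable {

  // Daemon scheduler for heavy periodic work (commits), so it never blocks JVM shutdown.
  private static final ScheduledExecutorService heavyTasks =
      Executors.newSingleThreadScheduledExecutor(r -> {
        Thread thread = new Thread(r, "heavy-tasks");
        thread.setDaemon(true);
        return thread;
      });

  // Party 0 is the index itself; every running task registers as an extra party.
  private final Phaser activeTasks = new Phaser(1);
  private volatile boolean closed;

  GatedIndex(long commitMillis) {
    // Synchronous equivalent of Reactor's schedulePeriodically(...).
    heavyTasks.scheduleAtFixedRate(this::scheduledCommit, commitMillis, commitMillis, TimeUnit.MILLISECONDS);
  }

  // Replacement for transform(this::ensureOpen): fail fast when closed,
  // otherwise track the task so close() can wait for it.
  <V> V runTask(Supplier<V> task) {
    if (closed) {
      throw new IllegalStateException("Index is closed");
    }
    activeTasks.register();
    try {
      return task.get();
    } finally {
      activeTasks.arriveAndDeregister();
    }
  }

  private void scheduledCommit() {
    // Commit work would go here.
  }

  @Override
  public void close() {
    closed = true;
    // Arrive as party 0 and block until every registered task has deregistered.
    activeTasks.arriveAndAwaitAdvance();
  }
}

In the real class this is paired with shutdownLock around the heavy IndexWriter operations (deleteAll, flush, the merge waits), so a task admitted by the Phaser cannot race the writer's shutdown.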
@Override - public Mono releaseSnapshot(LLSnapshot snapshot) { + public void releaseSnapshot(LLSnapshot snapshot) { if (snapshotsManager == null) { if (snapshot != null && !Objects.equals(snapshot, DUMMY_SNAPSHOT)) { - return Mono.error(new IllegalStateException("Can't release snapshot " + snapshot)); + throw new IllegalStateException("Can't release snapshot " + snapshot); } - return Mono.empty(); + return; } - return snapshotsManager - .releaseSnapshot(snapshot) - .elapsed() - .doOnNext(elapsed -> snapshotTime.record(elapsed.getT1(), TimeUnit.MILLISECONDS)) - .then(); + snapshotsManager.releaseSnapshot(snapshot); } @Override - public Mono addDocument(LLTerm key, LLUpdateDocument doc) { - return this.runSafe(() -> { - docIndexingTime.recordCallable(() -> { - startedDocIndexings.increment(); - try { - indexWriter.addDocument(toDocument(doc)); - } finally { - endeddDocIndexings.increment(); - } - return null; - }); + public void addDocument(LLTerm key, LLUpdateDocument doc) { + runTask(() -> { + try { + docIndexingTime.recordCallable(() -> { + startedDocIndexings.increment(); + try { + indexWriter.addDocument(toDocument(doc)); + } finally { + endeddDocIndexings.increment(); + } + return null; + }); + } catch (Exception e) { + throw new DBException("Failed to add document", e); + } logger.trace(MARKER_LUCENE, "Added document {}: {}", key, doc); return null; - }).transform(this::ensureOpen); + }); } @Override - public Mono addDocuments(boolean atomic, Flux> documents) { - if (!atomic) { - return documents - .publishOn(bulkScheduler) - .handle((document, sink) -> { - LLUpdateDocument value = document.getValue(); - startedDocIndexings.increment(); - try { - docIndexingTime.recordCallable(() -> { - indexWriter.addDocument(toDocument(value)); - return null; - }); - } catch (Exception ex) { - sink.error(ex); - return; - } finally { - endeddDocIndexings.increment(); - } - logger.trace(MARKER_LUCENE, "Added document: {}", document); - sink.next(true); - }) - .count() - .transform(this::ensureOpen); - } else { - return documents - .collectList() - .publishOn(bulkScheduler) - .handle((documentsList, sink) -> { - var count = documentsList.size(); - StopWatch stopWatch = StopWatch.createStarted(); - try { - startedDocIndexings.increment(count); - try { - indexWriter.addDocuments(LLUtils.toDocumentsFromEntries(documentsList)); - } finally { - endeddDocIndexings.increment(count); - } - } catch (IOException ex) { - sink.error(ex); - return; - } finally { - docIndexingTime.record(stopWatch.getTime(TimeUnit.MILLISECONDS) / Math.max(count, 1), - TimeUnit.MILLISECONDS - ); - } - sink.next((long) documentsList.size()); - }) - .transform(this::ensureOpen); - } - } - - - @Override - public Mono deleteDocument(LLTerm id) { - return this.runSafe(() -> docIndexingTime.recordCallable(() -> { - startedDocIndexings.increment(); - try { - indexWriter.deleteDocuments(LLUtils.toTerm(id)); - } finally { - endeddDocIndexings.increment(); - } - return null; - })).transform(this::ensureOpen); - } - - @Override - public Mono update(LLTerm id, LLIndexRequest request) { - return this.runSafe(() -> { - docIndexingTime.recordCallable(() -> { - startedDocIndexings.increment(); - try { - if (request instanceof LLUpdateDocument updateDocument) { - indexWriter.updateDocument(LLUtils.toTerm(id), toDocument(updateDocument)); - } else if (request instanceof LLSoftUpdateDocument softUpdateDocument) { - indexWriter.softUpdateDocument(LLUtils.toTerm(id), - toDocument(softUpdateDocument.items()), - 
toFields(softUpdateDocument.softDeleteItems()) - ); - } else if (request instanceof LLUpdateFields updateFields) { - indexWriter.updateDocValues(LLUtils.toTerm(id), toFields(updateFields.items())); - } else { - throw new UnsupportedOperationException("Unexpected request type: " + request); - } - } finally { - endeddDocIndexings.increment(); - } - return null; - }); - logger.trace(MARKER_LUCENE, "Updated document {}: {}", id, request); - return null; - }).transform(this::ensureOpen); - } - - @Override - public Mono updateDocuments(Flux> documents) { - return documents - .log("local-update-documents", Level.FINEST, false, SignalType.ON_NEXT, SignalType.ON_COMPLETE) - .publishOn(bulkScheduler) - .handle((document, sink) -> { - LLTerm key = document.getKey(); + public long addDocuments(boolean atomic, Stream> documents) { + return this.runTask(() -> { + if (!atomic) { + LongAdder count = new LongAdder(); + documents.forEach(document -> { + count.increment(); LLUpdateDocument value = document.getValue(); startedDocIndexings.increment(); try { docIndexingTime.recordCallable(() -> { - indexWriter.updateDocument(LLUtils.toTerm(key), toDocument(value)); + indexWriter.addDocument(toDocument(value)); return null; }); - logger.trace(MARKER_LUCENE, "Updated document {}: {}", key, value); } catch (Exception ex) { - sink.error(ex); - return; + throw new CompletionException("Failed to add document", ex); } finally { endeddDocIndexings.increment(); } - sink.next(true); - }) - .count() - .transform(this::ensureOpen); + logger.trace(MARKER_LUCENE, "Added document: {}", document); + }); + return count.sum(); + } else { + var documentsList = documents.toList(); + var count = documentsList.size(); + StopWatch stopWatch = StopWatch.createStarted(); + try { + startedDocIndexings.increment(count); + try { + indexWriter.addDocuments(LLUtils.toDocumentsFromEntries(documentsList)); + } catch (IOException e) { + throw new DBException(e); + } finally { + endeddDocIndexings.increment(count); + } + } finally { + docIndexingTime.record(stopWatch.getTime(TimeUnit.MILLISECONDS) / Math.max(count, 1), + TimeUnit.MILLISECONDS + ); + } + return (long) documentsList.size(); + } + }); } @Override - public Mono deleteAll() { - return this.runSafe(() -> { + public void deleteDocument(LLTerm id) { + this.runTask(() -> { + try { + return docIndexingTime.recordCallable(() -> { + startedDocIndexings.increment(); + try { + indexWriter.deleteDocuments(LLUtils.toTerm(id)); + } finally { + endeddDocIndexings.increment(); + } + return null; + }); + } catch (Exception e) { + throw new DBException("Failed to delete document", e); + } + }); + } + + @Override + public void update(LLTerm id, LLIndexRequest request) { + this.runTask(() -> { + try { + docIndexingTime.recordCallable(() -> { + startedDocIndexings.increment(); + try { + if (request instanceof LLUpdateDocument updateDocument) { + indexWriter.updateDocument(LLUtils.toTerm(id), toDocument(updateDocument)); + } else if (request instanceof LLSoftUpdateDocument softUpdateDocument) { + indexWriter.softUpdateDocument(LLUtils.toTerm(id), + toDocument(softUpdateDocument.items()), + toFields(softUpdateDocument.softDeleteItems()) + ); + } else if (request instanceof LLUpdateFields updateFields) { + indexWriter.updateDocValues(LLUtils.toTerm(id), toFields(updateFields.items())); + } else { + throw new UnsupportedOperationException("Unexpected request type: " + request); + } + } finally { + endeddDocIndexings.increment(); + } + return null; + }); + } catch (Exception e) { + throw new 
DBException("Failed to update document", e); + } + logger.trace(MARKER_LUCENE, "Updated document {}: {}", id, request); + return null; + }); + } + + @Override + public long updateDocuments(Stream> documents) { + return runTask(() -> { + var count = new LongAdder(); + documents.forEach(document -> { + count.increment(); + LLTerm key = document.getKey(); + LLUpdateDocument value = document.getValue(); + startedDocIndexings.increment(); + try { + docIndexingTime.recordCallable(() -> { + indexWriter.updateDocument(LLUtils.toTerm(key), toDocument(value)); + return null; + }); + logger.trace(MARKER_LUCENE, "Updated document {}: {}", key, value); + } catch (Exception ex) { + throw new CompletionException(ex); + } finally { + endeddDocIndexings.increment(); + } + }); + return count.sum(); + }); + } + + @Override + public void deleteAll() { + this.runTask(() -> { shutdownLock.lock(); try { indexWriter.deleteAll(); indexWriter.forceMergeDeletes(true); indexWriter.commit(); indexWriter.deleteUnusedFiles(); + } catch (IOException e) { + throw new DBException(e); } finally { shutdownLock.unlock(); } return null; - }).subscribeOn(luceneHeavyTasksScheduler).publishOn(Schedulers.parallel()).transform(this::ensureOpen); + }); } @Override - public Flux moreLikeThis(@Nullable LLSnapshot snapshot, + public Stream moreLikeThis(@Nullable LLSnapshot snapshot, QueryParams queryParams, @Nullable String keyFieldName, Multimap mltDocumentFieldsFlux) { @@ -503,21 +491,18 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, var searcher = this.searcherManager.retrieveSearcher(snapshot); var transformer = new MoreLikeThisTransformer(mltDocumentFieldsFlux, luceneAnalyzer, luceneSimilarity); - return localSearcher - .collect(searcher, localQueryParams, keyFieldName, transformer) - .map(result -> LLSearchResultShard.withResource(result.results(), result.totalHitsCount(), result)) - .flux(); + var result = localSearcher.collect(searcher, localQueryParams, keyFieldName, transformer); + return Stream.of(LLSearchResultShard.withResource(result.results(), result.totalHitsCount(), result)); } @Override - public Flux search(@Nullable LLSnapshot snapshot, QueryParams queryParams, + public Stream search(@Nullable LLSnapshot snapshot, QueryParams queryParams, @Nullable String keyFieldName) { - return searchInternal(snapshot, queryParams, keyFieldName) - .map(result -> LLSearchResultShard.withResource(result.results(), result.totalHitsCount(), result)) - .flux(); + var result = searchInternal(snapshot, queryParams, keyFieldName); + return Stream.of(LLSearchResultShard.withResource(result.results(), result.totalHitsCount(), result)); } - public Mono searchInternal(@Nullable LLSnapshot snapshot, QueryParams queryParams, + public LuceneSearchResult searchInternal(@Nullable LLSnapshot snapshot, QueryParams queryParams, @Nullable String keyFieldName) { LocalQueryParams localQueryParams = LuceneUtils.toLocalQueryParams(queryParams, luceneAnalyzer); var searcher = searcherManager.retrieveSearcher(snapshot); @@ -526,18 +511,16 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, } @Override - public Mono count(@Nullable LLSnapshot snapshot, Query query, @Nullable Duration timeout) { + public TotalHitsCount count(@Nullable LLSnapshot snapshot, Query query, @Nullable Duration timeout) { var params = LuceneUtils.getCountQueryParams(query); - return Mono - .usingWhen(this.searchInternal(snapshot, params, null), - result -> Mono.just(result.totalHitsCount()), - LLUtils::finalizeResource 
- ) - .defaultIfEmpty(TotalHitsCount.of(0, true)); + try (var result = this.searchInternal(snapshot, params, null)) { + if (result == null) return TotalHitsCount.of(0, true); + return result.totalHitsCount(); + } } @Override - public Mono computeBuckets(@Nullable LLSnapshot snapshot, + public Buckets computeBuckets(@Nullable LLSnapshot snapshot, @NotNull List queries, @Nullable Query normalizationQuery, BucketParams bucketParams) { @@ -546,14 +529,12 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, localQueries.add(QueryParser.toQuery(query, luceneAnalyzer)); } var localNormalizationQuery = QueryParser.toQuery(normalizationQuery, luceneAnalyzer); - Mono searchers = searcherManager - .retrieveSearcher(snapshot) - .map(indexSearcher -> LLIndexSearchers.unsharded(indexSearcher)); + LLIndexSearchers searchers = LLIndexSearchers.unsharded(searcherManager.retrieveSearcher(snapshot)); return decimalBucketMultiSearcher.collectMulti(searchers, bucketParams, localQueries, localNormalizationQuery); } - public Mono retrieveSearcher(@Nullable LLSnapshot snapshot) { + public LLIndexSearcher retrieveSearcher(@Nullable LLSnapshot snapshot) { return searcherManager.retrieveSearcher(snapshot); } @@ -572,112 +553,107 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, directory.close(); logger.debug("IndexWriter closed"); } catch (IOException ex) { - throw new UncheckedIOException(ex); + throw new DBException(ex); } finally { shutdownLock.unlock(); } } @Override - public Mono flush() { - return Mono - .fromCallable(() -> { - if (activeTasks.isTerminated()) return null; - shutdownLock.lock(); - try { - if (isClosed()) { - return null; - } - flushTime.recordCallable(() -> { - indexWriter.flush(); - return null; - }); - } finally { - shutdownLock.unlock(); - } + public void flush() { + runTask(() -> { + if (activeTasks.isTerminated()) return null; + shutdownLock.lock(); + try { + if (isClosed()) { return null; - }) - .subscribeOn(luceneHeavyTasksScheduler) - .transform(this::ensureOpen); + } + flushTime.recordCallable(() -> { + indexWriter.flush(); + return null; + }); + } catch (Exception e) { + throw new DBException("Failed to flush", e); + } finally { + shutdownLock.unlock(); + } + return null; + }); } @Override - public Mono waitForMerges() { - return Mono - .fromCallable(() -> { - if (activeTasks.isTerminated()) return null; - shutdownLock.lock(); - try { - if (isClosed()) { - return null; - } - var mergeScheduler = indexWriter.getConfig().getMergeScheduler(); - if (mergeScheduler instanceof ConcurrentMergeScheduler concurrentMergeScheduler) { - concurrentMergeScheduler.sync(); - } - } finally { - shutdownLock.unlock(); - } + public void waitForMerges() { + runTask(() -> { + if (activeTasks.isTerminated()) return null; + shutdownLock.lock(); + try { + if (isClosed()) { return null; - }) - .subscribeOn(luceneHeavyTasksScheduler) - .transform(this::ensureOpen); + } + var mergeScheduler = indexWriter.getConfig().getMergeScheduler(); + if (mergeScheduler instanceof ConcurrentMergeScheduler concurrentMergeScheduler) { + concurrentMergeScheduler.sync(); + } + } finally { + shutdownLock.unlock(); + } + return null; + }); } @Override - public Mono waitForLastMerges() { - return Mono - .fromCallable(() -> { - if (activeTasks.isTerminated()) return null; - shutdownLock.lock(); - try { - if (isClosed()) { - return null; - } - indexWriter.getConfig().setMergePolicy(NoMergePolicy.INSTANCE); - var mergeScheduler = 
indexWriter.getConfig().getMergeScheduler(); - if (mergeScheduler instanceof ConcurrentMergeScheduler concurrentMergeScheduler) { - concurrentMergeScheduler.sync(); - } - indexWriter.deleteUnusedFiles(); - } finally { - shutdownLock.unlock(); - } + public void waitForLastMerges() { + runTask(() -> { + if (activeTasks.isTerminated()) return null; + shutdownLock.lock(); + try { + if (isClosed()) { return null; - }) - .subscribeOn(luceneHeavyTasksScheduler) - .transform(this::ensureOpen); + } + indexWriter.getConfig().setMergePolicy(NoMergePolicy.INSTANCE); + var mergeScheduler = indexWriter.getConfig().getMergeScheduler(); + if (mergeScheduler instanceof ConcurrentMergeScheduler concurrentMergeScheduler) { + concurrentMergeScheduler.sync(); + } + indexWriter.deleteUnusedFiles(); + } catch (IOException e) { + throw new DBException(e); + } finally { + shutdownLock.unlock(); + } + return null; + }); } @Override - public Mono refresh(boolean force) { - return Mono - .fromCallable(() -> { - activeTasks.register(); - try { - if (activeTasks.isTerminated()) return null; - shutdownLock.lock(); - try { - if (isClosed()) { - return null; - } - refreshTime.recordCallable(() -> { - if (force) { - searcherManager.maybeRefreshBlocking(); - } else { - searcherManager.maybeRefresh(); - } - return null; - }); - } finally { - shutdownLock.unlock(); - } - } finally { - activeTasks.arriveAndDeregister(); + public void refresh(boolean force) { + runTask(() -> { + activeTasks.register(); + try { + if (activeTasks.isTerminated()) return null; + shutdownLock.lock(); + try { + if (isClosed()) { + return null; } - return null; - }) - .subscribeOn(luceneHeavyTasksScheduler); + refreshTime.recordCallable(() -> { + if (force) { + searcherManager.maybeRefreshBlocking(); + } else { + searcherManager.maybeRefresh(); + } + return null; + }); + } catch (Exception e) { + throw new DBException("Failed to refresh", e); + } finally { + shutdownLock.unlock(); + } + } finally { + activeTasks.arriveAndDeregister(); + } + return null; + }); } /** @@ -855,13 +831,13 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, } @Override - public Mono pauseForBackup() { - return backuppable.pauseForBackup(); + public void pauseForBackup() { + backuppable.pauseForBackup(); } @Override - public Mono resumeAfterBackup() { - return backuppable.resumeAfterBackup(); + public void resumeAfterBackup() { + backuppable.resumeAfterBackup(); } @Override @@ -874,21 +850,20 @@ public class LLLocalLuceneIndex extends SimpleResource implements IBackuppable, private LLSnapshot snapshot; @Override - protected Mono onPauseForBackup() { - return LLLocalLuceneIndex.this.takeSnapshot().doOnSuccess(snapshot -> { - if (snapshot == null) { - logger.error("Can't pause index \"{}\" because snapshots are not enabled!", shardName); - } - this.snapshot = snapshot; - }).then(); + protected void onPauseForBackup() { + var snapshot = LLLocalLuceneIndex.this.takeSnapshot(); + if (snapshot == null) { + logger.error("Can't pause index \"{}\" because snapshots are not enabled!", shardName); + } + this.snapshot = snapshot; } @Override - protected Mono onResumeAfterBackup() { + protected void onResumeAfterBackup() { if (snapshot == null) { - return Mono.empty(); + return; } - return LLLocalLuceneIndex.this.releaseSnapshot(snapshot); + LLLocalLuceneIndex.this.releaseSnapshot(snapshot); } } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMigrationReactiveRocksIterator.java 
b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMigrationReactiveRocksIterator.java index 1cd58d2..0b38bc5 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMigrationReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMigrationReactiveRocksIterator.java @@ -2,57 +2,59 @@ package it.cavallium.dbengine.database.disk; import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; -import io.netty5.util.Send; -import io.netty5.buffer.internal.ResourceSupport; +import it.cavallium.dbengine.buffers.Buf; +import it.cavallium.dbengine.database.LLEntry; import it.cavallium.dbengine.database.LLRange; import it.cavallium.dbengine.database.LLUtils; -import it.cavallium.dbengine.utils.SimpleResource; +import it.cavallium.dbengine.database.disk.rocksdb.RocksIteratorObj; +import it.cavallium.dbengine.utils.DBException; +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.CompletionException; import java.util.function.Supplier; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; +import java.util.stream.Stream; +import org.jetbrains.annotations.NotNull; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDBException; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.util.function.Tuples; public final class LLLocalMigrationReactiveRocksIterator { private final RocksDBColumn db; - private Mono rangeMono; + private LLRange range; private Supplier readOptions; public LLLocalMigrationReactiveRocksIterator(RocksDBColumn db, - Mono rangeMono, + LLRange range, Supplier readOptions) { this.db = db; - this.rangeMono = rangeMono; + this.range = range; this.readOptions = readOptions; } - public record ByteEntry(byte[] key, byte[] value) {} - - public Flux flux() { - return Flux.usingWhen(rangeMono, range -> Flux.generate(() -> { - var readOptions = generateCustomReadOptions(this.readOptions.get(), false, false, false); - return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(false, readOptions, range, false)); - }, (tuple, sink) -> { + public Stream stream() { + var readOptions = generateCustomReadOptions(this.readOptions.get(), false, false, false); + RocksIteratorObj rocksIterator; + try { + rocksIterator = db.newRocksIterator(readOptions, range, false); + } catch (RocksDBException e) { + throw new DBException("Failed to open iterator", e); + } + return Stream.generate(() -> { try { - var rocksIterator = tuple.iter(); if (rocksIterator.isValid()) { - byte[] key = rocksIterator.key(); - byte[] value = rocksIterator.value(); + var key = rocksIterator.keyBuf().copy(); + var value = rocksIterator.valueBuf().copy(); rocksIterator.next(false); - sink.next(new ByteEntry(key, value)); + return LLEntry.of(key, value); } else { - sink.complete(); + return null; } } catch (RocksDBException ex) { - sink.error(ex); + throw new CompletionException(new DBException("Failed to iterate", ex)); } - return tuple; - }, RocksIterWithReadOpts::close), LLUtils::finalizeResource); + }).takeWhile(Objects::nonNull).onClose(() -> { + rocksIterator.close(); + readOptions.close(); + }); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMultiLuceneIndex.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMultiLuceneIndex.java index 7769f70..4b2ab72 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMultiLuceneIndex.java +++ 
b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalMultiLuceneIndex.java @@ -1,15 +1,13 @@ package it.cavallium.dbengine.database.disk; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; +import static it.cavallium.dbengine.lucene.LuceneUtils.getLuceneIndexId; +import static java.util.stream.Collectors.groupingBy; -import com.google.common.collect.Iterables; import com.google.common.collect.Multimap; +import com.google.common.collect.Streams; import io.micrometer.core.instrument.MeterRegistry; -import io.netty5.util.Send; import it.cavallium.dbengine.client.IBackuppable; import it.cavallium.dbengine.client.query.QueryParser; -import it.cavallium.dbengine.client.query.current.data.NoSort; import it.cavallium.dbengine.client.query.current.data.Query; import it.cavallium.dbengine.client.query.current.data.QueryParams; import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; @@ -17,12 +15,12 @@ import it.cavallium.dbengine.database.LLIndexRequest; import it.cavallium.dbengine.database.LLLuceneIndex; import it.cavallium.dbengine.database.LLSearchResultShard; import it.cavallium.dbengine.database.LLSnapshot; +import it.cavallium.dbengine.database.LLSnapshottable; import it.cavallium.dbengine.database.LLTerm; import it.cavallium.dbengine.database.LLUpdateDocument; -import it.cavallium.dbengine.database.LLUtils; +import it.cavallium.dbengine.database.SafeCloseable; import it.cavallium.dbengine.lucene.LuceneCloseable; import it.cavallium.dbengine.lucene.LuceneHacks; -import it.cavallium.dbengine.lucene.LuceneRocksDBManager; import it.cavallium.dbengine.lucene.LuceneUtils; import it.cavallium.dbengine.lucene.collector.Buckets; import it.cavallium.dbengine.lucene.mlt.MoreLikeThisTransformer; @@ -36,33 +34,29 @@ import it.cavallium.dbengine.lucene.searcher.MultiSearcher; import it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers; import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities; import it.cavallium.dbengine.rpc.current.data.LuceneOptions; +import it.cavallium.dbengine.utils.DBException; import it.cavallium.dbengine.utils.SimpleResource; +import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap; import it.unimi.dsi.fastutil.ints.IntList; import java.io.Closeable; import java.io.IOException; -import java.io.UncheckedIOException; import java.time.Duration; import java.util.ArrayList; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Map.Entry; import java.util.Objects; -import java.util.Optional; +import java.util.concurrent.CompletionException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; -import java.util.logging.Level; import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper; import org.apache.lucene.search.similarities.PerFieldSimilarityWrapper; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.publisher.SignalType; -import reactor.core.scheduler.Schedulers; public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneIndex, LuceneCloseable { @@ -72,10 +66,6 @@ public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneI 
"false" )); - static { - LLUtils.initHooks(); - } - private final String clusterName; private final boolean lowMemory; private final MeterRegistry meterRegistry; @@ -84,26 +74,23 @@ public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneI private final LLLocalLuceneIndex[] luceneIndicesById; private final List luceneIndicesSet; private final int totalShards; - private final Flux luceneIndicesFlux; private final PerFieldAnalyzerWrapper luceneAnalyzer; private final PerFieldSimilarityWrapper luceneSimilarity; private final MultiSearcher multiSearcher; private final DecimalBucketMultiSearcher decimalBucketMultiSearcher = new DecimalBucketMultiSearcher(); - public LLLocalMultiLuceneIndex(LLTempHugePqEnv env, - MeterRegistry meterRegistry, + public LLLocalMultiLuceneIndex(MeterRegistry meterRegistry, String clusterName, IntList activeShards, int totalShards, IndicizerAnalyzers indicizerAnalyzers, IndicizerSimilarities indicizerSimilarities, LuceneOptions luceneOptions, - @Nullable LuceneHacks luceneHacks, - LuceneRocksDBManager rocksDBManager) throws IOException { + @Nullable LuceneHacks luceneHacks) { if (totalShards <= 1 || totalShards > 100) { - throw new IOException("Unsupported instances count: " + totalShards); + throw new DBException("Unsupported instances count: " + totalShards); } this.meterRegistry = meterRegistry; @@ -112,15 +99,13 @@ public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneI if (!activeShards.contains(i)) { continue; } - luceneIndices[i] = new LLLocalLuceneIndex(env, - meterRegistry, + luceneIndices[i] = new LLLocalLuceneIndex(meterRegistry, clusterName, i, indicizerAnalyzers, indicizerSimilarities, luceneOptions, - luceneHacks, - rocksDBManager + luceneHacks ); } this.clusterName = clusterName; @@ -133,17 +118,15 @@ public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneI } } this.luceneIndicesSet = new ArrayList<>(luceneIndicesSet); - this.luceneIndicesFlux = Flux.fromIterable(luceneIndicesSet); this.luceneAnalyzer = LuceneUtils.toPerFieldAnalyzerWrapper(indicizerAnalyzers); this.luceneSimilarity = LuceneUtils.toPerFieldSimilarityWrapper(indicizerSimilarities); this.lowMemory = luceneOptions.lowMemory(); - var useHugePq = luceneOptions.allowNonVolatileCollection(); var maxInMemoryResultEntries = luceneOptions.maxInMemoryResultEntries(); if (luceneHacks != null && luceneHacks.customMultiSearcher() != null) { multiSearcher = luceneHacks.customMultiSearcher().get(); } else { - multiSearcher = new AdaptiveMultiSearcher(env, useHugePq, maxInMemoryResultEntries); + multiSearcher = new AdaptiveMultiSearcher(maxInMemoryResultEntries); } } @@ -156,111 +139,62 @@ public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneI return clusterName; } - private Mono getIndexSearchers(LLSnapshot snapshot) { - return luceneIndicesFlux.index() - // Resolve the snapshot of each shard - .flatMap(tuple -> Mono - .fromCallable(() -> resolveSnapshotOptional(snapshot, (int) (long) tuple.getT1())) - .flatMap(luceneSnapshot -> tuple.getT2().retrieveSearcher(luceneSnapshot.orElse(null))) - ) - .collectList() - .doOnDiscard(LLIndexSearcher.class, indexSearcher -> { - try { - LLUtils.onDiscard(indexSearcher); - } catch (UncheckedIOException ex) { - LOG.error("Failed to close an index searcher", ex); - } - }) - .map(indexSearchers -> LLIndexSearchers.of(indexSearchers)); + private LLIndexSearchers getIndexSearchers(LLSnapshot snapshot) { + // Resolve the snapshot of each shard + return 
LLIndexSearchers.of(Streams.mapWithIndex(this.luceneIndicesSet.parallelStream(), (luceneIndex, index) -> { + var subSnapshot = resolveSnapshot(snapshot, (int) index); + return luceneIndex.retrieveSearcher(subSnapshot); + }).toList()); } @Override - public Mono addDocument(LLTerm id, LLUpdateDocument doc) { - return getLuceneIndex(id).addDocument(id, doc); + public void addDocument(LLTerm id, LLUpdateDocument doc) { + getLuceneIndex(id).addDocument(id, doc); } @Override - public Mono addDocuments(boolean atomic, Flux> documents) { - if (BYPASS_GROUPBY_BUG) { - return documents - .buffer(8192) - .flatMap(inputEntries -> { - List>[] sortedEntries = new List[totalShards]; - Mono[] results = new Mono[totalShards]; + public long addDocuments(boolean atomic, Stream> documents) { + var groupedRequests = documents + .collect(groupingBy(term -> getLuceneIndexId(term.getKey(), totalShards), + Int2ObjectOpenHashMap::new, + Collectors.toList() + )); - // Sort entries - for(var inputEntry : inputEntries) { - int luceneIndexId = LuceneUtils.getLuceneIndexId(inputEntry.getKey(), totalShards); - if (sortedEntries[luceneIndexId] == null) { - sortedEntries[luceneIndexId] = new ArrayList<>(); - } - sortedEntries[luceneIndexId].add(inputEntry); - } - - // Add documents - int luceneIndexId = 0; - for (List> docs : sortedEntries) { - if (docs != null && !docs.isEmpty()) { - LLLocalLuceneIndex luceneIndex = Objects.requireNonNull(luceneIndicesById[luceneIndexId]); - results[luceneIndexId] = luceneIndex.addDocuments(atomic, Flux.fromIterable(docs)); - } else { - results[luceneIndexId] = Mono.empty(); - } - luceneIndexId++; - } - - return Flux.merge(results).reduce(0L, Long::sum); - }) - .reduce(0L, Long::sum); - } else { - return documents - .groupBy(term -> getLuceneIndex(term.getKey())) - .flatMap(group -> group.key().addDocuments(atomic, group)) - .reduce(0L, Long::sum); - } + return groupedRequests + .int2ObjectEntrySet() + .stream() + .map(entry -> luceneIndicesById[entry.getIntKey()].addDocuments(atomic, entry.getValue().stream())) + .reduce(0L, Long::sum); } @Override - public Mono deleteDocument(LLTerm id) { - return getLuceneIndex(id).deleteDocument(id); + public void deleteDocument(LLTerm id) { + getLuceneIndex(id).deleteDocument(id); } @Override - public Mono update(LLTerm id, LLIndexRequest request) { - return getLuceneIndex(id).update(id, request); + public void update(LLTerm id, LLIndexRequest request) { + getLuceneIndex(id).update(id, request); } @Override - public Mono updateDocuments(Flux> documents) { - documents = documents - .log("local-multi-update-documents", Level.FINEST, false, SignalType.ON_NEXT, SignalType.ON_COMPLETE); - if (BYPASS_GROUPBY_BUG) { - int bufferSize = 8192; - return documents - .window(bufferSize) - .flatMap(bufferFlux -> bufferFlux - .collect(Collectors.groupingBy(inputEntry -> LuceneUtils.getLuceneIndexId(inputEntry.getKey(), totalShards), - Collectors.collectingAndThen(Collectors.toList(), docs -> { - var luceneIndex = getLuceneIndex(docs.get(0).getKey()); - return luceneIndex.updateDocuments(Flux.fromIterable(docs)); - })) - ) - .map(Map::values) - .flatMap(parts -> Flux.merge(parts).reduce(0L, Long::sum)) - ) - .reduce(0L, Long::sum); - } else { - return documents - .groupBy(term -> getLuceneIndex(term.getKey())) - .flatMap(group -> group.key().updateDocuments(group)) - .reduce(0L, Long::sum); - } + public long updateDocuments(Stream> documents) { + var groupedRequests = documents + .collect(groupingBy(term -> getLuceneIndexId(term.getKey(), totalShards), + 
Int2ObjectOpenHashMap::new, + Collectors.toList() + )); + + return groupedRequests + .int2ObjectEntrySet() + .stream() + .map(entry -> luceneIndicesById[entry.getIntKey()].updateDocuments(entry.getValue().stream())) + .reduce(0L, Long::sum); } @Override - public Mono deleteAll() { - Iterable> it = () -> luceneIndicesSet.stream().map(llLocalLuceneIndex -> llLocalLuceneIndex.deleteAll()).iterator(); - return Mono.whenDelayError(it); + public void deleteAll() { + luceneIndicesSet.forEach(LLLuceneIndex::deleteAll); } private LLSnapshot resolveSnapshot(LLSnapshot multiSnapshot, int instanceId) { @@ -271,12 +205,8 @@ public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneI } } - private Optional resolveSnapshotOptional(LLSnapshot multiSnapshot, int instanceId) { - return Optional.ofNullable(resolveSnapshot(multiSnapshot, instanceId)); - } - @Override - public Flux moreLikeThis(@Nullable LLSnapshot snapshot, + public Stream moreLikeThis(@Nullable LLSnapshot snapshot, QueryParams queryParams, String keyFieldName, Multimap mltDocumentFields) { @@ -285,24 +215,22 @@ public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneI var transformer = new MoreLikeThisTransformer(mltDocumentFields, luceneAnalyzer, luceneSimilarity); // Collect all the shards results into a single global result - return multiSearcher - .collectMulti(searchers, localQueryParams, keyFieldName, transformer) - // Transform the result type - .map(result -> LLSearchResultShard.withResource(result.results(), result.totalHitsCount(), result)) - .flux(); + LuceneSearchResult result = multiSearcher.collectMulti(searchers, localQueryParams, keyFieldName, transformer); + + // Transform the result type + return Stream.of(new LLSearchResultShard(result.results(), result.totalHitsCount())); } @Override - public Flux search(@Nullable LLSnapshot snapshot, + public Stream search(@Nullable LLSnapshot snapshot, QueryParams queryParams, @Nullable String keyFieldName) { - return searchInternal(snapshot, queryParams, keyFieldName) - // Transform the result type - .map(result -> LLSearchResultShard.withResource(result.results(), result.totalHitsCount(), result)) - .flux(); + LuceneSearchResult result = searchInternal(snapshot, queryParams, keyFieldName); + // Transform the result type + return Stream.of(new LLSearchResultShard(result.results(), result.totalHitsCount())); } - private Mono searchInternal(@Nullable LLSnapshot snapshot, + private LuceneSearchResult searchInternal(@Nullable LLSnapshot snapshot, QueryParams queryParams, @Nullable String keyFieldName) { LocalQueryParams localQueryParams = LuceneUtils.toLocalQueryParams(queryParams, luceneAnalyzer); @@ -313,18 +241,14 @@ public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneI } @Override - public Mono count(@Nullable LLSnapshot snapshot, Query query, @Nullable Duration timeout) { + public TotalHitsCount count(@Nullable LLSnapshot snapshot, Query query, @Nullable Duration timeout) { var params = LuceneUtils.getCountQueryParams(query); - return Mono - .usingWhen(this.searchInternal(snapshot, params, null), - result -> Mono.just(result.totalHitsCount()), - LLUtils::finalizeResource - ) - .defaultIfEmpty(TotalHitsCount.of(0, true)); + var result = this.searchInternal(snapshot, params, null); + return result != null ? 
result.totalHitsCount() : TotalHitsCount.of(0, true); } @Override - public Mono computeBuckets(@Nullable LLSnapshot snapshot, + public Buckets computeBuckets(@Nullable LLSnapshot snapshot, @NotNull List queries, @Nullable Query normalizationQuery, BucketParams bucketParams) { @@ -341,76 +265,53 @@ public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneI @Override protected void onClose() { - Iterable> it = () -> luceneIndicesSet - .stream() - .map(part -> Mono - .fromRunnable(part::close) - .transform(LuceneUtils::scheduleLucene) - ) - .iterator(); - var indicesCloseMono = Mono.whenDelayError(it); - indicesCloseMono - .then(Mono.fromCallable(() -> { - if (multiSearcher instanceof Closeable closeable) { - //noinspection BlockingMethodInNonBlockingContext - closeable.close(); - } - return null; - }).transform(LuceneUtils::scheduleLucene)) - .then() - .transform(LLUtils::handleDiscard) - .block(); + luceneIndicesSet.parallelStream().forEach(SafeCloseable::close); + if (multiSearcher instanceof Closeable closeable) { + try { + closeable.close(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } } @Override - public Mono flush() { - Iterable> it = () -> luceneIndicesSet.stream().map(LLLuceneIndex::flush).iterator(); - return Mono.whenDelayError(it); + public void flush() { + luceneIndicesSet.parallelStream().forEach(LLLuceneIndex::flush); } @Override - public Mono waitForMerges() { - Iterable> it = () -> luceneIndicesSet.stream().map(LLLuceneIndex::waitForMerges).iterator(); - return Mono.whenDelayError(it); + public void waitForMerges() { + luceneIndicesSet.parallelStream().forEach(LLLuceneIndex::waitForMerges); } @Override - public Mono waitForLastMerges() { - Iterable> it = () -> luceneIndicesSet.stream().map(LLLuceneIndex::waitForLastMerges).iterator(); - return Mono.whenDelayError(it); + public void waitForLastMerges() { + luceneIndicesSet.parallelStream().forEach(LLLuceneIndex::waitForLastMerges); } @Override - public Mono refresh(boolean force) { - Iterable> it = () -> luceneIndicesSet.stream().map(index -> index.refresh(force)).iterator(); - return Mono.whenDelayError(it); + public void refresh(boolean force) { + luceneIndicesSet.parallelStream().forEach(index -> index.refresh(force)); } @Override - public Mono takeSnapshot() { - return Mono - // Generate next snapshot index - .fromCallable(nextSnapshotNumber::getAndIncrement) - .flatMap(snapshotIndex -> luceneIndicesFlux - .flatMapSequential(llLocalLuceneIndex -> llLocalLuceneIndex.takeSnapshot()) - .collectList() - .doOnNext(instancesSnapshotsArray -> registeredSnapshots.put(snapshotIndex, instancesSnapshotsArray)) - .thenReturn(new LLSnapshot(snapshotIndex)) - ); + public LLSnapshot takeSnapshot() { + // Generate next snapshot index + var snapshotIndex = nextSnapshotNumber.getAndIncrement(); + var snapshot = luceneIndicesSet.parallelStream().map(LLSnapshottable::takeSnapshot).toList(); + registeredSnapshots.put(snapshotIndex, snapshot); + return new LLSnapshot(snapshotIndex); } @Override - public Mono releaseSnapshot(LLSnapshot snapshot) { - return Mono - .fromCallable(() -> registeredSnapshots.remove(snapshot.getSequenceNumber())) - .flatMapIterable(list -> list) - .index() - .flatMapSequential(tuple -> { - int index = (int) (long) tuple.getT1(); - LLSnapshot instanceSnapshot = tuple.getT2(); - return luceneIndicesSet.get(index).releaseSnapshot(instanceSnapshot); - }) - .then(); + public void releaseSnapshot(LLSnapshot snapshot) { + var list = 
registeredSnapshots.remove(snapshot.getSequenceNumber()); + for (int shardIndex = 0; shardIndex < list.size(); shardIndex++) { + var luceneIndex = luceneIndicesSet.get(shardIndex); + LLSnapshot instanceSnapshot = list.get(shardIndex); + luceneIndex.releaseSnapshot(instanceSnapshot); + } } @Override @@ -419,22 +320,17 @@ public class LLLocalMultiLuceneIndex extends SimpleResource implements LLLuceneI } @Override - public Mono pauseForBackup() { - return Mono.whenDelayError(Iterables.transform(this.luceneIndicesSet, IBackuppable::pauseForBackup)); + public void pauseForBackup() { + this.luceneIndicesSet.forEach(IBackuppable::pauseForBackup); } @Override - public Mono resumeAfterBackup() { - return Mono.whenDelayError(Iterables.transform(this.luceneIndicesSet, IBackuppable::resumeAfterBackup)); + public void resumeAfterBackup() { + this.luceneIndicesSet.forEach(IBackuppable::resumeAfterBackup); } @Override public boolean isPaused() { - for (LLLuceneIndex llLuceneIndex : this.luceneIndicesSet) { - if (llLuceneIndex.isPaused()) { - return true; - } - } - return false; + return this.luceneIndicesSet.stream().anyMatch(IBackuppable::isPaused); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java index 7e98b98..2d894e9 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalReactiveRocksIterator.java @@ -4,127 +4,112 @@ import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB; import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions; import static it.cavallium.dbengine.database.LLUtils.isBoundedRange; -import io.netty5.buffer.Buffer; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLRange; import it.cavallium.dbengine.database.LLUtils; +import it.cavallium.dbengine.database.disk.rocksdb.RocksIteratorObj; +import it.cavallium.dbengine.utils.DBException; +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.CompletionException; import java.util.function.Supplier; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.jetbrains.annotations.Nullable; import org.rocksdb.ReadOptions; import org.rocksdb.RocksDBException; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; public abstract class LLLocalReactiveRocksIterator { protected static final Logger logger = LogManager.getLogger(LLLocalReactiveRocksIterator.class); private final RocksDBColumn db; - private final Mono rangeMono; - private final boolean allowNettyDirect; + private final LLRange range; private final Supplier readOptions; private final boolean readValues; private final boolean reverse; private final boolean smallRange; public LLLocalReactiveRocksIterator(RocksDBColumn db, - Mono rangeMono, - boolean allowNettyDirect, + LLRange range, Supplier readOptions, boolean readValues, boolean reverse, boolean smallRange) { this.db = db; - this.rangeMono = rangeMono; - this.allowNettyDirect = allowNettyDirect; + this.range = range; this.readOptions = readOptions != null ? 
readOptions : ReadOptions::new; this.readValues = readValues; this.reverse = reverse; this.smallRange = smallRange; } - public final Flux flux() { - return Flux.usingWhen(rangeMono, range -> Flux.generate(() -> { - var readOptions = generateCustomReadOptions(this.readOptions.get(), true, isBoundedRange(range), smallRange); - if (logger.isTraceEnabled()) { - logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range)); - } - return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(allowNettyDirect, readOptions, range, reverse)); - }, (tuple, sink) -> { + public final Stream stream() { + var readOptions = generateCustomReadOptions(this.readOptions.get(), true, isBoundedRange(range), smallRange); + if (logger.isTraceEnabled()) { + logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range)); + } + + RocksIteratorObj rocksIterator; + try { + rocksIterator = db.newRocksIterator(readOptions, range, reverse); + } catch (RocksDBException e) { + readOptions.close(); + throw new DBException("Failed to iterate the range", e); + } + + return Stream.generate(() -> { try { - var rocksIterator = tuple.iter(); if (rocksIterator.isValid()) { - Buffer key; - if (allowNettyDirect) { - key = LLUtils.readDirectNioBuffer(db.getAllocator(), rocksIterator::key); + // Note that the underlying array is subject to changes! + Buf key; + key = rocksIterator.keyBuf(); + // Note that the underlying array is subject to changes! + Buf value; + if (readValues) { + value = rocksIterator.valueBuf(); } else { - key = LLUtils.fromByteArray(db.getAllocator(), rocksIterator.key()); + value = null; } - try { - Buffer value; - if (readValues) { - if (allowNettyDirect) { - value = LLUtils.readDirectNioBuffer(db.getAllocator(), rocksIterator::value); - } else { - value = LLUtils.fromByteArray(db.getAllocator(), rocksIterator.value()); - } - } else { - value = null; - } - if (logger.isTraceEnabled()) { - logger.trace(MARKER_ROCKSDB, - "Range {} is reading {}: {}", - LLUtils.toStringSafe(range), - LLUtils.toStringSafe(key), - LLUtils.toStringSafe(value) - ); - } - - try { - if (reverse) { - rocksIterator.prev(); - } else { - rocksIterator.next(); - } - sink.next(getEntry(key, value)); - } catch (Throwable ex) { - if (value != null && value.isAccessible()) { - try { - value.close(); - } catch (Throwable ex2) { - logger.error(ex2); - } - } - throw ex; - } - } catch (Throwable ex) { - if (key.isAccessible()) { - try { - key.close(); - } catch (Throwable ex2) { - logger.error(ex2); - } - } - throw ex; + if (logger.isTraceEnabled()) { + logger.trace(MARKER_ROCKSDB, + "Range {} is reading {}: {}", + LLUtils.toStringSafe(range), + LLUtils.toStringSafe(key), + LLUtils.toStringSafe(value) + ); } + + if (reverse) { + rocksIterator.prev(); + } else { + rocksIterator.next(); + } + return getEntry(key, value); } else { if (logger.isTraceEnabled()) { logger.trace(MARKER_ROCKSDB, "Range {} ended", LLUtils.toStringSafe(range)); } - sink.complete(); + return null; } } catch (RocksDBException ex) { if (logger.isTraceEnabled()) { logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range)); } - sink.error(ex); + throw new CompletionException(ex); } - return tuple; - }, RocksIterWithReadOpts::close), LLUtils::finalizeResource); + }).takeWhile(Objects::nonNull).onClose(() -> { + rocksIterator.close(); + readOptions.close(); + }); } - public abstract T getEntry(@Nullable Buffer key, @Nullable Buffer value); + /** + * @param key this buffer content will be changed during the next iteration + * 
@param value this buffer content will be changed during the next iteration + */ + public abstract T getEntry(@Nullable Buf key, @Nullable Buf value); } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalSingleton.java b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalSingleton.java index 5e11af9..4886d04 100644 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLLocalSingleton.java +++ b/src/main/java/it/cavallium/dbengine/database/disk/LLLocalSingleton.java @@ -2,16 +2,16 @@ package it.cavallium.dbengine.database.disk; import static it.cavallium.dbengine.database.disk.UpdateAtomicResultMode.DELTA; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.util.Send; +import com.google.common.util.concurrent.AbstractScheduledService.Scheduler; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLDelta; import it.cavallium.dbengine.database.LLSingleton; import it.cavallium.dbengine.database.LLSnapshot; import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.UpdateReturnMode; +import it.cavallium.dbengine.utils.DBException; import java.io.IOException; -import java.util.Arrays; +import java.nio.charset.StandardCharsets; import java.util.concurrent.Callable; import java.util.function.Function; import org.jetbrains.annotations.NotNull; @@ -20,56 +20,36 @@ import org.rocksdb.ReadOptions; import org.rocksdb.RocksDBException; import org.rocksdb.Snapshot; import org.rocksdb.WriteOptions; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Scheduler; -import reactor.core.scheduler.Schedulers; public class LLLocalSingleton implements LLSingleton { private final RocksDBColumn db; private final Function snapshotResolver; - private final byte[] name; + private final Buf name; private final String columnName; - private final Mono nameMono; private final String databaseName; - private final Scheduler dbWScheduler; - private final Scheduler dbRScheduler; public LLLocalSingleton(RocksDBColumn db, Function snapshotResolver, String databaseName, byte[] name, String columnName, - Scheduler dbWScheduler, - Scheduler dbRScheduler, byte @Nullable [] defaultValue) throws RocksDBException { this.db = db; this.databaseName = databaseName; this.snapshotResolver = snapshotResolver; - this.name = name; + this.name = Buf.wrap(name); this.columnName = columnName; - this.nameMono = Mono.fromCallable(() -> { - var alloc = db.getAllocator(); - var nameBuf = alloc.allocate(this.name.length); - nameBuf.writeBytes(this.name); - return nameBuf; - }); - this.dbWScheduler = dbWScheduler; - this.dbRScheduler = dbRScheduler; - if (Schedulers.isInNonBlockingThread()) { + if (LLUtils.isInNonBlockingThread()) { throw new UnsupportedOperationException("Initialized in a nonblocking thread"); } try (var readOptions = new ReadOptions(); var writeOptions = new WriteOptions()) { - if (defaultValue != null && db.get(readOptions, this.name, true) == null) { - db.put(writeOptions, this.name, defaultValue); + if (defaultValue != null && db.get(readOptions, this.name.asArray(), true) == null) { + db.put(writeOptions, this.name.asArray(), defaultValue); } } } - private @NotNull Mono runOnDb(boolean write, Callable<@Nullable T> callable) { - return Mono.fromCallable(callable).subscribeOn(write ? 
dbWScheduler : dbRScheduler); - } - private ReadOptions generateReadOptions(LLSnapshot snapshot) { if (snapshot != null) { return new ReadOptions().setSnapshot(snapshotResolver.apply(snapshot)); @@ -79,94 +59,71 @@ public class LLLocalSingleton implements LLSingleton { } @Override - public BufferAllocator getAllocator() { - return db.getAllocator(); + public Buf get(@Nullable LLSnapshot snapshot) { + try { + Buf result; + try (var readOptions = generateReadOptions(snapshot)) { + result = db.get(readOptions, name); + } + return result; + } catch (RocksDBException ex) { + throw new DBException("Failed to read " + LLUtils.toString(name), ex); + } } @Override - public Mono get(@Nullable LLSnapshot snapshot) { - return nameMono.publishOn(dbRScheduler).handle((name, sink) -> { - try (name) { - Buffer result; - try (var readOptions = generateReadOptions(snapshot)) { - result = db.get(readOptions, name); - } - if (result != null) { - sink.next(result); - } else { - sink.complete(); - } - } catch (RocksDBException ex) { - sink.error(new IOException("Failed to read " + LLUtils.toString(name), ex)); - } - }); - } - - @Override - public Mono set(Mono valueMono) { - return Mono.zip(nameMono, valueMono).publishOn(dbWScheduler).handle((tuple, sink) -> { - var name = tuple.getT1(); - var value = tuple.getT2(); - try (name; value; var writeOptions = new WriteOptions()) { - db.put(writeOptions, name, value); - sink.next(true); - } catch (RocksDBException ex) { - sink.error(new IOException("Failed to write " + LLUtils.toString(name), ex)); - } - }).switchIfEmpty(unset().thenReturn(true)).then(); - } - - private Mono unset() { - return nameMono.publishOn(dbWScheduler).handle((name, sink) -> { - try (name; var writeOptions = new WriteOptions()) { + public void set(Buf value) { + try (var writeOptions = new WriteOptions()) { + if (value == null) { db.delete(writeOptions, name); - } catch (RocksDBException ex) { - sink.error(new IOException("Failed to read " + LLUtils.toString(name), ex)); + } else { + db.put(writeOptions, name, value); } - }); + } catch (RocksDBException ex) { + throw new DBException("Failed to write " + LLUtils.toString(name), ex); + } + } + + private void unset() { + this.set(null); } @Override - public Mono update(BinarySerializationFunction updater, + public Buf update(BinarySerializationFunction updater, UpdateReturnMode updateReturnMode) { - return Mono.usingWhen(nameMono, key -> runOnDb(true, () -> { - if (Schedulers.isInNonBlockingThread()) { - throw new UnsupportedOperationException("Called update in a nonblocking thread"); - } - UpdateAtomicResultMode returnMode = switch (updateReturnMode) { - case NOTHING -> UpdateAtomicResultMode.NOTHING; - case GET_NEW_VALUE -> UpdateAtomicResultMode.CURRENT; - case GET_OLD_VALUE -> UpdateAtomicResultMode.PREVIOUS; - }; - UpdateAtomicResult result; - try (var readOptions = new ReadOptions(); var writeOptions = new WriteOptions()) { - result = db.updateAtomic(readOptions, writeOptions, key, updater, returnMode); - } - return switch (updateReturnMode) { - case NOTHING -> { - result.close(); - yield null; - } - case GET_NEW_VALUE -> ((UpdateAtomicResultCurrent) result).current(); - case GET_OLD_VALUE -> ((UpdateAtomicResultPrevious) result).previous(); - }; - }).onErrorMap(cause -> new IOException("Failed to read or write", cause)), - LLUtils::finalizeResource); + if (LLUtils.isInNonBlockingThread()) { + throw new UnsupportedOperationException("Called update in a nonblocking thread"); + } + UpdateAtomicResultMode returnMode = switch (updateReturnMode) 
{ + case NOTHING -> UpdateAtomicResultMode.NOTHING; + case GET_NEW_VALUE -> UpdateAtomicResultMode.CURRENT; + case GET_OLD_VALUE -> UpdateAtomicResultMode.PREVIOUS; + }; + UpdateAtomicResult result; + try (var readOptions = new ReadOptions(); var writeOptions = new WriteOptions()) { + result = db.updateAtomic(readOptions, writeOptions, name, updater, returnMode); + } catch (RocksDBException e) { + throw new DBException("Failed to read or write", e); + } + return switch (updateReturnMode) { + case NOTHING -> null; + case GET_NEW_VALUE -> ((UpdateAtomicResultCurrent) result).current(); + case GET_OLD_VALUE -> ((UpdateAtomicResultPrevious) result).previous(); + }; } @Override - public Mono updateAndGetDelta(BinarySerializationFunction updater) { - return Mono.usingWhen(nameMono, key -> runOnDb(true, () -> { - if (Schedulers.isInNonBlockingThread()) { - throw new UnsupportedOperationException("Called update in a nonblocking thread"); - } - UpdateAtomicResult result; - try (var readOptions = new ReadOptions(); var writeOptions = new WriteOptions()) { - result = db.updateAtomic(readOptions, writeOptions, key, updater, DELTA); - } - return ((UpdateAtomicResultDelta) result).delta(); - }).onErrorMap(cause -> new IOException("Failed to read or write", cause)), - LLUtils::finalizeResource); + public LLDelta updateAndGetDelta(BinarySerializationFunction updater) { + if (LLUtils.isInNonBlockingThread()) { + throw new UnsupportedOperationException("Called update in a nonblocking thread"); + } + UpdateAtomicResult result; + try (var readOptions = new ReadOptions(); var writeOptions = new WriteOptions()) { + result = db.updateAtomic(readOptions, writeOptions, name, updater, DELTA); + } catch (RocksDBException e) { + throw new DBException("Failed to read or write", e); + } + return ((UpdateAtomicResultDelta) result).delta(); } @Override @@ -181,6 +138,6 @@ public class LLLocalSingleton implements LLSingleton { @Override public String getName() { - return new String(name); + return name.toString(StandardCharsets.UTF_8); } } diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLTempHugePqEnv.java b/src/main/java/it/cavallium/dbengine/database/disk/LLTempHugePqEnv.java deleted file mode 100644 index 2baf513..0000000 --- a/src/main/java/it/cavallium/dbengine/database/disk/LLTempHugePqEnv.java +++ /dev/null @@ -1,137 +0,0 @@ -package it.cavallium.dbengine.database.disk; - -import java.io.Closeable; -import java.io.File; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import org.rocksdb.AbstractComparator; -import org.rocksdb.BlockBasedTableConfig; -import org.rocksdb.ChecksumType; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.CompressionType; -import org.rocksdb.DBOptions; -import org.rocksdb.InfoLogLevel; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; - -public class LLTempHugePqEnv implements Closeable { - - private Path tempDirectory; - private AtomicInteger nextColumnName; - private HugePqEnv env; - private volatile boolean initialized; - private volatile boolean closed; - - public LLTempHugePqEnv() { - } - - public HugePqEnv getEnv() { - if (closed) { - throw new IllegalStateException("Environment closed"); - } - initializeIfPossible(); - return env; - } - - private void initializeIfPossible() { - if 
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LLTempHugePqEnv.java b/src/main/java/it/cavallium/dbengine/database/disk/LLTempHugePqEnv.java
deleted file mode 100644
index 2baf513..0000000
--- a/src/main/java/it/cavallium/dbengine/database/disk/LLTempHugePqEnv.java
+++ /dev/null
@@ -1,137 +0,0 @@
-package it.cavallium.dbengine.database.disk;
-
-import java.io.Closeable;
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.util.ArrayList;
-import java.util.Comparator;
-import java.util.List;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.rocksdb.AbstractComparator;
-import org.rocksdb.BlockBasedTableConfig;
-import org.rocksdb.ChecksumType;
-import org.rocksdb.ColumnFamilyDescriptor;
-import org.rocksdb.ColumnFamilyHandle;
-import org.rocksdb.ColumnFamilyOptions;
-import org.rocksdb.CompressionType;
-import org.rocksdb.DBOptions;
-import org.rocksdb.InfoLogLevel;
-import org.rocksdb.RocksDB;
-import org.rocksdb.RocksDBException;
-
-public class LLTempHugePqEnv implements Closeable {
-
-	private Path tempDirectory;
-	private AtomicInteger nextColumnName;
-	private HugePqEnv env;
-	private volatile boolean initialized;
-	private volatile boolean closed;
-
-	public LLTempHugePqEnv() {
-	}
-
-	public HugePqEnv getEnv() {
-		if (closed) {
-			throw new IllegalStateException("Environment closed");
-		}
-		initializeIfPossible();
-		return env;
-	}
-
-	private void initializeIfPossible() {
-		if (!initialized) {
-			synchronized(this) {
-				if (!initialized) {
-					try {
-						tempDirectory = Files.createTempDirectory("huge-pq");
-						var opts = new DBOptions();
-						opts.setCreateIfMissing(true);
-						opts.setAtomicFlush(false);
-						opts.optimizeForSmallDb();
-						opts.setParanoidChecks(false);
-						opts.setIncreaseParallelism(Runtime.getRuntime().availableProcessors());
-						opts.setMaxOpenFiles(-1);
-						opts.setUseFsync(false);
-						opts.setUnorderedWrite(true);
-						opts.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL);
-
-						var cfh = new ArrayList<ColumnFamilyHandle>();
-						nextColumnName = new AtomicInteger(0);
-						var db = RocksDB.open(opts,
-								tempDirectory.toString(),
-								List.of(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, getColumnOptions(null))),
-								cfh
-						);
-						var cfhObjs = new ArrayList<ColumnFamilyHandle>(cfh.size());
-						for (ColumnFamilyHandle columnFamilyHandle : cfh) {
-							cfhObjs.add(columnFamilyHandle);
-						}
-						env = new HugePqEnv(db, cfhObjs);
-						initialized = true;
-					} catch (RocksDBException | IOException e) {
-						throw new RuntimeException(e);
-					}
-				}
-			}
-		}
-	}
-
-	static ColumnFamilyOptions getColumnOptions(AbstractComparator comparator) {
-		var opts = new ColumnFamilyOptions()
-				.setOptimizeFiltersForHits(true)
-				.setParanoidFileChecks(false)
-				.setEnableBlobFiles(true)
-				.setBlobCompressionType(CompressionType.LZ4_COMPRESSION)
-				.optimizeLevelStyleCompaction()
-				.setLevelCompactionDynamicLevelBytes(true)
-				.setTableFormatConfig(new BlockBasedTableConfig()
-						.setOptimizeFiltersForMemory(true)
-						.setVerifyCompression(false)
-						.setChecksumType(ChecksumType.kNoChecksum));
-		if (comparator != null) {
-			opts.setComparator(comparator);
-		}
-		return opts;
-	}
-
-	public int allocateDb(AbstractComparator comparator) {
-		initializeIfPossible();
-		try {
-			return env.createColumnFamily(nextColumnName.getAndIncrement(), comparator);
-		} catch (RocksDBException e) {
-			throw new IllegalStateException(e);
-		}
-	}
-
-	public void freeDb(int db) {
-		initializeIfPossible();
-		try {
-			env.deleteColumnFamily(db);
-		} catch (RocksDBException e) {
-			throw new IllegalStateException(e);
-		}
-	}
-
-	@Override
-	public void close() throws IOException {
-		if (this.closed) {
-			return;
-		}
-		if (!this.initialized) {
-			synchronized (this) {
-				closed = true;
-				initialized = true;
-				return;
-			}
-		}
-		this.closed = true;
-		env.close();
-		//noinspection ResultOfMethodCallIgnored
-		Files.walk(tempDirectory)
-				.sorted(Comparator.reverseOrder())
-				.map(Path::toFile)
-				.forEach(File::delete);
-	}
-}
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LRUCacheFactory.java b/src/main/java/it/cavallium/dbengine/database/disk/LRUCacheFactory.java
index e1416a1..61082b5 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/LRUCacheFactory.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/LRUCacheFactory.java
@@ -1,7 +1,6 @@
 package it.cavallium.dbengine.database.disk;
 
 import org.rocksdb.Cache;
-import org.rocksdb.ClockCache;
 import org.rocksdb.LRUCache;
 
 public class LRUCacheFactory implements CacheFactory {
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/LuceneIndexSnapshot.java b/src/main/java/it/cavallium/dbengine/database/disk/LuceneIndexSnapshot.java
index ca0909c..8610722 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/LuceneIndexSnapshot.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/LuceneIndexSnapshot.java
@@ -3,11 +3,9 @@ package it.cavallium.dbengine.database.disk;
 import it.cavallium.dbengine.database.DiscardingCloseable;
 import it.cavallium.dbengine.lucene.LuceneCloseable;
 import it.cavallium.dbengine.utils.SimpleResource;
-import java.io.Closeable;
 import java.io.IOException;
-import java.io.UncheckedIOException;
+import it.cavallium.dbengine.utils.DBException;
 import java.util.concurrent.Executor;
-import java.util.concurrent.ForkJoinPool;
 import org.apache.lucene.index.DirectoryReader;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.search.IndexSearcher;
@@ -69,7 +67,7 @@ public class LuceneIndexSnapshot extends SimpleResource implements DiscardingCloseable,
 			try {
 				indexReader.close();
 			} catch (IOException e) {
-				throw new UncheckedIOException(e);
+				throw new DBException(e);
 			}
 			indexSearcher = null;
 		}
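// Editor's sketch: the deleted LLTempHugePqEnv above relied on the double-checked locking idiom
// for lazy one-time initialization (volatile flag, unlocked check, then a second check under the
// monitor). A generic, JDK-only restatement of that idiom for reference:
final class LazyInit<T> {
	private volatile T value;                       // volatile makes the unlocked first check safe
	private final java.util.function.Supplier<T> factory;

	LazyInit(java.util.function.Supplier<T> factory) {
		this.factory = factory;
	}

	T get() {
		T local = value;                            // first check, no lock
		if (local == null) {
			synchronized (this) {
				local = value;                      // second check, under lock
				if (local == null) {
					value = local = factory.get();
				}
			}
		}
		return local;
	}
}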
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/OptimisticRocksDBColumn.java b/src/main/java/it/cavallium/dbengine/database/disk/OptimisticRocksDBColumn.java
index d4b2dd9..3469221 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/OptimisticRocksDBColumn.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/OptimisticRocksDBColumn.java
@@ -4,20 +4,16 @@ import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
 
 import io.micrometer.core.instrument.DistributionSummary;
 import io.micrometer.core.instrument.MeterRegistry;
-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.BufferAllocator;
-import io.netty5.buffer.MemoryManager;
-import io.netty5.util.Send;
+import it.cavallium.dbengine.buffers.Buf;
 import it.cavallium.dbengine.database.LLDelta;
 import it.cavallium.dbengine.database.LLUtils;
 import it.cavallium.dbengine.lucene.ExponentialPageLimits;
+import it.cavallium.dbengine.utils.DBException;
 import java.io.IOException;
 import java.util.concurrent.ThreadLocalRandom;
-import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.LockSupport;
 import java.util.concurrent.locks.StampedLock;
 import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.OptimisticTransactionDB;
 import org.rocksdb.ReadOptions;
@@ -27,7 +23,6 @@ import org.rocksdb.Transaction;
 import org.rocksdb.TransactionOptions;
 import org.rocksdb.WriteBatch;
 import org.rocksdb.WriteOptions;
-import reactor.core.scheduler.Schedulers;
 
 public final class OptimisticRocksDBColumn extends AbstractRocksDBColumn<OptimisticTransactionDB> {
 
@@ -36,13 +31,11 @@ public final class OptimisticRocksDBColumn extends AbstractRocksDBColumn<OptimisticTransactionDB>
-							if (retries >= 5 && retries % 5 == 0 || ALWAYS_PRINT_OPTIMISTIC_RETRIES) {
-								logger.warn(MARKER_ROCKSDB, "Failed optimistic transaction {} (update):"
-										+ " waiting {} ms before retrying for the {} time", LLUtils.toStringSafe(key), retryNs / 1000000d, retries);
-							} else if (logger.isDebugEnabled(MARKER_ROCKSDB)) {
-								logger.debug(MARKER_ROCKSDB, "Failed optimistic transaction {} (update):"
-										+ " waiting {} ms before retrying for the {} time", LLUtils.toStringSafe(key), retryNs / 1000000d, retries);
-							}
-							// Wait for n milliseconds
-							if (retryNs > 0) {
-								LockSupport.parkNanos(retryNs);
-							}
-						}
-					} while (!committedSuccessfully);
-					if (retries > 5) {
-						logger.warn(MARKER_ROCKSDB, "Took {} retries to update key {}", retries, LLUtils.toStringSafe(key));
+						tx.put(cfh, keyArray, newDataArray);
+						changed = true;
+						committedSuccessfully = commitOptimistically(tx);
+					} else {
+						changed = false;
+						committedSuccessfully = true;
+						tx.rollback();
 					}
-					recordAtomicUpdateTime(changed, prevData != null, newData != null, initNanoTime);
-					optimisticAttempts.record(retries);
-					return switch (returnMode) {
-						case NOTHING -> {
-							if (prevData != null) {
-								prevData.close();
-							}
-							if (newData != null) {
-								newData.close();
-							}
-							yield RESULT_NOTHING;
+					if (!committedSuccessfully) {
+						tx.undoGetForUpdate(cfh, keyArray);
+						tx.rollback();
+						retries++;
+
+						if (retries == 1) {
+							retryTime = new ExponentialPageLimits(0, 2, 2000);
 						}
-						case CURRENT -> {
-							if (prevData != null) {
-								prevData.close();
-							}
-							yield new UpdateAtomicResultCurrent(newData);
+						long retryNs = 1000000L * retryTime.getPageLimit(retries);
+
+						// +- 30%
+						retryNs = retryNs + ThreadLocalRandom.current().nextLong(-retryNs * 30L / 100L, retryNs * 30L / 100L);
+
+						if (retries >= 5 && retries % 5 == 0 || ALWAYS_PRINT_OPTIMISTIC_RETRIES) {
+							logger.warn(MARKER_ROCKSDB, "Failed optimistic transaction {} (update):"
+									+ " waiting {} ms before retrying for the {} time", LLUtils.toStringSafe(key), retryNs / 1000000d, retries);
+						} else if (logger.isDebugEnabled(MARKER_ROCKSDB)) {
+							logger.debug(MARKER_ROCKSDB, "Failed optimistic transaction {} (update):"
+									+ " waiting {} ms before retrying for the {} time", LLUtils.toStringSafe(key), retryNs / 1000000d, retries);
 						}
-						case PREVIOUS -> {
-							if (newData != null) {
-								newData.close();
-							}
-							yield new UpdateAtomicResultPrevious(prevData);
+						// Wait for n milliseconds
+						if (retryNs > 0) {
+							LockSupport.parkNanos(retryNs);
 						}
-						case BINARY_CHANGED -> {
-							if (prevData != null) {
-								prevData.close();
-							}
-							if (newData != null) {
-								newData.close();
-							}
-							yield new UpdateAtomicResultBinaryChanged(changed);
-						}
-						case DELTA -> new UpdateAtomicResultDelta(LLDelta.of(prevData, newData));
-					};
-				} catch (Throwable ex) {
-					if (prevData != null && prevData.isAccessible()) {
-						prevData.close();
 					}
-					if (newData != null && newData.isAccessible()) {
-						newData.close();
-					}
-					throw ex;
+				} while (!committedSuccessfully);
+				if (retries > 5) {
+					logger.warn(MARKER_ROCKSDB, "Took {} retries to update key {}", retries, LLUtils.toStringSafe(key));
 				}
+				recordAtomicUpdateTime(changed, prevData != null, newData != null, initNanoTime);
+				optimisticAttempts.record(retries);
+				return switch (returnMode) {
+					case NOTHING -> RESULT_NOTHING;
+					case CURRENT -> new UpdateAtomicResultCurrent(newData);
+					case PREVIOUS -> new UpdateAtomicResultPrevious(prevData);
+					case BINARY_CHANGED -> new UpdateAtomicResultBinaryChanged(changed);
+					case DELTA -> new UpdateAtomicResultDelta(LLDelta.of(prevData, newData));
+				};
 			}
-		} catch (Throwable ex) {
-			throw new IOException("Failed to update key " + LLUtils.toStringSafe(key), ex);
+		} catch (Exception ex) {
+			throw new DBException("Failed to update key " + LLUtils.toStringSafe(key), ex);
 		}
 	}
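// Editor's sketch: the optimistic-update loop above waits an exponentially growing interval
// with +-30% random jitter before each retry. A self-contained restatement of that backoff;
// the ExponentialPageLimits growth curve is the project's own, approximated here by a shift:
final class RetryBackoffSketch {
	static void backoff(int retries) {
		long baseMs = Math.min(2000, 2L << Math.min(retries, 10)); // assumed growth curve, capped at 2000 ms
		long retryNs = 1_000_000L * baseMs;
		// +-30% jitter, exactly as computed in the diff above
		retryNs += java.util.concurrent.ThreadLocalRandom.current()
				.nextLong(-retryNs * 30L / 100L, retryNs * 30L / 100L);
		if (retryNs > 0) {
			java.util.concurrent.locks.LockSupport.parkNanos(retryNs);
		}
	}
}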
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/PessimisticRocksDBColumn.java b/src/main/java/it/cavallium/dbengine/database/disk/PessimisticRocksDBColumn.java
index 5a83bf3..1c46c8b 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/PessimisticRocksDBColumn.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/PessimisticRocksDBColumn.java
@@ -3,17 +3,13 @@ package it.cavallium.dbengine.database.disk;
 import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
 
 import io.micrometer.core.instrument.MeterRegistry;
-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.BufferAllocator;
-import io.netty5.buffer.MemoryManager;
-import io.netty5.util.Send;
+import it.cavallium.dbengine.buffers.Buf;
 import it.cavallium.dbengine.database.LLDelta;
 import it.cavallium.dbengine.database.LLUtils;
+import it.cavallium.dbengine.utils.DBException;
 import java.io.IOException;
-import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.StampedLock;
 import org.jetbrains.annotations.NotNull;
-import org.jetbrains.annotations.Nullable;
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.ReadOptions;
 import org.rocksdb.RocksDBException;
@@ -21,20 +17,17 @@ import org.rocksdb.Transaction;
 import org.rocksdb.TransactionDB;
 import org.rocksdb.TransactionOptions;
 import org.rocksdb.WriteOptions;
-import reactor.core.scheduler.Schedulers;
 
 public final class PessimisticRocksDBColumn extends AbstractRocksDBColumn<TransactionDB> {
 
 	private static final TransactionOptions DEFAULT_TX_OPTIONS = new TransactionOptions();
 
 	public PessimisticRocksDBColumn(TransactionDB db,
-			boolean nettyDirect,
-			BufferAllocator alloc,
 			String dbName,
 			ColumnFamilyHandle cfh,
 			MeterRegistry meterRegistry,
 			StampedLock closeLock) {
-		super(db, nettyDirect, alloc, dbName, cfh, meterRegistry, closeLock);
+		super(db, dbName, cfh, meterRegistry, closeLock);
 	}
 
 	@Override
@@ -52,136 +45,94 @@ public final class PessimisticRocksDBColumn extends AbstractRocksDBColumn<TransactionDB>
-						if (prevData != null) {
-							prevData.close();
-						}
-						if (newData != null) {
-							newData.close();
-						}
-						yield RESULT_NOTHING;
-					}
-					case CURRENT -> {
-						if (prevData != null) {
-							prevData.close();
-						}
-						yield new UpdateAtomicResultCurrent(newData);
-					}
-					case PREVIOUS -> {
-						if (newData != null) {
-							newData.close();
-						}
-						yield new UpdateAtomicResultPrevious(prevData);
-					}
-					case BINARY_CHANGED -> {
-						if (prevData != null) {
-							prevData.close();
-						}
-						if (newData != null) {
-							newData.close();
-						}
-						yield new UpdateAtomicResultBinaryChanged(changed);
-					}
-					case DELTA -> new UpdateAtomicResultDelta(LLDelta.of(prevData, newData));
-				};
-			} catch (Throwable ex) {
-				if (prevData != null && prevData.isAccessible()) {
-					prevData.close();
-				}
-				if (newData != null && newData.isAccessible()) {
-					newData.close();
-				}
-				throw ex;
+				Buf prevData = null;
+				Buf newData = null;
+				boolean changed;
+				if (logger.isTraceEnabled()) {
+					logger.trace(MARKER_ROCKSDB, "Reading {} (before update lock)", LLUtils.toStringSafe(key));
+				}
+				var prevDataArray = tx.getForUpdate(readOptions, cfh, keyArray, true);
+				try {
+					if (logger.isTraceEnabled()) {
+						logger.trace(MARKER_ROCKSDB,
+								"Reading {}: {} (before update)",
+								LLUtils.toStringSafe(key),
+								LLUtils.toStringSafe(prevDataArray)
+						);
+					}
+					if (prevDataArray != null) {
+						readValueFoundWithoutBloomBufferSize.record(prevDataArray.length);
+						prevData = Buf.wrap(prevDataArray);
+					} else {
+						readValueNotFoundWithoutBloomBufferSize.record(0);
+					}
+					Buf prevDataToSendToUpdater;
+					if (prevData != null) {
+						prevDataToSendToUpdater = prevData.copy();
+					} else {
+						prevDataToSendToUpdater = null;
+					}
+
+					newData = updater.apply(prevDataToSendToUpdater);
+					var newDataArray = newData == null ? null : LLUtils.asArray(newData);
+					if (logger.isTraceEnabled()) {
+						logger.trace(MARKER_ROCKSDB,
+								"Updating {}. previous data: {}, updated data: {}",
+								LLUtils.toStringSafe(key),
+								LLUtils.toStringSafe(prevDataArray),
+								LLUtils.toStringSafe(newDataArray)
+						);
+					}
+					if (prevData != null && newData == null) {
+						if (logger.isTraceEnabled()) {
+							logger.trace(MARKER_ROCKSDB, "Deleting {} (after update)", LLUtils.toStringSafe(key));
+						}
+						writeValueBufferSize.record(0);
+						tx.delete(cfh, keyArray, true);
+						changed = true;
+						tx.commit();
+					} else if (newData != null && (prevData == null || !LLUtils.equals(prevData, newData))) {
+						if (logger.isTraceEnabled()) {
+							logger.trace(MARKER_ROCKSDB,
+									"Writing {}: {} (after update)",
+									LLUtils.toStringSafe(key),
+									LLUtils.toStringSafe(newData)
+							);
+						}
+						writeValueBufferSize.record(newDataArray.length);
+						tx.put(cfh, keyArray, newDataArray);
+						changed = true;
+						tx.commit();
+					} else {
+						changed = false;
+						tx.rollback();
+					}
+				} finally {
+					tx.undoGetForUpdate(cfh, keyArray);
+				}
+				recordAtomicUpdateTime(changed, prevData != null, newData != null, initNanoTime);
+				return switch (returnMode) {
+					case NOTHING -> RESULT_NOTHING;
+					case CURRENT -> new UpdateAtomicResultCurrent(newData);
+					case PREVIOUS -> new UpdateAtomicResultPrevious(prevData);
+					case BINARY_CHANGED -> new UpdateAtomicResultBinaryChanged(changed);
+					case DELTA -> new UpdateAtomicResultDelta(LLDelta.of(prevData, newData));
+				};
 			}
-		} catch (Throwable ex) {
-			throw new IOException("Failed to update key " + LLUtils.toStringSafe(key), ex);
+		} catch (Exception ex) {
+			throw new DBException("Failed to update key " + LLUtils.toStringSafe(key), ex);
 		}
 	}
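// Editor's sketch: both transactional columns end in the same returnMode switch. How a caller
// might pick the result type through the interface refactored below; "column" is a hypothetical
// RocksDBColumn, and the result records are plain values now (no close() needed, see later hunks).
class UpdateAtomicUsageSketch {
	static void example(RocksDBColumn column, it.cavallium.dbengine.buffers.Buf key) throws Exception {
		try (var ro = new org.rocksdb.ReadOptions(); var wo = new org.rocksdb.WriteOptions()) {
			var result = column.updateAtomic(ro, wo, key, prev -> prev, UpdateAtomicResultMode.DELTA);
			var delta = ((UpdateAtomicResultDelta) result).delta(); // old/new pair for DELTA mode
		}
	}
}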
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/RocksDBColumn.java b/src/main/java/it/cavallium/dbengine/database/disk/RocksDBColumn.java
index 2521173..4a16a51 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/RocksDBColumn.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/RocksDBColumn.java
@@ -1,8 +1,7 @@
 package it.cavallium.dbengine.database.disk;
 
 import io.micrometer.core.instrument.MeterRegistry;
-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.BufferAllocator;
+import it.cavallium.dbengine.buffers.Buf;
 import it.cavallium.dbengine.database.LLRange;
 import it.cavallium.dbengine.database.LLUtils;
 import it.cavallium.dbengine.database.disk.rocksdb.RocksIteratorObj;
@@ -23,8 +22,7 @@ public sealed interface RocksDBColumn permits AbstractRocksDBColumn {
 
 	/**
 	 * This method should not modify or move the writerIndex/readerIndex of the buffers inside the range
 	 */
-	@NotNull RocksIteratorObj newRocksIterator(boolean allowNettyDirect,
-			ReadOptions readOptions,
+	@NotNull RocksIteratorObj newRocksIterator(ReadOptions readOptions,
 			LLRange range,
 			boolean reverse) throws RocksDBException;
 
@@ -32,48 +30,35 @@ public sealed interface RocksDBColumn permits AbstractRocksDBColumn {
 			byte[] key,
 			boolean existsAlmostCertainly) throws RocksDBException {
-		var allocator = getAllocator();
-		try (var keyBuf = allocator.allocate(key.length)) {
-			keyBuf.writeBytes(key);
-			try (var result = this.get(readOptions, keyBuf)) {
-				if (result == null) {
-					return null;
-				}
-				return LLUtils.toArray(result);
-			}
+		var result = this.get(readOptions, Buf.wrap(key));
+		if (result == null) {
+			return null;
 		}
+		return LLUtils.asArray(result);
 	}
 
 	@Nullable
-	Buffer get(@NotNull ReadOptions readOptions, Buffer key) throws RocksDBException;
+	Buf get(@NotNull ReadOptions readOptions, Buf key) throws RocksDBException;
 
-	boolean exists(@NotNull ReadOptions readOptions, Buffer key) throws RocksDBException;
+	boolean exists(@NotNull ReadOptions readOptions, Buf key) throws RocksDBException;
 
-	boolean mayExists(@NotNull ReadOptions readOptions, Buffer key) throws RocksDBException;
+	boolean mayExists(@NotNull ReadOptions readOptions, Buf key) throws RocksDBException;
 
-	void put(@NotNull WriteOptions writeOptions, Buffer key, Buffer value) throws RocksDBException;
+	void put(@NotNull WriteOptions writeOptions, Buf key, Buf value) throws RocksDBException;
 
 	default void put(@NotNull WriteOptions writeOptions, byte[] key, byte[] value) throws RocksDBException {
-		var allocator = getAllocator();
-		try (var keyBuf = allocator.allocate(key.length)) {
-			keyBuf.writeBytes(key);
-			try (var valBuf = allocator.allocate(value.length)) {
-				valBuf.writeBytes(value);
-
-				this.put(writeOptions, keyBuf, valBuf);
-			}
-		}
+		this.put(writeOptions, Buf.wrap(key), Buf.wrap(value));
 	}
 
-	@NotNull RocksIteratorObj newIterator(@NotNull ReadOptions readOptions, @Nullable Buffer min, @Nullable Buffer max);
+	@NotNull RocksIteratorObj newIterator(@NotNull ReadOptions readOptions, @Nullable Buf min, @Nullable Buf max);
 
 	@NotNull UpdateAtomicResult updateAtomic(@NotNull ReadOptions readOptions,
 			@NotNull WriteOptions writeOptions,
-			Buffer key,
+			Buf key,
 			BinarySerializationFunction updater,
-			UpdateAtomicResultMode returnMode) throws RocksDBException, IOException;
+			UpdateAtomicResultMode returnMode) throws RocksDBException;
 
-	void delete(WriteOptions writeOptions, Buffer key) throws RocksDBException;
+	void delete(WriteOptions writeOptions, Buf key) throws RocksDBException;
 
 	void delete(WriteOptions writeOptions, byte[] key) throws RocksDBException;
 
@@ -95,8 +80,6 @@ public sealed interface RocksDBColumn permits AbstractRocksDBColumn {
 
 	ColumnFamilyHandle getColumnFamilyHandle();
 
-	BufferAllocator getAllocator();
-
 	MeterRegistry getMeterRegistry();
 
 	boolean supportsTransactions();
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/RocksDBRefs.java b/src/main/java/it/cavallium/dbengine/database/disk/RocksDBRefs.java
index 2beb3c4..368a30a 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/RocksDBRefs.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/RocksDBRefs.java
@@ -1,7 +1,6 @@
 package it.cavallium.dbengine.database.disk;
 
 import it.cavallium.dbengine.database.DiscardingCloseable;
-import it.cavallium.dbengine.database.SafeCloseable;
 import java.util.ArrayList;
 import org.rocksdb.AbstractImmutableNativeReference;
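// Editor's sketch: the byte[] default methods above now funnel through Buf.wrap()/LLUtils.asArray()
// instead of allocator-managed netty Buffers. The equivalence, assuming Buf.wrap does not copy:
class BufBridgeSketch {
	static byte[] roundTrip(byte[] key) {
		var buf = it.cavallium.dbengine.buffers.Buf.wrap(key); // wraps the array; no allocator, no close()
		return it.cavallium.dbengine.database.LLUtils.asArray(buf); // back to byte[] for the RocksDB JNI calls
	}
}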
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/RocksDBUtils.java b/src/main/java/it/cavallium/dbengine/database/disk/RocksDBUtils.java
index 3acc75e..0218fbd 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/RocksDBUtils.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/RocksDBUtils.java
@@ -3,7 +3,6 @@ package it.cavallium.dbengine.database.disk;
 import static com.google.common.collect.Lists.partition;
 
 import it.cavallium.dbengine.database.LLUtils;
-import it.cavallium.dbengine.rpc.current.data.Column;
 import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.List;
@@ -19,8 +18,6 @@ import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
 import org.rocksdb.SstFileMetaData;
 import org.rocksdb.util.SizeUnit;
-import reactor.core.publisher.Mono;
-import reactor.core.scheduler.Schedulers;
 
 public class RocksDBUtils {
 
@@ -48,7 +45,7 @@ public class RocksDBUtils {
 			String logDbName,
 			ColumnFamilyHandle cfh,
 			int volumeId,
-			Logger logger) {
+			Logger logger) throws RocksDBException {
 		try (var co = new CompactionOptions()
 				.setCompression(CompressionType.LZ4_COMPRESSION)
 				.setMaxSubcompactions(0)
@@ -64,7 +61,7 @@ public class RocksDBUtils {
 				partitions = List.of(filesToCompact);
 			}
 			int finalBottommostLevelId = getLevels(db, cfh) - 1;
-			Mono.whenDelayError(partitions.stream().map(partition -> Mono.fromCallable(() -> {
+			for (List<String> partition : partitions) {
 				logger.info("Compacting {} files in database {} in column family {} to level {}",
 						partition.size(),
 						logDbName,
@@ -92,14 +89,13 @@ public class RocksDBUtils {
 						);
 					}
 				}
-				return null;
-			}).subscribeOn(Schedulers.boundedElastic())).toList()).transform(LLUtils::handleDiscard).block();
+			};
 			}
 		}
 	}
 
 	public static void ensureOpen(RocksDB db, @Nullable ColumnFamilyHandle cfh) {
-		if (Schedulers.isInNonBlockingThread()) {
+		if (LLUtils.isInNonBlockingThread()) {
 			throw new UnsupportedOperationException("Called in a nonblocking thread");
 		}
 		ensureOwned(db);
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/RocksIterWithReadOpts.java b/src/main/java/it/cavallium/dbengine/database/disk/RocksIterWithReadOpts.java
index 9260d35..23a65da 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/RocksIterWithReadOpts.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/RocksIterWithReadOpts.java
@@ -2,7 +2,6 @@ package it.cavallium.dbengine.database.disk;
 
 import it.cavallium.dbengine.database.DiscardingCloseable;
 import it.cavallium.dbengine.database.LLUtils;
-import it.cavallium.dbengine.database.SafeCloseable;
 import it.cavallium.dbengine.database.disk.rocksdb.RocksIteratorObj;
 import org.rocksdb.ReadOptions;
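// Editor's sketch: compactFiles above now runs as a plain sequential loop over Guava partitions
// instead of Mono.whenDelayError over boundedElastic. The batching shape, JDK + Guava only:
class PartitionLoopSketch {
	static void compactInBatches(java.util.List<String> filesToCompact, int batchSize) {
		for (java.util.List<String> batch : com.google.common.collect.Lists.partition(filesToCompact, batchSize)) {
			// each batch would be passed to db.compactFiles(...) in the real code
			System.out.println("Compacting " + batch.size() + " files");
		}
	}
}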
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/SimpleIndexSearcherManager.java b/src/main/java/it/cavallium/dbengine/database/disk/SimpleIndexSearcherManager.java
index c6a0649..1758c64 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/SimpleIndexSearcherManager.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/SimpleIndexSearcherManager.java
@@ -1,26 +1,21 @@
 package it.cavallium.dbengine.database.disk;
 
-import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler;
-import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
 import it.cavallium.dbengine.database.LLSnapshot;
 import it.cavallium.dbengine.database.LLUtils;
 import it.cavallium.dbengine.lucene.LuceneCloseable;
 import it.cavallium.dbengine.lucene.LuceneUtils;
 import it.cavallium.dbengine.utils.SimpleResource;
 import java.io.IOException;
-import java.io.UncheckedIOException;
-import java.lang.ref.Cleaner;
+import it.cavallium.dbengine.utils.DBException;
 import java.time.Duration;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.LockSupport;
+import java.util.function.Supplier;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.index.IndexWriter;
@@ -29,15 +24,7 @@ import org.apache.lucene.search.SearcherFactory;
 import org.apache.lucene.search.SearcherManager;
 import org.apache.lucene.search.similarities.Similarity;
 import org.apache.lucene.store.AlreadyClosedException;
-import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
-import it.cavallium.dbengine.utils.ShortNamedThreadFactory;
-import reactor.core.Disposable;
-import reactor.core.publisher.Mono;
-import reactor.core.publisher.Sinks;
-import reactor.core.publisher.Sinks.Empty;
-import reactor.core.scheduler.Scheduler;
-import reactor.core.scheduler.Schedulers;
 
 // todo: deduplicate code between Cached and Simple searcher managers
 public class SimpleIndexSearcherManager extends SimpleResource implements IndexSearcherManager, LuceneCloseable {
@@ -52,24 +39,22 @@ public class SimpleIndexSearcherManager extends SimpleResource implements IndexSearcherManager,
 
 	@Nullable
 	private final SnapshotsManager snapshotsManager;
-	private final Scheduler luceneHeavyTasksScheduler;
+	private final ScheduledExecutorService luceneHeavyTasksScheduler;
 	private final Similarity similarity;
 	private final SearcherManager searcherManager;
 	private final Duration queryRefreshDebounceTime;
 
-	private Mono<LLIndexSearcher> noSnapshotSearcherMono;
-
 	private final AtomicLong activeSearchers = new AtomicLong(0);
 	private final AtomicLong activeRefreshes = new AtomicLong(0);
-	private final Disposable refreshSubscription;
+	private final Future<?> refreshSubscription;
 
 	public SimpleIndexSearcherManager(IndexWriter indexWriter,
 			@Nullable SnapshotsManager snapshotsManager,
-			Scheduler luceneHeavyTasksScheduler,
+			ScheduledExecutorService luceneHeavyTasksScheduler,
 			Similarity similarity,
 			boolean applyAllDeletes,
 			boolean writeAllDeletes,
-			Duration queryRefreshDebounceTime) throws IOException {
+			Duration queryRefreshDebounceTime) {
 		this.snapshotsManager = snapshotsManager;
 		this.luceneHeavyTasksScheduler = luceneHeavyTasksScheduler;
 		this.similarity = similarity;
@@ -77,15 +62,13 @@ public class SimpleIndexSearcherManager extends SimpleResource implements IndexSearcherManager,
 		this.searcherManager = new SearcherManager(indexWriter, applyAllDeletes, writeAllDeletes, SEARCHER_FACTORY);
 
-		refreshSubscription = LLUtils.scheduleRepeated(luceneHeavyTasksScheduler, () -> {
+		refreshSubscription = luceneHeavyTasksScheduler.scheduleAtFixedRate(() -> {
 			try {
 				maybeRefresh();
 			} catch (Exception ex) {
 				LOG.error("Failed to refresh the searcher manager", ex);
 			}
-		}, queryRefreshDebounceTime);
-
-		this.noSnapshotSearcherMono = retrieveSearcherInternal(null);
+		}, queryRefreshDebounceTime.toMillis(), queryRefreshDebounceTime.toMillis(), TimeUnit.MILLISECONDS);
 	}
 
 	private void dropCachedIndexSearcher() {
@@ -94,7 +77,7 @@
 	}
 
 	@Override
-	public void maybeRefreshBlocking() throws IOException {
+	public void maybeRefreshBlocking() {
 		try {
 			activeRefreshes.incrementAndGet();
 			searcherManager.maybeRefreshBlocking();
@@ -106,7 +89,7 @@
 	}
 
 	@Override
-	public void maybeRefresh() throws IOException {
+	public void maybeRefresh() {
 		try {
 			activeRefreshes.incrementAndGet();
 			searcherManager.maybeRefresh();
@@ -118,46 +101,46 @@
 	}
 
 	@Override
-	public Mono<LLIndexSearcher> retrieveSearcher(@Nullable LLSnapshot snapshot) {
+	public LLIndexSearcher retrieveSearcher(@Nullable LLSnapshot snapshot) {
 		if (snapshot == null) {
-			return noSnapshotSearcherMono;
+			return retrieveSearcherInternal(null);
 		} else {
 			return retrieveSearcherInternal(snapshot);
 		}
 	}
 
-	private Mono<LLIndexSearcher> retrieveSearcherInternal(@Nullable LLSnapshot snapshot) {
-		return Mono.fromCallable(() -> {
-			if (isClosed()) {
-				return null;
-			}
-			try {
-				if (snapshotsManager == null || snapshot == null) {
-					return new OnDemandIndexSearcher(searcherManager, similarity);
-				} else {
-					activeSearchers.incrementAndGet();
-					IndexSearcher indexSearcher = snapshotsManager
-							.resolveSnapshot(snapshot)
-							.getIndexSearcher(SEARCH_EXECUTOR);
-					indexSearcher.setSimilarity(similarity);
-					assert indexSearcher.getIndexReader().getRefCount() > 0;
-					return new SnapshotIndexSearcher(indexSearcher);
-				}
-			} catch (Throwable ex) {
-				activeSearchers.decrementAndGet();
-				throw ex;
-			}
-		})
-				.transform(LuceneUtils::scheduleLucene);
+	private LLIndexSearcher retrieveSearcherInternal(@Nullable LLSnapshot snapshot) {
+		if (isClosed()) {
+			return null;
+		}
+		try {
+			if (snapshotsManager == null || snapshot == null) {
+				return new OnDemandIndexSearcher(searcherManager, similarity);
+			} else {
+				activeSearchers.incrementAndGet();
+				IndexSearcher indexSearcher = snapshotsManager.resolveSnapshot(snapshot).getIndexSearcher(SEARCH_EXECUTOR);
+				indexSearcher.setSimilarity(similarity);
+				assert indexSearcher.getIndexReader().getRefCount() > 0;
+				return new SnapshotIndexSearcher(indexSearcher);
+			}
+		} catch (Throwable ex) {
+			activeSearchers.decrementAndGet();
+			throw ex;
+		}
 	}
 
 	@Override
 	protected void onClose() {
 		LOG.debug("Closing IndexSearcherManager...");
-		refreshSubscription.dispose();
+		refreshSubscription.cancel(false);
+		long initTime = System.nanoTime();
+		while (!refreshSubscription.isDone() && (System.nanoTime() - initTime) <= 15000000000L) {
+			LockSupport.parkNanos(50000000);
+		}
+		refreshSubscription.cancel(true);
 		LOG.debug("Closed IndexSearcherManager");
 		LOG.debug("Closing refreshes...");
-		long initTime = System.nanoTime();
+		initTime = System.nanoTime();
 		while (activeRefreshes.get() > 0 && (System.nanoTime() - initTime) <= 15000000000L) {
 			LockSupport.parkNanos(50000000);
 		}
@@ -209,7 +192,7 @@
 			try {
 				searcherManager.release(indexSearcher);
 			} catch (IOException ex) {
-				throw new UncheckedIOException(ex);
+				throw new DBException(ex);
 			}
 		}
 	}
@@ -268,7 +251,7 @@
 				}
 			}
 		} catch (IOException ex) {
-			throw new UncheckedIOException(ex);
+			throw new DBException(ex);
 		}
 	}
 }
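// Editor's sketch: the searcher refresh above moved from a Reactor Disposable to a
// ScheduledExecutorService future, cancelled in two phases on close. A runnable, JDK-only
// restatement of that lifecycle:
class PeriodicRefreshSketch {
	public static void main(String[] args) {
		var scheduler = java.util.concurrent.Executors.newSingleThreadScheduledExecutor();
		java.util.concurrent.Future<?> refresh = scheduler.scheduleAtFixedRate(
				() -> System.out.println("refresh"), 250, 250, java.util.concurrent.TimeUnit.MILLISECONDS);

		// Two-phase shutdown mirroring onClose(): polite cancel, bounded wait, then interrupt
		refresh.cancel(false);
		long initTime = System.nanoTime();
		while (!refresh.isDone() && (System.nanoTime() - initTime) <= 15_000_000_000L) {
			java.util.concurrent.locks.LockSupport.parkNanos(50_000_000);
		}
		refresh.cancel(true);
		scheduler.shutdown();
	}
}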
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/SnapshotsManager.java b/src/main/java/it/cavallium/dbengine/database/disk/SnapshotsManager.java
index ae159bf..975fde9 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/SnapshotsManager.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/SnapshotsManager.java
@@ -1,24 +1,17 @@
 package it.cavallium.dbengine.database.disk;
 
-import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler;
-import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler;
-
 import it.cavallium.dbengine.database.LLSnapshot;
-import it.cavallium.dbengine.lucene.LuceneUtils;
 import it.cavallium.dbengine.utils.SimpleResource;
 import java.io.IOException;
-import java.io.UncheckedIOException;
+import it.cavallium.dbengine.utils.DBException;
 import java.util.Objects;
 import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Executor;
 import java.util.concurrent.Phaser;
 import java.util.concurrent.atomic.AtomicLong;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexWriter;
 import org.apache.lucene.index.SnapshotDeletionPolicy;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Mono;
-import reactor.core.scheduler.Schedulers;
 
 public class SnapshotsManager extends SimpleResource {
 
@@ -49,17 +42,15 @@ public class SnapshotsManager extends SimpleResource {
 		);
 	}
 
-	public Mono<LLSnapshot> takeSnapshot() {
-		return Mono
-				.fromCallable(() -> takeLuceneSnapshot())
-				.transform(LuceneUtils::scheduleLucene);
+	public LLSnapshot takeSnapshot() {
+		return takeLuceneSnapshot();
 	}
 
 	/**
 	 * Use internally. This method commits before taking the snapshot if there are no commits in a new database,
 	 * avoiding the exception.
 	 */
-	private LLSnapshot takeLuceneSnapshot() throws IOException {
+	private LLSnapshot takeLuceneSnapshot() {
 		activeTasks.register();
 		try {
 			if (snapshotter.getSnapshots().isEmpty()) {
@@ -73,32 +64,34 @@ public class SnapshotsManager extends SimpleResource {
 			if (prevSnapshot != null) {
 				try {
 					prevSnapshot.close();
-				} catch (UncheckedIOException e) {
+				} catch (DBException e) {
 					throw new IllegalStateException("Can't close snapshot", e);
 				}
 			}
 			return new LLSnapshot(snapshotSeqNo);
+		} catch (IOException e) {
+			throw new DBException(e);
 		} finally {
 			activeTasks.arriveAndDeregister();
 		}
 	}
 
-	public Mono<Void> releaseSnapshot(LLSnapshot snapshot) {
-		return Mono.fromCallable(() -> {
-			activeTasks.register();
-			try (var indexSnapshot = this.snapshots.remove(snapshot.getSequenceNumber())) {
-				if (indexSnapshot == null) {
-					throw new IOException("LLSnapshot " + snapshot.getSequenceNumber() + " not found!");
-				}
-
-				var luceneIndexSnapshot = indexSnapshot.getSnapshot();
-				snapshotter.release(luceneIndexSnapshot);
-				return null;
-			} finally {
-				activeTasks.arriveAndDeregister();
+	public void releaseSnapshot(LLSnapshot snapshot) {
+		activeTasks.register();
+		try {
+			var indexSnapshot = this.snapshots.remove(snapshot.getSequenceNumber());
+			if (indexSnapshot == null) {
+				throw new DBException("LLSnapshot " + snapshot.getSequenceNumber() + " not found!");
 			}
-		}).transform(LuceneUtils::scheduleLucene);
+
+			var luceneIndexSnapshot = indexSnapshot.getSnapshot();
+			snapshotter.release(luceneIndexSnapshot);
+		} catch (IOException e) {
+			throw new DBException(e);
+		} finally {
+			activeTasks.arriveAndDeregister();
+		}
 	}
 
 	/**
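// Editor's sketch: SnapshotsManager brackets every snapshot operation with a Phaser
// (register / arriveAndDeregister) so that closing can wait for in-flight work. A minimal
// restatement of that tracking pattern:
class PhaserTrackingSketch {
	private final java.util.concurrent.Phaser activeTasks = new java.util.concurrent.Phaser(1); // party 0 = owner

	void doTrackedWork(Runnable work) {
		activeTasks.register();
		try {
			work.run();
		} finally {
			activeTasks.arriveAndDeregister();
		}
	}

	void close() {
		// the owner arrives and waits until every registered task has deregistered
		activeTasks.arriveAndAwaitAdvance();
	}
}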
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/StandardRocksDBColumn.java b/src/main/java/it/cavallium/dbengine/database/disk/StandardRocksDBColumn.java
index dfd405d..a9f752d 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/StandardRocksDBColumn.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/StandardRocksDBColumn.java
@@ -3,12 +3,11 @@ package it.cavallium.dbengine.database.disk;
 import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
 
 import io.micrometer.core.instrument.MeterRegistry;
-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.BufferAllocator;
+import it.cavallium.dbengine.buffers.Buf;
 import it.cavallium.dbengine.database.LLDelta;
 import it.cavallium.dbengine.database.LLUtils;
+import it.cavallium.dbengine.utils.DBException;
 import java.io.IOException;
-import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.StampedLock;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
@@ -22,13 +21,11 @@ import org.rocksdb.WriteOptions;
 public final class StandardRocksDBColumn extends AbstractRocksDBColumn<RocksDB> {
 
 	public StandardRocksDBColumn(RocksDB db,
-			boolean nettyDirect,
-			BufferAllocator alloc,
 			String dbName,
 			ColumnFamilyHandle cfh,
 			MeterRegistry meterRegistry,
 			StampedLock closeLock) {
-		super(db, nettyDirect, alloc, dbName, cfh, meterRegistry, closeLock);
+		super(db, dbName, cfh, meterRegistry, closeLock);
 	}
 
 	@Override
@@ -45,92 +42,75 @@ public final class StandardRocksDBColumn extends AbstractRocksDBColumn<RocksDB>
 
 	@Override
 	public @NotNull UpdateAtomicResult updateAtomicImpl(@NotNull ReadOptions readOptions,
 			@NotNull WriteOptions writeOptions,
-			Buffer key,
+			Buf key,
 			BinarySerializationFunction updater,
-			UpdateAtomicResultMode returnMode) throws IOException {
+			UpdateAtomicResultMode returnMode) {
 		long initNanoTime = System.nanoTime();
 		try {
-			@Nullable Buffer prevData = this.get(readOptions, key);
-			try (prevData) {
+			@Nullable Buf prevData = this.get(readOptions, key);
+			if (logger.isTraceEnabled()) {
+				logger.trace(MARKER_ROCKSDB,
+						"Reading {}: {} (before update)",
+						LLUtils.toStringSafe(key),
+						LLUtils.toStringSafe(prevData)
+				);
+			}
+
+			Buf prevDataToSendToUpdater;
+			if (prevData != null) {
+				prevDataToSendToUpdater = prevData.copy();
+			} else {
+				prevDataToSendToUpdater = null;
+			}
+
+			@Nullable Buf newData;
+			newData = updater.apply(prevDataToSendToUpdater);
+			boolean changed;
+			if (logger.isTraceEnabled()) {
+				logger.trace(MARKER_ROCKSDB,
+						"Updating {}. previous data: {}, updated data: {}",
+						LLUtils.toStringSafe(key),
+						LLUtils.toStringSafe(prevData),
+						LLUtils.toStringSafe(newData)
+				);
+			}
+			if (prevData != null && newData == null) {
+				if (logger.isTraceEnabled()) {
+					logger.trace(MARKER_ROCKSDB, "Deleting {} (after update)", LLUtils.toStringSafe(key));
+				}
+				this.delete(writeOptions, key);
+				changed = true;
+			} else if (newData != null && (prevData == null || !LLUtils.equals(prevData, newData))) {
 				if (logger.isTraceEnabled()) {
 					logger.trace(MARKER_ROCKSDB,
-							"Reading {}: {} (before update)",
+							"Writing {}: {} (after update)",
 							LLUtils.toStringSafe(key),
-							LLUtils.toStringSafe(prevData)
+							LLUtils.toStringSafe(newData)
 					);
 				}
-
-				Buffer prevDataToSendToUpdater;
-				if (prevData != null) {
-					prevDataToSendToUpdater = prevData.copy().makeReadOnly();
+				Buf dataToPut;
+				if (returnMode == UpdateAtomicResultMode.CURRENT) {
+					dataToPut = newData.copy();
 				} else {
-					prevDataToSendToUpdater = null;
-				}
-
-				@Nullable Buffer newData;
-				try {
-					newData = updater.apply(prevDataToSendToUpdater);
-				} finally {
-					if (prevDataToSendToUpdater != null && prevDataToSendToUpdater.isAccessible()) {
-						prevDataToSendToUpdater.close();
-					}
-				}
-				try (newData) {
-					boolean changed;
-					assert newData == null || newData.isAccessible();
-					if (logger.isTraceEnabled()) {
-						logger.trace(MARKER_ROCKSDB,
-								"Updating {}. previous data: {}, updated data: {}",
-								LLUtils.toStringSafe(key),
-								LLUtils.toStringSafe(prevData),
-								LLUtils.toStringSafe(newData)
-						);
-					}
-					if (prevData != null && newData == null) {
-						if (logger.isTraceEnabled()) {
-							logger.trace(MARKER_ROCKSDB, "Deleting {} (after update)", LLUtils.toStringSafe(key));
-						}
-						this.delete(writeOptions, key);
-						changed = true;
-					} else if (newData != null && (prevData == null || !LLUtils.equals(prevData, newData))) {
-						if (logger.isTraceEnabled()) {
-							logger.trace(MARKER_ROCKSDB,
-									"Writing {}: {} (after update)",
-									LLUtils.toStringSafe(key),
-									LLUtils.toStringSafe(newData)
-							);
-						}
-						Buffer dataToPut;
-						if (returnMode == UpdateAtomicResultMode.CURRENT) {
-							dataToPut = newData.copy();
-						} else {
-							dataToPut = newData;
-						}
-						try {
-							this.put(writeOptions, key, dataToPut);
-							changed = true;
-						} finally {
-							if (dataToPut != newData) {
-								dataToPut.close();
-							}
-						}
-					} else {
-						changed = false;
-					}
-					recordAtomicUpdateTime(changed, prevData != null, newData != null, initNanoTime);
-					return switch (returnMode) {
-						case NOTHING -> RESULT_NOTHING;
-						case CURRENT -> new UpdateAtomicResultCurrent(newData != null ? newData.copy() : null);
-						case PREVIOUS -> new UpdateAtomicResultPrevious(prevData != null ? prevData.copy() : null);
-						case BINARY_CHANGED -> new UpdateAtomicResultBinaryChanged(changed);
-						case DELTA -> new UpdateAtomicResultDelta(LLDelta.of(
-								prevData != null ? prevData.copy() : null,
-								newData != null ? newData.copy() : null));
-					};
+					dataToPut = newData;
 				}
+				this.put(writeOptions, key, dataToPut);
+				changed = true;
+			} else {
+				changed = false;
 			}
-		} catch (Throwable ex) {
-			throw new IOException("Failed to update key " + LLUtils.toStringSafe(key), ex);
+			recordAtomicUpdateTime(changed, prevData != null, newData != null, initNanoTime);
+			return switch (returnMode) {
+				case NOTHING -> RESULT_NOTHING;
+				case CURRENT -> new UpdateAtomicResultCurrent(newData != null ? newData.copy() : null);
+				case PREVIOUS -> new UpdateAtomicResultPrevious(prevData != null ? prevData.copy() : null);
+				case BINARY_CHANGED -> new UpdateAtomicResultBinaryChanged(changed);
+				case DELTA -> new UpdateAtomicResultDelta(LLDelta.of(
+						prevData != null ? prevData.copy() : null,
+						newData != null ? newData.copy() : null));
+			};
+		} catch (Exception ex) {
+			throw new DBException("Failed to update key " + LLUtils.toStringSafe(key), ex);
 		}
 	}
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResult.java b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResult.java
index 1059934..be92a5b 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResult.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResult.java
@@ -1,7 +1,4 @@
 package it.cavallium.dbengine.database.disk;
 
-import it.cavallium.dbengine.database.DiscardingCloseable;
-import it.cavallium.dbengine.database.SafeCloseable;
-
-public sealed interface UpdateAtomicResult extends DiscardingCloseable permits UpdateAtomicResultBinaryChanged,
+public sealed interface UpdateAtomicResult permits UpdateAtomicResultBinaryChanged,
 		UpdateAtomicResultDelta, UpdateAtomicResultNothing, UpdateAtomicResultPrevious, UpdateAtomicResultCurrent {}
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultBinaryChanged.java b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultBinaryChanged.java
index b51ffc8..023ab3c 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultBinaryChanged.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultBinaryChanged.java
@@ -2,8 +2,4 @@ package it.cavallium.dbengine.database.disk;
 
 public record UpdateAtomicResultBinaryChanged(boolean changed) implements UpdateAtomicResult {
 
-	@Override
-	public void close() {
-
-	}
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultCurrent.java b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultCurrent.java
index e8e10b4..00fca63 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultCurrent.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultCurrent.java
@@ -1,14 +1,6 @@
 package it.cavallium.dbengine.database.disk;
 
-import io.netty5.buffer.Buffer;
-import io.netty5.util.Send;
+import it.cavallium.dbengine.buffers.Buf;
 
-public record UpdateAtomicResultCurrent(Buffer current) implements UpdateAtomicResult {
-
-	@Override
-	public void close() {
-		if (current != null && current.isAccessible()) {
-			current.close();
-		}
-	}
+public record UpdateAtomicResultCurrent(Buf current) implements UpdateAtomicResult {
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultDelta.java b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultDelta.java
index 8aa1e42..3e4148a 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultDelta.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultDelta.java
@@ -1,14 +1,6 @@
 package it.cavallium.dbengine.database.disk;
 
-import io.netty5.util.Send;
 import it.cavallium.dbengine.database.LLDelta;
 
 public record UpdateAtomicResultDelta(LLDelta delta) implements UpdateAtomicResult {
-
-	@Override
-	public void close() {
-		if (delta != null && !delta.isClosed()) {
-			delta.close();
-		}
-	}
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultNothing.java b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultNothing.java
index 2d6c9f1..2176515 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultNothing.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultNothing.java
@@ -2,8 +2,4 @@ package it.cavallium.dbengine.database.disk;
 
 public record UpdateAtomicResultNothing() implements UpdateAtomicResult {
 
-	@Override
-	public void close() {
-
-	}
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultPrevious.java b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultPrevious.java
index d1a1429..fbaafa3 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultPrevious.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/UpdateAtomicResultPrevious.java
@@ -1,14 +1,7 @@
 package it.cavallium.dbengine.database.disk;
 
-import io.netty5.buffer.Buffer;
-import io.netty5.util.Send;
+import it.cavallium.dbengine.buffers.Buf;
 
-public record UpdateAtomicResultPrevious(Buffer previous) implements UpdateAtomicResult {
+public record UpdateAtomicResultPrevious(Buf previous) implements UpdateAtomicResult {
 
-	@Override
-	public void close() {
-		if (previous != null && previous.isAccessible()) {
-			previous.close();
-		}
-	}
 }
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLColumnFamilyHandle.java b/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLColumnFamilyHandle.java
index 200c73e..8b4f91b 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLColumnFamilyHandle.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLColumnFamilyHandle.java
@@ -1,10 +1,6 @@
 package it.cavallium.dbengine.database.disk.rocksdb;
 
-import io.netty5.buffer.Drop;
-import io.netty5.buffer.Owned;
-import io.netty5.buffer.internal.ResourceSupport;
 import it.cavallium.dbengine.utils.SimpleResource;
-import org.rocksdb.AbstractSlice;
 import org.rocksdb.ColumnFamilyHandle;
 
 public final class LLColumnFamilyHandle extends SimpleResource {
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLCompactionOptions.java b/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLCompactionOptions.java
index d11c3bf..68d3434 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLCompactionOptions.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLCompactionOptions.java
@@ -1,8 +1,5 @@
 package it.cavallium.dbengine.database.disk.rocksdb;
 
-import io.netty5.buffer.Drop;
-import io.netty5.buffer.Owned;
-import io.netty5.buffer.internal.ResourceSupport;
 import it.cavallium.dbengine.utils.SimpleResource;
 import org.rocksdb.CompactionOptions;
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLReadOptions.java b/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLReadOptions.java
index 915074b..af56ea4 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLReadOptions.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLReadOptions.java
@@ -1,13 +1,5 @@
 package it.cavallium.dbengine.database.disk.rocksdb;
 
-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.Drop;
-import io.netty5.buffer.Owned;
-import io.netty5.util.Send;
-import io.netty5.buffer.internal.ResourceSupport;
-import it.cavallium.dbengine.database.LLDelta;
-import it.cavallium.dbengine.database.SafeCloseable;
-import it.cavallium.dbengine.database.disk.LLLocalGroupedReactiveRocksIterator;
 import it.cavallium.dbengine.utils.SimpleResource;
 import org.rocksdb.ReadOptions;
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLWriteOptions.java b/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLWriteOptions.java
index 2c30bb0..8e5f9b3 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLWriteOptions.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/LLWriteOptions.java
@@ -1,8 +1,5 @@
 package it.cavallium.dbengine.database.disk.rocksdb;
 
-import io.netty5.buffer.Drop;
-import io.netty5.buffer.Owned;
-import io.netty5.buffer.internal.ResourceSupport;
 import it.cavallium.dbengine.utils.SimpleResource;
 import org.rocksdb.WriteOptions;
diff --git a/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/RocksIteratorObj.java b/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/RocksIteratorObj.java
index 9cd8bae..36db90b 100644
--- a/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/RocksIteratorObj.java
+++ b/src/main/java/it/cavallium/dbengine/database/disk/rocksdb/RocksIteratorObj.java
@@ -1,14 +1,8 @@
 package it.cavallium.dbengine.database.disk.rocksdb;
 
-import static it.cavallium.dbengine.database.LLUtils.isReadOnlyDirect;
-
 import io.micrometer.core.instrument.Counter;
 import io.micrometer.core.instrument.Timer;
-import io.netty5.buffer.Buffer;
-import io.netty5.buffer.BufferComponent;
-import io.netty5.buffer.Drop;
-import io.netty5.buffer.Owned;
-import io.netty5.buffer.internal.ResourceSupport;
+import it.cavallium.dbengine.buffers.Buf;
 import it.cavallium.dbengine.database.LLUtils;
 import it.cavallium.dbengine.utils.SimpleResource;
 import java.nio.ByteBuffer;
@@ -21,9 +15,8 @@ public class RocksIteratorObj extends SimpleResource {
 	private RocksIterator rocksIterator;
 	private AbstractSlice<?> sliceMin;
 	private AbstractSlice<?> sliceMax;
-	private Buffer min;
-	private Buffer max;
-	private final boolean allowNettyDirect;
+	private Buf min;
+	private Buf max;
 	private final Counter startedIterSeek;
 	private final Counter endedIterSeek;
 	private final Timer iterSeekTime;
@@ -36,9 +29,8 @@ public class RocksIteratorObj extends SimpleResource {
 	public RocksIteratorObj(RocksIterator rocksIterator,
 			AbstractSlice<?> sliceMin,
 			AbstractSlice<?> sliceMax,
-			Buffer min,
-			Buffer max,
-			boolean allowNettyDirect,
+			Buf min,
+			Buf max,
 			Counter startedIterSeek,
 			Counter endedIterSeek,
 			Timer iterSeekTime,
@@ -50,7 +42,6 @@ public class RocksIteratorObj extends SimpleResource {
 				sliceMax,
 				min,
 				max,
-				allowNettyDirect,
 				startedIterSeek,
 				endedIterSeek,
 				iterSeekTime,
@@ -65,9 +56,8 @@ public class RocksIteratorObj extends SimpleResource {
 	private RocksIteratorObj(RocksIterator rocksIterator,
 			AbstractSlice<?> sliceMin,
 			AbstractSlice<?> sliceMax,
-			Buffer min,
-			Buffer max,
-			boolean allowNettyDirect,
+			Buf min,
+			Buf max,
 			Counter startedIterSeek,
 			Counter endedIterSeek,
 			Timer iterSeekTime,
@@ -81,7 +71,6 @@ public class RocksIteratorObj extends SimpleResource {
 		this.min = min;
 		this.max = max;
 		this.rocksIterator = rocksIterator;
-		this.allowNettyDirect = allowNettyDirect;
 		this.startedIterSeek = startedIterSeek;
 		this.endedIterSeek = endedIterSeek;
 		this.iterSeekTime = iterSeekTime;
@@ -139,43 +128,25 @@ public class RocksIteratorObj extends SimpleResource {
 	/**
 	 * Useful for reverse iterations
 	 */
-	public void seekFrom(Buffer key) {
+	public void seekFrom(Buf key) {
 		ensureOpen();
-		if (allowNettyDirect && isReadOnlyDirect(key)) {
-			ByteBuffer keyInternalByteBuffer = ((BufferComponent) key).readableBuffer();
-			assert keyInternalByteBuffer.position() == 0;
-			rocksIterator.seekForPrev(keyInternalByteBuffer);
-			// This is useful to retain the key buffer in memory and avoid deallocations
-			this.seekingFrom = key;
-		} else {
-			var keyArray = LLUtils.toArray(key);
-			rocksIterator.seekForPrev(keyArray);
-			// This is useful to retain the key buffer in memory and avoid deallocations
-			this.seekingFrom = keyArray;
-		}
+		var keyArray = LLUtils.asArray(key);
+		rocksIterator.seekForPrev(keyArray);
+		// This is useful to retain the key buffer in memory and avoid deallocations
+		this.seekingFrom = keyArray;
 	}
 
 	/**
 	 * Useful for forward iterations
 	 */
-	public void seekTo(Buffer key) {
+	public void seekTo(Buf key) {
 		ensureOpen();
-		if (allowNettyDirect && isReadOnlyDirect(key)) {
-			ByteBuffer keyInternalByteBuffer = ((BufferComponent) key).readableBuffer();
-			assert keyInternalByteBuffer.position() == 0;
-			startedIterSeek.increment();
-			iterSeekTime.record(() -> rocksIterator.seek(keyInternalByteBuffer));
-			endedIterSeek.increment();
-			// This is useful to retain the key buffer in memory and avoid deallocations
-			this.seekingTo = key;
-		} else {
-			var keyArray = LLUtils.toArray(key);
-			startedIterSeek.increment();
-			iterSeekTime.record(() -> rocksIterator.seek(keyArray));
-			endedIterSeek.increment();
-			// This is useful to retain the key buffer in memory and avoid deallocations
-			this.seekingTo = keyArray;
-		}
+		var keyArray = LLUtils.asArray(key);
+		startedIterSeek.increment();
+		iterSeekTime.record(() -> rocksIterator.seek(keyArray));
+		endedIterSeek.increment();
+		// This is useful to retain the key buffer in memory and avoid deallocations
+		this.seekingTo = keyArray;
 	}
 
 	public boolean isValid() {
@@ -183,26 +154,48 @@ public class RocksIteratorObj extends SimpleResource {
 		return rocksIterator.isValid();
 	}
 
+	@Deprecated(forRemoval = true)
 	public int key(ByteBuffer buffer) {
 		ensureOpen();
 		return rocksIterator.key(buffer);
 	}
 
+	@Deprecated(forRemoval = true)
 	public int value(ByteBuffer buffer) {
 		ensureOpen();
 		return rocksIterator.value(buffer);
 	}
 
+	/**
+	 * The returned buffer may change when calling next() or when the iterator is not valid anymore
+	 */
 	public byte[] key() {
 		ensureOpen();
 		return rocksIterator.key();
 	}
 
+	/**
+	 * The returned buffer may change when calling next() or when the iterator is not valid anymore
+	 */
 	public byte[] value() {
 		ensureOpen();
 		return rocksIterator.value();
 	}
 
+	/**
+	 * The returned buffer may change when calling next() or when the iterator is not valid anymore
+	 */
+	public Buf keyBuf() {
+		return Buf.wrap(this.key());
+	}
+
+	/**
+	 * The returned buffer may change when calling next() or when the iterator is not valid anymore
+	 */
+	public Buf valueBuf() {
+		return Buf.wrap(this.value());
+	}
+
 	public void next() throws RocksDBException {
 		ensureOpen();
 		next(true);
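// Editor's sketch: iterating with the new Buf accessors above. keyBuf()/valueBuf() wrap the
// JNI-owned arrays, which may change on next(), hence the copy-before-retain caveat in the javadoc.
class IteratorUsageSketch {
	static void scan(it.cavallium.dbengine.database.disk.rocksdb.RocksIteratorObj it,
			it.cavallium.dbengine.buffers.Buf from) throws Exception {
		it.seekTo(from);                   // forward iteration starting at "from"
		while (it.isValid()) {
			var key = it.keyBuf();         // valid only until next(); copy() it to retain
			var value = it.valueBuf();
			it.next();
		}
	}
}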
diff --git a/src/main/java/it/cavallium/dbengine/database/memory/BLRange.java b/src/main/java/it/cavallium/dbengine/database/memory/BLRange.java
index e97530e..6491992 100644
--- a/src/main/java/it/cavallium/dbengine/database/memory/BLRange.java
+++ b/src/main/java/it/cavallium/dbengine/database/memory/BLRange.java
@@ -1,14 +1,14 @@
 package it.cavallium.dbengine.database.memory;
 
-import it.unimi.dsi.fastutil.bytes.ByteList;
+import it.cavallium.dbengine.buffers.Buf;
 
 public class BLRange {
 
-	private final ByteList min;
-	private final ByteList max;
-	private final ByteList single;
+	private final Buf min;
+	private final Buf max;
+	private final Buf single;
 
-	public BLRange(ByteList min, ByteList max, ByteList single) {
+	public BLRange(Buf min, Buf max, Buf single) {
 		if (single != null && (min != null || max != null)) {
 			throw new IllegalArgumentException();
 		}
@@ -17,15 +17,15 @@ public class BLRange {
 		this.single = single;
 	}
 
-	public ByteList getMin() {
+	public Buf getMin() {
 		return min;
 	}
 
-	public ByteList getMax() {
+	public Buf getMax() {
 		return max;
 	}
 
-	public ByteList getSingle() {
+	public Buf getSingle() {
 		return single;
 	}
diff --git a/src/main/java/it/cavallium/dbengine/database/memory/BinaryLexicographicList.java b/src/main/java/it/cavallium/dbengine/database/memory/BinaryLexicographicList.java
deleted file mode 100644
index 6fb0b6a..0000000
--- a/src/main/java/it/cavallium/dbengine/database/memory/BinaryLexicographicList.java
+++ /dev/null
@@ -1,286 +0,0 @@
-package it.cavallium.dbengine.database.memory;
-
-import it.unimi.dsi.fastutil.bytes.ByteArrayList;
-import it.unimi.dsi.fastutil.bytes.ByteCollection;
-import it.unimi.dsi.fastutil.bytes.ByteList;
-import it.unimi.dsi.fastutil.bytes.ByteListIterator;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.StringJoiner;
-import org.jetbrains.annotations.NotNull;
-
-@SuppressWarnings("ClassCanBeRecord")
-public class BinaryLexicographicList implements ByteList {
-
-	private final byte[] bytes;
-
-	public BinaryLexicographicList(byte[] bytes) {
-		this.bytes = bytes;
-	}
-
-	@Override
-	public int size() {
-		return bytes.length;
-	}
-
-	@Override
-	public boolean isEmpty() {
-		return bytes.length == 0;
-	}
-
-	@Override
-	public ByteListIterator iterator() {
-		return ByteList.of(bytes).iterator();
-	}
-
-	@NotNull
-	@Override
-	public Object @NotNull [] toArray() {
-		var output = new Object[bytes.length];
-		for (int i = 0; i < bytes.length; i++) {
-			output[i] = bytes[i];
-		}
-		return output;
-	}
-
-	@SuppressWarnings("unchecked")
-	@NotNull
-	@Override
-	public <T> T @NotNull [] toArray(T @NotNull [] a) {
-		Object[] content = toArray();
-		if (a.length < bytes.length)
-			// Make a new array of a's runtime type, but my contents:
-			return (T[]) Arrays.copyOf(content, bytes.length, a.getClass());
-		System.arraycopy(content, 0, a, 0, bytes.length);
-		if (a.length > bytes.length)
-			a[bytes.length] = null;
-		return a;
-	}
-
-	@Override
-	public boolean containsAll(@NotNull Collection<?> c) {
-		return ByteArrayList.wrap(bytes).containsAll(c);
-	}
-
-	@Override
-	public boolean addAll(@NotNull Collection<? extends Byte> c) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public boolean addAll(int index, @NotNull Collection<? extends Byte> c) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public boolean removeAll(@NotNull Collection<?> c) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public boolean retainAll(@NotNull Collection<?> c) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public void clear() {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public ByteListIterator listIterator() {
-		return ByteList.of(bytes).listIterator();
-	}
-
-	@Override
-	public ByteListIterator listIterator(int index) {
-		return ByteList.of(bytes).listIterator(index);
-	}
-
-	@Override
-	public ByteList subList(int from, int to) {
-		return ByteList.of(bytes).subList(from, to);
-	}
-
-	@Override
-	public void size(int size) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public void getElements(int from, byte[] a, int offset, int length) {
-		ByteList.of(bytes).getElements(from, a, offset, length);
-	}
-
-	@Override
-	public void removeElements(int from, int to) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public void addElements(int index, byte[] a) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public void addElements(int index, byte[] a, int offset, int length) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public boolean add(byte key) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public boolean contains(byte key) {
-		for (byte aByte : bytes) {
-			if (aByte == key) {
-				return true;
-			}
-		}
-		return false;
-	}
-
-	@Override
-	public boolean rem(byte key) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public byte[] toByteArray() {
-		return ByteList.of(bytes).toByteArray();
-	}
-
-	@Override
-	public byte[] toArray(byte[] a) {
-		return ByteList.of(bytes).toArray(a);
-	}
-
-	@Override
-	public boolean addAll(ByteCollection c) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public boolean containsAll(ByteCollection c) {
-		return ByteList.of(bytes).containsAll(c);
-	}
-
-	@Override
-	public boolean removeAll(ByteCollection c) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public boolean retainAll(ByteCollection c) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public void add(int index, byte key) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public boolean addAll(int index, ByteCollection c) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public byte set(int index, byte k) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public byte getByte(int index) {
-		return bytes[index];
-	}
-
-	@Override
-	public int indexOf(byte k) {
-		for (int i = 0; i < bytes.length; i++) {
-			if (bytes[i] == k) {
-				return i;
-			}
-		}
-		return -1;
-	}
-
-	@Override
-	public int lastIndexOf(byte k) {
-		for (int i = bytes.length - 1; i >= 0; i--) {
-			if (bytes[i] == k) {
-				return i;
-			}
-		}
-		return -1;
-	}
-
-	@Override
-	public byte removeByte(int index) {
-		throw new UnsupportedOperationException();
-	}
-
-	@Override
-	public int compareTo(@NotNull List<? extends Byte> o) {
-		var length1 = bytes.length;
-		var length2 = o.size();
-		if (length1 == length2) {
-			int i = 0;
-			for (byte ob : o) {
-				if (bytes[i] != ob) {
-					var compareResult = Byte.compareUnsigned(bytes[i], ob);
-					if (compareResult != 0) {
-						return compareResult;
-					}
-				}
-				i++;
-			}
-			return 0;
-		} else if (length1 > length2) {
-			return 1;
-		} else {
-			return -1;
-		}
-	}
-
-	@Override
-	public boolean equals(Object o) {
-		if (this == o) {
-			return true;
-		}
-		if (o == null || getClass() != o.getClass()) {
-			if (o instanceof List) {
-				int i = 0;
-				for (Object o1 : ((List<?>) o)) {
-					if (i >= size()) {
-						return false;
-					}
-					if (!(o1 instanceof Byte)) {
-						return false;
-					}
-					if (this.bytes[i] != (Byte) o1) {
-						return false;
-					}
-					i++;
-				}
-				return (size() == i);
-			}
-			return false;
-		}
-		BinaryLexicographicList bytes1 = (BinaryLexicographicList) o;
-		return Arrays.equals(bytes, bytes1.bytes);
-	}
-
-	@Override
-	public int hashCode() {
-		return Arrays.hashCode(bytes);
-	}
-
-	@Override
-	public String toString() {
-		return Arrays.toString(bytes);
-	}
-}
diff --git a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDatabaseConnection.java b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDatabaseConnection.java
index 883a4af..1cfbefb 100644
--- a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDatabaseConnection.java
+++ b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDatabaseConnection.java
@@ -1,14 +1,10 @@
 package it.cavallium.dbengine.database.memory;
 
-import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler;
-
 import io.micrometer.core.instrument.MeterRegistry;
-import io.netty5.buffer.BufferAllocator;
 import it.cavallium.dbengine.database.LLDatabaseConnection;
 import
it.cavallium.dbengine.database.LLKeyValueDatabase; import it.cavallium.dbengine.database.LLLuceneIndex; import it.cavallium.dbengine.database.disk.LLLocalLuceneIndex; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; import it.cavallium.dbengine.lucene.LuceneHacks; import it.cavallium.dbengine.lucene.LuceneUtils; import it.cavallium.dbengine.rpc.current.data.ByteBuffersDirectory; @@ -23,63 +19,38 @@ import java.util.List; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; public class LLMemoryDatabaseConnection implements LLDatabaseConnection { private final AtomicBoolean connected = new AtomicBoolean(); - private final BufferAllocator allocator; private final MeterRegistry meterRegistry; - private final AtomicReference env = new AtomicReference<>(); - public LLMemoryDatabaseConnection(BufferAllocator allocator, MeterRegistry meterRegistry) { - this.allocator = allocator; + public LLMemoryDatabaseConnection(MeterRegistry meterRegistry) { this.meterRegistry = meterRegistry; } - @Override - public BufferAllocator getAllocator() { - return allocator; - } - @Override public MeterRegistry getMeterRegistry() { return meterRegistry; } @Override - public Mono connect() { - return Mono - .fromCallable(() -> { - if (!connected.compareAndSet(false, true)) { - throw new IllegalStateException("Already connected"); - } - var prev = env.getAndSet(new LLTempHugePqEnv()); - if (prev != null) { - throw new IllegalStateException("Env was already set"); - } - return this; - }) - .subscribeOn(Schedulers.boundedElastic()); + public LLDatabaseConnection connect() { + if (!connected.compareAndSet(false, true)) { + throw new IllegalStateException("Already connected"); + } + return this; } @Override - public Mono getDatabase(String name, + public LLKeyValueDatabase getDatabase(String name, List columns, DatabaseOptions databaseOptions) { - return Mono - .fromCallable(() -> new LLMemoryKeyValueDatabase( - allocator, - meterRegistry, - name, - columns - )) - .subscribeOn(Schedulers.boundedElastic()); + return new LLMemoryKeyValueDatabase(meterRegistry, name, columns); } @Override - public Mono getLuceneIndex(String clusterName, + public LLLuceneIndex getLuceneIndex(String clusterName, LuceneIndexStructure indexStructure, IndicizerAnalyzers indicizerAnalyzers, IndicizerSimilarities indicizerSimilarities, @@ -89,33 +60,18 @@ public class LLMemoryDatabaseConnection implements LLDatabaseConnection { .builder(luceneOptions) .directoryOptions(new ByteBuffersDirectory()) .build(); - return Mono - .fromCallable(() -> { - var env = this.env.get(); - return new LLLocalLuceneIndex(env, - meterRegistry, - clusterName, - 0, - indicizerAnalyzers, - indicizerSimilarities, - memoryLuceneOptions, - luceneHacks, - null - ); - }) - .transform(LuceneUtils::scheduleLucene); + return new LLLocalLuceneIndex(meterRegistry, + clusterName, + 0, + indicizerAnalyzers, + indicizerSimilarities, + memoryLuceneOptions, + luceneHacks + ); } @Override - public Mono disconnect() { - return Mono.fromCallable(() -> { - if (connected.compareAndSet(true, false)) { - var env = this.env.get(); - if (env != null) { - env.close(); - } - } - return null; - }).subscribeOn(Schedulers.boundedElastic()); + public void disconnect() { + connected.compareAndSet(true, false); } } diff --git a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDictionary.java 
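
// ---------------------------------------------------------------------------
// Sketch (illustrative, not part of the applied patch): connect()/disconnect()
// above keep the old single-connection guarantee without Mono, using only
// AtomicBoolean.compareAndSet. Self-contained version of the same pattern;
// the class name is hypothetical.
class ConnectionGuardSketch {

  private final java.util.concurrent.atomic.AtomicBoolean connected =
      new java.util.concurrent.atomic.AtomicBoolean();

  void connect() {
    // reject a second connect() without taking a lock
    if (!connected.compareAndSet(false, true)) {
      throw new IllegalStateException("Already connected");
    }
  }

  void disconnect() {
    // harmless no-op when never connected, as in LLMemoryDatabaseConnection
    connected.compareAndSet(true, false);
  }
}
// ---------------------------------------------------------------------------
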
b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDictionary.java index 521996a..e82d1ac 100644 --- a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDictionary.java +++ b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryDictionary.java @@ -1,9 +1,9 @@ package it.cavallium.dbengine.database.memory; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.util.Resource; -import io.netty5.util.Send; +import static java.util.stream.Collectors.groupingBy; +import static java.util.stream.Collectors.mapping; + +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.BadBlock; import it.cavallium.dbengine.database.LLDelta; import it.cavallium.dbengine.database.LLDictionary; @@ -17,46 +17,42 @@ import it.cavallium.dbengine.database.UpdateMode; import it.cavallium.dbengine.database.disk.BinarySerializationFunction; import it.cavallium.dbengine.database.serialization.KVSerializationFunction; import it.cavallium.dbengine.database.serialization.SerializationException; -import it.cavallium.dbengine.database.serialization.SerializationFunction; -import it.cavallium.dbengine.utils.InternalMonoUtils; -import it.unimi.dsi.fastutil.bytes.ByteList; +import it.cavallium.dbengine.utils.DBException; import java.io.IOException; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import java.util.Objects; -import java.util.Optional; +import java.util.Set; import java.util.SortedMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentNavigableMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; public class LLMemoryDictionary implements LLDictionary { private final String databaseName; private final String columnName; - private final BufferAllocator allocator; private final UpdateMode updateMode; - private final Getter> snapshots; - private final ConcurrentSkipListMap mainDb; + private final Getter> snapshots; + private final ConcurrentSkipListMap mainDb; private interface Getter { U get(T argument); } - public LLMemoryDictionary(BufferAllocator allocator, - String databaseName, + public LLMemoryDictionary(String databaseName, String columnName, UpdateMode updateMode, - ConcurrentHashMap>> snapshots, - ConcurrentHashMap> mainDb) { + ConcurrentHashMap>> snapshots, + ConcurrentHashMap> mainDb) { this.databaseName = databaseName; this.columnName = columnName; - this.allocator = allocator; this.updateMode = updateMode; this.snapshots = (snapshotId) -> snapshots.get(snapshotId).get(columnName); this.mainDb = mainDb.get(columnName); @@ -67,11 +63,6 @@ public class LLMemoryDictionary implements LLDictionary { return columnName; } - @Override - public BufferAllocator getAllocator() { - return allocator; - } - private long resolveSnapshot(LLSnapshot snapshot) { if (snapshot == null) { return Long.MIN_VALUE + 1L; @@ -82,71 +73,58 @@ public class LLMemoryDictionary implements LLDictionary { } } - private Mono transformResult(Mono result, LLDictionaryResultType resultType) { + private Buf transformResult(Buf result, LLDictionaryResultType resultType) { if (resultType == LLDictionaryResultType.PREVIOUS_VALUE) { // Don't retain the result because it has been removed from the skip list - return 
result.map(this::kkB); + return kkB(result); } else if (resultType == LLDictionaryResultType.PREVIOUS_VALUE_EXISTENCE) { - return result - .map(prev -> true) - .defaultIfEmpty(false) - .map((Boolean bool) -> LLUtils.booleanToResponseByteBuffer(allocator, bool)); + return LLUtils.booleanToResponseByteBuffer(result != null); } else { - return result.as(InternalMonoUtils::ignoreElements); + return null; } } - private ByteList k(Send buf) { - try (var b = buf.receive()) { - return new BinaryLexicographicList(LLUtils.toArray(b)); - } + private Buf k(Buf buf) { + if (buf == null) return null; + return buf; } - private ByteList kShr(Buffer buf) { - return new BinaryLexicographicList(LLUtils.toArray(buf)); + private Buf kShr(Buf buf) { + if (buf == null) return null; + return buf; } - private ByteList kOwn(Buffer buf) { - try (buf) { - return new BinaryLexicographicList(LLUtils.toArray(buf)); - } + private Buf kOwn(Buf buf) { + if (buf == null) return null; + return buf; } - private Send kk(ByteList bytesList) { - try (var buffer = getAllocator().allocate(bytesList.size())) { - buffer.writeBytes(bytesList.toByteArray()); - return buffer.send(); - } + private Buf kk(Buf bytesList) { + if (bytesList == null) return null; + return bytesList; } - private Buffer kkB(ByteList bytesList) { - var buffer = getAllocator().allocate(bytesList.size()); - try { - buffer.writeBytes(bytesList.toByteArray()); - return buffer; - } catch (Throwable t) { - buffer.close(); - throw t; - } + private Buf kkB(Buf bytesList) { + if (bytesList == null) return null; + return bytesList; } private BLRange r(Supplier send) { - try(var range = send.get()) { - if (range.isAll()) { - return new BLRange(null, null, null); - } else if (range.isSingle()) { - return new BLRange(null, null, k(range.getSingle())); - } else if (range.hasMin() && range.hasMax()) { - return new BLRange(k(range.getMin()), k(range.getMax()), null); - } else if (range.hasMin()) { - return new BLRange(k(range.getMin()), null, null); - } else { - return new BLRange(k(range.getMax()), null, null); - } + var range = send.get(); + if (range.isAll()) { + return new BLRange(null, null, null); + } else if (range.isSingle()) { + return new BLRange(null, null, k(range.getSingle())); + } else if (range.hasMin() && range.hasMax()) { + return new BLRange(k(range.getMin()), k(range.getMax()), null); + } else if (range.hasMin()) { + return new BLRange(k(range.getMin()), null, null); + } else { + return new BLRange(k(range.getMax()), null, null); } } - private ConcurrentNavigableMap mapSlice(LLSnapshot snapshot, LLRange range) { + private ConcurrentNavigableMap mapSlice(LLSnapshot snapshot, LLRange range) { if (range.isAll()) { return snapshots.get(resolveSnapshot(snapshot)); } else if (range.isSingle()) { @@ -180,25 +158,14 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Mono get(@Nullable LLSnapshot snapshot, Mono keyMono) { - return Mono.usingWhen(keyMono, - key -> Mono - .fromCallable(() -> snapshots.get(resolveSnapshot(snapshot)).get(kShr(key))) - .map(this::kkB) - .onErrorMap(cause -> new IOException("Failed to read", cause)), - LLUtils::finalizeResource - ); + public Buf get(@Nullable LLSnapshot snapshot, Buf key) { + return snapshots.get(resolveSnapshot(snapshot)).get(kShr(key)); } @Override - public Mono put(Mono keyMono, Mono valueMono, LLDictionaryResultType resultType) { - var kMono = keyMono.map(this::kOwn); - var vMono = valueMono.map(this::kOwn); - return Mono - .zip(kMono, vMono) - .mapNotNull(tuple -> 
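
// ---------------------------------------------------------------------------
// Note (illustrative, not part of the applied patch): with the Netty buffers
// removed, the k/kShr/kOwn/kk/kkB helpers above have all collapsed into
// null-tolerant identity functions; they are kept so the refactor stays
// mechanical. For example, these two calls now behave identically:
//
//   snapshots.get(resolveSnapshot(snapshot)).get(kShr(key));
//   snapshots.get(resolveSnapshot(snapshot)).get(key);
// ---------------------------------------------------------------------------
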
mainDb.put(tuple.getT1(), tuple.getT2())) - .transform(result -> this.transformResult(result, resultType)) - .onErrorMap(cause -> new IOException("Failed to read", cause)); + public Buf put(Buf key, Buf value, LLDictionaryResultType resultType) { + var result = mainDb.put(key, value); + return this.transformResult(result, resultType); } @Override @@ -207,308 +174,217 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Mono updateAndGetDelta(Mono keyMono, BinarySerializationFunction updater) { - return Mono.usingWhen(keyMono, - key -> Mono.fromCallable(() -> { - if (updateMode == UpdateMode.DISALLOW) { - throw new UnsupportedOperationException("update() is disallowed"); - } - AtomicReference oldRef = new AtomicReference<>(null); - var newValue = mainDb.compute(kShr(key), (_unused, old) -> { - if (old != null) { - oldRef.set(old); - } - Buffer v; - var oldToSend = old != null ? kkB(old) : null; - try { - assert oldToSend == null || oldToSend.isAccessible(); - v = updater.apply(oldToSend); - assert v == null || v.isAccessible(); - } catch (SerializationException e) { - throw new IllegalStateException(e); - } finally { - if (oldToSend != null && oldToSend.isAccessible()) { - oldToSend.close(); - } - } - if (v != null) { - return kOwn(v); - } else { - return null; - } - }); - var oldVal = oldRef.get(); - return LLDelta.of(oldVal != null ? kkB(oldRef.get()) : null, newValue != null ? kkB(newValue) : null); - }), - LLUtils::finalizeResource - ); + public LLDelta updateAndGetDelta(Buf key, BinarySerializationFunction updater) { + if (updateMode == UpdateMode.DISALLOW) { + throw new UnsupportedOperationException("update() is disallowed"); + } + AtomicReference oldRef = new AtomicReference<>(null); + var newValue = mainDb.compute(kShr(key), (_unused, old) -> { + if (old != null) { + oldRef.set(old); + } + Buf v; + var oldToSend = old != null ? kkB(old) : null; + try { + v = updater.apply(oldToSend); + } catch (SerializationException e) { + throw new IllegalStateException(e); + } + if (v != null) { + return kOwn(v); + } else { + return null; + } + }); + var oldVal = oldRef.get(); + return LLDelta.of(oldVal != null ? kkB(oldRef.get()) : null, newValue != null ? 
kkB(newValue) : null); } @Override - public Mono clear() { - return Mono.fromRunnable(mainDb::clear); + public void clear() { + mainDb.clear(); } @Override - public Mono remove(Mono keyMono, LLDictionaryResultType resultType) { - return Mono.usingWhen(keyMono, - key -> Mono - .fromCallable(() -> mainDb.remove(kShr(key))) - // Don't retain the result because it has been removed from the skip list - .mapNotNull(bytesList -> switch (resultType) { - case VOID -> null; - case PREVIOUS_VALUE_EXISTENCE -> LLUtils.booleanToResponseByteBuffer(allocator, true); - case PREVIOUS_VALUE -> kkB(bytesList); - }) - .switchIfEmpty(Mono.defer(() -> { - if (resultType == LLDictionaryResultType.PREVIOUS_VALUE_EXISTENCE) { - return Mono.fromCallable(() -> LLUtils.booleanToResponseByteBuffer(allocator, false)); - } else { - return Mono.empty(); - } - })) - .onErrorMap(cause -> new IOException("Failed to read", cause)), - LLUtils::finalizeResource - ); + public Buf remove(Buf key, LLDictionaryResultType resultType) { + var prev = mainDb.remove(kShr(key)); + // Don't retain the result because it has been removed from the skip list + if (prev != null) { + return switch (resultType) { + case VOID -> null; + case PREVIOUS_VALUE_EXISTENCE -> LLUtils.booleanToResponseByteBuffer(true); + case PREVIOUS_VALUE -> kkB(prev); + }; + } else { + if (resultType == LLDictionaryResultType.PREVIOUS_VALUE_EXISTENCE) { + return LLUtils.booleanToResponseByteBuffer(false); + } else { + return null; + } + } } @Override - public Flux getMulti(@Nullable LLSnapshot snapshot, Flux keys) { + public Stream getMulti(@Nullable LLSnapshot snapshot, Stream keys) { return keys.map(key -> { - try (key) { - ByteList v = snapshots.get(resolveSnapshot(snapshot)).get(k(key.copy().send())); - if (v != null) { - return OptionalBuf.of(kkB(v)); - } else { - return OptionalBuf.empty(); - } + Buf v = snapshots.get(resolveSnapshot(snapshot)).get(k(key)); + if (v != null) { + return OptionalBuf.of(kkB(v)); + } else { + return OptionalBuf.empty(); } }); } @Override - public Mono putMulti(Flux entries) { - return entries.doOnNext(entry -> { - try (entry) { - mainDb.put(k(entry.getKeyUnsafe().send()), k(entry.getValueUnsafe().send())); - } - }).then(); + public void putMulti(Stream entries) { + entries.forEach(entry -> mainDb.put(k(entry.getKey()), k(entry.getValue()))); } @Override - public Flux updateMulti(Flux keys, - Flux serializedKeys, - KVSerializationFunction updateFunction) { - return Flux.error(new UnsupportedOperationException("Not implemented")); + public Stream updateMulti(Stream keys, + Stream serializedKeys, + KVSerializationFunction updateFunction) { + throw new UnsupportedOperationException("Not implemented"); } @Override - public Flux getRange(@Nullable LLSnapshot snapshot, - Mono rangeMono, + public Stream getRange(@Nullable LLSnapshot snapshot, + LLRange range, boolean reverse, boolean smallRange) { - return Flux.usingWhen(rangeMono, range -> { - if (range.isSingle()) { - var singleToReceive = range.getSingle(); - return Mono.fromCallable(() -> { - try (var single = singleToReceive.receive()) { - var element = snapshots.get(resolveSnapshot(snapshot)).get(k(single.copy().send())); - if (element != null) { - return LLEntry.of(single, kkB(element)); - } else { - return null; - } - } - }).flux(); + if (range.isSingle()) { + var single = range.getSingle(); + var element = snapshots.get(resolveSnapshot(snapshot)).get(k(single)); + if (element != null) { + return Stream.of(LLEntry.of(single, kkB(element))); } else { - return Mono - .fromCallable(() 
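
// ---------------------------------------------------------------------------
// Sketch (illustrative, not part of the applied patch): updateAndGetDelta
// above captures the previous value through an AtomicReference set inside the
// compute() callback. Per the ConcurrentSkipListMap javadoc the remapping
// function is not guaranteed to be applied exactly once, so it must be
// side-effect tolerant: only the winning attempt's oldRef value pairs with the
// installed result. Standalone version with plain byte[] values;
// computeDeltaSketch is a hypothetical name.
static java.util.Map.Entry<byte[], byte[]> computeDeltaSketch(
    java.util.concurrent.ConcurrentSkipListMap<String, byte[]> map,
    String key,
    java.util.function.UnaryOperator<byte[]> updater) {
  var oldRef = new java.util.concurrent.atomic.AtomicReference<byte[]>();
  byte[] newValue = map.compute(key, (k, old) -> {
    oldRef.set(old);           // remember the previous value for the delta
    return updater.apply(old); // returning null removes the mapping
  });
  // SimpleImmutableEntry tolerates null values, unlike Map.entry
  return new java.util.AbstractMap.SimpleImmutableEntry<>(oldRef.get(), newValue);
}
// ---------------------------------------------------------------------------
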
-> mapSlice(snapshot, range)) - .flatMapIterable(map -> { - if (reverse) { - return map.descendingMap().entrySet(); - } else { - return map.entrySet(); - } - }) - .map(entry -> LLEntry.of(kkB(entry.getKey()), kkB(entry.getValue()))); + return Stream.empty(); } - }, LLUtils::finalizeResource); + } else { + var map = mapSlice(snapshot, range); + + Set> set; + if (reverse) { + set = map.descendingMap().entrySet(); + } else { + set = map.entrySet(); + } + return set.stream().map(entry -> LLEntry.of(kkB(entry.getKey()), kkB(entry.getValue()))); + } } @Override - public Flux> getRangeGrouped(@Nullable LLSnapshot snapshot, - Mono rangeMono, + public Stream> getRangeGrouped(@Nullable LLSnapshot snapshot, + LLRange range, int prefixLength, boolean smallRange) { - return Flux.usingWhen(rangeMono, range -> { - try (range) { - if (range.isSingle()) { - var singleToReceive = range.getSingle(); - return Mono.fromCallable(() -> { - try (var single = singleToReceive.receive()) { - var element = snapshots.get(resolveSnapshot(snapshot)).get(k(single.copy().send())); - if (element != null) { - return List.of(LLEntry.of(single, kkB(element))); - } else { - return List.of(); - } - } - }).flux(); - } else { - return Mono - .fromCallable(() -> mapSlice(snapshot, range)) - .flatMapIterable(SortedMap::entrySet) - .groupBy(k -> k.getKey().subList(0, prefixLength)) - .flatMap(groupedFlux -> groupedFlux - .map(entry -> LLEntry.of(kkB(entry.getKey()), kkB(entry.getValue()))) - .collectList() - ); - } + if (range.isSingle()) { + var single = range.getSingle(); + var element = snapshots.get(resolveSnapshot(snapshot)).get(k(single)); + if (element != null) { + return Stream.of(List.of(LLEntry.of(single, kkB(element)))); + } else { + return Stream.empty(); } - }, LLUtils::finalizeResource); + } else { + return mapSlice(snapshot, range) + .entrySet() + .stream() + .collect(groupingBy(k -> k.getKey().subList(0, prefixLength), + mapping(entry -> LLEntry.of(kkB(entry.getKey()), kkB(entry.getValue())), Collectors.toList()) + )) + .values() + .stream(); + } } @Override - public Flux getRangeKeys(@Nullable LLSnapshot snapshot, - Mono rangeMono, - boolean reverse, + public Stream getRangeKeys(@Nullable LLSnapshot snapshot, LLRange range, boolean reverse, boolean smallRange) { + if (range.isSingle()) { + var single = range.getSingle(); + var contains = snapshots.get(resolveSnapshot(snapshot)).containsKey(k(single)); + return contains ? 
Stream.of(single) : Stream.empty(); + } else { + var map = mapSlice(snapshot, range); + if (reverse) { + map = map.descendingMap(); + } + return map.keySet().stream().map(this::kkB); + } + } + + @Override + public Stream> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, + LLRange range, + int prefixLength, boolean smallRange) { + if (range.isSingle()) { + var single = range.getSingle(); + var containsElement = snapshots.get(resolveSnapshot(snapshot)).containsKey(k(single)); + if (containsElement) { + return Stream.of(List.of(single)); + } else { + return Stream.empty(); + } + } else { + return mapSlice(snapshot, range) + .entrySet() + .stream() + .collect(groupingBy(k -> k.getKey().subList(0, prefixLength), + mapping(entry -> kkB(entry.getKey()), Collectors.toList()) + )) + .values() + .stream(); + } + } + + @Override + public Stream getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, + LLRange range, + int prefixLength, boolean smallRange) { - return Flux.usingWhen(rangeMono, - range -> { - if (range.isSingle()) { - var singleToReceive = range.getSingle(); - return Mono.fromCallable(() -> { - var single = singleToReceive.receive(); - try { - var contains = snapshots.get(resolveSnapshot(snapshot)).containsKey(k(single.copy().send())); - return contains ? single : null; - } catch (Throwable ex) { - single.close(); - throw ex; - } - }).flux(); - } else { - return Mono - .fromCallable(() -> mapSlice(snapshot, range)) - .flatMapIterable(map -> { - if (reverse) { - return map.descendingMap().keySet(); - } else { - return map.keySet(); - } - }) - .map(this::kkB); - } - }, - LLUtils::finalizeResource - ); - } - - @Override - public Flux> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, - Mono rangeMono, - int prefixLength, boolean smallRange) { - return Flux.usingWhen(rangeMono, range -> { - try (range) { - if (range.isSingle()) { - var singleToReceive = range.getSingle(); - return Mono.fromCallable(() -> { - var single = singleToReceive.receive(); - try { - var containsElement = snapshots.get(resolveSnapshot(snapshot)).containsKey(k(single.copy().send())); - if (containsElement) { - return List.of(single); - } else { - return List.of(); - } - } catch (Throwable ex) { - single.close(); - throw ex; - } - }).flux(); - } else { - return Mono - .fromCallable(() -> mapSlice(snapshot, range)) - .flatMapIterable(SortedMap::entrySet) - .groupBy(k -> k.getKey().subList(0, prefixLength)) - .flatMap(groupedFlux -> groupedFlux - .map(entry -> kkB(entry.getKey())) - .collectList() - ); - } - } - }, LLUtils::finalizeResource); - } - - @SuppressWarnings("RedundantCast") - @Override - public Flux getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, - Mono rangeMono, - int prefixLength, boolean smallRange) { - return Flux.usingWhen(rangeMono, range -> { - try (range) { - if (range.isSingle()) { - var singleToReceive = range.getSingle(); - return Mono.fromCallable(() -> { - try (var single = singleToReceive.receive()) { - var k = k(single.copy().send()); - var containsElement = snapshots.get(resolveSnapshot(snapshot)).containsKey(k); - if (containsElement) { - return kkB(k.subList(0, prefixLength)); - } else { - return null; - } - } - }).flux(); - } else { - return Mono - .fromCallable(() -> mapSlice(snapshot, range)) - .flatMapIterable(SortedMap::entrySet) - .map(k -> (ByteList) k.getKey().subList(0, prefixLength)) - .distinctUntilChanged() - .map(this::kkB); - } - } - }, LLUtils::finalizeResource); - } - - @Override - public Flux badBlocks(Mono rangeMono) { - return Flux.empty(); - } - - @Override - public Mono 
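
// ---------------------------------------------------------------------------
// Sketch (illustrative, not part of the applied patch): the grouped range
// methods above swap Flux.groupBy for Collectors.groupingBy. Plain groupingBy
// collects into a HashMap, so the emitted groups do not follow the skip
// list's key order; a LinkedHashMap map factory preserves encounter order if
// callers rely on it. Standalone version; groupByPrefixSketch is a
// hypothetical name and, like the code above, it assumes every key is at
// least prefixLength long.
static java.util.Collection<java.util.List<String>> groupByPrefixSketch(
    java.util.SortedMap<String, String> slice, int prefixLength) {
  return slice.keySet().stream()
      .collect(java.util.stream.Collectors.groupingBy(
          k -> k.substring(0, prefixLength),  // group key: fixed-length prefix
          java.util.LinkedHashMap::new,       // keep the sorted encounter order
          java.util.stream.Collectors.toList()))
      .values();
}
// ---------------------------------------------------------------------------
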
setRange(Mono rangeMono, Flux entries, boolean smallRange) { - return Mono.usingWhen(rangeMono, range -> { - Mono clearMono; - if (range.isSingle()) { - var singleToReceive = range.getSingle(); - clearMono = Mono.fromRunnable(() -> { - try (var single = singleToReceive.receive()) { - var k = k(single.copy().send()); - mainDb.remove(k); - } - }); + if (range.isSingle()) { + var single = range.getSingle(); + var k = k(single); + var containsElement = snapshots.get(resolveSnapshot(snapshot)).containsKey(k); + if (containsElement) { + return Stream.of(kkB(k.subList(0, prefixLength))); } else { - clearMono = Mono.fromRunnable(() -> mapSlice(null, range).clear()); + return Stream.empty(); } - - var r = r(range::copy); - - return clearMono - .thenMany(entries) - .doOnNext(entry -> { - try (entry) { - if (!isInsideRange(r, kShr(entry.getKeyUnsafe()))) { - throw new IndexOutOfBoundsException("Trying to set a key outside the range!"); - } - mainDb.put(kShr(entry.getKeyUnsafe()), kShr(entry.getValueUnsafe())); - } - }) - .then(); - }, LLUtils::finalizeResource); + } else { + return mapSlice(snapshot, range).keySet().stream() + .map(bytes -> bytes.subList(0, prefixLength)) + .distinct() + .map(this::kkB); + } } - private boolean isInsideRange(BLRange range, ByteList key) { + @Override + public Stream badBlocks(LLRange range) { + return Stream.empty(); + } + + @Override + public void setRange(LLRange range, Stream entries, boolean smallRange) { + if (range.isSingle()) { + var single = range.getSingle(); + var k = k(single); + mainDb.remove(k); + } else { + mapSlice(null, range).clear(); + } + + var r = r(range::copy); + + entries.forEach(entry -> { + if (!isInsideRange(r, kShr(entry.getKey()))) { + throw new IndexOutOfBoundsException("Trying to set a key outside the range!"); + } + mainDb.put(kShr(entry.getKey()), kShr(entry.getValue())); + }); + } + + private boolean isInsideRange(BLRange range, Buf key) { if (range.isAll()) { return true; } else if (range.isSingle()) { @@ -528,68 +404,46 @@ public class LLMemoryDictionary implements LLDictionary { } @Override - public Mono isRangeEmpty(@Nullable LLSnapshot snapshot, Mono rangeMono, boolean fillCache) { - return getRangeKeys(snapshot, rangeMono, false, false) - .doOnNext(LLUtils::finalizeResourceNow) - .count() - .map(count -> count == 0); + public boolean isRangeEmpty(@Nullable LLSnapshot snapshot, LLRange range, boolean fillCache) { + return getRangeKeys(snapshot, range, false, false).count() == 0; } @Override - public Mono sizeRange(@Nullable LLSnapshot snapshot, Mono rangeMono, boolean fast) { - return Mono.usingWhen(rangeMono, - range -> Mono.fromCallable(() -> (long) mapSlice(snapshot, range).size()), - LLUtils::finalizeResource - ); + public long sizeRange(@Nullable LLSnapshot snapshot, LLRange range, boolean fast) { + return mapSlice(snapshot, range).size(); } @Override - public Mono getOne(@Nullable LLSnapshot snapshot, Mono rangeMono) { - return getRange(snapshot, rangeMono, false, false) - .take(1, true) - .singleOrEmpty(); + public LLEntry getOne(@Nullable LLSnapshot snapshot, LLRange range) { + return getRange(snapshot, range, false, false).findAny().orElse(null); } @Override - public Mono getOneKey(@Nullable LLSnapshot snapshot, Mono rangeMono) { - return getRangeKeys(snapshot, rangeMono, false, false) - .take(1, true) - .singleOrEmpty(); + public Buf getOneKey(@Nullable LLSnapshot snapshot, LLRange range) { + return getRangeKeys(snapshot, range, false, false).findAny().orElse(null); } @Override - public Mono removeOne(Mono rangeMono) { 
- return Mono.usingWhen(rangeMono, range -> { - try (range) { - if (range.isSingle()) { - var singleToReceive = range.getSingle(); - return Mono.fromCallable(() -> { - try (var single = singleToReceive.receive()) { - var element = mainDb.remove(k(single.copy().send())); - if (element != null) { - return LLEntry.of(single, kkB(element)); - } else { - return null; - } - } - }); - } else { - return Mono - .fromCallable(() -> mapSlice(null, range)) - .mapNotNull(map -> { - var it = map.entrySet().iterator(); - if (it.hasNext()) { - var next = it.next(); - it.remove(); - return next; - } else { - return null; - } - }) - .map(entry -> LLEntry.of(kkB(entry.getKey()), kkB(entry.getValue()))); - } + public LLEntry removeOne(LLRange range) { + if (range.isSingle()) { + var single = range.getSingle(); + var element = mainDb.remove(k(single)); + if (element != null) { + return LLEntry.of(single, kkB(element)); + } else { + return null; } - }, LLUtils::finalizeResource); + } else { + var map = mapSlice(null, range); + var it = map.entrySet().iterator(); + if (it.hasNext()) { + var next = it.next(); + it.remove(); + return LLEntry.of(kkB(next.getKey()), kkB(next.getValue())); + } else { + return null; + } + } } @Override diff --git a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryKeyValueDatabase.java b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryKeyValueDatabase.java index 26afcc7..fa735b1 100644 --- a/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryKeyValueDatabase.java +++ b/src/main/java/it/cavallium/dbengine/database/memory/LLMemoryKeyValueDatabase.java @@ -1,7 +1,7 @@ package it.cavallium.dbengine.database.memory; import io.micrometer.core.instrument.MeterRegistry; -import io.netty5.buffer.BufferAllocator; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.client.MemoryStats; import it.cavallium.dbengine.database.ColumnProperty; import it.cavallium.dbengine.database.LLDictionary; @@ -15,8 +15,6 @@ import it.cavallium.dbengine.database.RocksDBStringProperty; import it.cavallium.dbengine.database.TableWithProperties; import it.cavallium.dbengine.database.UpdateMode; import it.cavallium.dbengine.rpc.current.data.Column; -import it.cavallium.dbengine.utils.InternalMonoUtils; -import it.unimi.dsi.fastutil.bytes.ByteList; import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.util.List; @@ -24,31 +22,21 @@ import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentSkipListMap; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Stream; import org.jetbrains.annotations.Nullable; -import org.reactivestreams.Publisher; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; public class LLMemoryKeyValueDatabase implements LLKeyValueDatabase { - - static { - LLUtils.initHooks(); - } - - private final BufferAllocator allocator; private final MeterRegistry meterRegistry; private final String name; private final AtomicLong nextSnapshotNumber = new AtomicLong(1); - private final ConcurrentHashMap>> snapshots = new ConcurrentHashMap<>(); - private final ConcurrentHashMap> mainDb; + private final ConcurrentHashMap>> snapshots = new ConcurrentHashMap<>(); + private final ConcurrentHashMap> mainDb; private final ConcurrentHashMap singletons = new ConcurrentHashMap<>(); - public LLMemoryKeyValueDatabase(BufferAllocator allocator, - MeterRegistry meterRegistry, + public LLMemoryKeyValueDatabase(MeterRegistry meterRegistry, String name, List 
columns) {
-    this.allocator = allocator;
     this.meterRegistry = meterRegistry;
     this.name = name;
     this.mainDb = new ConcurrentHashMap<>();
@@ -59,113 +47,95 @@ public class LLMemoryKeyValueDatabase implements LLKeyValueDatabase {
   }
 
   @Override
-  public Mono<LLSingleton> getSingleton(byte[] singletonListColumnName,
+  public LLSingleton getSingleton(byte[] singletonListColumnName,
       byte[] singletonName,
       byte @Nullable[] defaultValue) {
     var columnNameString = new String(singletonListColumnName, StandardCharsets.UTF_8);
-    var dict = singletons.computeIfAbsent(columnNameString, _unused -> new LLMemoryDictionary(allocator,
-        name,
+    var dict = singletons.computeIfAbsent(columnNameString, _unused -> new LLMemoryDictionary(name,
         columnNameString,
         UpdateMode.ALLOW,
         snapshots,
         mainDb
     ));
-    return Mono
-        .fromCallable(() -> new LLMemorySingleton(dict, columnNameString, singletonName)).flatMap(singleton -> singleton
-            .get(null)
-            .transform(mono -> {
-              if (defaultValue != null) {
-                return mono.switchIfEmpty(singleton
-                    .set(Mono.fromSupplier(() -> allocator.copyOf(defaultValue)))
-                    .as(InternalMonoUtils::toAny));
-              } else {
-                return mono;
-              }
-            })
-            .thenReturn(singleton)
-        );
+    var singleton = new LLMemorySingleton(dict, columnNameString, singletonName);
+    Buf returnValue = singleton.get(null);
+    // Write the default only when one was actually provided
+    if (returnValue == null && defaultValue != null) {
+      singleton.set(Buf.wrap(defaultValue));
+    }
+    return singleton;
   }
 
   @Override
-  public Mono<LLDictionary> getDictionary(byte[] columnName, UpdateMode updateMode) {
+  public LLDictionary getDictionary(byte[] columnName, UpdateMode updateMode) {
     var columnNameString = new String(columnName, StandardCharsets.UTF_8);
-    return Mono.fromCallable(() -> new LLMemoryDictionary(allocator,
-        name,
+    return new LLMemoryDictionary(name,
         columnNameString,
         updateMode,
         snapshots,
         mainDb
-    ));
+    );
   }
 
   @Override
-  public Mono<MemoryStats> getMemoryStats() {
-    return Mono.just(new MemoryStats(0, 0, 0, 0, 0, 0));
+  public MemoryStats getMemoryStats() {
+    return new MemoryStats(0, 0, 0, 0, 0, 0);
   }
 
   @Override
-  public Mono<String> getRocksDBStats() {
-    return Mono.empty();
+  public String getRocksDBStats() {
+    return null;
   }
 
   @Override
-  public Mono<Map<String, String>> getMapProperty(@Nullable Column column, RocksDBMapProperty property) {
-    return Mono.empty();
+  public Map<String, String> getMapProperty(@Nullable Column column, RocksDBMapProperty property) {
+    return null;
   }
 
   @Override
-  public Flux<ColumnProperty<Map<String, String>>> getMapColumnProperties(RocksDBMapProperty property) {
-    return Flux.empty();
+  public Stream<ColumnProperty<Map<String, String>>> getMapColumnProperties(RocksDBMapProperty property) {
+    return Stream.empty();
   }
 
   @Override
-  public Mono<String> getStringProperty(@Nullable Column column, RocksDBStringProperty property) {
-    return Mono.empty();
+  public String getStringProperty(@Nullable Column column, RocksDBStringProperty property) {
+    return null;
   }
 
   @Override
-  public Flux<ColumnProperty<String>> getStringColumnProperties(RocksDBStringProperty property) {
-    return Flux.empty();
+  public Stream<ColumnProperty<String>> getStringColumnProperties(RocksDBStringProperty property) {
+    return Stream.empty();
  }
 
   @Override
-  public Mono<Long> getLongProperty(@Nullable Column column, RocksDBLongProperty property) {
-    return Mono.empty();
+  public Long getLongProperty(@Nullable Column column, RocksDBLongProperty property) {
+    return null;
   }
 
   @Override
-  public Flux<ColumnProperty<Long>> getLongColumnProperties(RocksDBLongProperty property) {
-    return Flux.empty();
+  public Stream<ColumnProperty<Long>> getLongColumnProperties(RocksDBLongProperty property) {
+    return Stream.empty();
   }
 
   @Override
-  public Mono<Long> getAggregatedLongProperty(RocksDBLongProperty property) {
-    return Mono.empty();
+  public Long getAggregatedLongProperty(RocksDBLongProperty 
property) { + return null; } @Override - public Flux getTableProperties() { - return Flux.empty(); + public Stream getTableProperties() { + return Stream.empty(); } @Override - public Mono verifyChecksum() { - return Mono.empty(); + public void verifyChecksum() { } @Override - public Mono compact() { - return Mono.empty(); + public void compact() { } @Override - public Mono flush() { - return Mono.empty(); - } - - @Override - public BufferAllocator getAllocator() { - return allocator; + public void flush() { } @Override @@ -174,19 +144,16 @@ public class LLMemoryKeyValueDatabase implements LLKeyValueDatabase { } @Override - public Mono preClose() { - return null; + public void preClose() { } @Override - public Mono close() { - return Mono.fromRunnable(() -> { - snapshots.forEach((snapshot, dbs) -> dbs.forEach((columnName, db) -> { - db.clear(); - })); - mainDb.forEach((columnName, db) -> { - db.clear(); - }); + public void close() { + snapshots.forEach((snapshot, dbs) -> dbs.forEach((columnName, db) -> { + db.clear(); + })); + mainDb.forEach((columnName, db) -> { + db.clear(); }); } @@ -196,35 +163,28 @@ public class LLMemoryKeyValueDatabase implements LLKeyValueDatabase { } @Override - public Mono takeSnapshot() { - return Mono - .fromCallable(() -> { - var snapshotNumber = nextSnapshotNumber.getAndIncrement(); - var snapshot = new ConcurrentHashMap>(); - mainDb.forEach((columnName, column) -> { - var cloned = column.clone(); - snapshot.put(columnName, cloned); - }); - snapshots.put(snapshotNumber, snapshot); - return new LLSnapshot(snapshotNumber); - }); + public LLSnapshot takeSnapshot() { + var snapshotNumber = nextSnapshotNumber.getAndIncrement(); + var snapshot = new ConcurrentHashMap>(); + mainDb.forEach((columnName, column) -> { + var cloned = column.clone(); + snapshot.put(columnName, cloned); + }); + snapshots.put(snapshotNumber, snapshot); + return new LLSnapshot(snapshotNumber); } @Override - public Mono releaseSnapshot(LLSnapshot snapshot) { - return Mono - .fromCallable(() -> snapshots.remove(snapshot.getSequenceNumber())) - .then(); + public void releaseSnapshot(LLSnapshot snapshot) { + snapshots.remove(snapshot.getSequenceNumber()); } @Override - public Mono pauseForBackup() { - return Mono.empty(); + public void pauseForBackup() { } @Override - public Mono resumeAfterBackup() { - return Mono.empty(); + public void resumeAfterBackup() { } @Override @@ -233,7 +193,7 @@ public class LLMemoryKeyValueDatabase implements LLKeyValueDatabase { } @Override - public Mono ingestSST(Column column, Publisher files, boolean replaceExisting) { - return Mono.error(new UnsupportedOperationException("Memory db doesn't support SST files")); + public void ingestSST(Column column, Stream files, boolean replaceExisting) { + throw new UnsupportedOperationException("Memory db doesn't support SST files"); } } diff --git a/src/main/java/it/cavallium/dbengine/database/memory/LLMemorySingleton.java b/src/main/java/it/cavallium/dbengine/database/memory/LLMemorySingleton.java index 62028e4..80ad4c7 100644 --- a/src/main/java/it/cavallium/dbengine/database/memory/LLMemorySingleton.java +++ b/src/main/java/it/cavallium/dbengine/database/memory/LLMemorySingleton.java @@ -1,31 +1,25 @@ package it.cavallium.dbengine.database.memory; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.Buf; import it.cavallium.dbengine.database.LLDelta; import it.cavallium.dbengine.database.LLDictionaryResultType; import 
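
// ---------------------------------------------------------------------------
// Sketch (illustrative, not part of the applied patch): takeSnapshot() above
// relies on ConcurrentSkipListMap.clone(), an O(n) shallow copy. Later writes
// to the live map no longer reach the snapshot, but key/value objects are
// shared, so Buf contents must be treated as immutable; writes that race with
// the clone itself may or may not be included in the copy.
static <K, V> java.util.concurrent.ConcurrentSkipListMap<K, V> snapshotSketch(
    java.util.concurrent.ConcurrentSkipListMap<K, V> live) {
  return live.clone(); // new skip-list structure, same key/value references
}
// ---------------------------------------------------------------------------
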
it.cavallium.dbengine.database.LLSingleton; import it.cavallium.dbengine.database.LLSnapshot; import it.cavallium.dbengine.database.UpdateReturnMode; import it.cavallium.dbengine.database.disk.BinarySerializationFunction; -import it.cavallium.dbengine.database.serialization.SerializationFunction; +import java.nio.charset.StandardCharsets; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public class LLMemorySingleton implements LLSingleton { private final LLMemoryDictionary dict; private final String columnNameString; - private final byte[] singletonName; - private final Mono singletonNameBufMono; + private final Buf singletonName; public LLMemorySingleton(LLMemoryDictionary dict, String columnNameString, byte[] singletonName) { this.dict = dict; this.columnNameString = columnNameString; - this.singletonName = singletonName; - this.singletonNameBufMono = Mono.fromSupplier(() -> dict.getAllocator().allocate(singletonName.length) - .writeBytes(singletonName)); + this.singletonName = Buf.wrap(singletonName); } @Override @@ -34,32 +28,24 @@ public class LLMemorySingleton implements LLSingleton { } @Override - public BufferAllocator getAllocator() { - return dict.getAllocator(); + public Buf get(@Nullable LLSnapshot snapshot) { + return dict.get(snapshot, singletonName); } @Override - public Mono get(@Nullable LLSnapshot snapshot) { - return dict.get(snapshot, singletonNameBufMono); + public void set(Buf value) { + dict.put(singletonName, value, LLDictionaryResultType.VOID); } @Override - public Mono set(Mono value) { - var bbKey = singletonNameBufMono; - return dict - .put(bbKey, value, LLDictionaryResultType.VOID) - .then(); - } - - @Override - public Mono update(BinarySerializationFunction updater, + public Buf update(BinarySerializationFunction updater, UpdateReturnMode updateReturnMode) { - return dict.update(singletonNameBufMono, updater, updateReturnMode); + return dict.update(singletonName, updater, updateReturnMode); } @Override - public Mono updateAndGetDelta(BinarySerializationFunction updater) { - return dict.updateAndGetDelta(singletonNameBufMono, updater); + public LLDelta updateAndGetDelta(BinarySerializationFunction updater) { + return dict.updateAndGetDelta(singletonName, updater); } @Override @@ -69,6 +55,6 @@ public class LLMemorySingleton implements LLSingleton { @Override public String getName() { - return new String(singletonName); + return singletonName.toString(StandardCharsets.UTF_8); } } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/ByteListSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/BufSerializer.java similarity index 56% rename from src/main/java/it/cavallium/dbengine/database/remote/ByteListSerializer.java rename to src/main/java/it/cavallium/dbengine/database/remote/BufSerializer.java index a1de4d4..3307cb9 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/ByteListSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/BufSerializer.java @@ -1,17 +1,16 @@ package it.cavallium.dbengine.database.remote; import it.cavallium.data.generator.DataSerializer; -import it.unimi.dsi.fastutil.bytes.ByteArrayList; -import it.unimi.dsi.fastutil.bytes.ByteList; +import it.cavallium.dbengine.buffers.Buf; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import org.jetbrains.annotations.NotNull; -public class ByteListSerializer implements DataSerializer { +public class BufSerializer implements DataSerializer { @Override - public void 
serialize(DataOutput dataOutput, @NotNull ByteList bytes) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull Buf bytes) { dataOutput.writeInt(bytes.size()); for (Byte aByte : bytes) { dataOutput.writeByte(aByte); @@ -19,9 +18,9 @@ public class ByteListSerializer implements DataSerializer { } @Override - public @NotNull ByteList deserialize(DataInput dataInput) throws IOException { + public @NotNull Buf deserialize(DataInput dataInput) { var size = dataInput.readInt(); - var bal = new ByteArrayList(size); + var bal = Buf.create(size); for (int i = 0; i < size; i++) { bal.add(dataInput.readByte()); } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/ColumnFamilyHandleSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/ColumnFamilyHandleSerializer.java index 3051cc1..1f963e8 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/ColumnFamilyHandleSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/ColumnFamilyHandleSerializer.java @@ -10,12 +10,12 @@ import org.rocksdb.ColumnFamilyHandle; public class ColumnFamilyHandleSerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull ColumnFamilyHandle columnFamilyHandle) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull ColumnFamilyHandle columnFamilyHandle) { throw new UnsupportedOperationException("Can't encode this type"); } @Override - public @NotNull ColumnFamilyHandle deserialize(DataInput dataInput) throws IOException { + public @NotNull ColumnFamilyHandle deserialize(DataInput dataInput) { throw new UnsupportedOperationException("Can't encode this type"); } } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/CompressionSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/CompressionSerializer.java index 53a84aa..3e2f035 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/CompressionSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/CompressionSerializer.java @@ -10,12 +10,12 @@ import org.jetbrains.annotations.NotNull; public class CompressionSerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull Compression compression) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull Compression compression) { dataOutput.writeInt(compression.ordinal()); } @Override - public @NotNull Compression deserialize(DataInput dataInput) throws IOException { + public @NotNull Compression deserialize(DataInput dataInput) { return Compression.values()[dataInput.readInt()]; } } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/DurationSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/DurationSerializer.java index 9e5a1fd..ad4104d 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/DurationSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/DurationSerializer.java @@ -6,13 +6,12 @@ import java.io.DataOutput; import java.io.IOException; import java.time.Duration; import java.time.temporal.ChronoUnit; -import java.time.temporal.TemporalUnit; import org.jetbrains.annotations.NotNull; public class DurationSerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull Duration duration) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull Duration duration) { var units = duration.getUnits(); var smallestUnit = (ChronoUnit) 
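
// ---------------------------------------------------------------------------
// Note (illustrative, not part of the applied patch): BufSerializer above
// writes a 4-byte length followed by one byte per element, and reads the same
// format back. An equivalent bulk form with java.io streams would be, assuming
// a hypothetical buf.asArray() accessor for the backing byte[]:
//
//   dataOutput.writeInt(buf.size());
//   dataOutput.write(buf.asArray(), 0, buf.size());
//
//   byte[] b = new byte[dataInput.readInt()];
//   dataInput.readFully(b);   // DataInput.readFully fills the whole array
//   Buf out = Buf.wrap(b);
// ---------------------------------------------------------------------------
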
units.get(units.size() - 1); dataOutput.writeInt(smallestUnit.ordinal()); @@ -20,7 +19,7 @@ public class DurationSerializer implements DataSerializer { } @Override - public @NotNull Duration deserialize(DataInput dataInput) throws IOException { + public @NotNull Duration deserialize(DataInput dataInput) { var smallestUnit = ChronoUnit.values()[dataInput.readInt()]; return Duration.of(dataInput.readLong(), smallestUnit); } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/LLQuicConnection.java b/src/main/java/it/cavallium/dbengine/database/remote/LLQuicConnection.java deleted file mode 100644 index 142e055..0000000 --- a/src/main/java/it/cavallium/dbengine/database/remote/LLQuicConnection.java +++ /dev/null @@ -1,595 +0,0 @@ -package it.cavallium.dbengine.database.remote; - -import com.google.common.collect.Multimap; -import io.micrometer.core.instrument.MeterRegistry; -import io.netty.handler.ssl.util.InsecureTrustManagerFactory; -import io.netty.incubator.codec.quic.QuicSslContextBuilder; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import it.cavallium.dbengine.client.MemoryStats; -import it.cavallium.dbengine.client.query.current.data.Query; -import it.cavallium.dbengine.client.query.current.data.QueryParams; -import it.cavallium.dbengine.database.ColumnProperty; -import it.cavallium.dbengine.database.LLDatabaseConnection; -import it.cavallium.dbengine.database.LLDelta; -import it.cavallium.dbengine.database.LLDictionary; -import it.cavallium.dbengine.database.LLIndexRequest; -import it.cavallium.dbengine.database.LLKeyValueDatabase; -import it.cavallium.dbengine.database.LLLuceneIndex; -import it.cavallium.dbengine.database.LLSearchResultShard; -import it.cavallium.dbengine.database.LLSingleton; -import it.cavallium.dbengine.database.LLSnapshot; -import it.cavallium.dbengine.database.LLTerm; -import it.cavallium.dbengine.database.LLUpdateDocument; -import it.cavallium.dbengine.database.LLUtils; -import it.cavallium.dbengine.database.RocksDBLongProperty; -import it.cavallium.dbengine.database.RocksDBMapProperty; -import it.cavallium.dbengine.database.RocksDBStringProperty; -import it.cavallium.dbengine.database.TableWithProperties; -import it.cavallium.dbengine.database.UpdateMode; -import it.cavallium.dbengine.database.UpdateReturnMode; -import it.cavallium.dbengine.database.disk.BinarySerializationFunction; -import it.cavallium.dbengine.database.remote.RPCCodecs.RPCEventCodec; -import it.cavallium.dbengine.database.serialization.SerializationException; -import it.cavallium.dbengine.lucene.LuceneHacks; -import it.cavallium.dbengine.lucene.collector.Buckets; -import it.cavallium.dbengine.lucene.searcher.BucketParams; -import it.cavallium.dbengine.rpc.current.data.BinaryOptional; -import it.cavallium.dbengine.rpc.current.data.ClientBoundRequest; -import it.cavallium.dbengine.rpc.current.data.ClientBoundResponse; -import it.cavallium.dbengine.rpc.current.data.CloseDatabase; -import it.cavallium.dbengine.rpc.current.data.CloseLuceneIndex; -import it.cavallium.dbengine.rpc.current.data.Column; -import it.cavallium.dbengine.rpc.current.data.DatabaseOptions; -import it.cavallium.dbengine.rpc.current.data.GeneratedEntityId; -import it.cavallium.dbengine.rpc.current.data.GetDatabase; -import it.cavallium.dbengine.rpc.current.data.GetLuceneIndex; -import it.cavallium.dbengine.rpc.current.data.GetSingleton; -import it.cavallium.dbengine.rpc.current.data.IndicizerAnalyzers; -import it.cavallium.dbengine.rpc.current.data.IndicizerSimilarities; -import 
it.cavallium.dbengine.rpc.current.data.LuceneIndexStructure; -import it.cavallium.dbengine.rpc.current.data.LuceneOptions; -import it.cavallium.dbengine.rpc.current.data.RPCEvent; -import it.cavallium.dbengine.rpc.current.data.ServerBoundRequest; -import it.cavallium.dbengine.rpc.current.data.ServerBoundResponse; -import it.cavallium.dbengine.rpc.current.data.SingletonGet; -import it.cavallium.dbengine.rpc.current.data.SingletonSet; -import it.cavallium.dbengine.rpc.current.data.SingletonUpdateEnd; -import it.cavallium.dbengine.rpc.current.data.SingletonUpdateInit; -import it.cavallium.dbengine.rpc.current.data.SingletonUpdateOldData; -import it.cavallium.dbengine.rpc.current.data.nullables.NullableBytes; -import it.cavallium.dbengine.rpc.current.data.nullables.NullableLLSnapshot; -import it.unimi.dsi.fastutil.bytes.ByteList; -import java.io.File; -import java.net.SocketAddress; -import java.nio.file.Path; -import java.time.Duration; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.ConcurrentHashMap; -import java.util.function.Function; -import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; -import org.reactivestreams.Publisher; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.netty.incubator.quic.QuicClient; -import reactor.netty.incubator.quic.QuicConnection; - -public class LLQuicConnection implements LLDatabaseConnection { - - private final BufferAllocator allocator; - private final MeterRegistry meterRegistry; - private final SocketAddress bindAddress; - private final SocketAddress remoteAddress; - private volatile QuicConnection quicConnection; - private final ConcurrentHashMap> databases = new ConcurrentHashMap<>(); - private final ConcurrentHashMap> indexes = new ConcurrentHashMap<>(); - private Mono connectionMono = Mono.error(new IllegalStateException("Not connected")); - - public LLQuicConnection(BufferAllocator allocator, - MeterRegistry meterRegistry, - SocketAddress bindAddress, - SocketAddress remoteAddress) { - this.allocator = allocator; - this.meterRegistry = meterRegistry; - this.bindAddress = bindAddress; - this.remoteAddress = remoteAddress; - } - - @Override - public BufferAllocator getAllocator() { - return allocator; - } - - @Override - public MeterRegistry getMeterRegistry() { - return meterRegistry; - } - - @Override - public Mono connect() { - String keyFileLocation = System.getProperty("it.cavalliumdb.keyFile", null); - String certFileLocation = System.getProperty("it.cavalliumdb.certFile", null); - String keyStorePassword = System.getProperty("it.cavalliumdb.keyPassword", null); - String certChainLocation = System.getProperty("it.cavalliumdb.caFile", null); - File keyFile; - File certFile; - File certChain; - if (keyFileLocation != null) { - keyFile = new File(keyFileLocation); - } else { - keyFile = null; - } - if (certFileLocation != null) { - certFile = new File(certFileLocation); - } else { - certFile = null; - } - if (certChainLocation != null) { - certChain = new File(certChainLocation); - } else { - certChain = null; - } - var sslContextBuilder = QuicSslContextBuilder.forClient(); - if (keyFileLocation != null || certFileLocation != null) { - sslContextBuilder.keyManager(keyFile, keyStorePassword, certFile); - } - if (certChainLocation != null) { - sslContextBuilder.trustManager(certChain); - } else { - sslContextBuilder.trustManager(InsecureTrustManagerFactory.INSTANCE); - } - var sslContext = sslContextBuilder - 
.applicationProtocols("db/0.9") - .build(); - return QuicClient.create() - .bindAddress(() -> bindAddress) - .remoteAddress(() -> remoteAddress) - .secure(sslContext) - .idleTimeout(Duration.ofSeconds(30)) - .initialSettings(spec -> spec - .maxData(10000000) - .maxStreamDataBidirectionalLocal(1000000) - ) - .connect() - .doOnNext(conn -> quicConnection = conn) - .thenReturn(this); - } - - @SuppressWarnings("unchecked") - private Mono sendRequest(ServerBoundRequest serverBoundRequest) { - return QuicUtils.sendSimpleRequest(quicConnection, - RPCEventCodec::new, - RPCEventCodec::new, - serverBoundRequest - ).map(event -> (T) event); - } - - private Mono sendEvent(ServerBoundRequest serverBoundRequest) { - return QuicUtils.sendSimpleEvent(quicConnection, - RPCEventCodec::new, - serverBoundRequest - ); - } - - private Mono sendUpdateRequest(ServerBoundRequest serverBoundReq, - Function updaterFunction) { - return Mono.empty(); - /* - - return Mono.defer(() -> { - Empty streamTerminator = Sinks.empty(); - return QuicUtils.createStream(quicConnection, stream -> { - Mono serverReq = Mono.defer(() -> stream.out() - .withConnection(conn -> conn.addHandler(new RPCCodecs.RPCServerBoundRequestDecoder())) - .sendObject(serverBoundReq) - .then()).doOnSubscribe(s -> System.out.println("out1")); - //noinspection unchecked - Mono clientBoundReqMono = Mono.defer(() -> stream.in() - .withConnection(conn -> conn.addHandler(new RPCClientBoundRequestDecoder())) - .receiveObject() - .log("TO_CLIENT_REQ", Level.INFO) - .take(1, true) - .singleOrEmpty() - .map(req -> (U) req) - .doOnSubscribe(s -> System.out.println("in1")) - .switchIfEmpty((Mono) QuicUtils.NO_RESPONSE_ERROR) - ); - Mono serverBoundRespFlux = clientBoundReqMono - .map(updaterFunction) - .transform(respMono -> Mono.defer(() -> stream.out() - .withConnection(conn -> conn.addHandler(new RPCServerBoundResponseDecoder())) - .sendObject(respMono) - .then() - .doOnSubscribe(s -> System.out.println("out2")) - )); - //noinspection unchecked - Mono clientBoundResponseMono = Mono.defer(() -> stream.in() - .withConnection(conn -> conn.addHandler(new RPCClientBoundResponseDecoder())) - .receiveObject() - .map(resp -> (T) resp) - .log("TO_SERVER_RESP", Level.INFO) - .take(1, true) - .doOnSubscribe(s -> System.out.println("out2")) - .singleOrEmpty() - .switchIfEmpty((Mono) QuicUtils.NO_RESPONSE_ERROR)); - return serverReq - .then(serverBoundRespFlux) - .then(clientBoundResponseMono) - .doFinally(s -> streamTerminator.tryEmitEmpty()); - }, streamTerminator.asMono()).single(); - }); - - */ - } - - @Override - public Mono getDatabase(String databaseName, - List columns, DatabaseOptions databaseOptions) { - return sendRequest(new GetDatabase(databaseName, columns, databaseOptions)) - .cast(GeneratedEntityId.class) - .map(GeneratedEntityId::id) - .map(id -> new LLKeyValueDatabase() { - - @Override - public Mono ingestSST(Column column, Publisher files, boolean replaceExisting) { - return null; - } - - @Override - public Mono getSingleton(byte[] singletonListColumnName, - byte[] name, - byte @Nullable[] defaultValue) { - return sendRequest(new GetSingleton(id, - ByteList.of(singletonListColumnName), - ByteList.of(name), - defaultValue == null ? 
NullableBytes.empty() : NullableBytes.of(ByteList.of(defaultValue)) - )).cast(GeneratedEntityId.class).map(GeneratedEntityId::id).map(singletonId -> new LLSingleton() { - - @Override - public BufferAllocator getAllocator() { - return allocator; - } - - @Override - public Mono get(@Nullable LLSnapshot snapshot) { - return sendRequest(new SingletonGet(singletonId, NullableLLSnapshot.ofNullable(snapshot))) - .cast(BinaryOptional.class) - .mapNotNull(result -> { - if (result.val().isPresent()) { - return allocator.copyOf(QuicUtils.toArrayNoCopy(result.val().get().val())); - } else { - return null; - } - }); - } - - @Override - public Mono set(Mono valueMono) { - return QuicUtils.toBytes(valueMono) - .flatMap(valueSendOpt -> sendRequest(new SingletonSet(singletonId, valueSendOpt)).then()); - } - - @Override - public Mono update(BinarySerializationFunction updater, UpdateReturnMode updateReturnMode) { - return LLQuicConnection.this.sendUpdateRequest(new SingletonUpdateInit(singletonId, updateReturnMode), prev -> { - byte[] oldData = toArrayNoCopy(prev); - Buffer oldDataBuf; - if (oldData != null) { - oldDataBuf = allocator.copyOf(oldData); - } else { - oldDataBuf = null; - } - try (oldDataBuf) { - try (var result = updater.apply(oldDataBuf)) { - if (result == null) { - return new SingletonUpdateEnd(false, ByteList.of()); - } else { - byte[] resultArray = new byte[result.readableBytes()]; - result.readBytes(resultArray, 0, resultArray.length); - return new SingletonUpdateEnd(true, ByteList.of(resultArray)); - } - } - } catch (SerializationException e) { - throw new IllegalStateException(e); - } - }).mapNotNull(result -> { - if (result.val().isPresent()) { - return allocator.copyOf(QuicUtils.toArrayNoCopy(result.val().get().val())); - } else { - return null; - } - }); - } - - @Override - public Mono updateAndGetDelta(BinarySerializationFunction updater) { - return Mono.error(new UnsupportedOperationException()); - } - - @Override - public String getDatabaseName() { - return databaseName; - } - - @Override - public String getColumnName() { - return new String(singletonListColumnName); - } - - @Override - public String getName() { - return new String(name); - } - }); - } - - @Override - public Mono getDictionary(byte[] columnName, UpdateMode updateMode) { - return null; - } - - @Override - public Mono getMemoryStats() { - return null; - } - - @Override - public Mono getRocksDBStats() { - return null; - } - - @Override - public Mono getAggregatedLongProperty(RocksDBLongProperty property) { - return null; - } - - @Override - public Mono getStringProperty(@Nullable Column column, RocksDBStringProperty property) { - return null; - } - - @Override - public Flux> getStringColumnProperties(RocksDBStringProperty property) { - return null; - } - - @Override - public Mono getLongProperty(@Nullable Column column, RocksDBLongProperty property) { - return null; - } - - @Override - public Flux> getLongColumnProperties(RocksDBLongProperty property) { - return null; - } - - @Override - public Mono> getMapProperty(@Nullable Column column, RocksDBMapProperty property) { - return null; - } - - @Override - public Flux>> getMapColumnProperties(RocksDBMapProperty property) { - return null; - } - - @Override - public Flux getTableProperties() { - return null; - } - - @Override - public Mono verifyChecksum() { - return null; - } - - @Override - public Mono compact() { - return null; - } - - @Override - public Mono flush() { - return null; - } - - @Override - public BufferAllocator getAllocator() { - return allocator; - } - 
- @Override - public MeterRegistry getMeterRegistry() { - return meterRegistry; - } - - @Override - public Mono preClose() { - return null; - } - - @Override - public Mono close() { - return sendRequest(new CloseDatabase(id)).then(); - } - - @Override - public String getDatabaseName() { - return databaseName; - } - - @Override - public Mono takeSnapshot() { - return null; - } - - @Override - public Mono releaseSnapshot(LLSnapshot snapshot) { - return null; - } - - @Override - public Mono pauseForBackup() { - return Mono.empty(); - } - - @Override - public Mono resumeAfterBackup() { - return Mono.empty(); - } - - @Override - public boolean isPaused() { - return false; - } - }); - } - - @Nullable - private static byte[] toArrayNoCopy(SingletonUpdateOldData oldData) { - if (oldData.exist()) { - return QuicUtils.toArrayNoCopy(oldData.oldValue()); - } else { - return null; - } - } - - @Override - public Mono getLuceneIndex(String clusterName, - LuceneIndexStructure indexStructure, - IndicizerAnalyzers indicizerAnalyzers, - IndicizerSimilarities indicizerSimilarities, - LuceneOptions luceneOptions, - @Nullable LuceneHacks luceneHacks) { - return sendRequest(new GetLuceneIndex(clusterName, indexStructure, indicizerAnalyzers, indicizerSimilarities, luceneOptions)) - .cast(GeneratedEntityId.class) - .map(GeneratedEntityId::id) - .map(id -> new LLLuceneIndex() { - - @Override - public String getLuceneIndexName() { - return clusterName; - } - - @Override - public Mono addDocument(LLTerm id, LLUpdateDocument doc) { - return null; - } - - @Override - public Mono addDocuments(boolean atomic, Flux> documents) { - return null; - } - - @Override - public Mono deleteDocument(LLTerm id) { - return null; - } - - @Override - public Mono update(LLTerm id, LLIndexRequest request) { - return null; - } - - @Override - public Mono updateDocuments(Flux> documents) { - return null; - } - - @Override - public Mono deleteAll() { - return null; - } - - @Override - public Flux moreLikeThis(@Nullable LLSnapshot snapshot, - QueryParams queryParams, - @Nullable String keyFieldName, - Multimap mltDocumentFields) { - return null; - } - - @Override - public Flux search(@Nullable LLSnapshot snapshot, - QueryParams queryParams, - @Nullable String keyFieldName) { - return null; - } - - @Override - public Mono computeBuckets(@Nullable LLSnapshot snapshot, - @NotNull List queries, - @Nullable Query normalizationQuery, - BucketParams bucketParams) { - return null; - } - - @Override - public boolean isLowMemoryMode() { - return false; - } - - @Override - public void close() { - sendRequest(new CloseLuceneIndex(id)).then().transform(LLUtils::handleDiscard).block(); - } - - @Override - public Mono flush() { - return null; - } - - @Override - public Mono waitForMerges() { - return null; - } - - @Override - public Mono waitForLastMerges() { - return null; - } - - @Override - public Mono refresh(boolean force) { - return null; - } - - @Override - public Mono takeSnapshot() { - return null; - } - - @Override - public Mono releaseSnapshot(LLSnapshot snapshot) { - return null; - } - - @Override - public Mono pauseForBackup() { - return null; - } - - @Override - public Mono resumeAfterBackup() { - return null; - } - - @Override - public boolean isPaused() { - return false; - } - }); - } - - @Override - public Mono disconnect() { - return sendDisconnect().then(Mono.fromRunnable(() -> quicConnection.dispose())).then(quicConnection.onDispose()); - } - - private Mono sendDisconnect() { - return Mono.empty(); - } -} diff --git 
a/src/main/java/it/cavallium/dbengine/database/remote/LLSnapshotSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/LLSnapshotSerializer.java index 841c047..f2e9f1f 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/LLSnapshotSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/LLSnapshotSerializer.java @@ -10,12 +10,12 @@ import org.jetbrains.annotations.NotNull; public class LLSnapshotSerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull LLSnapshot llSnapshot) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull LLSnapshot llSnapshot) { dataOutput.writeLong(llSnapshot.getSequenceNumber()); } @Override - public @NotNull LLSnapshot deserialize(DataInput dataInput) throws IOException { + public @NotNull LLSnapshot deserialize(DataInput dataInput) { return new LLSnapshot(dataInput.readLong()); } } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/LuceneHacksSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/LuceneHacksSerializer.java index 7940542..3b3aef0 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/LuceneHacksSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/LuceneHacksSerializer.java @@ -10,14 +10,14 @@ import org.jetbrains.annotations.NotNull; public class LuceneHacksSerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull LuceneHacks luceneHacks) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull LuceneHacks luceneHacks) { if (luceneHacks.customLocalSearcher() != null || luceneHacks.customMultiSearcher() != null) { throw new UnsupportedOperationException("Can't encode this type"); } } @Override - public @NotNull LuceneHacks deserialize(DataInput dataInput) throws IOException { + public @NotNull LuceneHacks deserialize(DataInput dataInput) { return new LuceneHacks(null, null); } } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/MappedStream.java b/src/main/java/it/cavallium/dbengine/database/remote/MappedStream.java deleted file mode 100644 index 32d26e8..0000000 --- a/src/main/java/it/cavallium/dbengine/database/remote/MappedStream.java +++ /dev/null @@ -1,48 +0,0 @@ -package it.cavallium.dbengine.database.remote; - -import io.netty.handler.codec.ByteToMessageCodec; -import java.util.function.Supplier; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.publisher.Sinks.Empty; -import reactor.netty.NettyOutbound; - -public class MappedStream implements AutoCloseable { - - private final Flux inConn; - private final NettyOutbound outConn; - private final Supplier> outCodec; - private final Empty streamTerminator; - - public MappedStream(NettyOutbound outConn, Supplier> outCodec, Flux inConn, Empty streamTerminator) { - this.inConn = inConn; - this.outConn = outConn; - this.outCodec = outCodec; - this.streamTerminator = streamTerminator; - } - - private NettyOutbound getOut() { - return outConn.withConnection(conn -> conn.addHandler(outCodec.get())); - } - - public Mono send(SEND item) { - return getOut().sendObject(item).then(); - } - - public Mono sendMany(Flux items) { - return getOut().sendObject(items).then(); - } - - public Mono receive() { - return inConn.take(1, true).singleOrEmpty(); - } - - public Flux receiveMany() { - return inConn.hide(); - } - - @Override - public void close() { - streamTerminator.tryEmitEmpty(); - } -} diff --git 
a/src/main/java/it/cavallium/dbengine/database/remote/PathSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/PathSerializer.java index e9c57ff..a2b672c 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/PathSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/PathSerializer.java @@ -10,12 +10,12 @@ import org.jetbrains.annotations.NotNull; public class PathSerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull Path path) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull Path path) { dataOutput.writeUTF(path.toString()); } @Override - public @NotNull Path deserialize(DataInput dataInput) throws IOException { + public @NotNull Path deserialize(DataInput dataInput) { return Path.of(dataInput.readUTF()); } } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/QuicUtils.java b/src/main/java/it/cavallium/dbengine/database/remote/QuicUtils.java deleted file mode 100644 index 964a85e..0000000 --- a/src/main/java/it/cavallium/dbengine/database/remote/QuicUtils.java +++ /dev/null @@ -1,251 +0,0 @@ -package it.cavallium.dbengine.database.remote; - -import io.netty.handler.codec.ByteToMessageCodec; -import io.netty5.buffer.Buffer; -import io.netty5.util.Send; -import it.cavallium.data.generator.nativedata.NullableString; -import it.cavallium.dbengine.database.OptionalBuf; -import it.cavallium.dbengine.rpc.current.data.RPCCrash; -import it.cavallium.dbengine.rpc.current.data.RPCEvent; -import it.cavallium.dbengine.rpc.current.data.nullables.NullableBytes; -import it.cavallium.dbengine.utils.InternalMonoUtils; -import it.unimi.dsi.fastutil.bytes.ByteArrayList; -import it.unimi.dsi.fastutil.bytes.ByteList; -import java.nio.charset.StandardCharsets; -import java.util.Optional; -import java.util.function.Function; -import java.util.function.Supplier; -import java.util.logging.Level; -import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; -import org.reactivestreams.Publisher; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.publisher.Sinks; -import reactor.core.publisher.Sinks.Empty; -import reactor.core.publisher.Sinks.One; -import reactor.netty.NettyInbound; -import reactor.netty.NettyOutbound; -import reactor.netty.incubator.quic.QuicConnection; - -public class QuicUtils { - - public static final Mono NO_RESPONSE_ERROR = Mono.error(NoResponseReceivedException::new); - - public static byte[] toArrayNoCopy(ByteList b) { - if (b instanceof ByteArrayList bal) { - return bal.elements(); - } else { - return b.toByteArray(); - } - } - - public static String toString(ByteList b) { - return new String(QuicUtils.toArrayNoCopy(b), StandardCharsets.UTF_8); - } - - public static NullableBytes toBytes(OptionalBuf valueSendOpt) { - if (valueSendOpt.isPresent()) { - try (var value = valueSendOpt.get()) { - var bytes = new byte[value.readableBytes()]; - value.copyInto(value.readerOffset(), bytes, 0, bytes.length); - return NullableBytes.ofNullable(ByteList.of(bytes)); - } - } else { - return NullableBytes.empty(); - } - } - - public static Mono toBytes(Mono valueSendOptMono) { - return valueSendOptMono.map(valueSendOpt -> { - try (var value = valueSendOpt) { - var bytes = new byte[value.readableBytes()]; - value.copyInto(value.readerOffset(), bytes, 0, bytes.length); - return NullableBytes.ofNullable(ByteList.of(bytes)); - } - }).defaultIfEmpty(NullableBytes.empty()); - } - - public record 
QuicStream(NettyInbound in, NettyOutbound out) {} - - public static Mono catchRPCErrors(@NotNull Throwable error) { - return Mono.just(new RPCCrash(500, NullableString.ofNullableBlank(error.getMessage()))); - } - - private static RECV extractResponse(SEND request, RECV response) { - return response; - } - - /** - * Create a general purpose QUIC stream - */ - public static Mono createStream(QuicConnection quicConnection, Mono streamTerminator) { - return Mono.defer(() -> { - One inOutSink = Sinks.one(); - return quicConnection - .createStream((in, out) -> Mono - .fromRunnable(() -> inOutSink.tryEmitValue(new QuicStream(in, out)).orThrow()) - .then(streamTerminator)) - .then(inOutSink.asMono()); - }); - } - - /** - * Send a single request, receive a single response - */ - @SuppressWarnings("unchecked") - public static Mono> createMappedStream( - @NotNull QuicConnection quicConnection, - @NotNull Supplier> sendCodec, - @Nullable Supplier> recvCodec) { - return Mono.defer(() -> { - Empty streamTerminator = Sinks.empty(); - return QuicUtils - .createStream(quicConnection, streamTerminator.asMono()) - .map(stream -> { - Flux inConn; - if (recvCodec == null) { - inConn = Flux.error(() -> new UnsupportedOperationException("Receiving responses is supported")); - } else { - inConn = Flux.defer(() -> (Flux) stream - .in() - .withConnection(conn -> conn.addHandler(recvCodec.get())) - .receiveObject() - .log("ClientBoundEvent", Level.FINEST) - ) - .publish(1) - .refCount(); - } - return new MappedStream<>(stream.out, sendCodec, inConn, streamTerminator); - }) - .single(); - }); - } - - /** - * Send a single request, receive a single response - */ - @SuppressWarnings("unchecked") - public static Mono sendSimpleRequest(QuicConnection quicConnection, - Supplier> sendCodec, - Supplier> recvCodec, - SEND req) { - return QuicUtils - .createMappedStream(quicConnection, sendCodec, recvCodec) - .flatMap(stream -> { - var recv = stream.receive().log("ClientBoundEvent", Level.FINEST); - var send = stream.send(req).log("ServerBoundEvent", Level.FINEST); - return send - .then(recv) - .doFinally(s -> stream.close()); - }) - .map(QuicUtils::mapErrors) - .switchIfEmpty((Mono) NO_RESPONSE_ERROR); - } - - /** - * Send a single request, receive a single response - */ - - public static Mono sendSimpleEvent(QuicConnection quicConnection, - Supplier> sendCodec, - SEND req) { - return QuicUtils - .createMappedStream(quicConnection, sendCodec, null) - .flatMap(stream -> { - var send = stream.send(req).log("ServerBoundEvent", Level.FINEST); - return send.doFinally(s -> stream.close()); - }) - .map(QuicUtils::mapErrors) - .then(); - } - - private static R mapErrors(R value) { - if (value instanceof RPCCrash crash) { - throw new RPCException(crash.code(), crash.message().getNullable()); - } else { - return value; - } - } - - /** - * Send n requests, receive n responses - */ - public static Flux sendSimpleRequestFlux(QuicConnection quicConnection, - Supplier> sendCodec, - Supplier> recvCodec, - Publisher requestFlux) { - return QuicUtils - .createMappedStream(quicConnection, sendCodec, recvCodec) - .flatMapMany(stream -> { - var sends = Flux - .from(requestFlux) - .log("ServerBoundEvent", Level.FINEST) - .concatMap(request -> stream.send(request) - .thenReturn(request)); - var receives = stream - .receiveMany() - .log("ClientBoundEvent", Level.FINEST); - return Flux - .zip(sends, receives, QuicUtils::extractResponse) - .doFinally(s -> stream.close()); - }) - .map(QuicUtils::mapErrors) - .log("ServerBoundEvent", Level.FINEST); - } 
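
The removed sendSimpleRequestFlux correlates each outbound request with an inbound event purely by position on the stream: the n-th send is zipped with the n-th receive. A minimal sketch of that pairing idea in plain reactor-core — the Req/Resp records and the literal fluxes are illustrative stand-ins, not the project's transport types:

import reactor.core.publisher.Flux;

public final class ZipPairingSketch {

	record Req(int id) {}
	record Resp(int id) {}

	public static void main(String[] args) {
		// Stand-ins for the stream's outbound requests and inbound events.
		Flux<Req> sends = Flux.just(new Req(1), new Req(2), new Req(3));
		Flux<Resp> receives = Flux.just(new Resp(1), new Resp(2), new Resp(3));

		// Positional pairing, mirroring Flux.zip(sends, receives, QuicUtils::extractResponse).
		// Zip matches elements strictly by arrival order, which is why the removed code
		// opened a dedicated QUIC stream per exchange instead of multiplexing replies.
		Flux.zip(sends, receives, (req, resp) -> "req " + req.id() + " -> resp " + resp.id())
				.doOnNext(System.out::println)
				.blockLast();
	}
}
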
- - /** - * Send update - */ - public static Mono sendUpdate(QuicConnection quicConnection, - Supplier> codec, - T request, - Function> updater) { - return QuicUtils - .createMappedStream(quicConnection, codec, codec) - .flatMapMany(stream -> { - //noinspection unchecked - var firstRequest = (Mono) stream - .send(request) - .then(); - var receives = stream - .receiveMany(); - One firstResponseSink = Sinks.one(); - //noinspection unchecked - var firstResponse = (Mono) receives - .elementAt(0) - .switchIfEmpty((Mono) NO_RESPONSE_ERROR) - .mapNotNull(value -> { - if (value instanceof RPCCrash crash) { - firstResponseSink.tryEmitEmpty(); - //noinspection unchecked - return (T) crash; - } else { - firstResponseSink.tryEmitValue(value); - return null; - } - }) - .doOnCancel(firstResponseSink::tryEmitEmpty); - //noinspection unchecked - var secondResponse = Mono - // FirstResponse returns only if it's RPCCrash. - // firstWithValue returns the crash first if it happens, otherwise it will - // return receives - .firstWithValue( - firstResponse, - receives.elementAt(1) - ) - .switchIfEmpty((Mono) NO_RESPONSE_ERROR); - //noinspection unchecked - var secondRequest = (Mono) firstResponseSink - .asMono() - .flatMap(updater) - .flatMap(stream::send); - return Flux - .merge(firstRequest, firstResponse.as(InternalMonoUtils::ignoreElements), secondRequest, secondResponse) - .doFinally(s -> stream.close()); - }) - .map(QuicUtils::mapErrors) - .singleOrEmpty(); - } -} diff --git a/src/main/java/it/cavallium/dbengine/database/remote/RPCCodecs.java b/src/main/java/it/cavallium/dbengine/database/remote/RPCCodecs.java index af71133..7bf4d54 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/RPCCodecs.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/RPCCodecs.java @@ -7,12 +7,7 @@ import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageCodec; import it.cavallium.dbengine.rpc.current.data.BoxedRPCEvent; -import it.cavallium.dbengine.rpc.current.data.ClientBoundRequest; -import it.cavallium.dbengine.rpc.current.data.ClientBoundResponse; -import it.cavallium.dbengine.rpc.current.IBaseType; import it.cavallium.dbengine.rpc.current.data.RPCEvent; -import it.cavallium.dbengine.rpc.current.data.ServerBoundRequest; -import it.cavallium.dbengine.rpc.current.data.ServerBoundResponse; import it.cavallium.dbengine.rpc.current.serializers.BoxedRPCEventSerializer; import java.io.DataInputStream; import java.io.DataOutputStream; diff --git a/src/main/java/it/cavallium/dbengine/database/remote/RocksDBSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/RocksDBSerializer.java index 872eeeb..1fa220d 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/RocksDBSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/RocksDBSerializer.java @@ -10,12 +10,12 @@ import org.rocksdb.RocksDB; public class RocksDBSerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull RocksDB rocksDB) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull RocksDB rocksDB) { throw new UnsupportedOperationException("Can't encode this type"); } @Override - public @NotNull RocksDB deserialize(DataInput dataInput) throws IOException { + public @NotNull RocksDB deserialize(DataInput dataInput) { throw new UnsupportedOperationException("Can't encode this type"); } } diff --git 
a/src/main/java/it/cavallium/dbengine/database/remote/String2ColumnFamilyHandleMapSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/String2ColumnFamilyHandleMapSerializer.java index af680f1..e53faac 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/String2ColumnFamilyHandleMapSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/String2ColumnFamilyHandleMapSerializer.java @@ -17,7 +17,7 @@ public class String2ColumnFamilyHandleMapSerializer implements DataSerializer deserialize(DataInput dataInput) throws IOException { + public @NotNull Map deserialize(DataInput dataInput) { throw new UnsupportedOperationException("Can't encode this type"); } } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/String2FieldAnalyzerMapSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/String2FieldAnalyzerMapSerializer.java index b9afa01..f547c10 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/String2FieldAnalyzerMapSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/String2FieldAnalyzerMapSerializer.java @@ -26,7 +26,7 @@ public class String2FieldAnalyzerMapSerializer implements DataSerializer deserialize(DataInput dataInput) throws IOException { + public @NotNull Map deserialize(DataInput dataInput) { var size = dataInput.readInt(); var result = new HashMap(size); for (int i = 0; i < size; i++) { diff --git a/src/main/java/it/cavallium/dbengine/database/remote/String2FieldSimilarityMapSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/String2FieldSimilarityMapSerializer.java index 0d40f98..64936dc 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/String2FieldSimilarityMapSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/String2FieldSimilarityMapSerializer.java @@ -26,7 +26,7 @@ public class String2FieldSimilarityMapSerializer implements DataSerializer deserialize(DataInput dataInput) throws IOException { + public @NotNull Map deserialize(DataInput dataInput) { var size = dataInput.readInt(); var result = new HashMap(size); for (int i = 0; i < size; i++) { diff --git a/src/main/java/it/cavallium/dbengine/database/remote/StringEntrySerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/StringEntrySerializer.java index b8aa3d6..a6bc70e 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/StringEntrySerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/StringEntrySerializer.java @@ -11,13 +11,13 @@ import org.jetbrains.annotations.NotNull; public class StringEntrySerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull Map.Entry entry) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull Map.Entry entry) { dataOutput.writeUTF((String) entry.getKey()); dataOutput.writeUTF((String) entry.getValue()); } @Override - public @NotNull Map.Entry deserialize(DataInput dataInput) throws IOException { + public @NotNull Map.Entry deserialize(DataInput dataInput) { return Map.entry(dataInput.readUTF(), dataInput.readUTF()); } } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/StringMapSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/StringMapSerializer.java index 377704d..41d0756 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/StringMapSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/StringMapSerializer.java @@ -1,7 +1,6 @@ package 
it.cavallium.dbengine.database.remote; import it.cavallium.data.generator.DataSerializer; -import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; @@ -24,7 +23,7 @@ public class StringMapSerializer implements DataSerializer> } @Override - public @NotNull Map deserialize(DataInput dataInput) throws IOException { + public @NotNull Map deserialize(DataInput dataInput) { var size = dataInput.readInt(); var result = new HashMap(size); for (int i = 0; i < size; i++) { diff --git a/src/main/java/it/cavallium/dbengine/database/remote/TextFieldsAnalyzerSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/TextFieldsAnalyzerSerializer.java index e564395..1b34483 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/TextFieldsAnalyzerSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/TextFieldsAnalyzerSerializer.java @@ -10,12 +10,12 @@ import org.jetbrains.annotations.NotNull; public class TextFieldsAnalyzerSerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull TextFieldsAnalyzer textFieldsAnalyzer) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull TextFieldsAnalyzer textFieldsAnalyzer) { dataOutput.writeInt(textFieldsAnalyzer.ordinal()); } @Override - public @NotNull TextFieldsAnalyzer deserialize(DataInput dataInput) throws IOException { + public @NotNull TextFieldsAnalyzer deserialize(DataInput dataInput) { return TextFieldsAnalyzer.values()[dataInput.readInt()]; } } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/TextFieldsSimilaritySerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/TextFieldsSimilaritySerializer.java index cf3ee48..4a0ee3c 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/TextFieldsSimilaritySerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/TextFieldsSimilaritySerializer.java @@ -10,12 +10,12 @@ import org.jetbrains.annotations.NotNull; public class TextFieldsSimilaritySerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull TextFieldsSimilarity textFieldsSimilarity) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull TextFieldsSimilarity textFieldsSimilarity) { dataOutput.writeInt(textFieldsSimilarity.ordinal()); } @Override - public @NotNull TextFieldsSimilarity deserialize(DataInput dataInput) throws IOException { + public @NotNull TextFieldsSimilarity deserialize(DataInput dataInput) { return TextFieldsSimilarity.values()[dataInput.readInt()]; } } diff --git a/src/main/java/it/cavallium/dbengine/database/remote/UpdateReturnModeSerializer.java b/src/main/java/it/cavallium/dbengine/database/remote/UpdateReturnModeSerializer.java index dc70f02..6048300 100644 --- a/src/main/java/it/cavallium/dbengine/database/remote/UpdateReturnModeSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/remote/UpdateReturnModeSerializer.java @@ -10,12 +10,12 @@ import org.jetbrains.annotations.NotNull; public class UpdateReturnModeSerializer implements DataSerializer { @Override - public void serialize(DataOutput dataOutput, @NotNull UpdateReturnMode updateReturnMode) throws IOException { + public void serialize(DataOutput dataOutput, @NotNull UpdateReturnMode updateReturnMode) { dataOutput.writeInt(updateReturnMode.ordinal()); } @Override - public @NotNull UpdateReturnMode deserialize(DataInput dataInput) throws IOException 
{ + public @NotNull UpdateReturnMode deserialize(DataInput dataInput) { return UpdateReturnMode.values()[dataInput.readInt()]; } } diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataInput.java b/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataInput.java deleted file mode 100644 index 03d8b6e..0000000 --- a/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataInput.java +++ /dev/null @@ -1,55 +0,0 @@ -package it.cavallium.dbengine.database.serialization; - -import java.io.DataInput; -import org.jetbrains.annotations.NotNull; - -public interface BufferDataInput extends DataInput { - - @Override - void readFully(byte @NotNull [] b); - - @Override - void readFully(byte @NotNull [] b, int off, int len); - - @Override - int skipBytes(int n); - - @Override - boolean readBoolean(); - - @Override - byte readByte(); - - @Override - int readUnsignedByte(); - - @Override - short readShort(); - - @Override - int readUnsignedShort(); - - @Override - char readChar(); - - @Override - int readInt(); - - @Override - long readLong(); - - @Override - float readFloat(); - - @Override - double readDouble(); - - @Override - String readLine(); - - @NotNull - @Override - String readUTF(); - - int getReadBytesCount(); -} diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataInputOwned.java b/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataInputOwned.java deleted file mode 100644 index d095528..0000000 --- a/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataInputOwned.java +++ /dev/null @@ -1,143 +0,0 @@ -package it.cavallium.dbengine.database.serialization; - -import io.netty5.buffer.Buffer; -import io.netty5.util.Send; -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import java.io.DataInput; -import java.nio.charset.StandardCharsets; -import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; - -public class BufferDataInputOwned implements DiscardingCloseable, BufferDataInput { - - @Nullable - private final Buffer buf; - private final int initialReaderOffset; - - public BufferDataInputOwned(@Nullable Send bufferSend) { - this.buf = bufferSend == null ? null : bufferSend.receive().makeReadOnly(); - this.initialReaderOffset = buf == null ? 
0 : buf.readerOffset(); - } - - @Override - public void readFully(byte @NotNull [] b) { - this.readFully(b, 0, b.length); - } - - @Override - public void readFully(byte @NotNull [] b, int off, int len) { - if (buf == null) { - if (len != 0) { - throw new IndexOutOfBoundsException(); - } - } else { - buf.copyInto(buf.readerOffset(), b, off, len); - buf.readerOffset(buf.readerOffset() + len); - } - } - - @Override - public int skipBytes(int n) { - if (buf == null) { - if (n != 0) { - throw new IndexOutOfBoundsException(); - } - return 0; - } else { - n = Math.min(n, buf.readerOffset() - buf.writerOffset()); - buf.readerOffset(buf.readerOffset() + n); - return n; - } - } - - @Override - public boolean readBoolean() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readUnsignedByte() != 0; - } - - @Override - public byte readByte() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readByte(); - } - - @Override - public int readUnsignedByte() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readUnsignedByte(); - } - - @Override - public short readShort() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readShort(); - } - - @Override - public int readUnsignedShort() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readUnsignedShort(); - } - - @Override - public char readChar() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readChar(); - } - - @Override - public int readInt() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readInt(); - } - - @Override - public long readLong() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readLong(); - } - - @Override - public float readFloat() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readFloat(); - } - - @Override - public double readDouble() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readDouble(); - } - - @Override - public String readLine() { - if (buf == null) throw new IndexOutOfBoundsException(); - throw new UnsupportedOperationException(); - } - - @NotNull - @Override - public String readUTF() { - if (buf == null) throw new IndexOutOfBoundsException(); - int len = buf.readUnsignedShort(); - return buf.readCharSequence(len, StandardCharsets.UTF_8).toString(); - } - - @Override - public void close() { - if (buf != null) { - buf.close(); - } - } - - @Override - public int getReadBytesCount() { - if (buf == null) { - return 0; - } else { - return buf.readerOffset() - initialReaderOffset; - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataInputShared.java b/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataInputShared.java deleted file mode 100644 index 7fb19ce..0000000 --- a/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataInputShared.java +++ /dev/null @@ -1,137 +0,0 @@ -package it.cavallium.dbengine.database.serialization; - -import io.netty5.buffer.Buffer; -import io.netty5.util.Send; -import it.cavallium.dbengine.database.SafeCloseable; -import java.io.DataInput; -import java.lang.invoke.MethodHandle; -import java.lang.invoke.MethodType; -import java.nio.charset.StandardCharsets; -import java.util.concurrent.atomic.AtomicReference; -import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; - -public class BufferDataInputShared implements BufferDataInput { - - @Nullable - private final Buffer buf; 
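- // A null buf is treated as an empty input: zero-length readFully and skipBytes
- // succeed, while every other read throws IndexOutOfBoundsException.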
- private final int initialReaderOffset; - - public BufferDataInputShared(@Nullable Buffer buffer) { - this.buf = buffer; - this.initialReaderOffset = buf == null ? 0 : buf.readerOffset(); - } - - @Override - public void readFully(byte @NotNull [] b) { - this.readFully(b, 0, b.length); - } - - @Override - public void readFully(byte @NotNull [] b, int off, int len) { - if (buf == null) { - if (len != 0) { - throw new IndexOutOfBoundsException(); - } - } else { - buf.copyInto(buf.readerOffset(), b, off, len); - buf.readerOffset(buf.readerOffset() + len); - } - } - - @Override - public int skipBytes(int n) { - if (buf == null) { - if (n != 0) { - throw new IndexOutOfBoundsException(); - } - return 0; - } else { - n = Math.min(n, buf.readerOffset() - buf.writerOffset()); - buf.readerOffset(buf.readerOffset() + n); - return n; - } - } - - @Override - public boolean readBoolean() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readUnsignedByte() != 0; - } - - @Override - public byte readByte() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readByte(); - } - - @Override - public int readUnsignedByte() {/* if (StackWalker.getInstance().walk(s -> s.limit(16).anyMatch(frame -> frame.getMethodName().contains("updateAndGetDelta")))) {throw new TempException();}*/ - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readUnsignedByte(); - } - - @Override - public short readShort() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readShort(); - } - - @Override - public int readUnsignedShort() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readUnsignedShort(); - } - - @Override - public char readChar() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readChar(); - } - - @Override - public int readInt() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readInt(); - } - - @Override - public long readLong() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readLong(); - } - - @Override - public float readFloat() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readFloat(); - } - - @Override - public double readDouble() { - if (buf == null) throw new IndexOutOfBoundsException(); - return buf.readDouble(); - } - - @Override - public String readLine() { - if (buf == null) throw new IndexOutOfBoundsException(); - throw new UnsupportedOperationException(); - } - - @NotNull - @Override - public String readUTF() { - if (buf == null) throw new IndexOutOfBoundsException(); - int len = buf.readUnsignedShort(); - return buf.readCharSequence(len, StandardCharsets.UTF_8).toString(); - } - - public int getReadBytesCount() { - if (buf == null) { - return 0; - } else { - return buf.readerOffset() - initialReaderOffset; - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataOutput.java b/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataOutput.java deleted file mode 100644 index 16b46e9..0000000 --- a/src/main/java/it/cavallium/dbengine/database/serialization/BufferDataOutput.java +++ /dev/null @@ -1,121 +0,0 @@ -package it.cavallium.dbengine.database.serialization; - -import io.netty5.buffer.Buffer; -import java.io.DataOutput; -import java.nio.charset.StandardCharsets; -import org.jetbrains.annotations.NotNull; - -public class BufferDataOutput implements DataOutput { - - private final Buffer buf; - - public BufferDataOutput(Buffer bufferSend) { - this.buf = 
bufferSend; - } - - @Override - public void write(int b) { - buf.ensureWritable(Integer.BYTES); - buf.writeUnsignedByte(b); - } - - @Override - public void write(byte @NotNull [] b) { - buf.ensureWritable(Byte.BYTES * b.length); - buf.writeBytes(b); - } - - @Override - public void write(byte @NotNull [] b, int off, int len) { - buf.ensureWritable(len); - buf.writeBytes(b, off, len); - } - - @Override - public void writeBoolean(boolean v) { - buf.ensureWritable(Byte.BYTES); - buf.writeUnsignedByte(v ? 1 : 0); - } - - @Override - public void writeByte(int v) { - buf.ensureWritable(Byte.BYTES); - buf.writeByte((byte) v); - } - - @Override - public void writeShort(int v) { - buf.ensureWritable(Short.BYTES); - buf.writeShort((short) v); - } - - @Override - public void writeChar(int v) { - buf.ensureWritable(Character.BYTES); - buf.writeChar((char) v); - } - - @Override - public void writeInt(int v) { - buf.ensureWritable(Integer.BYTES); - buf.writeInt(v); - } - - @Override - public void writeLong(long v) { - buf.ensureWritable(Long.BYTES); - buf.writeLong(v); - } - - @Override - public void writeFloat(float v) { - buf.ensureWritable(Float.BYTES); - buf.writeFloat(v); - } - - @Override - public void writeDouble(double v) { - buf.ensureWritable(Double.BYTES); - buf.writeDouble(v); - } - - @Override - public void writeBytes(@NotNull String s) { - var b= s.getBytes(StandardCharsets.UTF_8); - buf.ensureWritable(Byte.BYTES * b.length); - buf.writeBytes(b); - } - - @Override - public void writeChars(@NotNull String s) { - var chars = s.toCharArray(); - buf.ensureWritable(Character.BYTES * chars.length); - for (char c : chars) { - buf.writeChar(c); - } - } - - @Override - public void writeUTF(@NotNull String s) { - int sizeShortOffset = buf.writerOffset(); - buf.ensureWritable(Short.BYTES + 1); - int stringOffset = sizeShortOffset + Short.BYTES; - buf.writerOffset(stringOffset); - // todo: replace with writeCharSequence when it will be optimized in netty 5 - { - byte[] bytes = s.getBytes(StandardCharsets.UTF_8); - buf.ensureWritable(bytes.length); - buf.writeBytes(bytes); - } - int endOffset = buf.writerOffset(); - int stringSize = endOffset - stringOffset; - buf.writerOffset(sizeShortOffset); - buf.writeUnsignedShort(stringSize); - if (stringSize > (1 << 16) - 1) { - buf.writerOffset(sizeShortOffset); - throw new IndexOutOfBoundsException("String too large: " + stringSize); - } else { - buf.writerOffset(endOffset); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/Codec.java b/src/main/java/it/cavallium/dbengine/database/serialization/Codec.java index 0b51c48..bc1521d 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/Codec.java +++ b/src/main/java/it/cavallium/dbengine/database/serialization/Codec.java @@ -1,11 +1,13 @@ package it.cavallium.dbengine.database.serialization; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import java.io.IOException; import org.jetbrains.annotations.NotNull; public interface Codec { - @NotNull A deserialize(@NotNull BufferDataInput serialized) throws IOException; + @NotNull A deserialize(@NotNull BufDataInput serialized); - void serialize(@NotNull BufferDataOutput outputStream, @NotNull A deserialized) throws IOException; + void serialize(@NotNull BufDataOutput outputStream, @NotNull A deserialized); } diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java 
b/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java index 7d288a0..25b5583 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java +++ b/src/main/java/it/cavallium/dbengine/database/serialization/CodecSerializer.java @@ -1,12 +1,10 @@ package it.cavallium.dbengine.database.serialization; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.util.Send; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import java.io.IOError; import java.io.IOException; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; public class CodecSerializer implements Serializer { @@ -47,9 +45,8 @@ public class CodecSerializer implements Serializer { } @Override - public @NotNull A deserialize(@NotNull Buffer serializedBuf) throws SerializationException { + public @NotNull A deserialize(@NotNull BufDataInput is) throws SerializationException { try { - var is = new BufferDataInputShared(serializedBuf); int codecId; if (microCodecs) { codecId = is.readUnsignedByte(); @@ -65,9 +62,8 @@ public class CodecSerializer implements Serializer { } @Override - public void serialize(@NotNull A deserialized, Buffer output) throws SerializationException { + public void serialize(@NotNull A deserialized, BufDataOutput os) throws SerializationException { try { - var os = new BufferDataOutput(output); if (microCodecs) { os.writeByte(serializationCodecId); } else { diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/SerializationException.java b/src/main/java/it/cavallium/dbengine/database/serialization/SerializationException.java index 5f97d64..e339999 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/SerializationException.java +++ b/src/main/java/it/cavallium/dbengine/database/serialization/SerializationException.java @@ -1,7 +1,5 @@ package it.cavallium.dbengine.database.serialization; -import java.io.IOException; - public class SerializationException extends RuntimeException { public SerializationException() { diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/SerializationFunction.java b/src/main/java/it/cavallium/dbengine/database/serialization/SerializationFunction.java index 30f258f..79c3a92 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/SerializationFunction.java +++ b/src/main/java/it/cavallium/dbengine/database/serialization/SerializationFunction.java @@ -1,7 +1,5 @@ package it.cavallium.dbengine.database.serialization; -import org.jetbrains.annotations.NotNull; - @FunctionalInterface public interface SerializationFunction { diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java b/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java index fab959b..c8dd9d5 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java +++ b/src/main/java/it/cavallium/dbengine/database/serialization/Serializer.java @@ -1,43 +1,31 @@ package it.cavallium.dbengine.database.serialization; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.util.Send; -import io.netty5.util.internal.StringUtil; -import it.cavallium.dbengine.database.LLUtils; +import it.cavallium.dbengine.buffers.Buf; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import java.nio.charset.StandardCharsets; -import 
java.util.Objects; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; public interface Serializer { - /** - * - * @param serialized the serialized data should be split! - */ - @NotNull A deserialize(@NotNull Buffer serialized) throws SerializationException; + @NotNull A deserialize(@NotNull BufDataInput in) throws SerializationException; - /** - * @param output its writable size will be at least equal to the size hint - */ - void serialize(@NotNull A deserialized, Buffer output) throws SerializationException; + void serialize(@NotNull A deserialized, BufDataOutput out) throws SerializationException; /** * @return suggested default buffer size, -1 if unknown */ int getSerializedSizeHint(); - Serializer NOOP_SERIALIZER = new Serializer<>() { + Serializer NOOP_SERIALIZER = new Serializer<>() { @Override - public @NotNull Buffer deserialize(@NotNull Buffer serialized) { - return serialized.split(); + public @NotNull Buf deserialize(@NotNull BufDataInput in) { + return Buf.wrap(in.readAllBytes()); } @Override - public void serialize(@NotNull Buffer deserialized, @NotNull Buffer deserializedToReceive) { - deserializedToReceive.ensureWritable(deserialized.readableBytes()); - deserializedToReceive.writeBytes(deserialized); + public void serialize(@NotNull Buf deserialized, BufDataOutput out) { + out.writeBytes(deserialized); } @Override @@ -46,43 +34,21 @@ public interface Serializer { } }; - Serializer> NOOP_SEND_SERIALIZER = new Serializer<>() { - @Override - public @NotNull Send deserialize(@NotNull Buffer serialized) { - return serialized.split().send(); - } - - @Override - public void serialize(@NotNull Send deserialized, @NotNull Buffer deserializedToReceive) { - try (var received = deserialized.receive()) { - deserializedToReceive.ensureWritable(received.readableBytes()); - deserializedToReceive.writeBytes(received); - } - } - - @Override - public int getSerializedSizeHint() { - return -1; - } - }; - - Serializer UTF8_SERIALIZER = new Serializer<>() { - @Override - public @NotNull String deserialize(@NotNull Buffer serialized) { - assert serialized.isAccessible(); - int length = serialized.readInt(); - try (var strBuf = serialized.readSplit(length)) { - return LLUtils.deserializeString(strBuf, strBuf.readerOffset(), length, StandardCharsets.UTF_8); - } - } - @Override - public void serialize(@NotNull String deserialized, Buffer output) { - var bytes = deserialized.getBytes(StandardCharsets.UTF_8); - output.ensureWritable(Integer.BYTES + bytes.length); - output.writeInt(bytes.length); - output.writeBytes(bytes); + @Override + public @NotNull String deserialize(@NotNull BufDataInput in) throws SerializationException { + int length = in.readInt(); + var bytes = in.readNBytes(length); + return new String(bytes, StandardCharsets.UTF_8); + } + + @Override + public void serialize(@NotNull String deserialized, BufDataOutput out) throws SerializationException { + var bytes = deserialized.getBytes(StandardCharsets.UTF_8); + out.ensureWritable(Integer.BYTES + bytes.length); + out.writeInt(bytes.length); + out.write(bytes); } @Override diff --git a/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java b/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java index d9f2e3d..e9fbde6 100644 --- a/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java +++ b/src/main/java/it/cavallium/dbengine/database/serialization/SerializerFixedBinaryLength.java @@ -1,13 +1,11 
@@ package it.cavallium.dbengine.database.serialization; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.util.Send; -import it.cavallium.dbengine.database.LLUtils; +import it.cavallium.dbengine.buffers.Buf; +import it.cavallium.dbengine.buffers.BufDataInput; +import it.cavallium.dbengine.buffers.BufDataOutput; import java.nio.charset.StandardCharsets; import java.util.Objects; import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; @SuppressWarnings("unused") public interface SerializerFixedBinaryLength extends Serializer { @@ -19,29 +17,27 @@ public interface SerializerFixedBinaryLength extends Serializer { return getSerializedBinaryLength(); } - static SerializerFixedBinaryLength noop(int length) { + static SerializerFixedBinaryLength noop(int length) { return new SerializerFixedBinaryLength<>() { @Override - public @NotNull Buffer deserialize(@NotNull Buffer serialized) { - Objects.requireNonNull(serialized); - if (serialized.readableBytes() < getSerializedBinaryLength()) { + public @NotNull Buf deserialize(@NotNull BufDataInput in) throws SerializationException { + Objects.requireNonNull(in); + if (in.available() < getSerializedBinaryLength()) { throw new IllegalArgumentException( "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with " - + serialized.readableBytes() + " bytes instead"); + + in.available() + " bytes instead"); } - return serialized.readSplit(getSerializedBinaryLength()); + return Buf.wrap(in.readNBytes(getSerializedBinaryLength())); } @Override - public void serialize(@NotNull Buffer deserialized, Buffer output) { - try (deserialized) { - if (deserialized.readableBytes() != getSerializedBinaryLength()) { - throw new IllegalArgumentException( - "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to serialize an element with " - + deserialized.readableBytes() + " bytes instead"); - } - output.writeBytes(deserialized); + public void serialize(@NotNull Buf deserialized, BufDataOutput out) throws SerializationException { + if (deserialized.size() != getSerializedBinaryLength()) { + throw new IllegalArgumentException( + "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to serialize an element with " + + deserialized.size() + " bytes instead"); } + out.writeBytes(deserialized); } @Override @@ -55,28 +51,25 @@ public interface SerializerFixedBinaryLength extends Serializer { return new SerializerFixedBinaryLength<>() { @Override - public @NotNull String deserialize(@NotNull Buffer serialized) throws SerializationException { - if (serialized.readableBytes() < getSerializedBinaryLength()) { + public @NotNull String deserialize(@NotNull BufDataInput in) throws SerializationException { + if (in.available() < getSerializedBinaryLength()) { throw new SerializationException( "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with " - + serialized.readableBytes() + " bytes instead"); + + in.available() + " bytes instead"); } - var readerOffset = serialized.readerOffset(); - return LLUtils.deserializeString(serialized.send(), readerOffset, length, StandardCharsets.UTF_8); + return new String(in.readNBytes(length), StandardCharsets.UTF_8); } @Override - public void serialize(@NotNull String deserialized, Buffer output) throws SerializationException { - assert output.isAccessible(); + public void serialize(@NotNull String deserialized, BufDataOutput out) throws 
SerializationException { var bytes = deserialized.getBytes(StandardCharsets.UTF_8); - output.ensureWritable(bytes.length); - output.writeBytes(bytes); - if (output.readableBytes() < getSerializedBinaryLength()) { + out.ensureWritable(bytes.length); + out.write(bytes); + if (bytes.length < getSerializedBinaryLength()) { throw new SerializationException("Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to serialize an element with " - + output.readableBytes() + " bytes instead"); + + bytes.length + " bytes instead"); } - assert output.isAccessible(); } @Override @@ -86,23 +79,23 @@ public interface SerializerFixedBinaryLength extends Serializer { }; } - static SerializerFixedBinaryLength intSerializer(BufferAllocator allocator) { + static SerializerFixedBinaryLength intSerializer() { return new SerializerFixedBinaryLength<>() { @Override - public @NotNull Integer deserialize(@NotNull Buffer serialized) { - Objects.requireNonNull(serialized); - if (serialized.readableBytes() < getSerializedBinaryLength()) { + public @NotNull Integer deserialize(@NotNull BufDataInput in) throws SerializationException { + Objects.requireNonNull(in); + if (in.available() < getSerializedBinaryLength()) { throw new IllegalArgumentException( "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with " - + serialized.readableBytes() + " bytes instead"); + + in.available() + " bytes instead"); } - return serialized.readInt(); + return in.readInt(); } @Override - public void serialize(@NotNull Integer deserialized, Buffer output) { - output.writeInt(deserialized); + public void serialize(@NotNull Integer deserialized, BufDataOutput out) throws SerializationException { + out.writeInt(deserialized); } @Override @@ -112,23 +105,23 @@ public interface SerializerFixedBinaryLength extends Serializer { }; } - static SerializerFixedBinaryLength longSerializer(BufferAllocator allocator) { + static SerializerFixedBinaryLength longSerializer() { return new SerializerFixedBinaryLength<>() { @Override - public @NotNull Long deserialize(@NotNull Buffer serialized) { - Objects.requireNonNull(serialized); - if (serialized.readableBytes() < getSerializedBinaryLength()) { + public @NotNull Long deserialize(@NotNull BufDataInput in) throws SerializationException { + Objects.requireNonNull(in); + if (in.available() < getSerializedBinaryLength()) { throw new IllegalArgumentException( "Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with " - + serialized.readableBytes() + " bytes instead"); + + in.available() + " bytes instead"); } - return serialized.readLong(); + return in.readLong(); } @Override - public void serialize(@NotNull Long deserialized, Buffer output) { - output.writeLong(deserialized); + public void serialize(@NotNull Long deserialized, BufDataOutput out) throws SerializationException { + out.writeLong(deserialized); } @Override diff --git a/src/main/java/it/cavallium/dbengine/lucene/AlwaysDirectIOFSDirectory.java b/src/main/java/it/cavallium/dbengine/lucene/AlwaysDirectIOFSDirectory.java index e6bb6ee..bb0102c 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/AlwaysDirectIOFSDirectory.java +++ b/src/main/java/it/cavallium/dbengine/lucene/AlwaysDirectIOFSDirectory.java @@ -9,11 +9,11 @@ import org.apache.lucene.store.IOContext; public class AlwaysDirectIOFSDirectory extends DirectIODirectory { - public AlwaysDirectIOFSDirectory(Path path, int mergeBufferSize, long minBytesDirect) throws IOException 
{ + public AlwaysDirectIOFSDirectory(Path path, int mergeBufferSize, long minBytesDirect) { super(FSDirectory.open(path), mergeBufferSize, minBytesDirect); } - public AlwaysDirectIOFSDirectory(Path path) throws IOException { + public AlwaysDirectIOFSDirectory(Path path) { super(FSDirectory.open(path)); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/ByteArrayCodec.java b/src/main/java/it/cavallium/dbengine/lucene/ByteArrayCodec.java deleted file mode 100644 index 15e09fd..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/ByteArrayCodec.java +++ /dev/null @@ -1,28 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import java.util.function.Function; - -public class ByteArrayCodec implements HugePqCodec { - - @Override - public Buffer serialize(Function allocator, byte[] data) { - var buf = allocator.apply(data.length + Integer.BYTES); - buf.writeInt(data.length); - buf.writeBytes(data); - return buf; - } - - @Override - public byte[] deserialize(Buffer b) { - var length = b.readInt(); - byte[] data = new byte[length]; - b.readBytes(data, 0, length); - return data; - } - - @Override - public byte[] clone(byte[] obj) { - return obj.clone(); - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/BytesRefCodec.java b/src/main/java/it/cavallium/dbengine/lucene/BytesRefCodec.java deleted file mode 100644 index 0205304..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/BytesRefCodec.java +++ /dev/null @@ -1,29 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import java.util.function.Function; -import org.apache.lucene.util.BytesRef; - -public class BytesRefCodec implements HugePqCodec { - - @Override - public Buffer serialize(Function allocator, BytesRef data) { - var buf = allocator.apply(data.length + Integer.BYTES); - buf.writeInt(data.length); - buf.writeBytes(data.bytes, data.offset, data.length); - return buf; - } - - @Override - public BytesRef deserialize(Buffer b) { - var length = b.readInt(); - var bytes = new byte[length]; - b.readBytes(bytes, 0, length); - return new BytesRef(bytes, 0, length); - } - - @Override - public BytesRef clone(BytesRef obj) { - return obj.clone(); - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/CheckIndexInput.java b/src/main/java/it/cavallium/dbengine/lucene/CheckIndexInput.java index 65bc719..b5f4d3a 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/CheckIndexInput.java +++ b/src/main/java/it/cavallium/dbengine/lucene/CheckIndexInput.java @@ -4,7 +4,6 @@ import static it.cavallium.dbengine.lucene.LuceneUtils.warnLuceneThread; import java.io.IOException; import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.IndexOutput; import org.apache.lucene.store.RandomAccessInput; public class CheckIndexInput extends IndexInput { diff --git a/src/main/java/it/cavallium/dbengine/lucene/CheckIndexOutput.java b/src/main/java/it/cavallium/dbengine/lucene/CheckIndexOutput.java index 65cdad3..ba67620 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/CheckIndexOutput.java +++ b/src/main/java/it/cavallium/dbengine/lucene/CheckIndexOutput.java @@ -19,7 +19,7 @@ public class CheckIndexOutput extends IndexOutput { } @Override - public void close() throws IOException { + public void close() { warnLuceneThread(); output.close(); } @@ -31,19 +31,19 @@ public class CheckIndexOutput extends IndexOutput { } @Override - public long getChecksum() throws IOException { + public long getChecksum() { checkThread(); return output.getChecksum(); } 
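
Both hunks above drop throws IOException from overriding methods. This is the language mechanism the whole migration leans on: a Java override may declare fewer checked exceptions than the method it overrides, so callers typed against the Lucene supertype still compile, while the implementations surface failures unchecked (for example as the DBException rethrow in the DirectNIOFSDirectory hunk below). A self-contained illustration with hypothetical types, not from the project:

import java.io.IOException;

interface Sink {
	// The supertype may declare a checked exception...
	void write(byte[] data) throws IOException;
}

final class UncheckedSink implements Sink {
	// ...and the override may legally omit it, reporting failures unchecked instead.
	@Override
	public void write(byte[] data) {
		if (data == null) {
			throw new IllegalStateException("write failed");
		}
	}
}
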
@Override - public void writeByte(byte b) throws IOException { + public void writeByte(byte b) { checkThread(); output.writeByte(b); } @Override - public void writeBytes(byte[] b, int offset, int length) throws IOException { + public void writeBytes(byte[] b, int offset, int length) { checkThread(); output.writeBytes(b, offset, length); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/CheckOutputDirectory.java b/src/main/java/it/cavallium/dbengine/lucene/CheckOutputDirectory.java index b835ef8..b958d97 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/CheckOutputDirectory.java +++ b/src/main/java/it/cavallium/dbengine/lucene/CheckOutputDirectory.java @@ -20,70 +20,70 @@ public class CheckOutputDirectory extends Directory { } @Override - public String[] listAll() throws IOException { + public String[] listAll() { return directory.listAll(); } @Override - public void deleteFile(String name) throws IOException { + public void deleteFile(String name) { directory.deleteFile(name); } @Override - public long fileLength(String name) throws IOException { + public long fileLength(String name) { return directory.fileLength(name); } @Override - public IndexOutput createOutput(String name, IOContext context) throws IOException { + public IndexOutput createOutput(String name, IOContext context) { LuceneUtils.checkLuceneThread(); return new CheckIndexOutput(directory.createOutput(name, context)); } @Override - public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) throws IOException { + public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) { LuceneUtils.checkLuceneThread(); return new CheckIndexOutput(directory.createTempOutput(prefix, suffix, context)); } @Override - public void sync(Collection names) throws IOException { + public void sync(Collection names) { LuceneUtils.checkLuceneThread(); directory.sync(names); } @Override - public void syncMetaData() throws IOException { + public void syncMetaData() { LuceneUtils.checkLuceneThread(); directory.syncMetaData(); } @Override - public void rename(String source, String dest) throws IOException { + public void rename(String source, String dest) { LuceneUtils.checkLuceneThread(); directory.rename(source, dest); } @Override - public IndexInput openInput(String name, IOContext context) throws IOException { + public IndexInput openInput(String name, IOContext context) { LuceneUtils.checkLuceneThread(); return new CheckIndexInput(directory.openInput(name, context)); } @Override - public Lock obtainLock(String name) throws IOException { + public Lock obtainLock(String name) { LuceneUtils.checkLuceneThread(); return directory.obtainLock(name); } @Override - public void close() throws IOException { + public void close() { warnLuceneThread(); directory.close(); } @Override - public Set getPendingDeletions() throws IOException { + public Set getPendingDeletions() { return directory.getPendingDeletions(); } } diff --git a/src/main/java/it/cavallium/dbengine/lucene/CloseableIterable.java b/src/main/java/it/cavallium/dbengine/lucene/CloseableIterable.java index 7a6b67a..133ac92 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/CloseableIterable.java +++ b/src/main/java/it/cavallium/dbengine/lucene/CloseableIterable.java @@ -1,9 +1,6 @@ package it.cavallium.dbengine.lucene; import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import java.io.Closeable; -import java.io.IOException; import java.util.Iterator; import 
org.jetbrains.annotations.NotNull; diff --git a/src/main/java/it/cavallium/dbengine/lucene/DirectNIOFSDirectory.java b/src/main/java/it/cavallium/dbengine/lucene/DirectNIOFSDirectory.java index 2e707eb..5181507 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/DirectNIOFSDirectory.java +++ b/src/main/java/it/cavallium/dbengine/lucene/DirectNIOFSDirectory.java @@ -3,13 +3,7 @@ package it.cavallium.dbengine.lucene; import static it.cavallium.dbengine.lucene.LuceneUtils.alignUnsigned; import static it.cavallium.dbengine.lucene.LuceneUtils.readInternalAligned; -import com.sun.nio.file.ExtendedOpenOption; -import java.nio.file.OpenOption; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import org.apache.lucene.store.FSDirectory; -import org.apache.lucene.store.LockFactory; - +import it.cavallium.dbengine.utils.DBException; import java.io.Closeable; import java.io.EOFException; import java.io.IOException; @@ -98,7 +92,7 @@ public class DirectNIOFSDirectory extends FSDirectory { return clone; } - public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + public IndexInput slice(String sliceDescription, long offset, long length) { if (offset >= 0L && length >= 0L && offset + length <= this.length()) { return new DirectNIOFSDirectory.NIOFSIndexInput(this.getFullSliceDescription(sliceDescription), this.channel, this.off + offset, length, this.getBufferSize()); } else { @@ -110,7 +104,7 @@ public class DirectNIOFSDirectory extends FSDirectory { return this.end - this.off; } - protected void readInternal(ByteBuffer b) throws IOException { + protected void readInternal(ByteBuffer b) throws EOFException { long pos = this.getFilePointer() + this.off; if (pos + (long)b.remaining() > this.end) { throw new EOFException("read past EOF: " + this); @@ -136,11 +130,11 @@ public class DirectNIOFSDirectory extends FSDirectory { b.limit(b.position()); } } catch (IOException var7) { - throw new IOException(var7.getMessage() + ": " + this, var7); + throw new DBException(var7.getMessage() + ": " + this, var7); } } - protected void seekInternal(long pos) throws IOException { + protected void seekInternal(long pos) throws EOFException { if (pos > this.length()) { throw new EOFException("read past EOF: pos=" + pos + " vs length=" + this.length() + ": " + this); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/DocumentStoredSingleFieldVisitor.java b/src/main/java/it/cavallium/dbengine/lucene/DocumentStoredSingleFieldVisitor.java index efecf86..41eb8ce 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/DocumentStoredSingleFieldVisitor.java +++ b/src/main/java/it/cavallium/dbengine/lucene/DocumentStoredSingleFieldVisitor.java @@ -1,16 +1,13 @@ package it.cavallium.dbengine.lucene; import java.io.IOException; -import java.util.HashSet; import java.util.Objects; -import java.util.Set; import org.apache.lucene.document.Document; import org.apache.lucene.document.FieldType; import org.apache.lucene.document.StoredField; import org.apache.lucene.document.TextField; import org.apache.lucene.index.FieldInfo; import org.apache.lucene.index.StoredFieldVisitor; -import org.apache.lucene.index.StoredFieldVisitor.Status; public class DocumentStoredSingleFieldVisitor extends StoredFieldVisitor { private final Document doc = new Document(); @@ -24,11 +21,11 @@ public class DocumentStoredSingleFieldVisitor extends StoredFieldVisitor { this.fieldToAdd = null; } - public void binaryField(FieldInfo fieldInfo, byte[] value) throws IOException { + public void 
binaryField(FieldInfo fieldInfo, byte[] value) { this.doc.add(new StoredField(fieldInfo.name, value)); } - public void stringField(FieldInfo fieldInfo, String value) throws IOException { + public void stringField(FieldInfo fieldInfo, String value) { FieldType ft = new FieldType(TextField.TYPE_STORED); ft.setStoreTermVectors(fieldInfo.hasVectors()); ft.setOmitNorms(fieldInfo.omitsNorms()); diff --git a/src/main/java/it/cavallium/dbengine/lucene/DoubleCodec.java b/src/main/java/it/cavallium/dbengine/lucene/DoubleCodec.java deleted file mode 100644 index 4a7a400..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/DoubleCodec.java +++ /dev/null @@ -1,17 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import java.util.function.Function; - -public class DoubleCodec implements HugePqCodec { - - @Override - public Buffer serialize(Function allocator, Double data) { - return allocator.apply(Double.BYTES).writeDouble(data); - } - - @Override - public Double deserialize(Buffer b) { - return b.readDouble(); - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/EmptyPriorityQueue.java b/src/main/java/it/cavallium/dbengine/lucene/EmptyPriorityQueue.java index 5875f3c..49aa750 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/EmptyPriorityQueue.java +++ b/src/main/java/it/cavallium/dbengine/lucene/EmptyPriorityQueue.java @@ -1,10 +1,6 @@ package it.cavallium.dbengine.lucene; -import java.io.IOException; -import java.util.Iterator; -import java.util.NoSuchElementException; -import org.jetbrains.annotations.NotNull; -import reactor.core.publisher.Flux; +import java.util.stream.Stream; public class EmptyPriorityQueue implements PriorityQueue { @@ -45,8 +41,8 @@ public class EmptyPriorityQueue implements PriorityQueue { } @Override - public Flux iterate() { - return Flux.empty(); + public Stream iterate() { + return Stream.empty(); } @Override diff --git a/src/main/java/it/cavallium/dbengine/lucene/FieldValueHitQueue.java b/src/main/java/it/cavallium/dbengine/lucene/FieldValueHitQueue.java index 0d51311..d8cfc5d 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/FieldValueHitQueue.java +++ b/src/main/java/it/cavallium/dbengine/lucene/FieldValueHitQueue.java @@ -3,8 +3,6 @@ package it.cavallium.dbengine.lucene; import java.io.IOException; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.FieldDoc; -import org.apache.lucene.search.FieldValueHitQueue.Entry; import org.apache.lucene.search.LeafFieldComparator; import org.apache.lucene.search.SortField; @@ -14,7 +12,7 @@ public interface FieldValueHitQueue { int[] getReverseMul(); - LeafFieldComparator[] getComparators(LeafReaderContext context) throws IOException; + LeafFieldComparator[] getComparators(LeafReaderContext context); LLFieldDoc fillFields(LLSlotDoc entry); diff --git a/src/main/java/it/cavallium/dbengine/lucene/FloatCodec.java b/src/main/java/it/cavallium/dbengine/lucene/FloatCodec.java deleted file mode 100644 index 37504b9..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/FloatCodec.java +++ /dev/null @@ -1,17 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import java.util.function.Function; - -public class FloatCodec implements HugePqCodec { - - @Override - public Buffer serialize(Function allocator, Float data) { - return allocator.apply(Float.BYTES).writeFloat(data); - } - - @Override - public Float deserialize(Buffer b) { - return b.readFloat(); - } -} diff 
--git a/src/main/java/it/cavallium/dbengine/lucene/FullDocs.java b/src/main/java/it/cavallium/dbengine/lucene/FullDocs.java index 164855a..503a6d6 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/FullDocs.java +++ b/src/main/java/it/cavallium/dbengine/lucene/FullDocs.java @@ -1,19 +1,20 @@ package it.cavallium.dbengine.lucene; import static it.cavallium.dbengine.lucene.LLDocElementScoreComparator.SCORE_DOC_SCORE_ELEM_COMPARATOR; -import static org.apache.lucene.search.TotalHits.Relation.*; +import static it.cavallium.dbengine.utils.StreamUtils.mergeComparing; +import static org.apache.lucene.search.TotalHits.Relation.EQUAL_TO; +import static org.apache.lucene.search.TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; import it.cavallium.dbengine.lucene.collector.FullFieldDocs; import it.cavallium.dbengine.utils.SimpleResource; -import java.io.IOException; import java.util.Comparator; +import java.util.stream.Stream; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.Sort; import org.apache.lucene.search.SortField; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; public interface FullDocs extends ResourceIterable { @@ -22,10 +23,10 @@ public interface FullDocs extends ResourceIterable { Comparator DEFAULT_TIE_BREAKER = SHARD_INDEX_TIE_BREAKER.thenComparing(DOC_ID_TIE_BREAKER); @Override - Flux iterate(); + Stream iterate(); @Override - Flux iterate(long skips); + Stream iterate(long skips); TotalHits totalHits(); @@ -92,8 +93,8 @@ public interface FullDocs extends ResourceIterable { } @Override - public Flux iterate() { - @SuppressWarnings("unchecked") Flux[] iterables = new Flux[fullDocs.length]; + public Stream iterate() { + @SuppressWarnings("unchecked") Stream[] iterables = new Stream[fullDocs.length]; for (int i = 0; i < fullDocs.length; i++) { var singleFullDocs = fullDocs[i].iterate(); @@ -140,7 +141,7 @@ public interface FullDocs extends ResourceIterable { }; } - @SuppressWarnings("unchecked") Flux[] fluxes = new Flux[fullDocs.length]; + @SuppressWarnings("unchecked") Stream[] fluxes = new Stream[fullDocs.length]; for (int i = 0; i < iterables.length; i++) { var shardIndex = i; fluxes[i] = iterables[i].map(shard -> { @@ -158,11 +159,11 @@ public interface FullDocs extends ResourceIterable { } }); if (fullDocs[i].totalHits().relation == EQUAL_TO) { - fluxes[i] = fluxes[i].take(fullDocs[i].totalHits().value, true); + fluxes[i] = fluxes[i].limit(fullDocs[i].totalHits().value); } } - return Flux.mergeComparing(comp, fluxes); + return mergeComparing(comp, fluxes); } } @@ -182,12 +183,12 @@ public interface FullDocs extends ResourceIterable { } @Override - public Flux iterate() { + public Stream iterate() { return mergedIterable.iterate(); } @Override - public Flux iterate(long skips) { + public Stream iterate(long skips) { return mergedIterable.iterate(skips); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/HugePqArray.java b/src/main/java/it/cavallium/dbengine/lucene/HugePqArray.java deleted file mode 100644 index 2f06ae2..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/HugePqArray.java +++ /dev/null @@ -1,139 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.LLUtils; -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; 
-import it.cavallium.dbengine.database.disk.HugePqEnv; -import it.cavallium.dbengine.database.disk.StandardRocksDBColumn; -import it.cavallium.dbengine.utils.SimpleResource; -import java.util.concurrent.atomic.AtomicBoolean; -import org.jetbrains.annotations.Nullable; -import org.rocksdb.ReadOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.WriteOptions; - -public class HugePqArray extends SimpleResource implements IArray, DiscardingCloseable { - - static { - RocksDB.loadLibrary(); - } - - private final HugePqCodec valueCodec; - private final LLTempHugePqEnv tempEnv; - private final HugePqEnv env; - private final int hugePqId; - private final StandardRocksDBColumn rocksDB; - private final V defaultValue; - - private final long virtualSize; - - public HugePqArray(LLTempHugePqEnv env, HugePqCodec codec, long size, @Nullable V defaultValue) { - this.valueCodec = codec; - this.tempEnv = env; - this.env = env.getEnv(); - this.hugePqId = env.allocateDb(null); - this.rocksDB = this.env.openDb(hugePqId); - this.defaultValue = defaultValue; - - this.virtualSize = size; - } - - private static ReadOptions newReadOptions() { - return new ReadOptions().setVerifyChecksums(false); - } - - private static WriteOptions newWriteOptions() { - return new WriteOptions().setDisableWAL(true).setSync(false); - } - - public HugePqCodec getValueCodec() { - return valueCodec; - } - - private Buffer allocate(int size) { - return rocksDB.getAllocator().allocate(size); - } - - private static void ensureThread() { - LLUtils.ensureBlocking(); - } - - @Override - public void set(long index, @Nullable V value) { - ensureBounds(index); - ensureThread(); - var keyBuf = allocate(Long.BYTES); - try (var writeOptions = newWriteOptions(); - var valueBuf = valueCodec.serialize(this::allocate, value); keyBuf) { - keyBuf.writeLong(index); - rocksDB.put(writeOptions, keyBuf, valueBuf); - } catch (RocksDBException e) { - throw new IllegalStateException(e); - } - } - - @Override - public void reset(long index) { - ensureBounds(index); - ensureThread(); - try (var writeOptions = newWriteOptions(); - var keyBuf = allocate(Long.BYTES)) { - keyBuf.writeLong(index); - rocksDB.delete(writeOptions, keyBuf); - } catch (RocksDBException e) { - throw new IllegalStateException(e); - } - } - - @Override - public @Nullable V get(long index) { - ensureBounds(index); - ensureThread(); - - var keyBuf = allocate(Long.BYTES); - try (keyBuf) { - keyBuf.writeLong(index); - try (var readOptions = newReadOptions(); - var value = rocksDB.get(readOptions, keyBuf)) { - if (value == null) { - return null; - } - return valueCodec.deserialize(value); - } - } catch (RocksDBException e) { - throw new IllegalStateException(e); - } - } - - private void ensureBounds(long index) { - if (index < 0 || index >= virtualSize) throw new IndexOutOfBoundsException(); - } - - @Override - public long size() { - ensureThread(); - return virtualSize; - } - - @Override - public void onClose() { - ensureThread(); - this.tempEnv.freeDb(hugePqId); - } - - @Override - public String toString() { - return "huge_pq_array[" + virtualSize + "]"; - } - - public Object[] toArray() { - var result = new Object[Math.toIntExact(virtualSize)]; - for (int i = 0; i < virtualSize; i++) { - result[i] = get(i); - } - return result; - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/HugePqCodec.java b/src/main/java/it/cavallium/dbengine/lucene/HugePqCodec.java deleted file mode 100644 index 24dc8c3..0000000 --- 
a/src/main/java/it/cavallium/dbengine/lucene/HugePqCodec.java +++ /dev/null @@ -1,105 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import java.util.function.Function; -import org.apache.lucene.util.BitUtil; -import org.apache.lucene.util.NumericUtils; -import org.rocksdb.AbstractComparator; - -public interface HugePqCodec { - - Buffer serialize(Function allocator, T data); - - T deserialize(Buffer b); - - default T clone(T obj) { - return obj; - } - - default AbstractComparator getComparator() { - return null; - } - - static int getLexInt(Buffer buffer, int offset, boolean invert) { - var data = new byte[Integer.BYTES]; - buffer.copyInto(offset, data, 0, data.length); - var result = sortableBytesToInt(data, 0, invert); - return result; - } - - static Buffer setLexInt(Buffer buffer, int offset, boolean invert, int value) { - var data = new byte[Integer.BYTES]; - intToSortableBytes(value, data, 0, invert); - for (int i = 0; i < data.length; i++) { - buffer.setByte(offset + i, data[i]); - } - return buffer; - } - - static float getLexFloat(Buffer buffer, int offset, boolean invert) { - return sortableIntToFloat(getLexInt(buffer, offset, false), invert); - } - - static Buffer setLexFloat(Buffer buffer, int offset, boolean invert, float value) { - return setLexInt(buffer, offset, false, floatToSortableInt(value, invert)); - } - - /** - * Encodes an integer {@code value} such that unsigned byte order comparison is consistent with - * {@link Integer#compare(int, int)} - * - */ - public static void intToSortableBytes(int value, byte[] result, int offset, boolean invert) { - if (!invert) { - // Flip the sign bit, so negative ints sort before positive ints correctly: - value ^= 0x80000000; - } else { - value ^= 0x7FFFFFFF; - } - BitUtil.VH_BE_INT.set(result, offset, value); - } - - /** - * Decodes an integer value previously written with {@link #intToSortableBytes} - * - */ - public static int sortableBytesToInt(byte[] encoded, int offset, boolean invert) { - int x = (int) BitUtil.VH_BE_INT.get(encoded, offset); - if (!invert) { - // Re-flip the sign bit to restore the original value: - return x ^ 0x80000000; - } else { - return x ^ 0x7FFFFFFF; - } - } - - /** - * Converts a float value to a sortable signed int. The value is - * converted by getting their IEEE 754 floating-point "float format" bit layout and then - * some bits are swapped, to be able to compare the result as int. By this the precision is not - * reduced, but the value can easily used as an int. The sort order (including {@link Float#NaN}) - * is defined by {@link Float#compareTo}; {@code NaN} is greater than positive infinity. - * - * @see #sortableIntToFloat - */ - public static int floatToSortableInt(float value, boolean invert) { - if (invert) { - return Float.floatToIntBits(value); - } else { - return NumericUtils.floatToSortableInt(value); - } - } - - /** - * Converts a sortable int back to a float. 
- * - * @see #floatToSortableInt - */ - public static float sortableIntToFloat(int encoded, boolean invert) { - if (invert) { - return Float.intBitsToFloat(encoded); - } else { - return NumericUtils.sortableIntToFloat(encoded); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/HugePqComparator.java b/src/main/java/it/cavallium/dbengine/lucene/HugePqComparator.java deleted file mode 100644 index 7d6b952..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/HugePqComparator.java +++ /dev/null @@ -1,118 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import static org.apache.lucene.search.SortField.STRING_LAST; - -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.comparators.DoubleComparator; -import it.cavallium.dbengine.lucene.comparators.FloatComparator; -import it.cavallium.dbengine.lucene.comparators.IntComparator; -import it.cavallium.dbengine.lucene.comparators.LongComparator; -import it.cavallium.dbengine.lucene.comparators.RelevanceComparator; -import it.cavallium.dbengine.lucene.comparators.TermOrdValComparator; -import java.io.IOException; -import org.apache.commons.lang3.NotImplementedException; -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.NumericDocValues; -import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.SortedNumericSelector; -import it.cavallium.dbengine.lucene.hugepq.search.comparators.HugePqDocComparator; - -public class HugePqComparator { - - public static FieldComparator getComparator(LLTempHugePqEnv env, SortField sortField, - int numHits, boolean enableSkipping) { - var sortFieldClass = sortField.getClass(); - if (sortFieldClass == org.apache.lucene.search.SortedNumericSortField.class) { - var nf = (org.apache.lucene.search.SortedNumericSortField) sortField; - var type = nf.getNumericType(); - var missingValue = nf.getMissingValue(); - var reverse = nf.getReverse(); - var selector = nf.getSelector(); - final FieldComparator fieldComparator = switch (type) { - case INT -> new IntComparator(env, numHits, nf.getField(), (Integer) missingValue, reverse, enableSkipping) { - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new IntLeafComparator(context) { - @Override - protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) - throws IOException { - return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector, type); - } - }; - } - }; - case FLOAT -> new FloatComparator(env, numHits, nf.getField(), (Float) missingValue, reverse, enableSkipping) { - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new FloatLeafComparator(context) { - @Override - protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) - throws IOException { - return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector, type); - } - }; - } - }; - case LONG -> new LongComparator(env, numHits, nf.getField(), (Long) missingValue, reverse, enableSkipping) { - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new LongLeafComparator(context) { - @Override - protected NumericDocValues getNumericDocValues(LeafReaderContext 
context, String field) - throws IOException { - return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector, type); - } - }; - } - }; - case DOUBLE -> new DoubleComparator(env, numHits, nf.getField(), (Double) missingValue, reverse, enableSkipping) { - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new DoubleLeafComparator(context) { - @Override - protected NumericDocValues getNumericDocValues(LeafReaderContext context, String field) - throws IOException { - return SortedNumericSelector.wrap(DocValues.getSortedNumeric(context.reader(), field), selector, type); - } - }; - } - }; - case CUSTOM, DOC, REWRITEABLE, STRING_VAL, SCORE, STRING -> throw new AssertionError(); - }; - if (!nf.getOptimizeSortWithPoints()) { - fieldComparator.disableSkipping(); - } - return fieldComparator; - } else if (sortFieldClass == SortField.class) { - var missingValue = sortField.getMissingValue(); - var reverse = sortField.getReverse(); - var field = sortField.getField(); - var comparatorSource = sortField.getComparatorSource(); - return switch (sortField.getType()) { - case SCORE -> new RelevanceComparator(env, numHits); - case DOC -> new HugePqDocComparator(env, numHits, reverse, enableSkipping); - case INT -> new IntComparator(env, numHits, field, (Integer) missingValue, - reverse, enableSkipping); - case FLOAT -> new FloatComparator(env, numHits, field, (Float) missingValue, - reverse, enableSkipping); - case LONG -> new LongComparator(env, numHits, field, (Long) missingValue, - reverse, enableSkipping); - case DOUBLE -> new DoubleComparator(env, numHits, field, (Double) missingValue, - reverse, enableSkipping); - case CUSTOM -> { - assert comparatorSource != null; - yield comparatorSource.newComparator(field, numHits, enableSkipping, reverse); - } - case STRING -> new TermOrdValComparator(env, numHits, field, missingValue == STRING_LAST); - case STRING_VAL -> throw new NotImplementedException("String val sort field not implemented"); - case REWRITEABLE -> throw new IllegalStateException( - "SortField needs to be rewritten through Sort.rewrite(..) 
and SortField.rewrite(..)"); - }; - } else { - throw new NotImplementedException("SortField type not implemented: " + sortFieldClass.getName()); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/HugePqPriorityQueue.java b/src/main/java/it/cavallium/dbengine/lucene/HugePqPriorityQueue.java deleted file mode 100644 index 19d092b..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/HugePqPriorityQueue.java +++ /dev/null @@ -1,397 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import it.cavallium.dbengine.database.LLRange; -import it.cavallium.dbengine.database.LLUtils; -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.database.disk.HugePqEnv; -import it.cavallium.dbengine.database.disk.RocksIterWithReadOpts; -import it.cavallium.dbengine.database.disk.StandardRocksDBColumn; -import it.cavallium.dbengine.database.disk.UpdateAtomicResultMode; -import it.cavallium.dbengine.database.disk.UpdateAtomicResultPrevious; -import it.cavallium.dbengine.utils.SimpleResource; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Objects; -import java.util.StringJoiner; -import java.util.concurrent.atomic.AtomicBoolean; -import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; -import org.rocksdb.ReadOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.WriteBatch; -import org.rocksdb.WriteOptions; -import reactor.core.publisher.Flux; -import reactor.core.scheduler.Schedulers; - -public class HugePqPriorityQueue extends SimpleResource - implements PriorityQueue, Reversable>, ReversableResourceIterable { - - static { - RocksDB.loadLibrary(); - } - - private final LLTempHugePqEnv tempEnv; - private final HugePqEnv env; - private final int hugePqId; - private final StandardRocksDBColumn rocksDB; - private final HugePqCodec codec; - - private long size = 0; - - private T cachedTop; - private boolean cachedTopSet = false; - - public HugePqPriorityQueue(LLTempHugePqEnv env, HugePqCodec codec) { - this.tempEnv = env; - this.env = env.getEnv(); - this.hugePqId = env.allocateDb(codec.getComparator()); - this.rocksDB = this.env.openDb(hugePqId); - this.codec = codec; - } - - private static ReadOptions newReadOptions() { - return new ReadOptions().setVerifyChecksums(false); - } - - private static WriteOptions newWriteOptions() { - return new WriteOptions().setDisableWAL(true).setSync(false); - } - - private Buffer allocate(int size) { - return rocksDB.getAllocator().allocate(size); - } - - private static void ensureThread() { - LLUtils.ensureBlocking(); - } - - private static void ensureItThread() { - ensureThread(); - } - - @Override - public void add(T element) { - ensureThread(); - - cachedTopSet = false; - var keyBuf = serializeKey(element); - try (keyBuf) { - try (var readOptions = newReadOptions(); var writeOptions = newWriteOptions()) { - rocksDB - .updateAtomic(readOptions, writeOptions, keyBuf, this::incrementOrAdd, UpdateAtomicResultMode.NOTHING) - .close(); - } - ++size; - } catch (IOException e) { - throw new IllegalStateException(e); - } - } - - private Buffer serializeKey(T element) { - return codec.serialize(this::allocate, element); - } - - private T deserializeKey(Buffer keyBuf) { - return codec.deserialize(keyBuf.writerOffset(keyBuf.writerOffset())); - } - - private Buffer serializeValue(int count) { - var 
keyBuf = allocate(Integer.BYTES); - keyBuf.writeInt(count); - return keyBuf; - } - - private int deserializeValue(Buffer keyBuf) { - return keyBuf.readInt(); - } - - @Override - public T top() { - if (cachedTopSet) { - return cachedTop; - } - ensureThread(); - return databaseTop(); - } - - private T databaseTop() { - try (var readOptions = newReadOptions(); - var it = rocksDB.newRocksIterator(true, readOptions, LLRange.all(), false)) { - it.seekToFirst(); - if (it.isValid()) { - var key = it.key(); - try (var keyBuf = rocksDB.getAllocator().copyOf(key)) { - var top = deserializeKey(keyBuf); - cachedTop = top; - cachedTopSet = true; - return top; - } - } else { - cachedTop = null; - cachedTopSet = true; - return null; - } - } catch (RocksDBException e) { - throw new IllegalStateException(e); - } - } - - @Override - public T pop() { - ensureThread(); - cachedTopSet = false; - try (var readOptions = newReadOptions(); - var writeOptions = newWriteOptions(); - var it = rocksDB.newRocksIterator(true, readOptions, LLRange.all(), false)) { - it.seekToFirst(); - if (it.isValid()) { - var key = it.key(); - try (var keyBuf = rocksDB.getAllocator().copyOf(key)) { - rocksDB - .updateAtomic(readOptions, writeOptions, keyBuf, this::reduceOrRemove, UpdateAtomicResultMode.NOTHING) - .close(); - --size; - return deserializeKey(keyBuf); - } - } else { - return null; - } - } catch (RocksDBException | IOException e) { - throw new IllegalStateException(e); - } - } - - private Buffer incrementOrAdd(@Nullable Buffer prev) { - if (prev == null) { - return serializeValue(1); - } else { - var prevCount = deserializeValue(prev); - assert prevCount > 0; - return serializeValue(prevCount + 1); - } - } - - @Nullable - private Buffer reduceOrRemove(@Nullable Buffer prev) { - if (prev == null) { - return null; - } - var prevCount = deserializeValue(prev); - assert prevCount > 0; - if (prevCount == 1) { - return null; - } else { - return serializeValue(prevCount - 1); - } - } - - @Override - public void replaceTop(T oldTop, T newTop) { - ensureThread(); - cachedTopSet = false; - if (oldTop == null) { - add(newTop); - cachedTop = newTop; - cachedTopSet = true; - } else { - try (var readOptions = newReadOptions(); - var writeOptions = newWriteOptions(); - var oldKeyBuf = serializeKey(oldTop); - var newKeyBuf = serializeKey(newTop); - var ignored = rocksDB.updateAtomic(readOptions, - writeOptions, - oldKeyBuf, - this::reduceOrRemove, - UpdateAtomicResultMode.NOTHING - ); - var ignored2 = rocksDB.updateAtomic(readOptions, - writeOptions, - newKeyBuf, - this::incrementOrAdd, - UpdateAtomicResultMode.NOTHING - )) { - cachedTop = newTop; - cachedTopSet = true; - } catch (IOException ex) { - throw new IllegalStateException(ex); - } - } - } - - @Override - public long size() { - ensureThread(); - return size; - } - - @Override - public void clear() { - ensureThread(); - cachedTopSet = false; - try (var wb = new WriteBatch(); var wo = newWriteOptions()) { - wb.deleteRange(rocksDB.getColumnFamilyHandle(), new byte[0], getBiggestKey()); - size = 0; - rocksDB.write(wo, wb); - } catch (RocksDBException e) { - throw new IllegalStateException(e); - } - } - - private byte[] getBiggestKey() { - var biggestKey = new byte[4096]; - Arrays.fill(biggestKey, (byte) 0xFF); - return biggestKey; - } - - @Override - public boolean remove(@NotNull T element) { - ensureThread(); - Objects.requireNonNull(element); - cachedTopSet = false; - try (var readOptions = newReadOptions(); - var writeOptions = newWriteOptions(); - var keyBuf = 
serializeKey(element)) { - try (var prev = (UpdateAtomicResultPrevious) rocksDB.updateAtomic(readOptions, writeOptions, - keyBuf, - this::reduceOrRemove, - UpdateAtomicResultMode.PREVIOUS - )) { - if (prev.previous() != null) { - --size; - return true; - } else { - return false; - } - } - } catch (IOException ex) { - throw new IllegalStateException(ex); - } - } - - public Flux reverseIterate() { - return iterate(0, true); - } - - @Override - public Flux iterate() { - return iterate(0, false); - } - - private Flux iterate(long skips, boolean reverse) { - return Flux., RocksIterWithReadOpts>generate(() -> { - var readOptions = newReadOptions(); - var rocksIterator = rocksDB.newRocksIterator(true, readOptions, LLRange.all(), reverse); - if (reverse) { - rocksIterator.seekToLast(); - } else { - rocksIterator.seekToFirst(); - } - long skipsDone = 0; - while (rocksIterator.isValid() && skipsDone < skips) { - if (reverse) { - rocksIterator.prev(); - } else { - rocksIterator.next(); - } - skipsDone++; - } - return new RocksIterWithReadOpts(readOptions, rocksIterator); - }, (t, sink) -> { - var rocksIterator = t.iter(); - if (rocksIterator.isValid()) { - try (var keyBuf = rocksDB.getAllocator().copyOf(rocksIterator.key()); - var valBuf = rocksDB.getAllocator().copyOf(rocksIterator.value())) { - var count = deserializeValue(valBuf); - if (count == 0) { - sink.next(List.of()); - } else { - var result = new ArrayList(count); - T origKey = deserializeKey(keyBuf); - for (int i = 0; i < count; i++) { - if (i == 0) { - result.add(origKey); - } else { - result.add(codec.clone(origKey)); - } - } - sink.next(result); - } - } - try { - if (reverse) { - rocksIterator.prev(); - } else { - rocksIterator.next(); - } - } catch (RocksDBException e) { - sink.error(e); - } - } else { - sink.complete(); - } - - return t; - }, rocksIterWithReadOpts -> { - if (rocksIterWithReadOpts != null) { - rocksIterWithReadOpts.close(); - } - }).subscribeOn(Schedulers.boundedElastic()).concatMapIterable(item -> item); - } - - @Override - public Flux iterate(long skips) { - return iterate(skips, false); - } - - public Flux reverseIterate(long skips) { - return iterate(skips, true); - } - - @Override - protected void onClose() { - this.tempEnv.freeDb(hugePqId); - if (this.codec instanceof SafeCloseable closeable) { - closeable.close(); - } - } - - @Override - public String toString() { - return new StringJoiner(", ", HugePqPriorityQueue.class.getSimpleName() + "[", "]") - .add("size=" + size) - .toString(); - } - - @Override - public ReversableResourceIterable reverse() { - return new ReversedResourceIterable(); - } - - private class ReversedResourceIterable extends SimpleResource implements ReversableResourceIterable { - - @Override - public void onClose() { - HugePqPriorityQueue.this.close(); - } - - @Override - public Flux iterate() { - return reverseIterate(); - } - - @Override - public Flux iterate(long skips) { - return reverseIterate(skips); - } - - @Override - public ReversableResourceIterable reverse() { - return HugePqPriorityQueue.this; - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/IntCodec.java b/src/main/java/it/cavallium/dbengine/lucene/IntCodec.java deleted file mode 100644 index eb2f7bb..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/IntCodec.java +++ /dev/null @@ -1,17 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import java.util.function.Function; - -public class IntCodec implements HugePqCodec { - - @Override - public Buffer serialize(Function 
allocator, Integer data) { - return allocator.apply(Integer.BYTES).writeInt(data); - } - - @Override - public Integer deserialize(Buffer b) { - return b.readInt(); - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/LLFieldDocCodec.java b/src/main/java/it/cavallium/dbengine/lucene/LLFieldDocCodec.java deleted file mode 100644 index 8d003f2..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/LLFieldDocCodec.java +++ /dev/null @@ -1,126 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import java.util.ArrayList; -import java.util.function.Function; - -public class LLFieldDocCodec implements HugePqCodec { - - private enum FieldType { - FLOAT, - DOUBLE, - INT, - LONG; - - public byte ordinalByte() { - return (byte) ordinal(); - } - } - - @Override - public Buffer serialize(Function allocator, LLFieldDoc data) { - int fieldsDataSize = 0; - byte[] fieldTypes = new byte[data.fields().size()]; - int fieldId = 0; - for (Object field : data.fields()) { - assert field != null; - if (field instanceof Float) { - fieldsDataSize += Float.BYTES; - fieldTypes[fieldId] = FieldType.FLOAT.ordinalByte(); - } else if (field instanceof Double) { - fieldsDataSize += Double.BYTES; - fieldTypes[fieldId] = FieldType.DOUBLE.ordinalByte(); - } else if (field instanceof Integer) { - fieldsDataSize += Integer.BYTES; - fieldTypes[fieldId] = FieldType.INT.ordinalByte(); - } else if (field instanceof Long) { - fieldsDataSize += Long.BYTES; - fieldTypes[fieldId] = FieldType.LONG.ordinalByte(); - } else { - throw new UnsupportedOperationException("Unsupported field type " + field.getClass()); - } - fieldId++; - } - int size = Float.BYTES + Integer.BYTES + Integer.BYTES + Character.BYTES + (data.fields().size() + Byte.BYTES) + fieldsDataSize; - var buf = allocator.apply(size); - setScore(buf, data.score()); - setDoc(buf, data.doc()); - setShardIndex(buf, data.shardIndex()); - setFieldsCount(buf, data.fields().size()); - buf.writerOffset(size); - - fieldId = 0; - for (Object field : data.fields()) { - assert field != null; - buf.writeByte(fieldTypes[fieldId]); - if (field instanceof Float val) { - buf.writeFloat(val); - } else if (field instanceof Double val) { - buf.writeDouble(val); - } else if (field instanceof Integer val) { - buf.writeInt(val); - } else if (field instanceof Long val) { - buf.writeLong(val); - } else { - throw new UnsupportedOperationException("Unsupported field type " + field.getClass()); - } - fieldId++; - } - assert buf.writableBytes() == 0; - return buf; - } - - @Override - public LLFieldDoc deserialize(Buffer buf) { - var fieldsCount = getFieldsCount(buf); - ArrayList fields = new ArrayList<>(fieldsCount); - buf.readerOffset(Float.BYTES + Integer.BYTES + Integer.BYTES + Character.BYTES); - for (char i = 0; i < fieldsCount; i++) { - fields.add(switch (FieldType.values()[buf.readByte()]) { - case FLOAT -> buf.readFloat(); - case DOUBLE -> buf.readDouble(); - case INT -> buf.readInt(); - case LONG -> buf.readLong(); - }); - } - assert buf.readableBytes() == 0; - return new LLFieldDoc(getDoc(buf), getScore(buf), getShardIndex(buf), fields); - } - - private static float getScore(Buffer hit) { - return HugePqCodec.getLexFloat(hit, 0, false); - } - - private static int getDoc(Buffer hit) { - return HugePqCodec.getLexInt(hit, Float.BYTES, true); - } - - private static int getShardIndex(Buffer hit) { - return HugePqCodec.getLexInt(hit, Float.BYTES + Integer.BYTES, false); - } - - private char getFieldsCount(Buffer hit) { - return hit.getChar(Float.BYTES + 
Integer.BYTES + Integer.BYTES); - } - - private static void setScore(Buffer hit, float score) { - HugePqCodec.setLexFloat(hit, 0, false, score); - } - - private static void setDoc(Buffer hit, int doc) { - HugePqCodec.setLexInt(hit, Float.BYTES, true, doc); - } - - private static void setShardIndex(Buffer hit, int shardIndex) { - HugePqCodec.setLexInt(hit, Float.BYTES + Integer.BYTES, false, shardIndex); - } - - private void setFieldsCount(Buffer hit, int size) { - hit.setChar(Float.BYTES + Integer.BYTES + Integer.BYTES, (char) size); - } - - @Override - public LLFieldDoc clone(LLFieldDoc obj) { - return new LLFieldDoc(obj.doc(), obj.score(), obj.shardIndex(), obj.fields()); - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/LLScoreDocCodec.java b/src/main/java/it/cavallium/dbengine/lucene/LLScoreDocCodec.java deleted file mode 100644 index 41c8d90..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/LLScoreDocCodec.java +++ /dev/null @@ -1,51 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import java.util.function.Function; - -public class LLScoreDocCodec implements HugePqCodec { - - @Override - public Buffer serialize(Function allocator, LLScoreDoc data) { - var buf = allocator.apply(Float.BYTES + Integer.BYTES + Integer.BYTES); - buf.writerOffset(Float.BYTES + Integer.BYTES + Integer.BYTES); - setScore(buf, data.score()); - setDoc(buf, data.doc()); - setShardIndex(buf, data.shardIndex()); - return buf; - } - - @Override - public LLScoreDoc deserialize(Buffer buf) { - return new LLScoreDoc(getDoc(buf), getScore(buf), getShardIndex(buf)); - } - - private static float getScore(Buffer hit) { - return HugePqCodec.getLexFloat(hit, 0, false); - } - - private static int getDoc(Buffer hit) { - return HugePqCodec.getLexInt(hit, Float.BYTES, true); - } - - private static int getShardIndex(Buffer hit) { - return HugePqCodec.getLexInt(hit, Float.BYTES + Integer.BYTES, false); - } - - private static void setScore(Buffer hit, float score) { - HugePqCodec.setLexFloat(hit, 0, false, score); - } - - private static void setDoc(Buffer hit, int doc) { - HugePqCodec.setLexInt(hit, Float.BYTES, true, doc); - } - - private static void setShardIndex(Buffer hit, int shardIndex) { - HugePqCodec.setLexInt(hit, Float.BYTES + Integer.BYTES, false, shardIndex); - } - - @Override - public LLScoreDoc clone(LLScoreDoc obj) { - return new LLScoreDoc(obj.doc(), obj.score(), obj.shardIndex()); - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/LLSlotDocCodec.java b/src/main/java/it/cavallium/dbengine/lucene/LLSlotDocCodec.java deleted file mode 100644 index 537bc49..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/LLSlotDocCodec.java +++ /dev/null @@ -1,209 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.utils.SimpleResource; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.util.ArrayList; -import java.util.List; -import java.util.function.Function; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import 
org.rocksdb.AbstractComparator; -import org.rocksdb.ComparatorOptions; - -public class LLSlotDocCodec extends SimpleResource - implements HugePqCodec, FieldValueHitQueue, DiscardingCloseable { - - private final SortField[] fields; - - protected final FieldComparator[] comparators; - protected final int[] reverseMul; - private final ComparatorOptions comparatorOptions; - private final AbstractComparator comparator; - - public LLSlotDocCodec(LLTempHugePqEnv env, int numHits, SortField[] fields) { - // When we get here, fields.length is guaranteed to be > 0, therefore no - // need to check it again. - - // All these are required by this class's API - need to return arrays. - // Therefore even in the case of a single comparator, create an array - // anyway. - this.fields = fields; - int numComparators = fields.length; - comparators = new FieldComparator[numComparators]; - reverseMul = new int[numComparators]; - for (int i = 0; i < numComparators; ++i) { - SortField field = fields[i]; - reverseMul[i] = field.getReverse() ? -1 : 1; - comparators[i] = HugePqComparator.getComparator(env, field, numHits, i == 0); - } - comparatorOptions = new ComparatorOptions().setMaxReusedBufferSize(0); - comparator = new AbstractComparator(comparatorOptions) { - @Override - public String name() { - return "slot-doc-codec-comparator"; - } - - @Override - public int compare(ByteBuffer hitA, ByteBuffer hitB) { - assert hitA != hitB; - hitA.position(hitA.position() + Float.BYTES); - hitB.position(hitB.position() + Float.BYTES); - var docA = readDoc(hitA); - var docB = readDoc(hitB); - if (docA == docB) { - return 0; - } - hitA.position(hitA.position() + Integer.BYTES); - hitB.position(hitB.position() + Integer.BYTES); - var slotA = readSlot(hitA); - var slotB = readSlot(hitB); - assert slotA != slotB : "Slot " + slotA + " is equal to slot " + slotB; - - int numComparators = comparators.length; - for (int i = 0; i < numComparators; ++i) { - final int c = reverseMul[i] * comparators[i].compare(slotA, slotB); - if (c != 0) { - // Short circuit - return -c; - } - } - - // avoid random sort order that could lead to duplicates (bug #31241): - return Integer.compare(docB, docA); - } - }; - } - - @Override - public Buffer serialize(Function allocator, LLSlotDoc data) { - var buf = allocator.apply(Float.BYTES + Integer.BYTES + Integer.BYTES + Integer.BYTES); - buf.writerOffset(Float.BYTES + Integer.BYTES + Integer.BYTES + Integer.BYTES); - setScore(buf, data.score()); - setDoc(buf, data.doc()); - setShardIndex(buf, data.shardIndex()); - setSlot(buf, data.slot()); - return buf; - } - - @Override - public LLSlotDoc deserialize(Buffer buf) { - return new LLSlotDoc(getDoc(buf), getScore(buf), getShardIndex(buf), getSlot(buf)); - } - - @Override - public AbstractComparator getComparator() { - return comparator; - } - - private static float getScore(Buffer hit) { - return hit.getFloat(0); - } - - private static int getDoc(Buffer hit) { - return hit.getInt(Float.BYTES); - } - - private static int readDoc(ByteBuffer hit) { - return hit.getInt(); - } - - private static int getShardIndex(Buffer hit) { - return hit.getInt(Float.BYTES + Integer.BYTES); - } - - private static int getSlot(Buffer hit) { - return hit.getInt(Float.BYTES + Integer.BYTES + Integer.BYTES); - } - - private static int readSlot(ByteBuffer hit) { - return hit.getInt(); - } - - private static void setScore(Buffer hit, float score) { - hit.setFloat(0, score); - } - - private static void setDoc(Buffer hit, int doc) { - hit.setInt(Float.BYTES, doc); - } - - private 
static void setShardIndex(Buffer hit, int shardIndex) { - hit.setInt(Float.BYTES + Integer.BYTES, shardIndex); - } - - private static void setSlot(Buffer hit, int slot) { - hit.setInt(Float.BYTES + Integer.BYTES + Integer.BYTES, slot); - } - - @Override - public FieldComparator[] getComparators() { - return comparators; - } - - @Override - public int[] getReverseMul() { - return reverseMul; - } - - @Override - public LeafFieldComparator[] getComparators(LeafReaderContext context) throws IOException { - LeafFieldComparator[] comparators = new LeafFieldComparator[this.comparators.length]; - for (int i = 0; i < comparators.length; ++i) { - comparators[i] = this.comparators[i].getLeafComparator(context); - } - return comparators; - } - - /** - * Given a queue Entry, creates a corresponding FieldDoc that contains the values used to sort the - * given document. These values are not the raw values out of the index, but the internal - * representation of them. This is so the given search hit can be collated by a MultiSearcher with - * other search hits. - * - * @param entry The Entry used to create a FieldDoc - * @return The newly created FieldDoc - * @see IndexSearcher#search(Query,int, Sort) - */ - @Override - public LLFieldDoc fillFields(final LLSlotDoc entry) { - final int n = comparators.length; - final List fields = new ArrayList<>(n); - for (FieldComparator comparator : comparators) { - fields.add(comparator.value(entry.slot())); - } - // if (maxscore > 1.0f) doc.score /= maxscore; // normalize scores - return new LLFieldDoc(entry.doc(), entry.score(), entry.shardIndex(), fields); - } - - /** Returns the SortFields being used by this hit queue. */ - @Override - public SortField[] getFields() { - return fields; - } - - @Override - protected void onClose() { - for (FieldComparator comparator : this.comparators) { - if (comparator instanceof SafeCloseable closeable) { - closeable.close(); - } - } - comparator.close(); - comparatorOptions.close(); - } - - @Override - public LLSlotDoc clone(LLSlotDoc obj) { - return new LLSlotDoc(obj.doc(), obj.score(), obj.shardIndex(), obj.slot()); - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/LazyFullDocs.java b/src/main/java/it/cavallium/dbengine/lucene/LazyFullDocs.java index 4a07594..2bdbd58 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/LazyFullDocs.java +++ b/src/main/java/it/cavallium/dbengine/lucene/LazyFullDocs.java @@ -1,10 +1,8 @@ package it.cavallium.dbengine.lucene; import it.cavallium.dbengine.utils.SimpleResource; -import java.io.Closeable; -import java.io.IOException; +import java.util.stream.Stream; import org.apache.lucene.search.TotalHits; -import reactor.core.publisher.Flux; public class LazyFullDocs extends SimpleResource implements FullDocs { @@ -17,12 +15,12 @@ public class LazyFullDocs extends SimpleResource implements Ful } @Override - public Flux iterate() { + public Stream iterate() { return pq.iterate(); } @Override - public Flux iterate(long skips) { + public Stream iterate(long skips) { return pq.iterate(skips); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/LongCodec.java b/src/main/java/it/cavallium/dbengine/lucene/LongCodec.java deleted file mode 100644 index c40b25a..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/LongCodec.java +++ /dev/null @@ -1,18 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.Buffer; -import java.util.function.Function; - -public class LongCodec implements HugePqCodec { - - @Override - public Buffer serialize(Function allocator, Long 
data) { - return allocator.apply(Long.BYTES).writeLong(data); - } - - @Override - public Long deserialize(Buffer b) { - return b.readLong(); - } - -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/LuceneConcurrentMergeScheduler.java b/src/main/java/it/cavallium/dbengine/lucene/LuceneConcurrentMergeScheduler.java index 3054f5a..ca7006d 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/LuceneConcurrentMergeScheduler.java +++ b/src/main/java/it/cavallium/dbengine/lucene/LuceneConcurrentMergeScheduler.java @@ -11,7 +11,7 @@ public class LuceneConcurrentMergeScheduler extends ConcurrentMergeScheduler { } @Override - protected synchronized MergeThread getMergeThread(MergeSource mergeSource, OneMerge merge) throws IOException { + protected synchronized MergeThread getMergeThread(MergeSource mergeSource, OneMerge merge) { final MergeThread thread = new LuceneMergeThread(mergeSource, merge); thread.setDaemon(true); thread.setName("lucene-merge-" + mergeThreadCount++); diff --git a/src/main/java/it/cavallium/dbengine/lucene/LuceneRocksDBManager.java b/src/main/java/it/cavallium/dbengine/lucene/LuceneRocksDBManager.java deleted file mode 100644 index 5e5acd8..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/LuceneRocksDBManager.java +++ /dev/null @@ -1,58 +0,0 @@ -package it.cavallium.dbengine.lucene; - -import io.netty5.buffer.BufferAllocator; -import it.cavallium.dbengine.lucene.directory.RocksDBInstance; -import it.cavallium.dbengine.lucene.directory.RocksdbFileStore; -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; - -public class LuceneRocksDBManager { - - private static final Logger LOG = LogManager.getLogger(LuceneRocksDBManager.class); - private final List> dbs = new ArrayList<>(); - private BufferAllocator bufferAllocator; - - public synchronized BufferAllocator getAllocator() { - if (bufferAllocator == null) { - bufferAllocator = BufferAllocator.offHeapPooled(); - } - return bufferAllocator; - } - - public synchronized RocksDBInstance getOrCreate(Path path) { - try { - for (var entry : dbs) { - if (Files.isSameFile(path, entry.getKey())) { - return entry.getValue(); - } - } - RocksDBInstance db = RocksdbFileStore.createEmpty(path); - dbs.add(Map.entry(path, db)); - return db; - } catch (IOException ex) { - throw new UnsupportedOperationException("Can't load RocksDB database at path: " + path, ex); - } - } - - public synchronized void closeAll() { - for (Entry db : dbs) { - try { - db.getValue().db().closeE(); - } catch (Throwable ex) { - LOG.error("Failed to close lucene RocksDB database", ex); - } - } - dbs.clear(); - if (bufferAllocator != null) { - bufferAllocator.close(); - } - bufferAllocator = null; - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/LuceneUtils.java b/src/main/java/it/cavallium/dbengine/lucene/LuceneUtils.java index 5f86c75..bb1d1e5 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/LuceneUtils.java +++ b/src/main/java/it/cavallium/dbengine/lucene/LuceneUtils.java @@ -1,9 +1,6 @@ package it.cavallium.dbengine.lucene; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; import static it.cavallium.dbengine.lucene.searcher.GlobalQueryRewrite.NO_REWRITE; -import static reactor.core.scheduler.Schedulers.DEFAULT_BOUNDED_ELASTIC_QUEUESIZE; -import static 
reactor.core.scheduler.Schedulers.DEFAULT_BOUNDED_ELASTIC_SIZE; import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; @@ -24,8 +21,6 @@ import it.cavallium.dbengine.database.collections.DatabaseStageMap; import it.cavallium.dbengine.database.collections.ValueGetter; import it.cavallium.dbengine.database.disk.LLIndexSearcher; import it.cavallium.dbengine.database.disk.LLIndexSearchers; -import it.cavallium.dbengine.database.disk.LLIndexSearchers.UnshardedIndexSearchers; -import it.cavallium.dbengine.database.disk.LuceneThreadFactory; import it.cavallium.dbengine.lucene.LuceneConcurrentMergeScheduler.LuceneMergeThread; import it.cavallium.dbengine.lucene.analyzer.LegacyWordAnalyzer; import it.cavallium.dbengine.lucene.analyzer.NCharGramAnalyzer; @@ -33,7 +28,6 @@ import it.cavallium.dbengine.lucene.analyzer.NCharGramEdgeAnalyzer; import it.cavallium.dbengine.lucene.analyzer.TextFieldsAnalyzer; import it.cavallium.dbengine.lucene.analyzer.TextFieldsSimilarity; import it.cavallium.dbengine.lucene.analyzer.WordAnalyzer; -import it.cavallium.dbengine.lucene.directory.RocksdbDirectory; import it.cavallium.dbengine.lucene.mlt.BigCompositeReader; import it.cavallium.dbengine.lucene.mlt.MultiMoreLikeThis; import it.cavallium.dbengine.lucene.searcher.GlobalQueryRewrite; @@ -53,8 +47,7 @@ import it.cavallium.dbengine.rpc.current.data.MemoryMappedFSDirectory; import it.cavallium.dbengine.rpc.current.data.NIOFSDirectory; import it.cavallium.dbengine.rpc.current.data.NRTCachingDirectory; import it.cavallium.dbengine.rpc.current.data.RAFFSDirectory; -import it.cavallium.dbengine.rpc.current.data.RocksDBSharedDirectory; -import it.cavallium.dbengine.rpc.current.data.RocksDBStandaloneDirectory; +import it.cavallium.dbengine.utils.DBException; import it.unimi.dsi.fastutil.ints.IntArrayList; import it.unimi.dsi.fastutil.ints.IntList; import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap; @@ -64,7 +57,6 @@ import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.file.Path; import java.time.Duration; -import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.List; @@ -73,6 +65,7 @@ import java.util.Map.Entry; import java.util.NoSuchElementException; import java.util.Optional; import java.util.stream.Collectors; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.analysis.Analyzer; @@ -119,11 +112,6 @@ import org.novasearch.lucene.search.similarities.BM25Similarity.BM25Model; import org.novasearch.lucene.search.similarities.LdpSimilarity; import org.novasearch.lucene.search.similarities.LtcSimilarity; import org.novasearch.lucene.search.similarities.RobertsonSimilarity; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Scheduler; -import reactor.core.scheduler.Schedulers; -import reactor.util.concurrent.Queues; public class LuceneUtils { @@ -173,13 +161,6 @@ public class LuceneUtils { Nullabledouble.empty() ); - private static final Scheduler LUCENE_COMMON_SCHEDULER = uninterruptibleScheduler(Schedulers.newBoundedElastic( - DEFAULT_BOUNDED_ELASTIC_SIZE, - DEFAULT_BOUNDED_ELASTIC_QUEUESIZE, - new LuceneThreadFactory("lucene-common").setDaemon(true).withGroup(new ThreadGroup("lucene-common")), - Math.toIntExact(Duration.ofHours(1).toSeconds()) - )); - static { var cas = new CharArraySet( EnglishAnalyzer.ENGLISH_STOP_WORDS_SET.size() + 
ItalianAnalyzer.getDefaultStopSet().size(), true); @@ -239,12 +220,12 @@ public class LuceneUtils { */ @NotNull public static IndexableField keyOfTopDoc(int docId, IndexReader indexReader, - String keyFieldName) throws IOException, NoSuchElementException { - if (Schedulers.isInNonBlockingThread()) { + String keyFieldName) throws NoSuchElementException, IOException { + if (LLUtils.isInNonBlockingThread()) { throw new UnsupportedOperationException("Called keyOfTopDoc in a nonblocking thread"); } if (docId > indexReader.maxDoc()) { - throw new IOException("Document " + docId + " > maxDoc (" +indexReader.maxDoc() + ")"); + throw new DBException("Document " + docId + " > maxDoc (" + indexReader.maxDoc() + ")"); } DocumentStoredSingleFieldVisitor visitor = new DocumentStoredSingleFieldVisitor(keyFieldName); indexReader.document(docId, visitor); @@ -270,10 +251,7 @@ public class LuceneUtils { public static ValueGetter, V> getAsyncDbValueGetterDeep( CompositeSnapshot snapshot, DatabaseMapDictionaryDeep, ? extends DatabaseStageMap>> dictionaryDeep) { - return entry -> Mono.usingWhen(dictionaryDeep.at(snapshot, entry.getKey()), - sub -> sub.getValue(snapshot, entry.getValue()), - LLUtils::finalizeResource - ); + return entry -> dictionaryDeep.at(snapshot, entry.getKey()).getValue(snapshot, entry.getValue()); } public static PerFieldAnalyzerWrapper toPerFieldAnalyzerWrapper(IndicizerAnalyzers indicizerAnalyzers) { @@ -330,7 +308,7 @@ public class LuceneUtils { int readLength, int usefulLength, long end) throws IOException { - if (Schedulers.isInNonBlockingThread()) { + if (LLUtils.isInNonBlockingThread()) { throw new UnsupportedOperationException("Called readInternalAligned in a nonblocking thread"); } int startBufPosition = b.position(); @@ -404,44 +382,22 @@ public class LuceneUtils { ); } - public static Flux convertHits(Flux hitsFlux, + public static Stream convertHits(Stream hitsStream, List indexSearchers, - @Nullable String keyFieldName, - boolean preserveOrder) { - if (preserveOrder) { - return hitsFlux - .publishOn(LuceneUtils.luceneScheduler()) - .mapNotNull(hit -> mapHitBlocking(hit, indexSearchers, keyFieldName)); - } else { - return hitsFlux - .buffer(Queues.XS_BUFFER_SIZE, () -> new ArrayList(Queues.XS_BUFFER_SIZE)) - .flatMap(shardHits -> Mono.fromCallable(() -> { - int i2 = 0; - int size = shardHits.size(); - for (int i = 0; i < size; i++) { - var el = mapHitBlocking((ScoreDoc) shardHits.get(i), indexSearchers, keyFieldName); - if (el != null) { - shardHits.set(i2, el); - i2++; - } - } - if (i2 < size) { - //noinspection unchecked - return (List) (List) shardHits.subList(0, i2); - } else { - //noinspection unchecked - return (List) (List) shardHits; - } - }).subscribeOn(luceneScheduler())) - .flatMapIterable(a -> a); - } + @Nullable String keyFieldName) { + return hitsStream.parallel().mapMulti((hit, sink) -> { + var mapped = mapHitBlocking(hit, indexSearchers, keyFieldName); + if (mapped != null) { + sink.accept(mapped); + } + }); } @Nullable private static LLKeyScore mapHitBlocking(ScoreDoc hit, List indexSearchers, @Nullable String keyFieldName) { - assert !Schedulers.isInNonBlockingThread(); + assert !LLUtils.isInNonBlockingThread(); int shardDocId = hit.doc; int shardIndex = hit.shardIndex; float score = hit.score; @@ -551,7 +507,7 @@ public class LuceneUtils { LocalQueryParams localQueryParams, Analyzer analyzer, Similarity similarity, - Multimap mltDocumentFieldsMultimap) throws IOException { + Multimap mltDocumentFieldsMultimap) { List indexSearchers = inputIndexSearchers.shards();
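
The new convertHits above collapses the two Reactor paths (the ordered publishOn + mapNotNull path and the buffered flatMap batch path) into one parallel Stream with mapMulti, which fuses the mapping step and the null filter into a single operation. A minimal, self-contained sketch of that pattern on plain JDK types — String/Integer stand in for ScoreDoc/LLKeyScore, and mapOrNull for the blocking mapHitBlocking lookup:

    import java.util.List;
    import java.util.stream.Stream;

    public class MapMultiSketch {

        // Stand-in for mapHitBlocking: may return null for hits that cannot be resolved.
        static Integer mapOrNull(String s) {
            try {
                return Integer.valueOf(s);
            } catch (NumberFormatException e) {
                return null;
            }
        }

        public static void main(String[] args) {
            List<Integer> out = Stream.of("1", "x", "3")
                    .parallel() // hits can be mapped concurrently on the common pool
                    .<Integer>mapMulti((s, sink) -> {
                        Integer mapped = mapOrNull(s);
                        if (mapped != null) {
                            sink.accept(mapped); // emit only hits that mapped successfully
                        }
                    })
                    .toList();
            System.out.println(out); // [1, 3]: toList keeps encounter order even under parallel()
        }
    }

Note the behavioral trade-off: the old preserveOrder flag is gone, so ordering now rests on the Stream's encounter-order semantics rather than an explicit switch. The same de-reactivization shows up in rewrite and rewriteMulti further down, where the Mono.usingWhen scaffolding collapses into plain method calls.
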
Query luceneAdditionalQuery = localQueryParams.query(); // Create the mutable version of the input @@ -612,24 +568,19 @@ public class LuceneUtils { return Math.abs(StringHelper.murmurhash3_x86_32(id.getValueBytesRef(), 7) % totalShards); } - public static CheckOutputDirectory createLuceneDirectory(LuceneDirectoryOptions directoryOptions, - String directoryName, - LuceneRocksDBManager rocksDBManager) + public static CheckOutputDirectory createLuceneDirectory(LuceneDirectoryOptions directoryOptions, String directoryName) throws IOException { - return new CheckOutputDirectory(createLuceneDirectoryInternal(directoryOptions, directoryName, rocksDBManager)); + return new CheckOutputDirectory(createLuceneDirectoryInternal(directoryOptions, directoryName)); } - private static Directory createLuceneDirectoryInternal(LuceneDirectoryOptions directoryOptions, - String directoryName, - LuceneRocksDBManager rocksDBManager) + private static Directory createLuceneDirectoryInternal(LuceneDirectoryOptions directoryOptions, String directoryName) throws IOException { Directory directory; if (directoryOptions instanceof ByteBuffersDirectory) { directory = new org.apache.lucene.store.ByteBuffersDirectory(); } else if (directoryOptions instanceof DirectIOFSDirectory directIOFSDirectory) { FSDirectory delegateDirectory = (FSDirectory) createLuceneDirectoryInternal(directIOFSDirectory.delegate(), - directoryName, - rocksDBManager + directoryName ); if (Constants.LINUX || Constants.MAC_OS_X) { try { @@ -653,27 +604,11 @@ public class LuceneUtils { } else if (directoryOptions instanceof RAFFSDirectory rafFsDirectory) { directory = new RAFDirectory(rafFsDirectory.managedPath().resolve(directoryName + ".lucene.db")); } else if (directoryOptions instanceof NRTCachingDirectory nrtCachingDirectory) { - var delegateDirectory = createLuceneDirectoryInternal(nrtCachingDirectory.delegate(), directoryName, rocksDBManager); + var delegateDirectory = createLuceneDirectoryInternal(nrtCachingDirectory.delegate(), directoryName); directory = new org.apache.lucene.store.NRTCachingDirectory(delegateDirectory, toMB(nrtCachingDirectory.maxMergeSizeBytes()), toMB(nrtCachingDirectory.maxCachedBytes()) ); - } else if (directoryOptions instanceof RocksDBSharedDirectory rocksDBSharedDirectory) { - var dbInstance = rocksDBManager.getOrCreate(rocksDBSharedDirectory.managedPath()); - directory = new RocksdbDirectory(rocksDBManager.getAllocator(), - dbInstance.db(), - dbInstance.handles(), - directoryName, - rocksDBSharedDirectory.blockSize() - ); - } else if (directoryOptions instanceof RocksDBStandaloneDirectory rocksDBStandaloneDirectory) { - var dbInstance = rocksDBManager.getOrCreate(rocksDBStandaloneDirectory.managedPath()); - directory = new RocksdbDirectory(rocksDBManager.getAllocator(), - dbInstance.db(), - dbInstance.handles(), - directoryName, - rocksDBStandaloneDirectory.blockSize() - ); } else { throw new UnsupportedOperationException("Unsupported directory: " + directoryName + ", " + directoryOptions); } @@ -693,10 +628,6 @@ public class LuceneUtils { return Optional.of(raffsDirectory.managedPath()); } else if (directoryOptions instanceof NRTCachingDirectory nrtCachingDirectory) { return getManagedPath(nrtCachingDirectory.delegate()); - } else if (directoryOptions instanceof RocksDBStandaloneDirectory rocksDBStandaloneDirectory) { - return Optional.of(rocksDBStandaloneDirectory.managedPath()); - } else if (directoryOptions instanceof RocksDBSharedDirectory rocksDBSharedDirectory) { - return 
Optional.of(rocksDBSharedDirectory.managedPath()); } else { throw new UnsupportedOperationException("Unsupported directory: " + directoryOptions); } @@ -715,10 +646,6 @@ public class LuceneUtils { return false; } else if (directoryOptions instanceof NRTCachingDirectory nrtCachingDirectory) { return getIsFilesystemCompressed(nrtCachingDirectory.delegate()); - } else if (directoryOptions instanceof RocksDBStandaloneDirectory) { - return true; - } else if (directoryOptions instanceof RocksDBSharedDirectory) { - return true; } else { throw new UnsupportedOperationException("Unsupported directory: " + directoryOptions); } @@ -786,35 +713,26 @@ public class LuceneUtils { /** * Rewrite a lucene query of a local searcher, then call the local searcher again with the rewritten query */ - public static Mono rewrite(LocalSearcher localSearcher, - Mono indexSearcherMono, + public static LuceneSearchResult rewrite(LocalSearcher localSearcher, + LLIndexSearcher indexSearcher, LocalQueryParams queryParams, String keyFieldName, GlobalQueryRewrite transformer) { - return Mono.usingWhen(indexSearcherMono.map(LLIndexSearchers::unsharded), indexSearchers -> Mono - .fromCallable(() -> transformer.rewrite(indexSearchers, queryParams)) - .transform(LuceneUtils::scheduleLucene) - .flatMap(queryParams2 -> - localSearcher.collect(indexSearcherMono, queryParams2, keyFieldName, NO_REWRITE)), - LLUtils::finalizeResource); + var indexSearchers = LLIndexSearchers.unsharded(indexSearcher); + var queryParams2 = transformer.rewrite(indexSearchers, queryParams); + return localSearcher.collect(indexSearcher, queryParams2, keyFieldName, NO_REWRITE); } /** * Rewrite a lucene query of a multi searcher, then call the multi searcher again with the rewritten query */ - public static Mono rewriteMulti(MultiSearcher multiSearcher, - Mono indexSearchersMono, + public static LuceneSearchResult rewriteMulti(MultiSearcher multiSearcher, + LLIndexSearchers indexSearchers, LocalQueryParams queryParams, String keyFieldName, GlobalQueryRewrite transformer) { - return Mono.usingWhen(indexSearchersMono, - indexSearchers -> Mono - .fromCallable(() -> transformer.rewrite(indexSearchers, queryParams)) - .transform(LuceneUtils::scheduleLucene) - .flatMap(queryParams2 -> - multiSearcher.collectMulti(indexSearchersMono, queryParams2, keyFieldName, NO_REWRITE)), - LLUtils::finalizeResource - ); + var queryParams2 = transformer.rewrite(indexSearchers, queryParams); + return multiSearcher.collectMulti(indexSearchers, queryParams2, keyFieldName, NO_REWRITE); } public static void checkLuceneThread() { @@ -843,16 +761,4 @@ public class LuceneUtils { var thread = Thread.currentThread(); return thread instanceof LuceneThread || thread instanceof LuceneMergeThread; } - - public static Scheduler luceneScheduler() { - return LUCENE_COMMON_SCHEDULER; - } - - public static Mono scheduleLucene(Mono prev) { - return prev.subscribeOn(LUCENE_COMMON_SCHEDULER).publishOn(Schedulers.parallel()); - } - - public static Flux scheduleLucene(Flux prev) { - return prev.subscribeOn(LUCENE_COMMON_SCHEDULER).publishOn(Schedulers.parallel()); - } } diff --git a/src/main/java/it/cavallium/dbengine/lucene/PageLimits.java b/src/main/java/it/cavallium/dbengine/lucene/PageLimits.java index 790acf1..8664670 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/PageLimits.java +++ b/src/main/java/it/cavallium/dbengine/lucene/PageLimits.java @@ -1,7 +1,5 @@ package it.cavallium.dbengine.lucene; -import it.cavallium.dbengine.lucene.LuceneUtils; - public interface PageLimits { int 
DEFAULT_MIN_ITEMS_PER_PAGE = 10; diff --git a/src/main/java/it/cavallium/dbengine/lucene/PriorityQueue.java b/src/main/java/it/cavallium/dbengine/lucene/PriorityQueue.java index c744129..9b8e8ff 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/PriorityQueue.java +++ b/src/main/java/it/cavallium/dbengine/lucene/PriorityQueue.java @@ -1,7 +1,6 @@ package it.cavallium.dbengine.lucene; import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; public interface PriorityQueue extends ResourceIterable, DiscardingCloseable { diff --git a/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparator.java b/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparator.java index 54193c0..c401ae1 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparator.java +++ b/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparator.java @@ -1,5 +1,6 @@ package it.cavallium.dbengine.lucene; +import it.cavallium.dbengine.utils.LFSR.LFSRIterator; import java.io.IOException; import java.math.BigInteger; import org.apache.lucene.index.LeafReaderContext; @@ -8,7 +9,6 @@ import org.apache.lucene.search.LeafFieldComparator; import org.apache.lucene.search.Scorable; import org.apache.lucene.search.ScoreCachingWrappingScorer; import org.jetbrains.annotations.NotNull; -import it.cavallium.dbengine.utils.LFSR.LFSRIterator; //todo: fix public class RandomFieldComparator extends FieldComparator implements LeafFieldComparator { @@ -31,14 +31,14 @@ public class RandomFieldComparator extends FieldComparator implements Lea } @Override - public int compareBottom(int doc) throws IOException { + public int compareBottom(int doc) { float score = scorer.score(); assert !Float.isNaN(score); return Float.compare(score, bottom); } @Override - public void copy(int slot, int doc) throws IOException { + public void copy(int slot, int doc) { scores[slot] = scorer.score(); assert !Float.isNaN(scores[slot]); } @@ -93,7 +93,7 @@ public class RandomFieldComparator extends FieldComparator implements Lea } @Override - public int compareTop(int doc) throws IOException { + public int compareTop(int doc) { float docValue = scorer.score(); assert !Float.isNaN(docValue); return Float.compare(docValue, topValue); diff --git a/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparatorSource.java b/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparatorSource.java index ed1f6de..9c6cbe1 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparatorSource.java +++ b/src/main/java/it/cavallium/dbengine/lucene/RandomFieldComparatorSource.java @@ -1,9 +1,9 @@ package it.cavallium.dbengine.lucene; +import it.cavallium.dbengine.utils.LFSR; import java.util.concurrent.ThreadLocalRandom; import org.apache.lucene.search.FieldComparator; import org.apache.lucene.search.FieldComparatorSource; -import it.cavallium.dbengine.utils.LFSR; public class RandomFieldComparatorSource extends FieldComparatorSource { diff --git a/src/main/java/it/cavallium/dbengine/lucene/ResourceIterable.java b/src/main/java/it/cavallium/dbengine/lucene/ResourceIterable.java index 4457baf..8c7df87 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/ResourceIterable.java +++ b/src/main/java/it/cavallium/dbengine/lucene/ResourceIterable.java @@ -1,23 +1,19 @@ package it.cavallium.dbengine.lucene; import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import java.io.Closeable; -import java.util.Iterator; -import 
org.jetbrains.annotations.NotNull; -import reactor.core.publisher.Flux; +import java.util.stream.Stream; public interface ResourceIterable extends DiscardingCloseable { /** * Iterate this PriorityQueue */ - Flux iterate(); + Stream iterate(); /** * Iterate this PriorityQueue */ - default Flux iterate(long skips) { + default Stream iterate(long skips) { if (skips == 0) { return iterate(); } else { diff --git a/src/main/java/it/cavallium/dbengine/lucene/analyzer/ItaEngStopWords.java b/src/main/java/it/cavallium/dbengine/lucene/analyzer/ItaEngStopWords.java index f65dcb8..66058cc 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/analyzer/ItaEngStopWords.java +++ b/src/main/java/it/cavallium/dbengine/lucene/analyzer/ItaEngStopWords.java @@ -2,7 +2,6 @@ package it.cavallium.dbengine.lucene.analyzer; import java.util.ArrayList; import java.util.Arrays; -import java.util.EventListener; import java.util.List; import org.apache.lucene.analysis.CharArraySet; diff --git a/src/main/java/it/cavallium/dbengine/lucene/analyzer/LegacyWordAnalyzer.java b/src/main/java/it/cavallium/dbengine/lucene/analyzer/LegacyWordAnalyzer.java index e53422c..24c5149 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/analyzer/LegacyWordAnalyzer.java +++ b/src/main/java/it/cavallium/dbengine/lucene/analyzer/LegacyWordAnalyzer.java @@ -2,7 +2,6 @@ package it.cavallium.dbengine.lucene.analyzer; import com.ibm.icu.text.Collator; import com.ibm.icu.util.ULocale; -import it.cavallium.dbengine.lucene.LuceneUtils; import java.util.Collections; import java.util.HashSet; import java.util.Set; @@ -17,7 +16,6 @@ import org.apache.lucene.analysis.Tokenizer; import org.apache.lucene.analysis.en.EnglishPossessiveFilter; import org.apache.lucene.analysis.en.KStemFilter; import org.apache.lucene.analysis.icu.ICUCollationAttributeFactory; -import org.apache.lucene.analysis.icu.ICUCollationKeyAnalyzer; import org.apache.lucene.analysis.miscellaneous.ASCIIFoldingFilter; import org.apache.lucene.analysis.miscellaneous.LengthFilter; import org.apache.lucene.analysis.standard.StandardTokenizer; diff --git a/src/main/java/it/cavallium/dbengine/lucene/analyzer/NCharGramAnalyzer.java b/src/main/java/it/cavallium/dbengine/lucene/analyzer/NCharGramAnalyzer.java index d971e64..2e7212d 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/analyzer/NCharGramAnalyzer.java +++ b/src/main/java/it/cavallium/dbengine/lucene/analyzer/NCharGramAnalyzer.java @@ -1,13 +1,8 @@ package it.cavallium.dbengine.lucene.analyzer; -import it.cavallium.dbengine.lucene.LuceneUtils; import org.apache.lucene.analysis.Analyzer; -import org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.core.KeywordTokenizer; -import org.apache.lucene.analysis.ngram.NGramTokenFilter; import org.apache.lucene.analysis.ngram.NGramTokenizer; -import org.apache.lucene.analysis.standard.StandardTokenizer; public class NCharGramAnalyzer extends Analyzer { diff --git a/src/main/java/it/cavallium/dbengine/lucene/analyzer/NCharGramEdgeAnalyzer.java b/src/main/java/it/cavallium/dbengine/lucene/analyzer/NCharGramEdgeAnalyzer.java index 07eb4af..f3c55ba 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/analyzer/NCharGramEdgeAnalyzer.java +++ b/src/main/java/it/cavallium/dbengine/lucene/analyzer/NCharGramEdgeAnalyzer.java @@ -1,14 +1,8 @@ package it.cavallium.dbengine.lucene.analyzer; -import it.cavallium.dbengine.lucene.LuceneUtils; import org.apache.lucene.analysis.Analyzer; -import 
org.apache.lucene.analysis.TokenStream; import org.apache.lucene.analysis.Tokenizer; -import org.apache.lucene.analysis.core.KeywordTokenizer; -import org.apache.lucene.analysis.ngram.EdgeNGramTokenFilter; import org.apache.lucene.analysis.ngram.EdgeNGramTokenizer; -import org.apache.lucene.analysis.ngram.NGramTokenizer; -import org.apache.lucene.analysis.standard.StandardTokenizer; public class NCharGramEdgeAnalyzer extends Analyzer { diff --git a/src/main/java/it/cavallium/dbengine/lucene/analyzer/WordAnalyzer.java b/src/main/java/it/cavallium/dbengine/lucene/analyzer/WordAnalyzer.java index 0aeab42..5aa50b1 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/analyzer/WordAnalyzer.java +++ b/src/main/java/it/cavallium/dbengine/lucene/analyzer/WordAnalyzer.java @@ -6,7 +6,6 @@ import org.apache.lucene.analysis.Analyzer; import org.apache.lucene.analysis.LowerCaseFilter; import org.apache.lucene.analysis.StopFilter; import org.apache.lucene.analysis.TokenStream; -import org.apache.lucene.analysis.en.EnglishAnalyzer; import org.apache.lucene.analysis.en.EnglishMinimalStemFilter; import org.apache.lucene.analysis.en.PorterStemFilter; import org.apache.lucene.analysis.icu.ICUCollationAttributeFactory; @@ -14,7 +13,6 @@ import org.apache.lucene.analysis.icu.ICUFoldingFilter; import org.apache.lucene.analysis.icu.segmentation.DefaultICUTokenizerConfig; import org.apache.lucene.analysis.icu.segmentation.ICUTokenizer; import org.apache.lucene.analysis.it.ItalianLightStemFilter; -import org.apache.lucene.analysis.miscellaneous.SetKeywordMarkerFilter; import org.apache.lucene.analysis.standard.StandardAnalyzer; import org.apache.lucene.analysis.standard.StandardTokenizer; import org.apache.lucene.analysis.util.ElisionFilter; diff --git a/src/main/java/it/cavallium/dbengine/lucene/collector/Buckets.java b/src/main/java/it/cavallium/dbengine/lucene/collector/Buckets.java index 8672f8d..60a867e 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/collector/Buckets.java +++ b/src/main/java/it/cavallium/dbengine/lucene/collector/Buckets.java @@ -3,7 +3,6 @@ package it.cavallium.dbengine.lucene.collector; import it.unimi.dsi.fastutil.doubles.DoubleArrayList; import java.util.ArrayList; import java.util.List; -import java.util.Map; public record Buckets(List seriesValues, DoubleArrayList totals) { diff --git a/src/main/java/it/cavallium/dbengine/lucene/collector/CollectorMultiManager.java b/src/main/java/it/cavallium/dbengine/lucene/collector/CollectorMultiManager.java index 64eee04..a17f16d 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/collector/CollectorMultiManager.java +++ b/src/main/java/it/cavallium/dbengine/lucene/collector/CollectorMultiManager.java @@ -3,11 +3,10 @@ package it.cavallium.dbengine.lucene.collector; import java.io.IOException; import java.util.List; import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.TopDocs; public interface CollectorMultiManager { ScoreMode scoreMode(); - U reduce(List results) throws IOException; + U reduce(List results); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/collector/DecimalBucketMultiCollectorManager.java b/src/main/java/it/cavallium/dbengine/lucene/collector/DecimalBucketMultiCollectorManager.java index 2715a56..8ed5a93 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/collector/DecimalBucketMultiCollectorManager.java +++ b/src/main/java/it/cavallium/dbengine/lucene/collector/DecimalBucketMultiCollectorManager.java @@ -114,7 +114,7 @@ public class DecimalBucketMultiCollectorManager implements 
CollectorMultiManager return new double[buckets]; } - public Buckets search(IndexSearcher indexSearcher) throws IOException { + public Buckets search(IndexSearcher indexSearcher) { Query query; if (USE_SINGLE_FACET_COLLECTOR && normalizationQuery != null) { query = normalizationQuery; diff --git a/src/main/java/it/cavallium/dbengine/lucene/collector/FacetsCollector.java b/src/main/java/it/cavallium/dbengine/lucene/collector/FacetsCollector.java index 2888e22..0135e3b 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/collector/FacetsCollector.java +++ b/src/main/java/it/cavallium/dbengine/lucene/collector/FacetsCollector.java @@ -17,7 +17,7 @@ public interface FacetsCollector extends Collector { } @Override - public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { + public LeafCollector getLeafCollector(LeafReaderContext context) { return facetsCollector.getLeafCollector(context); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/collector/FastFacetsCollectorManager.java b/src/main/java/it/cavallium/dbengine/lucene/collector/FastFacetsCollectorManager.java index b1c53db..478c7b2 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/collector/FastFacetsCollectorManager.java +++ b/src/main/java/it/cavallium/dbengine/lucene/collector/FastFacetsCollectorManager.java @@ -4,7 +4,6 @@ import it.cavallium.dbengine.lucene.IntSmear; import it.unimi.dsi.fastutil.ints.IntHash; import java.io.IOException; import java.util.Collection; -import java.util.concurrent.atomic.AtomicLong; import org.apache.lucene.facet.FacetsCollectorManager; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.CollectorManager; @@ -31,7 +30,7 @@ public class FastFacetsCollectorManager implements CollectorManager collectors) throws IOException { + public FacetsCollector reduce(Collection collectors) { return FacetsCollector.wrap(facetsCollectorManager.reduce(collectors .stream() .map(facetsCollector -> facetsCollector.getLuceneFacetsCollector()) @@ -62,23 +61,23 @@ public class FastFacetsCollectorManager implements CollectorManager extends SimpleResource implements FullDocs, DiscardingCloseable { @@ -23,12 +19,12 @@ public class FullFieldDocs extends SimpleResource implements Fu } @Override - public Flux iterate() { + public Stream iterate() { return fullDocs.iterate(); } @Override - public Flux iterate(long skips) { + public Stream iterate(long skips) { return fullDocs.iterate(skips); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/collector/ScoringShardsCollectorMultiManager.java b/src/main/java/it/cavallium/dbengine/lucene/collector/ScoringShardsCollectorMultiManager.java index 876a09e..f7f204b 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/collector/ScoringShardsCollectorMultiManager.java +++ b/src/main/java/it/cavallium/dbengine/lucene/collector/ScoringShardsCollectorMultiManager.java @@ -1,12 +1,10 @@ package it.cavallium.dbengine.lucene.collector; -import static it.cavallium.dbengine.lucene.searcher.CurrentPageInfo.TIE_BREAKER; - +import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.lucene.LuceneUtils; import java.io.IOException; import java.util.Collection; import java.util.List; -import java.util.Objects; import org.apache.commons.lang3.NotImplementedException; import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; @@ -19,7 +17,6 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopFieldDocs; 
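
ResourceIterable and FullFieldDocs above now hand out java.util.stream.Stream instead of Flux. A minimal sketch of the contract and of the delegation pattern FullFieldDocs follows; since the hunk truncates the else branch of iterate(long), implementing it with Stream.skip is an assumption, and the plain close() stands in for DiscardingCloseable:

    import java.util.stream.Stream;

    interface ResourceIterableSketch<T> {

        // Iterate this resource once; the caller is responsible for closing it.
        Stream<T> iterate();

        // Skipping delegates to Stream.skip (assumed: the else branch is cut off above).
        default Stream<T> iterate(long skips) {
            return skips == 0 ? iterate() : iterate().skip(skips);
        }

        void close();
    }

    // FullFieldDocs-style wrapper: both iterate overloads forward to the inner resource.
    record DelegatingDocs<T>(ResourceIterableSketch<T> inner) implements ResourceIterableSketch<T> {

        @Override
        public Stream<T> iterate() {
            return inner.iterate();
        }

        @Override
        public Stream<T> iterate(long skips) {
            return inner.iterate(skips);
        }

        @Override
        public void close() {
            inner.close();
        }
    }
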
import org.jetbrains.annotations.Nullable; -import reactor.core.scheduler.Schedulers; public class ScoringShardsCollectorMultiManager implements CollectorMultiManager { @@ -87,13 +84,13 @@ public class ScoringShardsCollectorMultiManager implements CollectorMultiManager public CollectorManager get(IndexSearcher indexSearcher, int shardIndex) { return new CollectorManager<>() { @Override - public TopFieldCollector newCollector() throws IOException { + public TopFieldCollector newCollector() { return sharedCollectorManager.newCollector(); } @Override - public TopDocs reduce(Collection collectors) throws IOException { - if (Schedulers.isInNonBlockingThread()) { + public TopDocs reduce(Collection collectors) { + if (LLUtils.isInNonBlockingThread()) { throw new UnsupportedOperationException("Called reduce in a nonblocking thread"); } if (USE_CLASSIC_REDUCE) { diff --git a/src/main/java/it/cavallium/dbengine/lucene/collector/TopDocsCollectorMultiManager.java b/src/main/java/it/cavallium/dbengine/lucene/collector/TopDocsCollectorMultiManager.java index 3a24e94..e97b87a 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/collector/TopDocsCollectorMultiManager.java +++ b/src/main/java/it/cavallium/dbengine/lucene/collector/TopDocsCollectorMultiManager.java @@ -3,12 +3,10 @@ package it.cavallium.dbengine.lucene.collector; import static it.cavallium.dbengine.lucene.searcher.PaginationInfo.ALLOW_UNSCORED_PAGINATION_MODE; import it.cavallium.dbengine.lucene.LuceneUtils; -import it.cavallium.dbengine.lucene.collector.UnscoredCollector; import java.io.IOException; import java.util.Collection; import java.util.List; import org.apache.commons.lang3.NotImplementedException; -import org.apache.lucene.search.Collector; import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; @@ -57,7 +55,7 @@ public class TopDocsCollectorMultiManager implements CollectorMultiManager, TopDocs> get(@NotNull Query query, IndexSearcher indexSearcher) { return new CollectorManager<>() { @Override - public TopDocsCollector newCollector() throws IOException { + public TopDocsCollector newCollector() { TopDocsCollector collector; if (after != null && !allowPagination) { throw new IllegalArgumentException("\"allowPagination\" is false, but \"after\" is set"); @@ -85,7 +83,7 @@ public class TopDocsCollectorMultiManager implements CollectorMultiManager> collectors) throws IOException { + public TopDocs reduce(Collection> collectors) { TopDocs[] docsArray; boolean needsSort = luceneSort != null; boolean needsScores = luceneSort != null && luceneSort.needsScores(); diff --git a/src/main/java/it/cavallium/dbengine/lucene/collector/TotalHitCountCollectorManager.java b/src/main/java/it/cavallium/dbengine/lucene/collector/TotalHitCountCollectorManager.java index aaab68d..9a2fba8 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/collector/TotalHitCountCollectorManager.java +++ b/src/main/java/it/cavallium/dbengine/lucene/collector/TotalHitCountCollectorManager.java @@ -28,7 +28,7 @@ public class TotalHitCountCollectorManager implements CollectorManager collectors) throws IOException { + public Long reduce(Collection collectors) { long totalHits = 0; for (var collector : collectors) { totalHits += collector.getTotalHits(); @@ -48,7 +48,7 @@ public class TotalHitCountCollectorManager implements CollectorManager implements LeafCollector { private final IntArrayList docIds = new IntArrayList(); diff --git 
a/src/main/java/it/cavallium/dbengine/lucene/comparators/DoubleComparator.java b/src/main/java/it/cavallium/dbengine/lucene/comparators/DoubleComparator.java deleted file mode 100644 index 897e40f..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/comparators/DoubleComparator.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package it.cavallium.dbengine.lucene.comparators; - -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.DoubleCodec; -import it.cavallium.dbengine.lucene.IArray; -import it.cavallium.dbengine.lucene.HugePqArray; -import java.io.IOException; -import org.apache.lucene.document.DoublePoint; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.comparators.NumericComparator; - -/** - * Comparator based on {@link Double#compare} for {@code numHits}. This comparator provides a - * skipping functionality - an iterator that can skip over non-competitive documents. - * Based on {@link org.apache.lucene.search.comparators.DoubleComparator} - */ -public class DoubleComparator extends NumericComparator implements DiscardingCloseable { - private final IArray values; - protected double topValue; - protected double bottom; - - public DoubleComparator(LLTempHugePqEnv env, - int numHits, String field, Double missingValue, boolean reverse, boolean enableSkipping) { - super(field, missingValue != null ? 
missingValue : 0.0, reverse, enableSkipping, Double.BYTES); - values = new HugePqArray<>(env, new DoubleCodec(), numHits, 0d); - } - - @Override - public int compare(int slot1, int slot2) { - var value1 = values.get(slot1); - var value2 = values.get(slot2); - assert value1 != null : "Missing value for slot1: " + slot1; - assert value2 != null : "Missing value for slot2: " + slot2; - return Double.compare(value1, value2); - } - - @Override - public void setTopValue(Double value) { - super.setTopValue(value); - topValue = value; - } - - @Override - public Double value(int slot) { - return values.getOrDefault(slot, 0d); - } - - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new DoubleLeafComparator(context); - } - - @Override - public void close() { - if (values instanceof SafeCloseable closeable) { - closeable.close(); - } - } - - /** Leaf comparator for {@link DoubleComparator} that provides skipping functionality */ - public class DoubleLeafComparator extends NumericLeafComparator { - - public DoubleLeafComparator(LeafReaderContext context) throws IOException { - super(context); - } - - private double getValueForDoc(int doc) throws IOException { - if (docValues.advanceExact(doc)) { - return Double.longBitsToDouble(docValues.longValue()); - } else { - return missingValue; - } - } - - @Override - public void setBottom(int slot) throws IOException { - bottom = values.getOrDefault(slot, 0d); - super.setBottom(slot); - } - - @Override - public int compareBottom(int doc) throws IOException { - return Double.compare(bottom, getValueForDoc(doc)); - } - - @Override - public int compareTop(int doc) throws IOException { - return Double.compare(topValue, getValueForDoc(doc)); - } - - @Override - public void copy(int slot, int doc) throws IOException { - values.set(slot, getValueForDoc(doc)); - super.copy(slot, doc); - } - - @Override - protected boolean isMissingValueCompetitive() { - int result = Double.compare(missingValue, bottom); - return reverse ? (result >= 0) : (result <= 0); - } - - @Override - protected void encodeBottom(byte[] packedValue) { - DoublePoint.encodeDimension(bottom, packedValue, 0); - } - - @Override - protected void encodeTop(byte[] packedValue) { - DoublePoint.encodeDimension(topValue, packedValue, 0); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/comparators/FloatComparator.java b/src/main/java/it/cavallium/dbengine/lucene/comparators/FloatComparator.java deleted file mode 100644 index ce4ae9c..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/comparators/FloatComparator.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package it.cavallium.dbengine.lucene.comparators; - -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.FloatCodec; -import it.cavallium.dbengine.lucene.IArray; -import it.cavallium.dbengine.lucene.HugePqArray; -import java.io.IOException; -import org.apache.lucene.document.FloatPoint; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.comparators.NumericComparator; - -/** - * Comparator based on {@link Float#compare} for {@code numHits}. This comparator provides a - * skipping functionality – an iterator that can skip over non-competitive documents. - * Based on {@link org.apache.lucene.search.comparators.FloatComparator} - */ -public class FloatComparator extends NumericComparator implements DiscardingCloseable { - private final IArray values; - protected float topValue; - protected float bottom; - - public FloatComparator(LLTempHugePqEnv env, - int numHits, String field, Float missingValue, boolean reverse, boolean enableSkipping) { - super(field, missingValue != null ? missingValue : 0.0f, reverse, enableSkipping, Float.BYTES); - values = new HugePqArray<>(env, new FloatCodec(), numHits, 0f); - } - - @Override - public int compare(int slot1, int slot2) { - var value1 = values.get(slot1); - var value2 = values.get(slot2); - assert value1 != null : "Missing value for slot1: " + slot1; - assert value2 != null : "Missing value for slot2: " + slot2; - return Float.compare(value1, value2); - } - - @Override - public void setTopValue(Float value) { - super.setTopValue(value); - topValue = value; - } - - @Override - public Float value(int slot) { - return values.getOrDefault(slot, 0f); - } - - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new FloatLeafComparator(context); - } - - @Override - public void close() { - if (values instanceof SafeCloseable closeable) { - closeable.close(); - } - } - - /** Leaf comparator for {@link FloatComparator} that provides skipping functionality */ - public class FloatLeafComparator extends NumericLeafComparator { - - public FloatLeafComparator(LeafReaderContext context) throws IOException { - super(context); - } - - private float getValueForDoc(int doc) throws IOException { - if (docValues.advanceExact(doc)) { - return Float.intBitsToFloat((int) docValues.longValue()); - } else { - return missingValue; - } - } - - @Override - public void setBottom(int slot) throws IOException { - bottom = values.getOrDefault(slot, 0f); - super.setBottom(slot); - } - - @Override - public int compareBottom(int doc) throws IOException { - return Float.compare(bottom, getValueForDoc(doc)); - } - - @Override - public int compareTop(int doc) throws IOException { - return Float.compare(topValue, getValueForDoc(doc)); - } - - @Override - public void copy(int slot, int doc) throws IOException { - values.set(slot, getValueForDoc(doc)); - super.copy(slot, doc); - } - - @Override - protected boolean isMissingValueCompetitive() { - int result = Float.compare(missingValue, bottom); - return reverse ? 
(result >= 0) : (result <= 0); - } - - @Override - protected void encodeBottom(byte[] packedValue) { - FloatPoint.encodeDimension(bottom, packedValue, 0); - } - - @Override - protected void encodeTop(byte[] packedValue) { - FloatPoint.encodeDimension(topValue, packedValue, 0); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/comparators/IntComparator.java b/src/main/java/it/cavallium/dbengine/lucene/comparators/IntComparator.java deleted file mode 100644 index 40dd6fe..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/comparators/IntComparator.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package it.cavallium.dbengine.lucene.comparators; - -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.IArray; -import it.cavallium.dbengine.lucene.IntCodec; -import it.cavallium.dbengine.lucene.HugePqArray; -import java.io.IOException; -import org.apache.lucene.document.IntPoint; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.comparators.NumericComparator; - -/** - * Comparator based on {@link Integer#compare} for {@code numHits}. This comparator provides a - * skipping functionality – an iterator that can skip over non-competitive documents. - * Based on {@link org.apache.lucene.search.comparators.IntComparator} - */ -public class IntComparator extends NumericComparator implements DiscardingCloseable { - private final IArray values; - protected int topValue; - protected int bottom; - - public IntComparator(LLTempHugePqEnv env, - int numHits, String field, Integer missingValue, boolean reverse, boolean enableSkipping) { - super(field, missingValue != null ? 
missingValue : 0, reverse, enableSkipping, Integer.BYTES); - values = new HugePqArray<>(env, new IntCodec(), numHits, 0); - } - - @Override - public int compare(int slot1, int slot2) { - var value1 = values.get(slot1); - var value2 = values.get(slot2); - assert value1 != null : "Missing value for slot1: " + slot1; - assert value2 != null : "Missing value for slot2: " + slot2; - return Integer.compare(value1, value2); - } - - @Override - public void setTopValue(Integer value) { - super.setTopValue(value); - topValue = value; - } - - @Override - public Integer value(int slot) { - return values.getOrDefault(slot, 0); - } - - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new IntLeafComparator(context); - } - - @Override - public void close() { - if (values instanceof SafeCloseable closeable) { - closeable.close(); - } - } - - /** Leaf comparator for {@link IntComparator} that provides skipping functionality */ - public class IntLeafComparator extends NumericLeafComparator { - - public IntLeafComparator(LeafReaderContext context) throws IOException { - super(context); - } - - private int getValueForDoc(int doc) throws IOException { - if (docValues.advanceExact(doc)) { - return (int) docValues.longValue(); - } else { - return missingValue; - } - } - - @Override - public void setBottom(int slot) throws IOException { - bottom = values.getOrDefault(slot, 0); - super.setBottom(slot); - } - - @Override - public int compareBottom(int doc) throws IOException { - return Integer.compare(bottom, getValueForDoc(doc)); - } - - @Override - public int compareTop(int doc) throws IOException { - return Integer.compare(topValue, getValueForDoc(doc)); - } - - @Override - public void copy(int slot, int doc) throws IOException { - values.set(slot, getValueForDoc(doc)); - super.copy(slot, doc); - } - - @Override - protected boolean isMissingValueCompetitive() { - int result = Integer.compare(missingValue, bottom); - // in reverse (desc) sort missingValue is competitive when it's greater or equal to bottom, - // in asc sort missingValue is competitive when it's smaller or equal to bottom - return reverse ? (result >= 0) : (result <= 0); - } - - @Override - protected void encodeBottom(byte[] packedValue) { - IntPoint.encodeDimension(bottom, packedValue, 0); - } - - @Override - protected void encodeTop(byte[] packedValue) { - IntPoint.encodeDimension(topValue, packedValue, 0); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/comparators/LongComparator.java b/src/main/java/it/cavallium/dbengine/lucene/comparators/LongComparator.java deleted file mode 100644 index 99ed24e..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/comparators/LongComparator.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package it.cavallium.dbengine.lucene.comparators; - -import static java.util.Objects.requireNonNull; - -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.HugePqArray; -import it.cavallium.dbengine.lucene.IArray; -import it.cavallium.dbengine.lucene.LongCodec; -import java.io.IOException; -import java.util.Objects; -import org.apache.lucene.document.LongPoint; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.comparators.NumericComparator; - -/** - * Comparator based on {@link Long#compare} for {@code numHits}. This comparator provides a skipping - * functionality – an iterator that can skip over non-competitive documents. - * Based on {@link org.apache.lucene.search.comparators.LongComparator} - */ -public class LongComparator extends NumericComparator implements DiscardingCloseable { - private final IArray values; - protected long topValue; - protected long bottom; - - public LongComparator(LLTempHugePqEnv env, - int numHits, String field, Long missingValue, boolean reverse, boolean enableSkipping) { - super(field, missingValue != null ? missingValue : 0L, reverse, enableSkipping, Long.BYTES); - values = new HugePqArray<>(env, new LongCodec(), numHits, 0L); - } - - @Override - public int compare(int slot1, int slot2) { - var value1 = values.get(slot1); - var value2 = values.get(slot2); - assert value1 != null : "Missing value for slot1: " + slot1; - assert value2 != null : "Missing value for slot2: " + slot2; - return Long.compare(value1, value2); - } - - @Override - public void setTopValue(Long value) { - super.setTopValue(value); - topValue = value; - } - - @Override - public Long value(int slot) { - return values.getOrDefault(slot, 0L); - } - - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - return new LongLeafComparator(context); - } - - @Override - public void close() { - if (values instanceof SafeCloseable closeable) { - closeable.close(); - } - } - - /** Leaf comparator for {@link LongComparator} that provides skipping functionality */ - public class LongLeafComparator extends NumericLeafComparator { - - public LongLeafComparator(LeafReaderContext context) throws IOException { - super(context); - } - - private long getValueForDoc(int doc) throws IOException { - if (docValues.advanceExact(doc)) { - return docValues.longValue(); - } else { - return missingValue; - } - } - - @Override - public void setBottom(int slot) throws IOException { - bottom = values.getOrDefault(slot, 0L); - super.setBottom(slot); - } - - @Override - public int compareBottom(int doc) throws IOException { - return Long.compare(bottom, getValueForDoc(doc)); - } - - @Override - public int compareTop(int doc) throws IOException { - return Long.compare(topValue, getValueForDoc(doc)); - } - - @Override - public void copy(int slot, int doc) throws IOException { - values.set(slot, getValueForDoc(doc)); - super.copy(slot, doc); - } - - @Override - protected boolean isMissingValueCompetitive() { - int result = Long.compare(missingValue, bottom); - // in reverse (desc) sort missingValue is competitive when it's greater or equal to bottom, - // in asc sort missingValue is competitive when it's smaller or 
equal to bottom - return reverse ? (result >= 0) : (result <= 0); - } - - @Override - protected void encodeBottom(byte[] packedValue) { - LongPoint.encodeDimension(bottom, packedValue, 0); - } - - @Override - protected void encodeTop(byte[] packedValue) { - LongPoint.encodeDimension(topValue, packedValue, 0); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/comparators/RelevanceComparator.java b/src/main/java/it/cavallium/dbengine/lucene/comparators/RelevanceComparator.java deleted file mode 100644 index b4b5812..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/comparators/RelevanceComparator.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package it.cavallium.dbengine.lucene.comparators; - -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.FloatCodec; -import it.cavallium.dbengine.lucene.IArray; -import it.cavallium.dbengine.lucene.HugePqArray; -import java.io.IOException; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.Query; -import org.apache.lucene.search.Scorable; -import org.apache.lucene.search.ScoreCachingWrappingScorer; - -/** - * Sorts by descending relevance. NOTE: if you are sorting only by descending relevance and then secondarily by - * ascending docID, performance is faster using {@link org.apache.lucene.search.TopScoreDocCollector} directly (which {@link - * org.apache.lucene.search.IndexSearcher#search(Query, int)} uses when no {@link org.apache.lucene.search.Sort} is specified). - * Based on {@link org.apache.lucene.search.FieldComparator.RelevanceComparator} - */ -public final class RelevanceComparator extends FieldComparator implements LeafFieldComparator, - DiscardingCloseable { - - private final IArray scores; - private float bottom; - private Scorable scorer; - private float topValue; - - /** - * Creates a new comparator based on relevance for {@code numHits}. 
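
The deleted numeric comparators above (Double, Float, Int, Long) all share the same missing-value competitiveness rule, spelled out in their inline comments: in a descending (reverse) sort, a document without the field stays competitive when its substitute missingValue compares greater than or equal to the current bottom of the queue; in an ascending sort, when it compares less than or equal. A one-method sketch of that check (names are illustrative):

    static boolean isMissingValueCompetitive(long missingValue, long bottom, boolean reverse) {
        int result = Long.compare(missingValue, bottom);
        // reverse (desc): competitive when missingValue >= bottom;
        // asc: competitive when missingValue <= bottom.
        return reverse ? result >= 0 : result <= 0;
    }
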
- */ - public RelevanceComparator(LLTempHugePqEnv env, int numHits) { - scores = new HugePqArray<>(env, new FloatCodec(), numHits, 0f); - } - - @Override - public int compare(int slot1, int slot2) { - var value1 = scores.get(slot1); - var value2 = scores.get(slot2); - assert value1 != null : "Missing score for slot1: " + slot1; - assert value2 != null : "Missing score for slot2: " + slot2; - return Float.compare(value1, value2); - } - - @Override - public int compareBottom(int doc) throws IOException { - float score = scorer.score(); - assert !Float.isNaN(score); - return Float.compare(score, bottom); - } - - @Override - public void copy(int slot, int doc) throws IOException { - var score = scorer.score(); - scores.set(slot, score); - assert !Float.isNaN(score); - } - - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) { - return this; - } - - @Override - public void setBottom(final int bottom) { - this.bottom = scores.getOrDefault(bottom, 0f); - } - - @Override - public void setTopValue(Float value) { - topValue = value; - } - - @Override - public void setScorer(Scorable scorer) { - // wrap with a ScoreCachingWrappingScorer so that successive calls to - // score() will not incur score computation over and - // over again. - this.scorer = ScoreCachingWrappingScorer.wrap(scorer); - } - - @Override - public Float value(int slot) { - return scores.getOrDefault(slot, 0f); - } - - // Override because we sort reverse of natural Float order: - @Override - public int compareValues(Float first, Float second) { - // Reversed intentionally because relevance by default - // sorts descending: - return second.compareTo(first); - } - - @Override - public int compareTop(int doc) throws IOException { - float docValue = scorer.score(); - assert !Float.isNaN(docValue); - return Float.compare(docValue, topValue); - } - - @Override - public void close() { - if (this.scores instanceof SafeCloseable closeable) { - closeable.close(); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/comparators/TermOrdValComparator.java b/src/main/java/it/cavallium/dbengine/lucene/comparators/TermOrdValComparator.java deleted file mode 100644 index 86aefb3..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/comparators/TermOrdValComparator.java +++ /dev/null @@ -1,312 +0,0 @@ -package it.cavallium.dbengine.lucene.comparators; - -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.ByteArrayCodec; -import it.cavallium.dbengine.lucene.IArray; -import it.cavallium.dbengine.lucene.IntCodec; -import it.cavallium.dbengine.lucene.HugePqArray; -import java.io.IOException; -import java.util.Arrays; -import org.apache.lucene.index.DocValues; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.index.SortedDocValues; -import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.Scorable; -import org.apache.lucene.util.BytesRef; - -/** - * Sorts by field's natural Term sort order, using ordinals. This is functionally equivalent to - * {@link org.apache.lucene.search.FieldComparator.TermValComparator}, but it first resolves the - * string to their relative ordinal positions (using the index returned by {@link - * org.apache.lucene.index.LeafReader#getSortedDocValues(String)}), and does most comparisons - * using the ordinals. 
For medium to large results, this comparator will be much faster than - * {@link org.apache.lucene.search.FieldComparator.TermValComparator}. For very small result sets - * it may be slower. - * Based on {@link org.apache.lucene.search.FieldComparator.TermOrdValComparator} - */ -public class TermOrdValComparator extends FieldComparator implements LeafFieldComparator, - DiscardingCloseable { - /* Ords for each slot. - @lucene.internal */ - final IArray ords; - - /* Values for each slot. - @lucene.internal */ - final IArray values; - - /* Which reader last copied a value into the slot. When - we compare two slots, we just compare-by-ord if the - readerGen is the same; else we must compare the - values (slower). - @lucene.internal */ - final IArray readerGen; - - /* Gen of current reader we are on. - @lucene.internal */ - int currentReaderGen = -1; - - /* Current reader's doc ord/values. - @lucene.internal */ - SortedDocValues termsIndex; - - private final String field; - - /* Bottom slot, or -1 if queue isn't full yet - @lucene.internal */ - int bottomSlot = -1; - - /* Bottom ord (same as ords[bottomSlot] once bottomSlot - is set). Cached for faster compares. - @lucene.internal */ - int bottomOrd; - - /* True if current bottom slot matches the current - reader. - @lucene.internal */ - boolean bottomSameReader; - - /* Bottom value (same as values[bottomSlot] once - bottomSlot is set). Cached for faster compares. - @lucene.internal */ - byte[] bottomValue; - - /** Set by setTopValue. */ - byte[] topValue; - - boolean topSameReader; - int topOrd; - - /** -1 if missing values are sorted first, 1 if they are sorted last */ - final int missingSortCmp; - - /** Which ordinal to use for a missing value. */ - final int missingOrd; - - /** Creates this, sorting missing values first. */ - public TermOrdValComparator(LLTempHugePqEnv env, int numHits, String field) { - this(env, numHits, field, false); - } - - /** - * Creates this, with control over how missing values are sorted. Pass sortMissingLast=true to - * put missing values at the end. 
- */ - public TermOrdValComparator(LLTempHugePqEnv env, int numHits, String field, boolean sortMissingLast) { - ords = new HugePqArray<>(env, new IntCodec(), numHits, 0); - values = new HugePqArray<>(env, new ByteArrayCodec(), numHits, null); - readerGen = new HugePqArray<>(env, new IntCodec(), numHits, 0); - this.field = field; - if (sortMissingLast) { - missingSortCmp = 1; - missingOrd = Integer.MAX_VALUE; - } else { - missingSortCmp = -1; - missingOrd = -1; - } - } - - private int getOrdForDoc(int doc) throws IOException { - if (termsIndex.advanceExact(doc)) { - return termsIndex.ordValue(); - } else { - return -1; - } - } - - @Override - public int compare(int slot1, int slot2) { - if ((int) readerGen.getOrDefault(slot2, 0) == readerGen.getOrDefault(slot1, 0)) { - return ords.getOrDefault(slot1, 0) - ords.getOrDefault(slot2, 0); - } - - final var val1 = values.get(slot1); - final var val2 = values.get(slot2); - if (val1 == null) { - if (val2 == null) { - return 0; - } - return missingSortCmp; - } else if (val2 == null) { - return -missingSortCmp; - } - return Arrays.compare(val1, val2); - } - - @Override - public int compareBottom(int doc) throws IOException { - assert bottomSlot != -1; - int docOrd = getOrdForDoc(doc); - if (docOrd == -1) { - docOrd = missingOrd; - } - if (bottomSameReader) { - // ord is precisely comparable, even in the equal case - return bottomOrd - docOrd; - } else if (bottomOrd >= docOrd) { - // the equals case always means bottom is > doc - // (because we set bottomOrd to the lower bound in - // setBottom): - return 1; - } else { - return -1; - } - } - - @Override - public void copy(int slot, int doc) throws IOException { - int ord = getOrdForDoc(doc); - if (ord == -1) { - ord = missingOrd; - values.reset(slot); - } else { - assert ord >= 0; - values.set(slot, copyBytes(termsIndex.lookupOrd(ord))); - } - ords.set(slot, ord); - readerGen.set(slot, currentReaderGen); - } - - private byte[] copyBytes(BytesRef lookupOrd) { - if (lookupOrd == null) return null; - return Arrays.copyOfRange(lookupOrd.bytes, lookupOrd.offset, lookupOrd.length); - } - - /** Retrieves the SortedDocValues for the field in this segment */ - protected SortedDocValues getSortedDocValues(LeafReaderContext context, String field) - throws IOException { - return DocValues.getSorted(context.reader(), field); - } - - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) throws IOException { - termsIndex = getSortedDocValues(context, field); - currentReaderGen++; - - if (topValue != null) { - // Recompute topOrd/SameReader - int ord = termsIndex.lookupTerm(new BytesRef(topValue)); - if (ord >= 0) { - topSameReader = true; - topOrd = ord; - } else { - topSameReader = false; - topOrd = -ord - 2; - } - } else { - topOrd = missingOrd; - topSameReader = true; - } - // System.out.println(" getLeafComparator topOrd=" + topOrd + " topSameReader=" + - // topSameReader); - - if (bottomSlot != -1) { - // Recompute bottomOrd/SameReader - setBottom(bottomSlot); - } - - return this; - } - - @Override - public void setBottom(final int bottom) throws IOException { - bottomSlot = bottom; - - bottomValue = values.get(bottomSlot); - if (currentReaderGen == readerGen.getOrDefault(bottomSlot, 0)) { - bottomOrd = ords.getOrDefault(bottomSlot, 0); - bottomSameReader = true; - } else { - if (bottomValue == null) { - // missingOrd is null for all segments - assert ords.getOrDefault(bottomSlot, 0) == missingOrd; - bottomOrd = missingOrd; - bottomSameReader = true; - readerGen.set(bottomSlot, 
- @Override - public void setBottom(final int bottom) throws IOException { - bottomSlot = bottom; - - bottomValue = values.get(bottomSlot); - if (currentReaderGen == readerGen.getOrDefault(bottomSlot, 0)) { - bottomOrd = ords.getOrDefault(bottomSlot, 0); - bottomSameReader = true; - } else { - if (bottomValue == null) { - // missingOrd is null for all segments - assert ords.getOrDefault(bottomSlot, 0) == missingOrd; - bottomOrd = missingOrd; - bottomSameReader = true; - readerGen.set(bottomSlot, currentReaderGen); - } else { - final int ord = termsIndex.lookupTerm(new BytesRef(bottomValue)); - if (ord < 0) { - bottomOrd = -ord - 2; - bottomSameReader = false; - } else { - bottomOrd = ord; - // exact value match - bottomSameReader = true; - readerGen.set(bottomSlot, currentReaderGen); - ords.set(bottomSlot, bottomOrd); - } - } - } - } - - @Override - public void setTopValue(BytesRef value) { - // null is fine: it means the last doc of the prior - // search was missing this value - topValue = copyBytes(value); - // System.out.println("setTopValue " + topValue); - } - - @Override - public BytesRef value(int slot) { - return getBytesRef(values.get(slot)); - } - - private BytesRef getBytesRef(byte[] bytes) { - if (bytes == null) return null; - return new BytesRef(bytes); - } - - @Override - public int compareTop(int doc) throws IOException { - - int ord = getOrdForDoc(doc); - if (ord == -1) { - ord = missingOrd; - } - - if (topSameReader) { - // ord is precisely comparable, even in the equal - // case - // System.out.println("compareTop doc=" + doc + " ord=" + ord + " ret=" + (topOrd-ord)); - return topOrd - ord; - } else if (ord <= topOrd) { - // the equals case always means doc is < value - // (because we set topOrd to the lower bound) - return 1; - } else { - return -1; - } - } - - @Override - public int compareValues(BytesRef val1, BytesRef val2) { - if (val1 == null) { - if (val2 == null) { - return 0; - } - return missingSortCmp; - } else if (val2 == null) { - return -missingSortCmp; - } - return val1.compareTo(val2); - } - - @Override - public void setScorer(Scorable scorer) {} - - @Override - public void close() { - if (this.ords instanceof SafeCloseable closeable) { - closeable.close(); - } - if (this.readerGen instanceof SafeCloseable closeable) { - closeable.close(); - } - if (this.values instanceof SafeCloseable closeable) { - closeable.close(); - } - } -} \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/lucene/comparators/info.txt b/src/main/java/it/cavallium/dbengine/lucene/comparators/info.txt deleted file mode 100644 index 71d00c2..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/comparators/info.txt +++ /dev/null @@ -1,6 +0,0 @@ -/** - * This package must mirror these changes: - * - * Lucene comparators changes on GitHub - */ -This has been moved from package-info.java due to a compilation bug diff --git a/src/main/java/it/cavallium/dbengine/lucene/directory/Lucene90NoCompressionStoredFieldsFormat.java b/src/main/java/it/cavallium/dbengine/lucene/directory/Lucene90NoCompressionStoredFieldsFormat.java index b0ead5b..fe157c8 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/directory/Lucene90NoCompressionStoredFieldsFormat.java +++ b/src/main/java/it/cavallium/dbengine/lucene/directory/Lucene90NoCompressionStoredFieldsFormat.java @@ -77,7 +77,7 @@ public class Lucene90NoCompressionStoredFieldsFormat extends StoredFieldsFormat private static final Compressor DUMMY_COMPRESSOR = new Compressor() { @Override - public void compress(ByteBuffersDataInput byteBuffersDataInput, DataOutput dataOutput) throws IOException { + public void compress(ByteBuffersDataInput byteBuffersDataInput, DataOutput dataOutput) { dataOutput.copyBytes(byteBuffersDataInput, byteBuffersDataInput.size()); } @@ -96,7 +96,7 @@ public class Lucene90NoCompressionStoredFieldsFormat extends StoredFieldsFormat } @Override - public StoredFieldsWriter fieldsWriter(Directory directory, SegmentInfo si, IOContext context) throws IOException { + public StoredFieldsWriter
fieldsWriter(Directory directory, SegmentInfo si, IOContext context) { return impl().fieldsWriter(directory, si, context); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksDBInstance.java b/src/main/java/it/cavallium/dbengine/lucene/directory/RocksDBInstance.java deleted file mode 100644 index cc9584e..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksDBInstance.java +++ /dev/null @@ -1,7 +0,0 @@ -package it.cavallium.dbengine.lucene.directory; - -import java.util.Map; -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.RocksDB; - -public record RocksDBInstance(RocksDB db, Map handles) {} diff --git a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksDBSliceInputStream.java b/src/main/java/it/cavallium/dbengine/lucene/directory/RocksDBSliceInputStream.java deleted file mode 100644 index 3e30faa..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksDBSliceInputStream.java +++ /dev/null @@ -1,166 +0,0 @@ -package it.cavallium.dbengine.lucene.directory; - -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.buffer.BufferRef; -import org.apache.lucene.store.IndexInput; - -import java.io.EOFException; -import java.io.IOException; - -public class RocksDBSliceInputStream extends IndexInput { - - private static final BufferAllocator HEAP_UNPOOLED_BUFFER = BufferAllocator.onHeapUnpooled(); - - private final int bufferSize; - - private long position; - - private final long length; - - private byte[] currentBuffer; - - private int currentBufferIndex; - - private final RocksdbFileStore store; - - private final String name; - - public RocksDBSliceInputStream(String name, RocksdbFileStore store, int bufferSize) throws IOException { - this(name, store, bufferSize, store.getSize(name)); - } - - public RocksDBSliceInputStream(String name, RocksdbFileStore store, int bufferSize, long length) { - super("RocksDBSliceInputStream(name=" + name + ")"); - this.name = name; - this.store = store; - this.bufferSize = bufferSize; - this.currentBuffer = new byte[this.bufferSize]; - this.currentBufferIndex = bufferSize; - this.position = 0; - this.length = length; - - - } - - @Override - public void close() throws IOException { - //store.close(); - } - - @Override - public long getFilePointer() { - return position; - } - - @Override - public void seek(long pos) { - if (pos < 0 || pos > length) { - throw new IllegalArgumentException("pos must be between 0 and " + length); - } - position = pos; - currentBufferIndex = this.bufferSize; - } - - @Override - public long length() { - return this.length; - } - - @Override - public IndexInput slice(String sliceDescription, final long offset, final long length) throws IOException { - - if (offset < 0 || length < 0 || offset + length > this.length) { - throw new IllegalArgumentException("slice() " + sliceDescription + " out of bounds: " + this); - } - - return new RocksDBSliceInputStream(name, store, bufferSize, offset + length) { - { - seek(0L); - } - - @Override - public void seek(long pos) { - if (pos < 0L) { - throw new IllegalArgumentException("Seeking to negative position: " + this); - } - - super.seek(pos + offset); - } - - - @Override - public long getFilePointer() { - return super.getFilePointer() - offset; - } - - @Override - public long length() { - return super.length() - offset; - } - - @Override - public IndexInput slice(String sliceDescription, long ofs, long len) throws IOException { - return super.slice(sliceDescription, offset + ofs, len); - } 
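The anonymous subclass above implements slicing purely by pointer translation: seeks are shifted by offset on the way in, and positions and lengths are shifted back on the way out, so nested slices compose additively. A minimal sketch of the same windowing pattern, detached from Lucene's IndexInput (ByteSource and SliceView are illustrative names, not part of this codebase):

    // A window [offset, offset + length) over an absolutely addressed source.
    record ByteSource(byte[] bytes) {
        byte read(long absolutePos) {
            return bytes[(int) absolutePos];
        }
    }

    final class SliceView {
        private final ByteSource source;
        private final long offset; // window start, in absolute coordinates
        private final long length; // window size
        private long pos;          // current position, relative to the window

        SliceView(ByteSource source, long offset, long length) {
            this.source = source;
            this.offset = offset;
            this.length = length;
        }

        byte readByte() {
            return source.read(offset + pos++); // translate to absolute on the way in
        }

        long getFilePointer() {
            return pos; // reported relative to the window
        }

        SliceView slice(long ofs, long len) {
            return new SliceView(source, offset + ofs, len); // offsets compose additively
        }
    }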
- }; - } - - - @Override - public byte readByte() throws IOException { - - if (position >= length) { - throw new EOFException("Read end"); - } - loadBufferIfNeed(); - byte b = currentBuffer[currentBufferIndex++]; - position++; - return b; - } - - protected void loadBufferIfNeed() throws IOException { - if (this.currentBufferIndex == this.bufferSize) { - try (var editedBuffer = HEAP_UNPOOLED_BUFFER.copyOf(currentBuffer)) { - int n = store.load(name, position, editedBuffer, 0, bufferSize); - editedBuffer.copyInto(0, currentBuffer, 0, currentBuffer.length); - if (n == -1) { - throw new EOFException("Read end"); - } - } - this.currentBufferIndex = 0; - } - } - - @Override - public void readBytes(byte[] b, int offset, int len) throws IOException { - - if (position >= length) { - throw new EOFException("Read end"); - } - - int f = offset; - int n = Math.min((int) (length - position), len); - do { - loadBufferIfNeed(); - - int r = Math.min(bufferSize - currentBufferIndex, n); - - System.arraycopy(currentBuffer, currentBufferIndex, b, f, r); - - f += r; - position += r; - currentBufferIndex += r; - n -= r; - - } while (n != 0); - } - - @Override - public IndexInput clone() { - RocksDBSliceInputStream in = (RocksDBSliceInputStream) super.clone(); - in.currentBuffer = new byte[bufferSize]; - System.arraycopy(this.currentBuffer, 0, in.currentBuffer, 0, bufferSize); - return in; - } -} \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbDirectory.java b/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbDirectory.java deleted file mode 100644 index 608b381..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbDirectory.java +++ /dev/null @@ -1,229 +0,0 @@ -package it.cavallium.dbengine.lucene.directory; - -import com.google.common.util.concurrent.Striped; -import io.netty5.buffer.BufferAllocator; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.nio.file.Path; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReadWriteLock; -import org.apache.lucene.index.IndexFileNames; -import org.apache.lucene.store.BaseDirectory; -import org.apache.lucene.store.IOContext; -import org.apache.lucene.store.IndexInput; -import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.store.LockFactory; -import org.apache.lucene.store.SingleInstanceLockFactory; -import org.apache.lucene.util.Accountable; -import org.jetbrains.annotations.Nullable; -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; - -public class RocksdbDirectory extends BaseDirectory implements Accountable { - - private static final int BUFFER_SIZE = 10 * 1024; - - @SuppressWarnings("UnstableApiUsage") - protected final Striped metaLock = Striped.readWriteLock(64); - - protected final RocksdbFileStore store; - - private final AtomicLong nextTempFileCounter = new AtomicLong(); - - public RocksdbDirectory(BufferAllocator bufferAllocator, Path path, int blockSize) throws IOException { - this(bufferAllocator, path, blockSize, new SingleInstanceLockFactory()); - } - - public RocksdbDirectory(BufferAllocator bufferAllocator, - RocksDB db, - Map handles, - @Nullable String name, - int blockSize) - throws IOException { - this(bufferAllocator, db, handles, name, blockSize, new SingleInstanceLockFactory()); - } - - protected RocksdbDirectory(BufferAllocator 
bufferAllocator, Path path, int blockSize, LockFactory lockFactory) - throws IOException { - super(lockFactory); - store = RocksdbFileStore.create(bufferAllocator, path, blockSize, metaLock); - } - - protected RocksdbDirectory(BufferAllocator bufferAllocator, - RocksDB db, - Map handles, - @Nullable String name, - int blockSize, - LockFactory lockFactory) throws IOException { - super(lockFactory); - store = RocksdbFileStore.create(bufferAllocator, db, handles, name, blockSize, metaLock); - } - - @Override - public final String[] listAll() { - ensureOpen(); - return store.listKey().toArray(String[]::new); - } - - /** - * Returns the length in bytes of a file in the directory. - * - * @throws IOException if the file does not exist - */ - @Override - public final long fileLength(String name) throws IOException { - ensureOpen(); - long size = store.getSize(name); - if (size == -1) { - throw new FileNotFoundException(name); - } - return size; - } - - /** - * Removes an existing file in the directory. - * - * @throws IOException if the file does not exist - */ - @Override - public void deleteFile(String name) throws IOException { - ensureOpen(); - var l = metaLock.get(name).writeLock(); - l.lock(); - try { - long size = store.getSize(name); - if (size != -1) { - store.remove(name); - } else { - throw new FileNotFoundException(name); - } - } finally { - l.unlock(); - } - } - - /** - * Creates a new, empty file in the directory with the given name. Returns a stream writing this file. - */ - @Override - public IndexOutput createOutput(String name, IOContext context) throws IOException { - ensureOpen(); - var l = metaLock.get(name).writeLock(); - l.lock(); - try { - if (store.contains(name)) { - store.remove(name); - } - - return new RocksdbOutputStream(name, store, BUFFER_SIZE, true); - } catch (RocksDBException ex) { - throw new IOException(ex); - } finally { - l.unlock(); - } - } - @Override - public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) { - ensureOpen(); - - String name = getTempFileName(prefix, suffix, nextTempFileCounter.getAndIncrement()); - - return new RocksdbOutputStream(name, store, BUFFER_SIZE, true); - } - - /** - * Creates a file name for a temporary file. The name will start with {@code prefix}, end with - * {@code suffix} and have a reserved file extension {@code .tmp}. - * - * @see #createTempOutput(String, String, IOContext) - */ - protected static String getTempFileName(String prefix, String suffix, long counter) { - return IndexFileNames.segmentFileName( - prefix, suffix + "_" + Long.toString(counter, Character.MAX_RADIX), "tmp"); - } - - @Override - public void sync(Collection names) throws IOException { - // System.out.println("Syncing " + names.size() + " files"); - } - - @Override - public void syncMetaData() throws IOException { - // System.out.println("Syncing meta"); - } - - @Override - public void rename(String source, String dest) throws IOException { - ensureOpen(); - var l = metaLock.bulkGet(List.of(source, dest)); - for (ReadWriteLock ll : l) { - ll.writeLock().lock(); - } - try { - if (!store.contains(source)) { - throw new FileNotFoundException(source); - } - store.move(source, dest); - } catch (RocksDBException ex) { - throw new IOException(ex); - } finally { - for (ReadWriteLock ll : l) { - ll.writeLock().unlock(); - } - } - } - - /** - * Returns a stream reading an existing file. 
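One locking detail worth calling out from the methods above: rename() takes its stripes via bulkGet, which hands back the locks for both names in a consistent stripe order, so two concurrent renames over the same pair of files cannot deadlock on lock ordering. A minimal sketch of the pattern (assuming only Guava; the class name is illustrative):

    import com.google.common.util.concurrent.Striped;
    import java.util.List;
    import java.util.concurrent.locks.ReadWriteLock;

    final class StripedRename {
        private final Striped<ReadWriteLock> metaLock = Striped.readWriteLock(64);

        void rename(String source, String dest) {
            // bulkGet returns the stripes in their internal index order, giving a
            // global acquisition order that prevents lock-ordering deadlocks.
            Iterable<ReadWriteLock> locks = metaLock.bulkGet(List.of(source, dest));
            for (ReadWriteLock l : locks) {
                l.writeLock().lock();
            }
            try {
                // ... move the file metadata while holding both write locks ...
            } finally {
                for (ReadWriteLock l : locks) {
                    l.writeLock().unlock();
                }
            }
        }
    }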
- */ - @Override - public IndexInput openInput(String name, IOContext context) throws IOException { - ensureOpen(); - var l = metaLock.get(name).readLock(); - l.lock(); - try { - if (!store.contains(name)) { - throw new FileNotFoundException(name); - } - - return new RocksdbInputStream(name, store, BUFFER_SIZE); - } catch (RocksDBException ex) { - throw new IOException(ex); - } finally { - l.unlock(); - } - } - - /** - * Closes the store to future operations, releasing associated memory. - */ - @Override - public void close() { - isOpen = false; - try { - store.close(); - } catch (IOException e) { - throw new RuntimeException(e); - } - } - - @Override - public Set<String> getPendingDeletions() { - return Set.of(); - } - - @Override - public long ramBytesUsed() { - return 0; - } - - @Override - public Collection<Accountable> getChildResources() { - return null; - } -} \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbFileStore.java b/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbFileStore.java deleted file mode 100644 index 9b0c168..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbFileStore.java +++ /dev/null @@ -1,819 +0,0 @@ -package it.cavallium.dbengine.lucene.directory; - -import com.google.common.primitives.Longs; -import com.google.common.util.concurrent.Striped; -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import io.netty5.buffer.BufferComponent; -import it.cavallium.dbengine.database.LLUtils; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReadWriteLock; -import org.apache.lucene.store.AlreadyClosedException; -import org.jetbrains.annotations.Nullable; -import org.rocksdb.ClockCache; -import org.rocksdb.ColumnFamilyDescriptor; -import org.rocksdb.ColumnFamilyHandle; -import org.rocksdb.ColumnFamilyOptions; -import org.rocksdb.CompressionType; -import org.rocksdb.DBOptions; -import org.rocksdb.DirectSlice; -import org.rocksdb.InfoLogLevel; -import org.rocksdb.Options; -import org.rocksdb.ReadOptions; -import org.rocksdb.RocksDB; -import org.rocksdb.RocksDBException; -import org.rocksdb.RocksIterator; -import org.rocksdb.WALRecoveryMode; -import org.rocksdb.WriteOptions; -import org.rocksdb.util.SizeUnit; - -public class RocksdbFileStore { - - private static final byte[] NEXT_ID_KEY = new byte[]{0x0}; - private static final String DEFAULT_COLUMN_FAMILY_STRING = new String(RocksDB.DEFAULT_COLUMN_FAMILY, StandardCharsets.US_ASCII); - - static { - RocksDB.loadLibrary(); - } - - @SuppressWarnings("UnstableApiUsage") - private final Striped<ReadWriteLock> metaLock; - private final ReadWriteLock[] readWriteLocks; - - private static final ReadOptions DEFAULT_READ_OPTS = new ReadOptions() - .setVerifyChecksums(false) - .setIgnoreRangeDeletions(true); - private final ReadOptions itReadOpts; - private static final WriteOptions DEFAULT_WRITE_OPTS = new WriteOptions().setDisableWAL(true); - private static final ByteBuffer EMPTY_BYTE_BUF = ByteBuffer.allocateDirect(0); - - private final RocksDB db; - public final BufferAllocator bufferAllocator; - private final int blockSize; - private final
ColumnFamilyHandle headers; - private final ColumnFamilyHandle filename; - private final ColumnFamilyHandle size; - private final ColumnFamilyHandle data; - private final ConcurrentHashMap filenameToId = new ConcurrentHashMap<>(); - private final AtomicLong nextId; - private final boolean closeDbOnClose; - private volatile boolean closed; - - private RocksdbFileStore(RocksDB db, - BufferAllocator bufferAllocator, - ColumnFamilyHandle headers, - ColumnFamilyHandle filename, - ColumnFamilyHandle size, - ColumnFamilyHandle data, - int blockSize, - Striped metaLock, - boolean closeDbOnClose) throws IOException { - try { - this.db = db; - this.bufferAllocator = bufferAllocator; - this.closeDbOnClose = closeDbOnClose; - this.blockSize = blockSize; - this.headers = headers; - this.filename = filename; - this.size = size; - this.data = data; - this.metaLock = metaLock; - ReadWriteLock[] locks = new ReadWriteLock[metaLock.size()]; - for (int i = 0; i < metaLock.size(); i++) { - locks[i] = metaLock.getAt(i); - } - this.readWriteLocks = locks; - byte[] nextIdBytes = db.get(headers, NEXT_ID_KEY); - if (nextIdBytes != null) { - this.nextId = new AtomicLong(Longs.fromByteArray(nextIdBytes)); - } else { - this.nextId = new AtomicLong(); - incFlush(); - db.put(headers, NEXT_ID_KEY, Longs.toByteArray(100)); - incFlush(); - } - this.itReadOpts = new ReadOptions(); - if (LLUtils.MANUAL_READAHEAD) { - itReadOpts.setReadaheadSize(blockSize * 4L); - } - itReadOpts.setVerifyChecksums(false) - .setIgnoreRangeDeletions(true); - } catch (RocksDBException e) { - throw new IOException("Failed to open RocksDB meta file store", e); - } - } - - private static ByteBuffer readableNioBuffer(Buffer buffer) { - assert buffer.countReadableComponents() == 1 : "Readable components count: " + buffer.countReadableComponents(); - return ((BufferComponent) buffer).readableBuffer(); - } - - private static ByteBuffer writableNioBuffer(Buffer buffer, int newWriterOffset) { - assert buffer.countWritableComponents() == 1 : "Writable components count: " + buffer.countWritableComponents(); - buffer.writerOffset(0).ensureWritable(newWriterOffset); - var byteBuf = ((BufferComponent) buffer).writableBuffer(); - buffer.writerOffset(newWriterOffset); - assert buffer.capacity() >= newWriterOffset : "Returned capacity " + buffer.capacity() + " < " + newWriterOffset; - return byteBuf; - } - - private static DBOptions getDBOptions() { - var options = new DBOptions(); - options.setParanoidChecks(false); - options.setWalSizeLimitMB(256); - options.setMaxWriteBatchGroupSizeBytes(2 * SizeUnit.MB); - //options.setAtomicFlush(false); - options.setWalRecoveryMode(WALRecoveryMode.PointInTimeRecovery); - options.setCreateMissingColumnFamilies(true); - options.setCreateIfMissing(true); - //options.setUnorderedWrite(true); - options.setAvoidUnnecessaryBlockingIO(true); - options.setSkipCheckingSstFileSizesOnDbOpen(true); - options.setInfoLogLevel(InfoLogLevel.ERROR_LEVEL); - //options.setAllowMmapReads(true); - //options.setAllowMmapWrites(true); - options.setUseDirectReads(true); - options.setUseDirectIoForFlushAndCompaction(true); - options.setIncreaseParallelism(Runtime.getRuntime().availableProcessors()); - options.setDeleteObsoleteFilesPeriodMicros(Duration.ofMinutes(15).toNanos() / 1000L); - options.setRowCache(new ClockCache(512 * 1024 * 1024L)); - options.setMaxOpenFiles(500); - return options; - } - - public static ColumnFamilyDescriptor getColumnFamilyDescriptor(String name) { - ColumnFamilyOptions opts; - if 
(name.equals(DEFAULT_COLUMN_FAMILY_STRING) || name.endsWith("_headers")) { - opts = new ColumnFamilyOptions() - .setCompressionType(CompressionType.NO_COMPRESSION) - .setTargetFileSizeBase(SizeUnit.KB); - } else if (name.endsWith("_filename")) { - opts = new ColumnFamilyOptions() - .setCompressionType(CompressionType.NO_COMPRESSION) - .setTargetFileSizeBase(32L * SizeUnit.MB); - } else if (name.endsWith("_size")) { - opts = new ColumnFamilyOptions() - .setCompressionType(CompressionType.NO_COMPRESSION) - .setTargetFileSizeBase(32L * SizeUnit.MB); - } else if (name.endsWith("_data")) { - opts = new ColumnFamilyOptions() - .setCompressionType(CompressionType.LZ4_COMPRESSION) - .setTargetFileSizeBase(128L * SizeUnit.MB); - } else { - opts = new ColumnFamilyOptions(); - } - return new ColumnFamilyDescriptor(name.getBytes(StandardCharsets.US_ASCII), opts); - } - - private static List getColumnFamilyDescriptors(@Nullable String name) { - String headersName, filenameName, sizeName, dataName; - if (name != null) { - headersName = (name + "_headers"); - filenameName = (name + "_filename"); - sizeName = (name + "_size"); - dataName = (name + "_data"); - } else { - headersName = DEFAULT_COLUMN_FAMILY_STRING; - filenameName = "filename"; - sizeName = "size"; - dataName = "data"; - } - return List.of( - getColumnFamilyDescriptor(headersName), - getColumnFamilyDescriptor(filenameName), - getColumnFamilyDescriptor(sizeName), - getColumnFamilyDescriptor(dataName) - ); - } - - public static RocksdbFileStore create(BufferAllocator bufferAllocator, - RocksDB db, - Map existingHandles, - @Nullable String name, - int blockSize, - Striped metaLock) throws IOException { - List columnFamilyDescriptors = getColumnFamilyDescriptors(name); - try { - List handles = new ArrayList<>(columnFamilyDescriptors.size()); - for (ColumnFamilyDescriptor columnFamilyDescriptor : columnFamilyDescriptors) { - var columnFamilyName = new String(columnFamilyDescriptor.getName(), StandardCharsets.US_ASCII); - ColumnFamilyHandle columnFamilyHandle; - if (existingHandles.containsKey(columnFamilyName)) { - columnFamilyHandle = existingHandles.get(columnFamilyName); - } else { - columnFamilyHandle = db.createColumnFamily(columnFamilyDescriptor); - } - handles.add(columnFamilyHandle); - } - return new RocksdbFileStore(db, - bufferAllocator, - handles.get(0), - handles.get(1), - handles.get(2), - handles.get(3), - blockSize, - metaLock, - false - ); - } catch (RocksDBException e) { - throw new IOException(e); - } - } - - public static RocksdbFileStore create(BufferAllocator bufferAllocator, - Path path, - int blockSize, - Striped metaLock) throws IOException { - try { - DBOptions options = getDBOptions(); - List descriptors = getColumnFamilyDescriptors(null); - if (Files.notExists(path)) { - Files.createDirectories(path); - } - var handles = new ArrayList(4); - RocksDB db = RocksDB.open(options, path.toString(), descriptors, handles); - return new RocksdbFileStore(db, - bufferAllocator, - handles.get(0), - handles.get(1), - handles.get(2), - handles.get(3), - blockSize, - metaLock, - true - ); - } catch (RocksDBException e) { - throw new IOException("Failed to open RocksDB meta file store", e); - } - } - - public static RocksDBInstance createEmpty(Path path) throws IOException { - try { - DBOptions options = getDBOptions(); - List descriptors; - if (Files.exists(path)) { - descriptors = RocksDB - .listColumnFamilies(new Options(), path.toString()) - .stream() - .map(nameBytes -> { - var name = new String(nameBytes, 
StandardCharsets.US_ASCII); - return getColumnFamilyDescriptor(name); - }) - .toList(); - } else { - descriptors = List.of(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); - } - if (Files.notExists(path)) { - Files.createDirectories(path); - } - var handles = new ArrayList(descriptors.size()); - RocksDB db = RocksDB.open(options, path.toString(), descriptors, handles); - var handlesMap = new HashMap(); - for (int i = 0; i < handles.size(); i++) { - var name = new String(descriptors.get(i).getName(), StandardCharsets.US_ASCII); - handlesMap.put(name, handles.get(i)); - } - return new RocksDBInstance(db, Collections.unmodifiableMap(handlesMap)); - } catch (RocksDBException e) { - throw new IOException("Failed to open RocksDB meta file store", e); - } - } - - private long getFileId(String key) throws RocksDBException, IOException { - Long id = filenameToId.get(key); - if (id != null) { - return id; - } else { - try (var filenameKey = getFilenameKey(key); var filenameValue = getFilenameValue()) { - if (db.get(filename, DEFAULT_READ_OPTS, readableNioBuffer(filenameKey), writableNioBuffer(filenameValue, Long.BYTES)) - == RocksDB.NOT_FOUND) { - throw new IOException("File not found: " + key); - } - filenameValue.writerOffset(Long.BYTES); - return filenameValue.readLong(); - } - } - } - - @Nullable - private Long getFileIdOrNull(String key) throws RocksDBException { - Long id = filenameToId.get(key); - if (id != null) { - return id; - } else { - try (var filenameKey = getFilenameKey(key); var filenameValue = getFilenameValue()) { - if (db.get(filename, DEFAULT_READ_OPTS, readableNioBuffer(filenameKey), writableNioBuffer(filenameValue, Long.BYTES)) - == RocksDB.NOT_FOUND) { - return null; - } - filenameValue.writerOffset(Long.BYTES); - return filenameValue.readLong(); - } - } - } - - private boolean containsFileId(String key) throws RocksDBException { - Long id = filenameToId.get(key); - if (id != null) { - return true; - } else { - try (var filenameKey = getFilenameKey(key)) { - if (db.keyMayExist(filename, DEFAULT_READ_OPTS, readableNioBuffer(filenameKey))) { - return db.get(filename, DEFAULT_READ_OPTS, readableNioBuffer(filenameKey), EMPTY_BYTE_BUF) != RocksDB.NOT_FOUND; - } else { - return false; - } - } - } - } - - private void moveFileId(long id, String oldKey, String newKey) throws RocksDBException { - var filenameValue = getFilenameValue(); - filenameValue.writeLong(id); - try (var filenameOldKey = getFilenameKey(oldKey); var filenameNewKey = getFilenameKey(newKey); filenameValue) { - db.delete(filename, DEFAULT_WRITE_OPTS, readableNioBuffer(filenameOldKey)); - incFlush(); - db.put(filename, DEFAULT_WRITE_OPTS, readableNioBuffer(filenameNewKey), readableNioBuffer(filenameValue)); - incFlush(); - } - } - - private void incFlush() throws RocksDBException { - /* - if ((flushCounter.incrementAndGet() % 1) == 0) { - db.flushWal(false); - } - */ - } - - private long getFileIdOrAllocate(String key) throws RocksDBException { - Long id = filenameToId.get(key); - if (id != null) { - return id; - } else { - try (var filenameKey = getFilenameKey(key); var filenameValue = getFilenameValue()) { - if (db.get(filename, DEFAULT_READ_OPTS, readableNioBuffer(filenameKey), - writableNioBuffer(filenameValue, Long.BYTES)) - == RocksDB.NOT_FOUND) { - filenameValue.writerOffset(0); - filenameValue.readerOffset(0); - var newlyAllocatedId = this.nextId.getAndIncrement(); - if (newlyAllocatedId % 100 == 99) { - db.put(headers, new byte[]{0x00}, Longs.toByteArray(newlyAllocatedId + 1 + 100)); - incFlush(); 
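// Note on the allocation scheme above: the headers column family persists the
// id counter ahead of actual use, in blocks of 100. The initial put reserves
// ids 0..99, and each time an allocation crosses a block boundary the next
// block is reserved with a single write. After a crash the counter resumes
// from the last persisted value, so at most ~100 ids are skipped and no id is
// ever handed out twice.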
- } - filenameValue.writeLong(newlyAllocatedId); - db.put(filename, - DEFAULT_WRITE_OPTS, - readableNioBuffer(filenameKey), - readableNioBuffer(filenameValue) - ); - incFlush(); - filenameToId.put(key, newlyAllocatedId); - return newlyAllocatedId; - } - filenameValue.readerOffset(0); - filenameValue.writerOffset(Long.BYTES); - return filenameValue.readLong(); - } - } - } - - private void dellocateFilename(String key) throws RocksDBException { - try (var filenameKey = getFilenameKey(key)) { - db.delete(filename, DEFAULT_WRITE_OPTS, readableNioBuffer(filenameKey)); - filenameToId.remove(key); - } - } - - public boolean contains(String key) throws RocksDBException, IOException { - var l = metaLock.get(key).readLock(); - l.lock(); - try { - ensureOpen(); - return containsFileId(key); - } finally { - l.unlock(); - } - } - - private Buffer getMetaValueBuf() { - return bufferAllocator.allocate(Long.BYTES); - } - - private Buffer getDataValueBuf() { - return bufferAllocator.allocate(blockSize); - } - - private Buffer getFilenameValue() { - return bufferAllocator.allocate(Long.BYTES); - } - - private Buffer getMetaKey(long id) { - Buffer buf = bufferAllocator.allocate(Long.BYTES); - buf.writeLong(id); - return buf; - } - - private Buffer getFilenameKey(String key) { - Buffer buf = bufferAllocator.allocate(key.length()); - buf.writeCharSequence(key, StandardCharsets.US_ASCII); - return buf; - } - - private Buffer getDataKey(@Nullable Buffer buf, long id, int i) { - if (buf == null) { - buf = bufferAllocator.allocate(Long.BYTES + Integer.BYTES); - } - buf.writeLong(id); - buf.writeInt(i); - return buf; - } - - private Buffer getDataKeyPrefix(long id) { - var buf = bufferAllocator.allocate(Long.BYTES); - buf.writeLong(id); - return buf; - } - - private byte[] getDataKeyByteArray(long id, int i) { - ByteBuffer bb = ByteBuffer.wrap(new byte[Long.BYTES + Integer.BYTES]); - bb.putLong(id); - bb.putInt(i); - return bb.array(); - } - - - public int load(String name, long position, Buffer buf, int offset, int len) throws IOException { - var l = metaLock.get(name).readLock(); - l.lock(); - try { - ensureOpen(); - Long fileId = getFileIdOrNull(name); - if (fileId == null) { - return -1; - } - long size = getSizeInternal(fileId); - - if (position >= size) { - return -1; - } - - if (buf.capacity() < offset + len) { - throw new IllegalArgumentException("len is too long"); - } - - long p = position; - int f = offset; - int n = len; - - Buffer valBuf = getDataValueBuf(); - try (valBuf) { - ByteBuffer valBuffer = writableNioBuffer(valBuf, blockSize); - boolean shouldSeekTo = true; - try (var ro = new ReadOptions(itReadOpts)) { - ro.setIgnoreRangeDeletions(true); - try (Buffer fileIdPrefix = getDataKeyPrefix(fileId)) { - try (var lb = new DirectSlice(readableNioBuffer(fileIdPrefix), Long.BYTES)) { - ro.setIterateLowerBound(lb); - ro.setPrefixSameAsStart(true); - try (RocksIterator it = db.newIterator(data, itReadOpts)) { - int m; - int r; - int i; - do { - m = (int) (p % (long) blockSize); - r = Math.min(blockSize - m, n); - i = (int) (p / (long) blockSize); - - //System.out.println("Reading block " + name + "(" + fileId + "):" + i); - - if (shouldSeekTo) { - shouldSeekTo = false; - try (Buffer dataKey = getDataKey(null, fileId, i)) { - it.seek(readableNioBuffer(dataKey)); - } - if (!it.isValid()) { - throw new IOException("Block " + name + "(" + fileId + ")" + ":" + i + " not found"); - } - } else { - it.next(); - if (!it.isValid()) { - throw new IOException("Block " + name + "(" + fileId + ")" + ":" + i + " not 
found"); - } - } - assert Arrays.equals(getDataKeyByteArray(fileId, i), it.key()); - int dataRead = it.value(valBuffer); - valBuf.writerOffset(dataRead); - - valBuf.copyInto(m, buf, f, r); - - valBuf.writerOffset(0); - valBuf.readerOffset(0); - - p += r; - f += r; - n -= r; - } while (n != 0 && p < size); - - return (int) (p - position); - } - } - } - } - } - } catch (RocksDBException ex) { - throw new IOException(ex); - } finally { - l.unlock(); - } - } - - /** - * @return not exist return -1 - */ - public long getSize(String key) throws IOException { - var l = metaLock.get(key).readLock(); - l.lock(); - try { - ensureOpen(); - return getSizeInternal(key); - } finally { - l.unlock(); - } - } - - /** - * @return not exist return -1 - */ - private long getSizeInternal(String key) throws IOException { - try { - Long fileId = getFileIdOrNull(key); - if (fileId == null) { - return -1; - } - return getSizeInternal(fileId); - } catch (RocksDBException ex) { - throw new IOException(ex); - } - } - - /** - * @return not exist return -1 - */ - private long getSizeInternal(long fileId) throws IOException { - try { - try (Buffer metaKey = getMetaKey(fileId); Buffer metaData = getMetaValueBuf()) { - if (db.get(size, DEFAULT_READ_OPTS, readableNioBuffer(metaKey), writableNioBuffer(metaData, Long.BYTES)) - != RocksDB.NOT_FOUND) { - metaData.writerOffset(Long.BYTES); - return metaData.readLong(); - } else { - return -1; - } - } - } catch (RocksDBException ex) { - throw new IOException(ex); - } - } - - - public void remove(String key) throws IOException { - var l = metaLock.get(key).writeLock(); - l.lock(); - try { - ensureOpen(); - Long fileId = getFileIdOrNull(key); - if (fileId == null) { - return; - } - long size; - size = getSizeInternal(fileId); - if (size == -1) { - return; - } - Buffer dataKey = null; - try { - int n = (int) ((size + blockSize - 1) / blockSize); - if (n == 1) { - dataKey = getDataKey(dataKey, fileId, 0); - db.delete(data, DEFAULT_WRITE_OPTS, readableNioBuffer(dataKey)); - } else if (n > 1) { - var dataKey1 = getDataKeyByteArray(fileId, 0); - var dataKey2 = getDataKeyByteArray(fileId, n - 1); - db.deleteRange(data, DEFAULT_WRITE_OPTS, dataKey1, dataKey2); - } - try (Buffer metaKey = getMetaKey(fileId)) { - dellocateFilename(key); - db.delete(this.size, DEFAULT_WRITE_OPTS, readableNioBuffer(metaKey)); - } - } finally { - if (dataKey != null) { - dataKey.close(); - } - } - } catch (RocksDBException ex) { - throw new IOException(ex); - } finally { - l.unlock(); - } - } - - - public void clear() throws IOException { - for (var lock : readWriteLocks) { - lock.writeLock().lock(); - } - try { - ensureOpen(); - List keySet = listKeyInternal(); - for (String key : keySet) { - remove(key); - } - } finally { - for (var lock : readWriteLocks) { - lock.writeLock().unlock(); - } - } - } - - public List listKey() { - ensureOpen(); - for (var lock : readWriteLocks) { - lock.readLock().lock(); - } - try { - ensureOpen(); - return listKeyInternal(); - } finally { - for (var lock : readWriteLocks) { - lock.readLock().unlock(); - } - } - } - - private List listKeyInternal() { - List keys = new ArrayList<>(); - try (RocksIterator iterator = db.newIterator(filename)) { - iterator.seekToFirst(); - while (iterator.isValid()) { - keys.add(new String(iterator.key(), StandardCharsets.US_ASCII).intern()); - iterator.next(); - } - return keys; - } - } - - public void append(String name, Buffer buf, int offset, int len) throws IOException { - var l = metaLock.get(name).writeLock(); - l.lock(); - try { - 
ensureOpen(); - long size; - long fileId; - int f; - int n; - size = getSizeInternal(name); - if (size == -1) { - size = 0; - } - - f = offset; - n = len; - - fileId = getFileIdOrAllocate(name); - Buffer dataKey = null; - Buffer bb = getDataValueBuf(); - try { - do { - int m = (int) (size % (long) blockSize); - int r = Math.min(blockSize - m, n); - - int i = (int) ((size) / (long) blockSize); - dataKey = getDataKey(dataKey, fileId, i); - if (m != 0) { - int dataRead; - if ((dataRead = db.get(data, - DEFAULT_READ_OPTS, - readableNioBuffer(dataKey), - writableNioBuffer(bb, blockSize) - )) == RocksDB.NOT_FOUND) { - throw new IOException("Block " + name + "(" + fileId + "):" + i + " not found"); - } - bb.writerOffset(dataRead); - dataKey.readerOffset(0); - } else { - bb.writerOffset(0); - } - - bb.ensureWritable(r); - buf.copyInto(f, bb, m, r); - - var bbBuf = writableNioBuffer(bb, m + r); - - assert bbBuf.capacity() >= m + r : bbBuf.capacity() + " < " + (m + r); - assert bbBuf.position() == 0; - bbBuf.limit(m + r); - assert bbBuf.limit() == m + r; - - db.put(data, DEFAULT_WRITE_OPTS, readableNioBuffer(dataKey), bbBuf); - incFlush(); - size += r; - f += r; - n -= r; - - dataKey.readerOffset(0); - dataKey.writerOffset(0); - bb.readerOffset(0); - bb.writerOffset(0); - } while (n != 0); - } finally { - if (dataKey != null) { - dataKey.close(); - } - bb.close(); - } - - try (Buffer metaKey = getMetaKey(fileId); Buffer metaValue = getMetaValueBuf()) { - metaValue.writeLong(size); - db.put(this.size, DEFAULT_WRITE_OPTS, readableNioBuffer(metaKey), readableNioBuffer(metaValue)); - incFlush(); - } - } catch (RocksDBException ex) { - throw new IOException(ex); - } finally { - l.unlock(); - } - } - - public void move(String source, String dest) throws IOException { - var locks = metaLock.bulkGet(List.of(source, dest)); - for (ReadWriteLock lock : locks) { - lock.writeLock().lock(); - } - try { - ensureOpen(); - long sourceFileId = getFileId(source); - moveFileId(sourceFileId, source, dest); - } catch (RocksDBException ex) { - throw new IOException(ex); - } finally { - for (ReadWriteLock lock : locks) { - lock.writeLock().unlock(); - } - } - } - - private void ensureOpen() { - if (closed) { - throw new AlreadyClosedException("Index already closed"); - } - } - - public void close() throws IOException { - if (closed) { - return; - } - for (var lock : readWriteLocks) { - lock.writeLock().lock(); - } - try { - if (closed) { - return; - } - closed = true; - if (closeDbOnClose) { - try { - db.closeE(); - } catch (RocksDBException e) { - throw new IOException(e); - } - } - } finally { - for (var lock : readWriteLocks) { - lock.writeLock().unlock(); - } - } - } - - public void sync() throws RocksDBException { - /* - db.flushWal(true); - db.flush(new FlushOptions().setAllowWriteStall(true).setWaitForFlush(true)); - - */ - } -} \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbInputStream.java b/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbInputStream.java deleted file mode 100644 index 537c587..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbInputStream.java +++ /dev/null @@ -1,181 +0,0 @@ -package it.cavallium.dbengine.lucene.directory; - -import io.netty5.buffer.Buffer; -import io.netty5.buffer.BufferAllocator; -import java.io.EOFException; -import java.io.IOException; -import org.apache.lucene.store.IndexInput; - -public class RocksdbInputStream extends IndexInput { - - private final int bufferSize; - - private long 
position; - - private final long length; - - private Buffer currentBuffer; - - private int currentBufferIndex; - - private boolean closed = false; - - private final RocksdbFileStore store; - - private final String name; - - public RocksdbInputStream(String name, RocksdbFileStore store, int bufferSize) throws IOException { - this(name, store, bufferSize, store.getSize(name)); - } - - public RocksdbInputStream(String name, RocksdbFileStore store, int bufferSize, long length) { - this(name, - store, - bufferSize, - length, - null - ); - } - - private RocksdbInputStream(String name, RocksdbFileStore store, int bufferSize, long length, Buffer currentBuffer) { - super("RocksdbInputStream(name=" + name + ")"); - this.name = name; - this.store = store; - this.bufferSize = bufferSize; - this.currentBuffer = currentBuffer; - this.currentBufferIndex = bufferSize; - this.position = 0; - this.length = length; - if (currentBuffer != null && bufferSize > currentBuffer.capacity()) { - throw new IllegalArgumentException( - "BufferSize is " + bufferSize + " but the buffer has only a capacity of " + currentBuffer.capacity()); - } - } - - @Override - public void close() throws IOException { - if (!closed) { - closed = true; - if (currentBuffer != null) { - currentBuffer.close(); - } - } - } - - @Override - public long getFilePointer() { - return position; - } - - @Override - public void seek(long pos) { - if (pos < 0 || pos > length) { - throw new IllegalArgumentException("pos must be between 0 and " + length); - } - position = pos; - currentBufferIndex = this.bufferSize; - } - - @Override - public long length() { - return this.length; - } - - @Override - public IndexInput slice(String sliceDescription, final long offset, final long length) throws IOException { - - if (offset < 0 || length < 0 || offset + length > this.length) { - throw new IllegalArgumentException("slice() " + sliceDescription + " out of bounds: " + this); - } - - return new RocksDBSliceInputStream(name, - store, - bufferSize, - offset + length - ) { - { - seek(0L); - } - - @Override - public void seek(long pos) { - if (pos < 0L) { - throw new IllegalArgumentException("Seeking to negative position: " + this); - } - - super.seek(pos + offset); - } - - - @Override - public long getFilePointer() { - return super.getFilePointer() - offset; - } - - @Override - public long length() { - return super.length() - offset; - } - - @Override - public IndexInput slice(String sliceDescription, long ofs, long len) throws IOException { - return super.slice(sliceDescription, offset + ofs, len); - } - }; - } - - - @Override - public byte readByte() throws IOException { - - if (position >= length) { - throw new EOFException("Read end"); - } - loadBufferIfNeed(); - byte b = currentBuffer.getByte(currentBufferIndex++); - position++; - return b; - } - - protected void loadBufferIfNeed() throws IOException { - if (currentBuffer == null) { - currentBuffer = store.bufferAllocator.allocate(bufferSize).writerOffset(bufferSize); - } - if (this.currentBufferIndex == this.bufferSize) { - int n = store.load(name, position, currentBuffer, 0, bufferSize); - if (n == -1) { - throw new EOFException("Read end"); - } - this.currentBufferIndex = 0; - } - } - - @Override - public void readBytes(byte[] b, int offset, int len) throws IOException { - - if (position >= length) { - throw new EOFException("Read end"); - } - - int f = offset; - int n = Math.min((int) (length - position), len); - do { - loadBufferIfNeed(); - - int r = Math.min(bufferSize - currentBufferIndex, n); - - 
currentBuffer.copyInto(currentBufferIndex, b, f, r); - - f += r; - position += r; - currentBufferIndex += r; - n -= r; - - } while (n != 0); - } - - @Override - public IndexInput clone() { - return super.clone(); - } -} \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbOutputStream.java b/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbOutputStream.java deleted file mode 100644 index 04b22ca..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/directory/RocksdbOutputStream.java +++ /dev/null @@ -1,123 +0,0 @@ -package it.cavallium.dbengine.lucene.directory; - -import io.netty5.buffer.Buffer; -import org.apache.lucene.store.BufferedChecksum; -import org.apache.lucene.store.IndexOutput; -import org.apache.lucene.util.Accountable; - -import java.io.IOException; -import java.util.Collection; -import java.util.zip.CRC32; -import java.util.zip.Checksum; - -public class RocksdbOutputStream extends IndexOutput implements Accountable { - - private final int bufferSize; - - private long position; - - private Buffer currentBuffer; - - private boolean dirty; - - private final Checksum crc; - - private final RocksdbFileStore store; - - private final String name; - - public RocksdbOutputStream(String name, RocksdbFileStore store, int bufferSize, boolean checksum) { - super("RocksdbOutputStream(name=" + name + ")", name); - this.name = name; - this.store = store; - this.bufferSize = bufferSize; - this.currentBuffer = store.bufferAllocator.allocate(bufferSize); - this.position = 0; - this.dirty = false; - if (checksum) { - crc = new BufferedChecksum(new CRC32()); - } else { - crc = null; - } - } - - @Override - public void close() throws IOException { - if (currentBuffer != null) { - if (dirty) { - flush(); - } - currentBuffer.close(); - currentBuffer = null; - } - } - - - private void flush() throws IOException { - store.append(name, currentBuffer, 0, currentBuffer.writerOffset()); - currentBuffer.writerOffset(0); - dirty = false; - } - - @Override - public long getFilePointer() { - return position; - } - - @Override - public long getChecksum() { - if (crc != null) { - return crc.getValue(); - } else { - throw new IllegalStateException("crc is null"); - } - } - - @Override - public void writeByte(byte b) throws IOException { - - - if (crc != null) { - crc.update(b); - } - if (currentBuffer.writerOffset() == bufferSize) { - flush(); - } - currentBuffer.writeByte(b); - position++; - dirty = true; - } - - @Override - public void writeBytes(byte[] b, int offset, int length) throws IOException { - - if (crc != null) { - crc.update(b, offset, length); - } - int f = offset; - int n = length; - do { - if (currentBuffer.writerOffset() == bufferSize) { - flush(); - } - int r = Math.min(bufferSize - currentBuffer.writerOffset(), n); - currentBuffer.writeBytes(b, f, r); - f += r; - position += r; - n -= r; - dirty = true; - - - } while (n != 0); - } - - @Override - public long ramBytesUsed() { - return position; - } - - @Override - public Collection getChildResources() { - return null; - } -} \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/lucene/hugepq/mirrored/HitsThresholdChecker.java b/src/main/java/it/cavallium/dbengine/lucene/hugepq/mirrored/HitsThresholdChecker.java deleted file mode 100644 index 6830a34..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/hugepq/mirrored/HitsThresholdChecker.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * 
contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package it.cavallium.dbengine.lucene.hugepq.mirrored; - -import java.util.concurrent.atomic.AtomicLong; -import org.apache.lucene.search.ScoreMode; - -/** Used for defining custom algorithms to allow searches to early terminate */ -public abstract class HitsThresholdChecker { - /** Implementation of HitsThresholdChecker which allows global hit counting */ - private static class GlobalHitsThresholdChecker extends HitsThresholdChecker { - private final int totalHitsThreshold; - private final AtomicLong globalHitCount; - - public GlobalHitsThresholdChecker(int totalHitsThreshold) { - - if (totalHitsThreshold < 0) { - throw new IllegalArgumentException( - "totalHitsThreshold must be >= 0, got " + totalHitsThreshold); - } - - this.totalHitsThreshold = totalHitsThreshold; - this.globalHitCount = new AtomicLong(); - } - - @Override - public void incrementHitCount() { - globalHitCount.incrementAndGet(); - } - - @Override - public boolean isThresholdReached() { - return globalHitCount.getAcquire() > totalHitsThreshold; - } - - @Override - public ScoreMode scoreMode() { - return totalHitsThreshold == Integer.MAX_VALUE ? ScoreMode.COMPLETE : ScoreMode.TOP_SCORES; - } - - @Override - public int getHitsThreshold() { - return totalHitsThreshold; - } - } - - /** Default implementation of HitsThresholdChecker to be used for single threaded execution */ - private static class LocalHitsThresholdChecker extends HitsThresholdChecker { - private final int totalHitsThreshold; - private int hitCount; - - public LocalHitsThresholdChecker(int totalHitsThreshold) { - - if (totalHitsThreshold < 0) { - throw new IllegalArgumentException( - "totalHitsThreshold must be >= 0, got " + totalHitsThreshold); - } - - this.totalHitsThreshold = totalHitsThreshold; - } - - @Override - public void incrementHitCount() { - ++hitCount; - } - - @Override - public boolean isThresholdReached() { - return hitCount > totalHitsThreshold; - } - - @Override - public ScoreMode scoreMode() { - return totalHitsThreshold == Integer.MAX_VALUE ? 
ScoreMode.COMPLETE : ScoreMode.TOP_SCORES; - } - - @Override - public int getHitsThreshold() { - return totalHitsThreshold; - } - } - - /* - * Returns a threshold checker that is useful for single threaded searches - */ - public static HitsThresholdChecker create(final int totalHitsThreshold) { - return new LocalHitsThresholdChecker(totalHitsThreshold); - } - - /* - * Returns a threshold checker that is based on a shared counter - */ - public static HitsThresholdChecker createShared(final int totalHitsThreshold) { - return new GlobalHitsThresholdChecker(totalHitsThreshold); - } - - public abstract void incrementHitCount(); - - public abstract ScoreMode scoreMode(); - - public abstract int getHitsThreshold(); - - public abstract boolean isThresholdReached(); -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/hugepq/mirrored/MinDocIterator.java b/src/main/java/it/cavallium/dbengine/lucene/hugepq/mirrored/MinDocIterator.java deleted file mode 100644 index 87e14b3..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/hugepq/mirrored/MinDocIterator.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package it.cavallium.dbengine.lucene.hugepq.mirrored; - -import java.io.IOException; -import org.apache.lucene.search.DocIdSetIterator; - -/** Docs iterator that starts iterating from a configurable minimum document */ -public class MinDocIterator extends DocIdSetIterator { - final int segmentMinDoc; - final int maxDoc; - int doc = -1; - - public MinDocIterator(int segmentMinDoc, int maxDoc) { - this.segmentMinDoc = segmentMinDoc; - this.maxDoc = maxDoc; - } - - @Override - public int docID() { - return doc; - } - - @Override - public int nextDoc() throws IOException { - return advance(doc + 1); - } - - @Override - public int advance(int target) throws IOException { - assert target > doc; - if (doc == -1) { - // skip directly to minDoc - doc = Math.max(target, segmentMinDoc); - } else { - doc = target; - } - if (doc >= maxDoc) { - doc = NO_MORE_DOCS; - } - return doc; - } - - @Override - public long cost() { - return maxDoc - segmentMinDoc; - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/hugepq/mirrored/MultiLeafFieldComparator.java b/src/main/java/it/cavallium/dbengine/lucene/hugepq/mirrored/MultiLeafFieldComparator.java deleted file mode 100644 index de19771..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/hugepq/mirrored/MultiLeafFieldComparator.java +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
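Stepping back to MinDocIterator above, its advance contract is easy to miss: the very first advance clamps the target up to segmentMinDoc, later calls behave like a plain advance, and anything at or beyond maxDoc collapses to NO_MORE_DOCS. A usage sketch (hypothetical bounds, assuming the class above is on the classpath):

    import java.io.IOException;
    import org.apache.lucene.search.DocIdSetIterator;

    final class MinDocIteratorDemo {
        // Walks docs [42, 100): everything below 42 is skipped by the first advance.
        static void walk() throws IOException {
            DocIdSetIterator it = new MinDocIterator(42, 100);
            for (int doc = it.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = it.nextDoc()) {
                // doc takes the values 42, 43, ..., 99
            }
        }
    }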
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package it.cavallium.dbengine.lucene.hugepq.mirrored; - -import java.io.IOException; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.Scorable; - -public final class MultiLeafFieldComparator implements LeafFieldComparator { - - private final LeafFieldComparator[] comparators; - private final int[] reverseMul; - // we extract the first comparator to avoid array access in the common case - // that the first comparator compares worse than the bottom entry in the queue - private final LeafFieldComparator firstComparator; - private final int firstReverseMul; - - public MultiLeafFieldComparator(LeafFieldComparator[] comparators, int[] reverseMul) { - if (comparators.length != reverseMul.length) { - throw new IllegalArgumentException( - "Must have the same number of comparators and reverseMul, got " - + comparators.length - + " and " - + reverseMul.length); - } - this.comparators = comparators; - this.reverseMul = reverseMul; - this.firstComparator = comparators[0]; - this.firstReverseMul = reverseMul[0]; - } - - @Override - public void setBottom(int slot) throws IOException { - for (LeafFieldComparator comparator : comparators) { - comparator.setBottom(slot); - } - } - - @Override - public int compareBottom(int doc) throws IOException { - int cmp = firstReverseMul * firstComparator.compareBottom(doc); - if (cmp != 0) { - return cmp; - } - for (int i = 1; i < comparators.length; ++i) { - cmp = reverseMul[i] * comparators[i].compareBottom(doc); - if (cmp != 0) { - return cmp; - } - } - return 0; - } - - @Override - public int compareTop(int doc) throws IOException { - int cmp = firstReverseMul * firstComparator.compareTop(doc); - if (cmp != 0) { - return cmp; - } - for (int i = 1; i < comparators.length; ++i) { - cmp = reverseMul[i] * comparators[i].compareTop(doc); - if (cmp != 0) { - return cmp; - } - } - return 0; - } - - @Override - public void copy(int slot, int doc) throws IOException { - for (LeafFieldComparator comparator : comparators) { - comparator.copy(slot, doc); - } - } - - @Override - public void setScorer(Scorable scorer) throws IOException { - for (LeafFieldComparator comparator : comparators) { - comparator.setScorer(scorer); - } - } - - @Override - public void setHitsThresholdReached() throws IOException { - // this is needed for skipping functionality that is only relevant for the 1st comparator - firstComparator.setHitsThresholdReached(); - } - - @Override - public DocIdSetIterator competitiveIterator() throws IOException { - // this is needed for skipping functionality that is only relevant for the 1st comparator - return firstComparator.competitiveIterator(); - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/hugepq/search/HugePqFullFieldDocCollector.java b/src/main/java/it/cavallium/dbengine/lucene/hugepq/search/HugePqFullFieldDocCollector.java deleted file mode 100644 index d1867db..0000000 
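MultiLeafFieldComparator above is a straight lexicographic reduction: each per-field comparator is consulted in order, its verdict is sign-adjusted by reverseMul, and the first non-zero result wins. A compact, self-contained sketch of that composition rule, detached from Lucene's interfaces (all names illustrative):

    import java.util.List;
    import java.util.function.IntBinaryOperator;

    final class ComposedCompare {
        private final List<IntBinaryOperator> comparators; // each compares two docs on one field
        private final int[] reverseMul;                    // 1 = ascending, -1 = descending

        ComposedCompare(List<IntBinaryOperator> comparators, int[] reverseMul) {
            if (comparators.size() != reverseMul.length) {
                throw new IllegalArgumentException("one reverseMul per comparator");
            }
            this.comparators = comparators;
            this.reverseMul = reverseMul;
        }

        int compare(int doc1, int doc2) {
            for (int i = 0; i < reverseMul.length; i++) {
                int cmp = reverseMul[i] * comparators.get(i).applyAsInt(doc1, doc2);
                if (cmp != 0) {
                    return cmp; // earlier fields dominate; later ones only break ties
                }
            }
            return 0;
        }
    }

The real class additionally unrolls the first comparator into dedicated fields, since in the common case the first field already decides and the array accesses can be skipped.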
--- a/src/main/java/it/cavallium/dbengine/lucene/hugepq/search/HugePqFullFieldDocCollector.java +++ /dev/null @@ -1,495 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package it.cavallium.dbengine.lucene.hugepq.search; - -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.FieldValueHitQueue; -import it.cavallium.dbengine.lucene.FullDocs; -import it.cavallium.dbengine.lucene.LLFieldDoc; -import it.cavallium.dbengine.lucene.LLSlotDoc; -import it.cavallium.dbengine.lucene.LLSlotDocCodec; -import it.cavallium.dbengine.lucene.HugePqPriorityQueue; -import it.cavallium.dbengine.lucene.MaxScoreAccumulator; -import it.cavallium.dbengine.lucene.PriorityQueue; -import it.cavallium.dbengine.lucene.ResourceIterable; -import it.cavallium.dbengine.lucene.collector.FullDocsCollector; -import it.cavallium.dbengine.lucene.collector.FullFieldDocs; -import it.cavallium.dbengine.lucene.hugepq.mirrored.HitsThresholdChecker; -import it.cavallium.dbengine.lucene.hugepq.mirrored.MultiLeafFieldComparator; -import java.io.IOException; -import java.util.Arrays; -import java.util.Collection; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.CollectionTerminatedException; -import org.apache.lucene.search.CollectorManager; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.FieldComparator; -import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.Scorable; -import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.Sort; -import org.apache.lucene.search.SortField; -import org.apache.lucene.search.TotalHits; -import org.apache.lucene.search.TotalHits.Relation; -import reactor.core.publisher.Flux; - -/** - * A {@link org.apache.lucene.search.Collector} that sorts by {@link SortField} using {@link FieldComparator}s. - * - *
See the {@link #create(LLTempHugePqEnv, Sort, int, int)} method for instantiating a - * TopFieldCollector. - * - * This class must mirror these changes: - * - * Lucene TopFieldCollector changes on GitHub - */ -public abstract class HugePqFullFieldDocCollector extends - FullDocsCollector<HugePqPriorityQueue<LLSlotDoc>, LLSlotDoc, LLFieldDoc> { - - // TODO: one optimization we could do is to pre-fill - // the queue with sentinel values that are guaranteed to - // always compare lower than a real hit; this would - // save having to check queueFull on each insert - - private abstract class TopFieldLeafCollector implements LeafCollector { - - final LeafFieldComparator comparator; - final int reverseMul; - Scorable scorer; - boolean collectedAllCompetitiveHits = false; - - TopFieldLeafCollector(FieldValueHitQueue fieldValueHitQueue, Sort sort, LeafReaderContext context) - throws IOException { - // as all segments are sorted in the same way, it is enough to check only the 1st segment for - // indexSort - if (searchSortPartOfIndexSort == null) { - final Sort indexSort = context.reader().getMetaData().getSort(); - searchSortPartOfIndexSort = canEarlyTerminate(sort, indexSort); - if (searchSortPartOfIndexSort) { - firstComparator.disableSkipping(); - } - } - LeafFieldComparator[] comparators = fieldValueHitQueue.getComparators(context); - int[] reverseMuls = fieldValueHitQueue.getReverseMul(); - if (comparators.length == 1) { - this.reverseMul = reverseMuls[0]; - this.comparator = comparators[0]; - } else { - this.reverseMul = 1; - this.comparator = new MultiLeafFieldComparator(comparators, reverseMuls); - } - } - - void countHit(int doc) throws IOException { - ++totalHits; - hitsThresholdChecker.incrementHitCount(); - - if (minScoreAcc != null && (totalHits & minScoreAcc.modInterval) == 0) { - updateGlobalMinCompetitiveScore(scorer); - } - if (!scoreMode.isExhaustive() - && totalHitsRelation == TotalHits.Relation.EQUAL_TO - && hitsThresholdChecker.isThresholdReached()) { - // the first time hitsThreshold is reached, notify the comparator about this - comparator.setHitsThresholdReached(); - totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; - } - } - - boolean thresholdCheck(int doc) throws IOException { - if (collectedAllCompetitiveHits || reverseMul * comparator.compareBottom(doc) <= 0) { - // since docs are visited in doc Id order, if compare is 0, it means - // this document is larger than anything else in the queue, and - // therefore not competitive. - if (searchSortPartOfIndexSort) { - if (hitsThresholdChecker.isThresholdReached()) { - totalHitsRelation = Relation.GREATER_THAN_OR_EQUAL_TO; - throw new CollectionTerminatedException(); - } else { - collectedAllCompetitiveHits = true; - } - } else if (totalHitsRelation == TotalHits.Relation.EQUAL_TO) { - // we can start setting the min competitive score if the - // threshold is reached for the first time here.
- updateMinCompetitiveScore(scorer); - } - return true; - } - return false; - } - - void collectCompetitiveHit(int doc) throws IOException { - // This hit is competitive - replace bottom element in queue & adjustTop - comparator.copy(pq.top().slot(), doc); - updateBottom(doc); - comparator.setBottom(pq.top().slot()); - updateMinCompetitiveScore(scorer); - } - - void collectAnyHit(int doc, int hitsCollected) throws IOException { - // Startup transient: queue hasn't gathered numHits yet - int slot = hitsCollected - 1; - // Copy hit into queue - comparator.copy(slot, doc); - add(slot, doc); - if (queueFull) { - comparator.setBottom(pq.top().slot()); - updateMinCompetitiveScore(scorer); - } - } - - @Override - public void setScorer(Scorable scorer) throws IOException { - this.scorer = scorer; - comparator.setScorer(scorer); - minCompetitiveScore = 0f; - updateMinCompetitiveScore(scorer); - if (minScoreAcc != null) { - updateGlobalMinCompetitiveScore(scorer); - } - } - - @Override - public DocIdSetIterator competitiveIterator() throws IOException { - return comparator.competitiveIterator(); - } - } - - static boolean canEarlyTerminate(Sort searchSort, Sort indexSort) { - return canEarlyTerminateOnDocId(searchSort) || canEarlyTerminateOnPrefix(searchSort, indexSort); - } - - private static boolean canEarlyTerminateOnDocId(Sort searchSort) { - final SortField[] fields1 = searchSort.getSort(); - return SortField.FIELD_DOC.equals(fields1[0]); - } - - private static boolean canEarlyTerminateOnPrefix(Sort searchSort, Sort indexSort) { - if (indexSort != null) { - final SortField[] fields1 = searchSort.getSort(); - final SortField[] fields2 = indexSort.getSort(); - // early termination is possible if fields1 is a prefix of fields2 - if (fields1.length > fields2.length) { - return false; - } - return Arrays.asList(fields1).equals(Arrays.asList(fields2).subList(0, fields1.length)); - } else { - return false; - } - } - - /* - * Implements a TopFieldCollector over one SortField criteria, with tracking - * document scores and maxScore. 
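// The early-termination rule used above can be read in isolation: a collector may stop a
// segment early only when the search sort is a leading prefix of the index sort (or sorts by
// doc id). A minimal sketch of the prefix test, with field names standing in for SortFields:
import java.util.List;

final class SortPrefixCheck {

	/** True when searchSort is a leading prefix of indexSort, e.g. [timestamp] over [timestamp, id]. */
	static boolean isPrefixOf(List<String> searchSort, List<String> indexSort) {
		if (searchSort.size() > indexSort.size()) {
			return false;
		}
		return searchSort.equals(indexSort.subList(0, searchSort.size()));
	}

	public static void main(String[] args) {
		List<String> indexSort = List.of("timestamp", "id");
		System.out.println(isPrefixOf(List.of("timestamp"), indexSort)); // true
		System.out.println(isPrefixOf(List.of("id"), indexSort));        // false
	}
}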
- */ - private static class SimpleFieldCollector extends HugePqFullFieldDocCollector { - final Sort sort; - final PriorityQueue queue; - private final FieldValueHitQueue fieldValueHitQueue; - - public SimpleFieldCollector( - Sort sort, - HugePqPriorityQueue queue, - FieldValueHitQueue fieldValueHitQueue, - long numHits, - HitsThresholdChecker hitsThresholdChecker, - MaxScoreAccumulator minScoreAcc) { - super(queue, fieldValueHitQueue, numHits, hitsThresholdChecker, sort.needsScores(), minScoreAcc); - this.sort = sort; - this.queue = queue; - this.fieldValueHitQueue = fieldValueHitQueue; - } - - @Override - public LeafCollector getLeafCollector(LeafReaderContext context) throws IOException { - docBase = context.docBase; - - return new TopFieldLeafCollector(fieldValueHitQueue, sort, context) { - - @Override - public void collect(int doc) throws IOException { - countHit(doc); - if (queueFull) { - if (thresholdCheck(doc)) { - return; - } - collectCompetitiveHit(doc); - } else { - collectAnyHit(doc, totalHits); - } - } - }; - } - - @Override - public ResourceIterable mapResults(ResourceIterable it) { - return new ResourceIterable<>() { - @Override - public void close() { - it.close(); - } - - @Override - public Flux iterate() { - return it.iterate().map(fieldValueHitQueue::fillFields); - } - - @Override - public Flux iterate(long skips) { - return it.iterate(skips).map(fieldValueHitQueue::fillFields); - } - }; - } - } - - final long numHits; - final HitsThresholdChecker hitsThresholdChecker; - final FieldComparator firstComparator; - final boolean canSetMinScore; - - Boolean searchSortPartOfIndexSort = null; // shows if Search Sort if a part of the Index Sort - - // an accumulator that maintains the maximum of the segment's minimum competitive scores - final MaxScoreAccumulator minScoreAcc; - // the current local minimum competitive score already propagated to the underlying scorer - float minCompetitiveScore; - - final int numComparators; - boolean queueFull; - int docBase; - final boolean needsScores; - final ScoreMode scoreMode; - - // Declaring the constructor private prevents extending this class by anyone - // else. Note that the class cannot be final since it's extended by the - // internal versions. If someone will define a constructor with any other - // visibility, then anyone will be able to extend the class, which is not what - // we want. - private HugePqFullFieldDocCollector( - HugePqPriorityQueue pq, - FieldValueHitQueue fieldValueHitQueue, - long numHits, - HitsThresholdChecker hitsThresholdChecker, - boolean needsScores, - MaxScoreAccumulator minScoreAcc) { - super(pq); - this.needsScores = needsScores; - this.numHits = numHits; - this.hitsThresholdChecker = hitsThresholdChecker; - this.numComparators = fieldValueHitQueue.getComparators().length; - this.firstComparator = fieldValueHitQueue.getComparators()[0]; - int reverseMul = fieldValueHitQueue.getReverseMul()[0]; - - if (firstComparator.getClass().equals(FieldComparator.RelevanceComparator.class) - && reverseMul == 1 // if the natural sort is preserved (sort by descending relevance) - && hitsThresholdChecker.getHitsThreshold() != Integer.MAX_VALUE) { - scoreMode = ScoreMode.TOP_SCORES; - canSetMinScore = true; - } else { - canSetMinScore = false; - if (hitsThresholdChecker.getHitsThreshold() != Integer.MAX_VALUE) { - scoreMode = needsScores ? ScoreMode.TOP_DOCS_WITH_SCORES : ScoreMode.TOP_DOCS; - } else { - scoreMode = needsScores ? 
ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; - } - } - this.minScoreAcc = minScoreAcc; - } - - @Override - public ScoreMode scoreMode() { - return scoreMode; - } - - protected void updateGlobalMinCompetitiveScore(Scorable scorer) throws IOException { - assert minScoreAcc != null; - if (canSetMinScore && hitsThresholdChecker.isThresholdReached()) { - // we can start checking the global maximum score even - // if the local queue is not full because the threshold - // is reached. - MaxScoreAccumulator.DocAndScore maxMinScore = minScoreAcc.get(); - if (maxMinScore != null && maxMinScore.score > minCompetitiveScore) { - scorer.setMinCompetitiveScore(maxMinScore.score); - minCompetitiveScore = maxMinScore.score; - totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; - } - } - } - - protected void updateMinCompetitiveScore(Scorable scorer) throws IOException { - if (canSetMinScore && queueFull && hitsThresholdChecker.isThresholdReached()) { - assert pq.top() != null; - float minScore = (float) firstComparator.value(pq.top().slot()); - if (minScore > minCompetitiveScore) { - scorer.setMinCompetitiveScore(minScore); - minCompetitiveScore = minScore; - totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; - if (minScoreAcc != null) { - minScoreAcc.accumulate(pq.top().doc(), minScore); - } - } - } - } - - /** - * Creates a new {@link HugePqFullFieldDocCollector} from the given arguments. - * - *
NOTE: The instances returned by this method pre-allocate a full array of length - * numHits. - * - * @param sort the sort criteria (SortFields). - * @param numHits the number of results to collect. - * @param totalHitsThreshold the number of docs to count accurately. If the query matches more - * than {@code totalHitsThreshold} hits then its hit count will be a lower bound. On the other - * hand if the query matches less than or exactly {@code totalHitsThreshold} hits then the hit - * count of the result will be accurate. {@link Integer#MAX_VALUE} may be used to make the hit - * count accurate, but this will also make query processing slower. - * @return a {@link HugePqFullFieldDocCollector} instance which will sort the results by the sort criteria. - */ - public static HugePqFullFieldDocCollector create(LLTempHugePqEnv env, Sort sort, int numHits, int totalHitsThreshold) { - if (totalHitsThreshold < 0) { - throw new IllegalArgumentException( - "totalHitsThreshold must be >= 0, got " + totalHitsThreshold); - } - - return create( - env, - sort, - numHits, - HitsThresholdChecker.create(Math.max(totalHitsThreshold, numHits)), - null /* bottomValueChecker */); - } - - /** - * Same as above with additional parameters to allow passing in the threshold checker and the max - * score accumulator. - */ - static HugePqFullFieldDocCollector create( - LLTempHugePqEnv env, - Sort sort, - int numHits, - HitsThresholdChecker hitsThresholdChecker, - MaxScoreAccumulator minScoreAcc) { - - if (sort.getSort().length == 0) { - throw new IllegalArgumentException("Sort must contain at least one field"); - } - - if (numHits <= 0) { - throw new IllegalArgumentException( - "numHits must be > 0; please use TotalHitCountCollector if you just need the total hit count"); - } - - if (hitsThresholdChecker == null) { - throw new IllegalArgumentException("hitsThresholdChecker should not be null"); - } - - var fieldValueHitQueue = new LLSlotDocCodec(env, numHits, sort.getSort()); - var queue = new HugePqPriorityQueue<>(env, fieldValueHitQueue); - - // inform a comparator that sort is based on this single field - // to enable some optimizations for skipping over non-competitive documents - // We can't set single sort when the `after` parameter is non-null as it's - // an implicit sort over the document id. - if (fieldValueHitQueue.getComparators().length == 1) { - fieldValueHitQueue.getComparators()[0].setSingleSort(); - } - return new SimpleFieldCollector(sort, queue, fieldValueHitQueue, numHits, hitsThresholdChecker, minScoreAcc); - } - - /** - * Create a CollectorManager which uses a shared hit counter to maintain number of hits and a - * shared {@link MaxScoreAccumulator} to propagate the minimum score accross segments if the - * primary sort is by relevancy. 
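// The shared-manager pattern described above is the stock Lucene CollectorManager contract:
// newCollector() hands each search slice its own collector, and reduce() merges the per-slice
// results. A minimal sketch against plain Lucene types (hit counting only; the shared
// MaxScoreAccumulator state of the removed class is omitted):
import java.io.IOException;
import java.util.Collection;
import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.TotalHitCountCollector;

final class CountingManager implements CollectorManager<TotalHitCountCollector, Integer> {

	@Override
	public TotalHitCountCollector newCollector() {
		return new TotalHitCountCollector(); // one collector per concurrent search slice
	}

	@Override
	public Integer reduce(Collection<TotalHitCountCollector> collectors) throws IOException {
		int total = 0;
		for (TotalHitCountCollector collector : collectors) {
			total += collector.getTotalHits();
		}
		return total;
	}
}
// Usage: int count = indexSearcher.search(query, new CountingManager());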
- */ - public static CollectorManager> createSharedManager( - LLTempHugePqEnv env, Sort sort, int numHits, long totalHitsThreshold) { - return new CollectorManager<>() { - - private final HitsThresholdChecker hitsThresholdChecker; - - { - if (totalHitsThreshold < Integer.MAX_VALUE) { - hitsThresholdChecker = HitsThresholdChecker.createShared(Math.max((int) totalHitsThreshold, numHits)); - } else { - hitsThresholdChecker = HitsThresholdChecker.createShared(Integer.MAX_VALUE); - } - } - - private final MaxScoreAccumulator minScoreAcc = new MaxScoreAccumulator(); - - @Override - public HugePqFullFieldDocCollector newCollector() { - return create(env, sort, numHits, hitsThresholdChecker, minScoreAcc); - } - - @Override - public FullFieldDocs reduce(Collection collectors) { - return reduceShared(sort, collectors); - } - }; - } - - private static FullFieldDocs reduceShared(Sort sort, Collection collectors) { - @SuppressWarnings("unchecked") - final FullDocs[] fullDocs = new FullDocs[collectors.size()]; - int i = 0; - for (var collector : collectors) { - fullDocs[i++] = collector.fullDocs(); - } - return (FullFieldDocs) FullDocs.merge(sort, fullDocs); - - } - - final void add(int slot, int doc) { - pq.add(new LLSlotDoc(docBase + doc, Float.NaN, -1, slot)); - - // The queue is full either when totalHits == numHits (in SimpleFieldCollector), in which case - // slot = totalHits - 1, or when hitsCollected == numHits (in PagingFieldCollector this is hits - // on the current page) and slot = hitsCollected - 1. - assert slot < numHits; - queueFull = slot == numHits - 1; - } - - //todo: check if this part is efficient and not redundant - final void updateBottom(int doc) { - // bottom.score is already set to Float.NaN in add(). - var bottom = pq.top(); - pq.replaceTop(bottom, new LLSlotDoc(docBase + doc, bottom.score(), bottom.shardIndex(), bottom.slot())); - } - - /* - * Only the following callback methods need to be overridden since - * topDocs(int, int) calls them to return the results. - */ - - /** Return whether collection terminated early. */ - public boolean isEarlyTerminated() { - return totalHitsRelation == Relation.GREATER_THAN_OR_EQUAL_TO; - } - - @Override - public void onClose() { - this.pq.close(); - if (this.firstComparator instanceof SafeCloseable closeable) { - closeable.close(); - } - super.onClose(); - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/hugepq/search/HugePqFullScoreDocCollector.java b/src/main/java/it/cavallium/dbengine/lucene/hugepq/search/HugePqFullScoreDocCollector.java deleted file mode 100644 index d5bc268..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/hugepq/search/HugePqFullScoreDocCollector.java +++ /dev/null @@ -1,319 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package it.cavallium.dbengine.lucene.hugepq.search; - -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.FullDocs; -import it.cavallium.dbengine.lucene.LLScoreDoc; -import it.cavallium.dbengine.lucene.LLScoreDocCodec; -import it.cavallium.dbengine.lucene.HugePqPriorityQueue; -import it.cavallium.dbengine.lucene.MaxScoreAccumulator; -import it.cavallium.dbengine.lucene.ResourceIterable; -import it.cavallium.dbengine.lucene.collector.FullDocsCollector; -import java.io.IOException; -import java.util.Collection; -import org.apache.lucene.index.LeafReaderContext; -import it.cavallium.dbengine.lucene.MaxScoreAccumulator.DocAndScore; -import org.apache.lucene.search.Collector; -import org.apache.lucene.search.CollectorManager; -import org.apache.lucene.search.IndexSearcher; -import org.apache.lucene.search.LeafCollector; -import org.apache.lucene.search.Scorable; -import org.apache.lucene.search.ScoreDoc; -import org.apache.lucene.search.ScoreMode; -import org.apache.lucene.search.TotalHits; -import org.jetbrains.annotations.NotNull; -import org.jetbrains.annotations.Nullable; - -/** - * A {@link Collector} implementation that collects the top-scoring hits, returning them as a {@link - * FullDocs}. This is used by {@link IndexSearcher} to implement {@link FullDocs}-based search. Hits - * are sorted by score descending and then (when the scores are tied) docID ascending. When you - * create an instance of this collector you should know in advance whether documents are going to be - * collected in doc Id order or not. - * - *
NOTE: The values {@link Float#NaN} and {@link Float#NEGATIVE_INFINITY} are not valid - * scores. This collector will not properly collect hits with such scores. - * - * This class must mirror these changes: - * - * Lucene TopScoreDocCollector changes on GitHub - */ -public abstract class HugePqFullScoreDocCollector extends - FullDocsCollector<HugePqPriorityQueue<LLScoreDoc>, LLScoreDoc, LLScoreDoc> { - - /** Scorable leaf collector */ - public abstract static class ScorerLeafCollector implements LeafCollector { - - protected Scorable scorer; - - @Override - public void setScorer(Scorable scorer) throws IOException { - this.scorer = scorer; - } - } - - private static class SimpleHugePqFullScoreDocCollector extends HugePqFullScoreDocCollector { - - SimpleHugePqFullScoreDocCollector(LLTempHugePqEnv env, @Nullable Long limit, - CustomHitsThresholdChecker hitsThresholdChecker, MaxScoreAccumulator minScoreAcc) { - super(env, limit, hitsThresholdChecker, minScoreAcc); - } - - @Override - public LeafCollector getLeafCollector(LeafReaderContext context) { - // reset the minimum competitive score - docBase = context.docBase; - minCompetitiveScore = 0f; - return new ScorerLeafCollector() { - - @Override - public void setScorer(Scorable scorer) throws IOException { - super.setScorer(scorer); - if (minScoreAcc == null) { - updateMinCompetitiveScore(scorer); - } else { - updateGlobalMinCompetitiveScore(scorer); - } - } - - @Override - public void collect(int doc) throws IOException { - float score = scorer.score(); - - // This collector relies on the fact that scorers produce positive values: - assert score >= 0; // NOTE: false for NaN - - totalHits++; - hitsThresholdChecker.incrementHitCount(); - - if (minScoreAcc != null && (totalHits & minScoreAcc.modInterval) == 0) { - updateGlobalMinCompetitiveScore(scorer); - } - - // If there is a limit, and it's reached, use the replacement logic - if (limit != null && pq.size() >= limit) { - if (pq.top() != null && score <= pq.top().score()) { - if (totalHitsRelation == TotalHits.Relation.EQUAL_TO) { - // we just reached totalHitsThreshold, we can start setting the min - // competitive score now - updateMinCompetitiveScore(scorer); - } - // Since docs are returned in-order (i.e., increasing doc Id), a document - // with equal score to pqTop.score cannot compete since HitQueue favors - // documents with lower doc Ids. Therefore reject those docs too. - return; - } else { - // Remove the top element, then add the following element - pq.replaceTop(pq.top(), new LLScoreDoc(doc + docBase, score, -1)); - // The minimum competitive score will be updated later - } - } else { - // There is no limit or the limit has not been reached. Add the document to the queue - pq.add(new LLScoreDoc(doc + docBase, score, -1)); - // The minimum competitive score will be updated later - } - // Update the minimum competitive score - updateMinCompetitiveScore(scorer); - } - }; - } - - @Override - public ResourceIterable<LLScoreDoc> mapResults(ResourceIterable<LLScoreDoc> it) { - return it; - } - } - - /** - * Creates a new {@link HugePqFullScoreDocCollector} given the number of hits to collect and the number - * of hits to count accurately. - * - *
NOTE: If the total hit count of the top docs is less than or exactly {@code - * totalHitsThreshold} then this value is accurate. On the other hand, if the {@link - * FullDocs#totalHits} value is greater than {@code totalHitsThreshold} then its value is a lower - * bound of the hit count. A value of {@link Integer#MAX_VALUE} will make the hit count accurate - * but will also likely make query processing slower. - * - *
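// In code, those threshold semantics come down to TotalHits.Relation: the value is exact only
// while the relation is EQUAL_TO, otherwise it is a lower bound. A small sketch:
import org.apache.lucene.search.TotalHits;

final class TotalHitsText {

	static String render(TotalHits totalHits) {
		if (totalHits.relation == TotalHits.Relation.EQUAL_TO) {
			return totalHits.value + " hits (exact)";
		}
		// counting stopped once totalHitsThreshold was crossed
		return ">= " + totalHits.value + " hits (lower bound)";
	}

	public static void main(String[] args) {
		System.out.println(render(new TotalHits(1000, TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO)));
	}
}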
NOTE: The instances returned by this method pre-allocate a full array of length - * numHits, and fill the array with sentinel objects. - */ - public static HugePqFullScoreDocCollector create(LLTempHugePqEnv env, long numHits, int totalHitsThreshold) { - return create(env, numHits, CustomHitsThresholdChecker.create(totalHitsThreshold), null); - } - - /** - * Creates a new {@link HugePqFullScoreDocCollector} given the number of hits to count accurately. - * - *
NOTE: A value of {@link Integer#MAX_VALUE} will make the hit count accurate - * but will also likely make query processing slower. - */ - public static HugePqFullScoreDocCollector create(LLTempHugePqEnv env, int totalHitsThreshold) { - return create(env, CustomHitsThresholdChecker.create(totalHitsThreshold), null); - } - - static HugePqFullScoreDocCollector create( - LLTempHugePqEnv env, - CustomHitsThresholdChecker hitsThresholdChecker, - MaxScoreAccumulator minScoreAcc) { - - if (hitsThresholdChecker == null) { - throw new IllegalArgumentException("hitsThresholdChecker must be non null"); - } - - return new SimpleHugePqFullScoreDocCollector(env, null, hitsThresholdChecker, minScoreAcc); - } - - static HugePqFullScoreDocCollector create( - LLTempHugePqEnv env, - @NotNull Long numHits, - CustomHitsThresholdChecker hitsThresholdChecker, - MaxScoreAccumulator minScoreAcc) { - - if (hitsThresholdChecker == null) { - throw new IllegalArgumentException("hitsThresholdChecker must be non null"); - } - - return new SimpleHugePqFullScoreDocCollector(env, - (numHits < 0 || numHits >= 2147483630L) ? null : numHits, - hitsThresholdChecker, - minScoreAcc - ); - } - - /** - * Create a CollectorManager which uses a shared hit counter to maintain number of hits and a - * shared {@link MaxScoreAccumulator} to propagate the minimum score accross segments - */ - public static CollectorManager> createSharedManager( - LLTempHugePqEnv env, - long numHits, - long totalHitsThreshold) { - return new CollectorManager<>() { - - private final CustomHitsThresholdChecker hitsThresholdChecker = - CustomHitsThresholdChecker.createShared(totalHitsThreshold); - private final MaxScoreAccumulator minScoreAcc = new MaxScoreAccumulator(); - - @Override - public HugePqFullScoreDocCollector newCollector() { - return HugePqFullScoreDocCollector.create(env, numHits, hitsThresholdChecker, minScoreAcc); - } - - @Override - public FullDocs reduce(Collection collectors) { - return reduceShared(collectors); - } - }; - } - - /** - * Create a CollectorManager which uses a shared {@link MaxScoreAccumulator} to propagate - * the minimum score accross segments - */ - public static CollectorManager> createSharedManager( - LLTempHugePqEnv env, - long totalHitsThreshold) { - return new CollectorManager<>() { - - private final CustomHitsThresholdChecker hitsThresholdChecker = - CustomHitsThresholdChecker.createShared(totalHitsThreshold); - private final MaxScoreAccumulator minScoreAcc = new MaxScoreAccumulator(); - - @Override - public HugePqFullScoreDocCollector newCollector() { - return HugePqFullScoreDocCollector.create(env, hitsThresholdChecker, minScoreAcc); - } - - @Override - public FullDocs reduce(Collection collectors) { - return reduceShared(collectors); - } - }; - } - - private static FullDocs reduceShared(Collection collectors) { - @SuppressWarnings("unchecked") - final FullDocs[] fullDocs = new FullDocs[collectors.size()]; - int i = 0; - for (HugePqFullScoreDocCollector collector : collectors) { - fullDocs[i++] = collector.fullDocs(); - } - return FullDocs.merge(null, fullDocs); - } - - int docBase; - final @Nullable Long limit; - final CustomHitsThresholdChecker hitsThresholdChecker; - final MaxScoreAccumulator minScoreAcc; - float minCompetitiveScore; - - // prevents instantiation - HugePqFullScoreDocCollector(LLTempHugePqEnv env, @Nullable Long limit, - CustomHitsThresholdChecker hitsThresholdChecker, MaxScoreAccumulator minScoreAcc) { - super(new HugePqPriorityQueue<>(env, new LLScoreDocCodec())); - assert hitsThresholdChecker 
!= null; - this.limit = limit; - this.hitsThresholdChecker = hitsThresholdChecker; - this.minScoreAcc = minScoreAcc; - } - - @Override - public ScoreMode scoreMode() { - return hitsThresholdChecker.scoreMode(); - } - - protected void updateGlobalMinCompetitiveScore(Scorable scorer) throws IOException { - assert minScoreAcc != null; - DocAndScore maxMinScore = minScoreAcc.get(); - if (maxMinScore != null) { - // since we tie-break on doc id and collect in doc id order we can require - // the next float if the global minimum score is set on a document id that is - // smaller than the ids in the current leaf - float score = - docBase >= maxMinScore.docBase ? Math.nextUp(maxMinScore.score) : maxMinScore.score; - if (score > minCompetitiveScore) { - assert hitsThresholdChecker.isThresholdReached(true); - scorer.setMinCompetitiveScore(score); - minCompetitiveScore = score; - totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; - } - } - } - - protected void updateMinCompetitiveScore(Scorable scorer) throws IOException { - if (hitsThresholdChecker.isThresholdReached(true) - && pq.top() != null - && pq.top().score() != Float.NEGATIVE_INFINITY) { // -Infinity is the score of sentinels - // since we tie-break on doc id and collect in doc id order, we can require - // the next float - float localMinScore = Math.nextUp(pq.top().score()); - if (localMinScore > minCompetitiveScore) { - scorer.setMinCompetitiveScore(localMinScore); - totalHitsRelation = TotalHits.Relation.GREATER_THAN_OR_EQUAL_TO; - minCompetitiveScore = localMinScore; - if (minScoreAcc != null) { - // we don't use the next float but we register the document - // id so that other leaves can require it if they are after - // the current maximum - minScoreAcc.accumulate(docBase, pq.top().score()); - } - } - } - } - -} \ No newline at end of file diff --git a/src/main/java/it/cavallium/dbengine/lucene/hugepq/search/comparators/HugePqDocComparator.java b/src/main/java/it/cavallium/dbengine/lucene/hugepq/search/comparators/HugePqDocComparator.java deleted file mode 100644 index 3712bee..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/hugepq/search/comparators/HugePqDocComparator.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
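// The Math.nextUp call above is safe because ties are broken by doc id and documents arrive in
// id order: a later hit scoring exactly the current bottom can never win the tie, so the scorer
// may be asked for strictly greater scores. A tiny numeric illustration:
final class NextUpDemo {

	public static void main(String[] args) {
		float bottomScore = 3.25f;
		float minCompetitive = Math.nextUp(bottomScore); // smallest float above bottomScore
		System.out.println(minCompetitive > bottomScore); // true
		System.out.println(minCompetitive);               // 3.2500002
	}
}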
- */ - -package it.cavallium.dbengine.lucene.hugepq.search.comparators; - -import it.cavallium.dbengine.database.DiscardingCloseable; -import it.cavallium.dbengine.database.SafeCloseable; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.IArray; -import it.cavallium.dbengine.lucene.IntCodec; -import it.cavallium.dbengine.lucene.HugePqArray; -import it.cavallium.dbengine.lucene.hugepq.mirrored.MinDocIterator; -import java.io.IOException; -import org.apache.lucene.index.LeafReaderContext; -import org.apache.lucene.search.DocIdSetIterator; -import org.apache.lucene.search.LeafFieldComparator; -import org.apache.lucene.search.Scorable; - -/** - * Comparator that sorts by asc _doc - * Based on {@link org.apache.lucene.search.comparators.DocComparator} - * */ -public class HugePqDocComparator extends org.apache.lucene.search.comparators.DocComparator implements - DiscardingCloseable { - private final IArray docIDs; - private final boolean enableSkipping; // if skipping functionality should be enabled - private int bottom; - private int topValue; - private boolean topValueSet; - private boolean bottomValueSet; - private boolean hitsThresholdReached; - - /** Creates a new comparator based on document ids for {@code numHits} */ - public HugePqDocComparator(LLTempHugePqEnv env, int numHits, boolean reverse, boolean enableSkipping) { - super(0, reverse, enableSkipping); - this.docIDs = new HugePqArray<>(env, new IntCodec(), numHits, 0); - // skipping functionality is enabled if we are sorting by _doc in asc order as a primary sort - this.enableSkipping = (!reverse && enableSkipping); - } - - @Override - public int compare(int slot1, int slot2) { - // No overflow risk because docIDs are non-negative - return docIDs.getOrDefault(slot1, 0) - docIDs.getOrDefault(slot2, 0); - } - - @Override - public LeafFieldComparator getLeafComparator(LeafReaderContext context) { - // TODO: can we "map" our docIDs to the current - // reader? saves having to then subtract on every - // compare call - return new DocLeafComparator(context); - } - - @Override - public void setTopValue(Integer value) { - topValue = value; - topValueSet = true; - } - - @Override - public Integer value(int slot) { - return docIDs.getOrDefault(slot, 0); - } - - @Override - public void close() { - if (docIDs instanceof SafeCloseable closeable) { - closeable.close(); - } - } - - /** - * DocLeafComparator with skipping functionality. When sort by _doc asc, after collecting top N - * matches and enough hits, the comparator can skip all the following documents. When sort by _doc - * asc and "top" document is set after which search should start, the comparator provides an - * iterator that can quickly skip to the desired "top" document. - */ - private class DocLeafComparator implements LeafFieldComparator { - private final int docBase; - private final int minDoc; - private final int maxDoc; - private DocIdSetIterator competitiveIterator; // iterator that starts from topValue - - public DocLeafComparator(LeafReaderContext context) { - this.docBase = context.docBase; - if (enableSkipping) { - // Skip docs before topValue, but include docs starting with topValue. - // Including topValue is necessary when doing sort on [_doc, other fields] - // in a distributed search where there are docs from different indices - // with the same docID. 
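// The skipping this comparator provides is plain DocIdSetIterator arithmetic: once a "top" doc
// is known, everything below it is skipped with a single advance() call instead of being
// iterated. A small sketch with the stock Lucene iterator (not the removed HugePq variant):
import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;

final class SkipToMinDoc {

	public static void main(String[] args) throws IOException {
		int maxDoc = 100;
		int minDoc = 42; // first doc id that can still be competitive
		DocIdSetIterator iterator = DocIdSetIterator.all(maxDoc);
		System.out.println(iterator.advance(minDoc)); // 42: all earlier doc ids are skipped
	}
}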
- this.minDoc = topValue; - this.maxDoc = context.reader().maxDoc(); - this.competitiveIterator = DocIdSetIterator.all(maxDoc); - } else { - this.minDoc = -1; - this.maxDoc = -1; - this.competitiveIterator = null; - } - } - - @Override - public void setBottom(int slot) { - bottom = docIDs.getOrDefault(slot, 0); - bottomValueSet = true; - updateIterator(); - } - - @Override - public int compareBottom(int doc) { - // No overflow risk because docIDs are non-negative - return bottom - (docBase + doc); - } - - @Override - public int compareTop(int doc) { - int docValue = docBase + doc; - return Integer.compare(topValue, docValue); - } - - @Override - public void copy(int slot, int doc) throws IOException { - docIDs.set(slot, docBase + doc); - } - - @Override - public void setScorer(Scorable scorer) throws IOException { - // update an iterator on a new segment - updateIterator(); - } - - @Override - public DocIdSetIterator competitiveIterator() { - if (!enableSkipping) { - return null; - } else { - return new DocIdSetIterator() { - private int docID = competitiveIterator.docID(); - - @Override - public int nextDoc() throws IOException { - return advance(docID + 1); - } - - @Override - public int docID() { - return docID; - } - - @Override - public long cost() { - return competitiveIterator.cost(); - } - - @Override - public int advance(int target) throws IOException { - return docID = competitiveIterator.advance(target); - } - }; - } - } - - @Override - public void setHitsThresholdReached() { - hitsThresholdReached = true; - updateIterator(); - } - - private void updateIterator() { - if (!enableSkipping || !hitsThresholdReached) return; - if (bottomValueSet) { - // since we've collected top N matches, we can early terminate - // Currently early termination on _doc is also implemented in TopFieldCollector, but this - // will be removed - // once all bulk scores uses collectors' iterators - competitiveIterator = DocIdSetIterator.empty(); - } else if (topValueSet) { - // skip to the desired top doc - if (docBase + maxDoc <= minDoc) { - competitiveIterator = DocIdSetIterator.empty(); // skip this segment - } else { - int segmentMinDoc = Math.max(competitiveIterator.docID(), minDoc - docBase); - competitiveIterator = new MinDocIterator(segmentMinDoc, maxDoc); - } - } - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/mlt/BigCompositeReader.java b/src/main/java/it/cavallium/dbengine/lucene/mlt/BigCompositeReader.java index a1af025..fcd03f9 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/mlt/BigCompositeReader.java +++ b/src/main/java/it/cavallium/dbengine/lucene/mlt/BigCompositeReader.java @@ -4,13 +4,11 @@ import java.io.IOException; import java.math.BigInteger; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Set; import java.util.concurrent.atomic.AtomicLong; import java.util.function.IntFunction; -import java.util.function.ToIntFunction; import java.util.stream.Collectors; import java.util.stream.StreamSupport; import org.apache.lucene.document.Document; @@ -18,8 +16,6 @@ import org.apache.lucene.document.DocumentStoredFieldVisitor; import org.apache.lucene.index.Fields; import org.apache.lucene.index.IndexOptions; import org.apache.lucene.index.IndexReader; -import org.apache.lucene.index.IndexReaderContext; -import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.index.StoredFieldVisitor; import org.apache.lucene.index.Term; import 
org.apache.lucene.store.AlreadyClosedException; @@ -90,7 +86,7 @@ public class BigCompositeReader { } } - public long getDocCount(String field) throws IOException { + public long getDocCount(String field) { this.ensureOpen(); long total = 0; @@ -107,7 +103,7 @@ public class BigCompositeReader { return total; } - public long docFreq(Term term) throws IOException { + public long docFreq(Term term) { this.ensureOpen(); long total = 0; @@ -141,7 +137,7 @@ public class BigCompositeReader { return numDocs; } - public Fields getTermVectors(long docID) throws IOException { + public Fields getTermVectors(long docID) { this.ensureOpen(); int i = this.readerIndex(docID); return this.subReaders[i].getTermVectors(Math.toIntExact(docID - this.starts[i])); @@ -181,19 +177,19 @@ public class BigCompositeReader { return hi; } - public final void document(long docID, StoredFieldVisitor visitor) throws IOException { + public final void document(long docID, StoredFieldVisitor visitor) { this.ensureOpen(); int i = this.readerIndex(docID); this.subReaders[i].document(Math.toIntExact(docID - this.starts[i]), visitor); } - public final Document document(long docID) throws IOException { + public final Document document(long docID) { DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(); this.document(docID, visitor); return visitor.getDocument(); } - public final Document document(long docID, Set fieldsToLoad) throws IOException { + public final Document document(long docID, Set fieldsToLoad) { DocumentStoredFieldVisitor visitor = new DocumentStoredFieldVisitor(fieldsToLoad); this.document(docID, visitor); return visitor.getDocument(); diff --git a/src/main/java/it/cavallium/dbengine/lucene/mlt/MoreLikeThisTransformer.java b/src/main/java/it/cavallium/dbengine/lucene/mlt/MoreLikeThisTransformer.java index 3c1648f..bb69bcf 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/mlt/MoreLikeThisTransformer.java +++ b/src/main/java/it/cavallium/dbengine/lucene/mlt/MoreLikeThisTransformer.java @@ -24,7 +24,7 @@ public class MoreLikeThisTransformer implements GlobalQueryRewrite { } @Override - public LocalQueryParams rewrite(LLIndexSearchers indexSearchers, LocalQueryParams queryParams) throws IOException { + public LocalQueryParams rewrite(LLIndexSearchers indexSearchers, LocalQueryParams queryParams) { var rewrittenQuery = LuceneUtils.getMoreLikeThisQuery(indexSearchers, queryParams, luceneAnalyzer, diff --git a/src/main/java/it/cavallium/dbengine/lucene/mlt/MultiMoreLikeThis.java b/src/main/java/it/cavallium/dbengine/lucene/mlt/MultiMoreLikeThis.java index 848f82e..df7106e 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/mlt/MultiMoreLikeThis.java +++ b/src/main/java/it/cavallium/dbengine/lucene/mlt/MultiMoreLikeThis.java @@ -549,7 +549,7 @@ public final class MultiMoreLikeThis { * @param docNum the documentID of the lucene doc to generate the 'More Like This" query for. * @return a query that will return docs like the passed lucene document ID. */ - public Query like(long docNum) throws IOException { + public Query like(long docNum) { if (fieldNames == null) { // gather list of valid fields from lucene Collection fields; @@ -564,7 +564,7 @@ public final class MultiMoreLikeThis { * @param filteredDocument Document with field values extracted for selected fields. * @return More Like This query for the passed document. 
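// The term-selection step these like(...) methods share (createQueue, just below) is a bounded
// selection problem: keep only the best maxQueryTerms terms by score. A standalone sketch with
// a min-heap; term scores are assumed precomputed, and the tf-idf weighting used by
// MultiMoreLikeThis is omitted:
import java.util.Comparator;
import java.util.List;
import java.util.Map;
import java.util.PriorityQueue;

final class TopScoredTerms {

	static List<String> select(Map<String, Double> termScores, int maxQueryTerms) {
		PriorityQueue<Map.Entry<String, Double>> heap =
				new PriorityQueue<>(Comparator.comparingDouble(Map.Entry::getValue));
		for (Map.Entry<String, Double> entry : termScores.entrySet()) {
			heap.offer(entry);
			if (heap.size() > maxQueryTerms) {
				heap.poll(); // evict the currently worst term
			}
		}
		return heap.stream().map(Map.Entry::getKey).toList();
	}

	public static void main(String[] args) {
		System.out.println(select(Map.of("lucene", 2.0, "rocksdb", 1.5, "netty", 0.5), 2));
	}
}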
*/ - public Query like(Map> filteredDocument) throws IOException { + public Query like(Map> filteredDocument) { if (fieldNames == null) { // gather list of valid fields from lucene Collection fields = BigCompositeReader.getIndexedFields(ir); @@ -579,7 +579,7 @@ public final class MultiMoreLikeThis { * * @return a query that will return docs like the passed Readers. */ - public Query like(String fieldName, Reader... readers) throws IOException { + public Query like(String fieldName, Reader... readers) { Map> perFieldTermFrequencies = new HashMap<>(); for (Reader r : readers) { addTermFrequencies(r, perFieldTermFrequencies, fieldName); @@ -622,7 +622,7 @@ public final class MultiMoreLikeThis { * objects as the values. */ private PriorityQueue createQueue( - Map> perFieldTermFrequencies) throws IOException { + Map> perFieldTermFrequencies) { // have collected all words in doc and their freqs final long limit = Math.min(maxQueryTerms, this.getTermsCount(perFieldTermFrequencies)); FreqQ queue = new FreqQ(Math.toIntExact(limit)); // will order words by score @@ -709,7 +709,7 @@ public final class MultiMoreLikeThis { * * @param docNum the id of the lucene document from which to find terms */ - private PriorityQueue retrieveTerms(long docNum) throws IOException { + private PriorityQueue retrieveTerms(long docNum) { Map> field2termFreqMap = new HashMap<>(); retrieveTermsOfIndexReader(ir, docNum, field2termFreqMap); @@ -879,14 +879,14 @@ public final class MultiMoreLikeThis { * or best entry, first * @see #retrieveInterestingTerms */ - private PriorityQueue retrieveTerms(Reader r, String fieldName) throws IOException { + private PriorityQueue retrieveTerms(Reader r, String fieldName) { Map> field2termFreqMap = new HashMap<>(); addTermFrequencies(r, field2termFreqMap, fieldName); return createQueue(field2termFreqMap); } /** @see #retrieveInterestingTerms(java.io.Reader, String) */ - public String[] retrieveInterestingTerms(long docNum) throws IOException { + public String[] retrieveInterestingTerms(long docNum) { ArrayList al = new ArrayList<>(Math.toIntExact(maxQueryTerms)); PriorityQueue pq = retrieveTerms(docNum); ScoreTerm scoreTerm; @@ -911,7 +911,7 @@ public final class MultiMoreLikeThis { * @see #retrieveTerms(java.io.Reader, String) * @see #setMaxQueryTerms */ - public String[] retrieveInterestingTerms(Reader r, String fieldName) throws IOException { + public String[] retrieveInterestingTerms(Reader r, String fieldName) { ArrayList al = new ArrayList<>(Math.toIntExact(maxQueryTerms)); PriorityQueue pq = retrieveTerms(r, fieldName); ScoreTerm scoreTerm; diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/AdaptiveLocalSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/AdaptiveLocalSearcher.java index b18a19f..97b205e 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/AdaptiveLocalSearcher.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/AdaptiveLocalSearcher.java @@ -1,24 +1,14 @@ package it.cavallium.dbengine.lucene.searcher; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; -import static it.cavallium.dbengine.database.LLUtils.singleOrClose; import static it.cavallium.dbengine.lucene.searcher.GlobalQueryRewrite.NO_REWRITE; -import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.disk.LLIndexSearcher; -import it.cavallium.dbengine.database.disk.LLIndexSearchers; -import it.cavallium.dbengine.database.disk.LLIndexSearchers.UnshardedIndexSearchers; -import 
it.cavallium.dbengine.database.disk.LLTempHugePqEnv; import it.cavallium.dbengine.lucene.LuceneUtils; +import java.io.IOException; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; public class AdaptiveLocalSearcher implements LocalSearcher { - static final boolean FORCE_HUGE_PQ - = Boolean.parseBoolean(System.getProperty("it.cavallium.hugepq.force", "false")); - private static final StandardSearcher standardSearcher = new StandardSearcher(); private static final LocalSearcher scoredPaged = new PagedLocalSearcher(); @@ -32,27 +22,23 @@ public class AdaptiveLocalSearcher implements LocalSearcher { */ private final int maxInMemoryResultEntries; - @Nullable - private final SortedByScoreFullMultiSearcher sortedByScoreFull; - - @Nullable - private final SortedScoredFullMultiSearcher sortedScoredFull; - - public AdaptiveLocalSearcher(LLTempHugePqEnv env, boolean useHugePq, int maxInMemoryResultEntries) { - sortedByScoreFull = (FORCE_HUGE_PQ || useHugePq) ? new SortedByScoreFullMultiSearcher(env) : null; - sortedScoredFull = (FORCE_HUGE_PQ || useHugePq) ? new SortedScoredFullMultiSearcher(env) : null; + public AdaptiveLocalSearcher(int maxInMemoryResultEntries) { this.maxInMemoryResultEntries = maxInMemoryResultEntries; } @Override - public Mono collect(Mono indexSearcherMono, + public LuceneSearchResult collect(LLIndexSearcher indexSearcher, LocalQueryParams queryParams, @Nullable String keyFieldName, GlobalQueryRewrite transformer) { if (transformer != NO_REWRITE) { - return LuceneUtils.rewrite(this, indexSearcherMono, queryParams, keyFieldName, transformer); + try { + return LuceneUtils.rewrite(this, indexSearcher, queryParams, keyFieldName, transformer); + } catch (IOException e) { + throw new RuntimeException(e); + } } - return transformedCollect(indexSearcherMono, queryParams, keyFieldName, transformer); + return transformedCollect(indexSearcher, queryParams, keyFieldName, transformer); } @Override @@ -61,7 +47,7 @@ public class AdaptiveLocalSearcher implements LocalSearcher { } // Remember to change also AdaptiveMultiSearcher - public Mono transformedCollect(Mono indexSearcherMono, + public LuceneSearchResult transformedCollect(LLIndexSearcher indexSearcher, LocalQueryParams queryParams, String keyFieldName, GlobalQueryRewrite transformer) { @@ -70,37 +56,29 @@ public class AdaptiveLocalSearcher implements LocalSearcher { long maxAllowedInMemoryLimit = Math.max(maxInMemoryResultEntries, (long) queryParams.pageLimits().getPageLimit(0)); - if (!FORCE_HUGE_PQ && queryParams.limitLong() == 0) { - return countSearcher.collect(indexSearcherMono, queryParams, keyFieldName, transformer); - } else if (!FORCE_HUGE_PQ && realLimit <= maxInMemoryResultEntries) { - return standardSearcher.collect(indexSearcherMono, queryParams, keyFieldName, transformer); + if (queryParams.limitLong() == 0) { + return countSearcher.collect(indexSearcher, queryParams, keyFieldName, transformer); + } else if (realLimit <= maxInMemoryResultEntries) { + return standardSearcher.collect(indexSearcher, queryParams, keyFieldName, transformer); } else if (queryParams.isSorted()) { - if (!FORCE_HUGE_PQ && realLimit <= maxAllowedInMemoryLimit) { - return scoredPaged.collect(indexSearcherMono, queryParams, keyFieldName, transformer); + if (realLimit <= maxAllowedInMemoryLimit) { + return scoredPaged.collect(indexSearcher, queryParams, keyFieldName, transformer); } else { if (queryParams.isSortedByScore()) { - if (!FORCE_HUGE_PQ && queryParams.limitLong() < 
maxInMemoryResultEntries) { + if (queryParams.limitLong() < maxInMemoryResultEntries) { throw new UnsupportedOperationException("Allowed limit is " + maxInMemoryResultEntries + " or greater"); } - if (sortedByScoreFull != null) { - return sortedByScoreFull.collect(indexSearcherMono, queryParams, keyFieldName, transformer); - } else { - return scoredPaged.collect(indexSearcherMono, queryParams, keyFieldName, transformer); - } + return scoredPaged.collect(indexSearcher, queryParams, keyFieldName, transformer); } else { - if (!FORCE_HUGE_PQ && queryParams.limitLong() < maxInMemoryResultEntries) { + if (queryParams.limitLong() < maxInMemoryResultEntries) { throw new UnsupportedOperationException("Allowed limit is " + maxInMemoryResultEntries + " or greater"); } - if (sortedScoredFull != null) { - return sortedScoredFull.collect(indexSearcherMono, queryParams, keyFieldName, transformer); - } else { - return scoredPaged.collect(indexSearcherMono, queryParams, keyFieldName, transformer); - } + return scoredPaged.collect(indexSearcher, queryParams, keyFieldName, transformer); } } } else { // Run large/unbounded searches using the continuous multi searcher - return unsortedUnscoredContinuous.collect(indexSearcherMono, queryParams, keyFieldName, transformer); + return unsortedUnscoredContinuous.collect(indexSearcher, queryParams, keyFieldName, transformer); } } } diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/AdaptiveMultiSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/AdaptiveMultiSearcher.java index 6b9f15a..06229f3 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/AdaptiveMultiSearcher.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/AdaptiveMultiSearcher.java @@ -1,19 +1,11 @@ package it.cavallium.dbengine.lucene.searcher; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; -import static it.cavallium.dbengine.database.LLUtils.singleOrClose; -import static it.cavallium.dbengine.lucene.searcher.AdaptiveLocalSearcher.FORCE_HUGE_PQ; import static it.cavallium.dbengine.lucene.searcher.GlobalQueryRewrite.NO_REWRITE; -import io.netty5.util.Send; -import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.disk.LLIndexSearchers; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; import it.cavallium.dbengine.lucene.LuceneUtils; import java.io.IOException; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; public class AdaptiveMultiSearcher implements MultiSearcher { @@ -30,31 +22,27 @@ public class AdaptiveMultiSearcher implements MultiSearcher { */ private final int maxInMemoryResultEntries; - @Nullable - private final SortedByScoreFullMultiSearcher sortedByScoreFull; - - @Nullable - private final SortedScoredFullMultiSearcher sortedScoredFull; - - public AdaptiveMultiSearcher(LLTempHugePqEnv env, boolean useHugePq, int maxInMemoryResultEntries) { - sortedByScoreFull = (FORCE_HUGE_PQ || useHugePq) ? new SortedByScoreFullMultiSearcher(env) : null; - sortedScoredFull = (FORCE_HUGE_PQ || useHugePq) ? 
new SortedScoredFullMultiSearcher(env) : null; + public AdaptiveMultiSearcher(int maxInMemoryResultEntries) { this.maxInMemoryResultEntries = maxInMemoryResultEntries; } @Override - public Mono collectMulti(Mono indexSearchersMono, + public LuceneSearchResult collectMulti(LLIndexSearchers indexSearchers, LocalQueryParams queryParams, @Nullable String keyFieldName, GlobalQueryRewrite transformer) { if (transformer != NO_REWRITE) { - return LuceneUtils.rewriteMulti(this, indexSearchersMono, queryParams, keyFieldName, transformer); + try { + return LuceneUtils.rewriteMulti(this, indexSearchers, queryParams, keyFieldName, transformer); + } catch (IOException e) { + throw new RuntimeException(e); + } } - return transformedCollectMulti(indexSearchersMono, queryParams, keyFieldName, transformer); + return transformedCollectMulti(indexSearchers, queryParams, keyFieldName, transformer); } // Remember to change also AdaptiveLocalSearcher - public Mono transformedCollectMulti(Mono indexSearchers, + public LuceneSearchResult transformedCollectMulti(LLIndexSearchers indexSearchers, LocalQueryParams queryParams, @Nullable String keyFieldName, GlobalQueryRewrite transformer) { @@ -63,32 +51,24 @@ public class AdaptiveMultiSearcher implements MultiSearcher { long maxAllowedInMemoryLimit = Math.max(maxInMemoryResultEntries, (long) queryParams.pageLimits().getPageLimit(0)); - if (!FORCE_HUGE_PQ && queryParams.limitLong() == 0) { + if (queryParams.limitLong() == 0) { return count.collectMulti(indexSearchers, queryParams, keyFieldName, transformer); - } else if (!FORCE_HUGE_PQ && realLimit <= maxInMemoryResultEntries) { + } else if (realLimit <= maxInMemoryResultEntries) { return standardSearcher.collectMulti(indexSearchers, queryParams, keyFieldName, transformer); } else if (queryParams.isSorted()) { - if (!FORCE_HUGE_PQ && realLimit <= maxAllowedInMemoryLimit) { + if (realLimit <= maxAllowedInMemoryLimit) { return scoredPaged.collectMulti(indexSearchers, queryParams, keyFieldName, transformer); } else { if (queryParams.isSortedByScore()) { - if (!FORCE_HUGE_PQ && queryParams.limitLong() < maxInMemoryResultEntries) { + if (queryParams.limitLong() < maxInMemoryResultEntries) { throw new UnsupportedOperationException("Allowed limit is " + maxInMemoryResultEntries + " or greater"); } - if (sortedByScoreFull != null) { - return sortedByScoreFull.collectMulti(indexSearchers, queryParams, keyFieldName, transformer); - } else { - return scoredPaged.collectMulti(indexSearchers, queryParams, keyFieldName, transformer); - } + return scoredPaged.collectMulti(indexSearchers, queryParams, keyFieldName, transformer); } else { - if (!FORCE_HUGE_PQ && queryParams.limitLong() < maxInMemoryResultEntries) { + if (queryParams.limitLong() < maxInMemoryResultEntries) { throw new UnsupportedOperationException("Allowed limit is " + maxInMemoryResultEntries + " or greater"); } - if (sortedScoredFull != null) { - return sortedScoredFull.collectMulti(indexSearchers, queryParams, keyFieldName, transformer); - } else { - return scoredPaged.collectMulti(indexSearchers, queryParams, keyFieldName, transformer); - } + return scoredPaged.collectMulti(indexSearchers, queryParams, keyFieldName, transformer); } } } else { diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/CountMultiSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/CountMultiSearcher.java index 10eab0c..5d997e3 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/CountMultiSearcher.java +++ 
b/src/main/java/it/cavallium/dbengine/lucene/searcher/CountMultiSearcher.java @@ -1,42 +1,30 @@ package it.cavallium.dbengine.lucene.searcher; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; -import static it.cavallium.dbengine.database.LLUtils.singleOrClose; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; -import static it.cavallium.dbengine.lucene.searcher.GlobalQueryRewrite.NO_REWRITE; - -import io.netty5.util.Send; import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; -import it.cavallium.dbengine.database.LLKeyScore; import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.disk.LLIndexSearcher; import it.cavallium.dbengine.database.disk.LLIndexSearchers; -import it.cavallium.dbengine.database.disk.LLIndexSearchers.UnshardedIndexSearchers; import it.cavallium.dbengine.lucene.LuceneUtils; -import it.cavallium.dbengine.utils.SimpleResource; +import it.cavallium.dbengine.utils.DBException; import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.ArrayList; -import java.util.List; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.index.QueryTimeout; +import org.apache.lucene.index.QueryTimeoutImpl; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; public class CountMultiSearcher implements MultiSearcher { protected static final Logger LOG = LogManager.getLogger(CountMultiSearcher.class); @Override - public Mono collectMulti(Mono indexSearchersMono, + public LuceneSearchResult collectMulti(LLIndexSearchers indexSearchers, LocalQueryParams queryParams, String keyFieldName, GlobalQueryRewrite transformer) { if (transformer != GlobalQueryRewrite.NO_REWRITE) { - return LuceneUtils.rewriteMulti(this, indexSearchersMono, queryParams, keyFieldName, transformer); + return LuceneUtils.rewriteMulti(this, indexSearchers, queryParams, keyFieldName, transformer); } if (queryParams.isSorted() && queryParams.limitLong() > 0) { throw new UnsupportedOperationException( @@ -47,42 +35,40 @@ public class CountMultiSearcher implements MultiSearcher { "Scored queries are not supported by SimpleUnsortedUnscoredLuceneMultiSearcher"); } - return Mono.usingWhen(indexSearchersMono, searchers -> Flux - .fromIterable(searchers.llShards()) - .flatMap(searcher -> this.collect(Mono.just(searcher), queryParams, keyFieldName, transformer)) - .collectList() - .map(results -> { - boolean exactTotalHitsCount = true; - long totalHitsCountValue = 0; - for (LuceneSearchResult result : results) { - exactTotalHitsCount &= result.totalHitsCount().exact(); - totalHitsCountValue += result.totalHitsCount().value(); - result.close(); - } + var results = indexSearchers + .llShards() + .stream() + .map(searcher -> this.collect(searcher, queryParams, keyFieldName, transformer)) + .toList(); + boolean exactTotalHitsCount = true; + long totalHitsCountValue = 0; + for (LuceneSearchResult result : results) { + exactTotalHitsCount &= result.totalHitsCount().exact(); + totalHitsCountValue += result.totalHitsCount().value(); + result.close(); + } - var totalHitsCount = new TotalHitsCount(totalHitsCountValue, exactTotalHitsCount); + var totalHitsCount = new TotalHitsCount(totalHitsCountValue, exactTotalHitsCount); - return new 
LuceneSearchResult(totalHitsCount, Flux.empty()); - }) - .doOnDiscard(LuceneSearchResult.class, LLUtils::onDiscard), - LLUtils::finalizeResource); + return new LuceneSearchResult(totalHitsCount, Stream.empty()); } @Override - public Mono collect(Mono indexSearcherMono, + public LuceneSearchResult collect(LLIndexSearcher indexSearcher, LocalQueryParams queryParams, @Nullable String keyFieldName, GlobalQueryRewrite transformer) { if (transformer != GlobalQueryRewrite.NO_REWRITE) { - return LuceneUtils.rewrite(this, indexSearcherMono, queryParams, keyFieldName, transformer); + return LuceneUtils.rewrite(this, indexSearcher, queryParams, keyFieldName, transformer); + } + try { + var is = indexSearcher.getIndexSearcher(); + is.setTimeout(new QueryTimeoutImpl(queryParams.timeout().toMillis())); + var count = is.count(queryParams.query()); + return new LuceneSearchResult(TotalHitsCount.of(count, true), Stream.empty()); + } catch (IOException e) { + throw new DBException(e); } - - return Mono.usingWhen(indexSearcherMono, indexSearcher -> Mono.fromCallable(() -> { - LLUtils.ensureBlocking(); - return (long) indexSearcher.getIndexSearcher().count(queryParams.query()); - }).subscribeOn(luceneScheduler()), LLUtils::finalizeResource) - .transform(TimeoutUtil.timeoutMono(queryParams.timeout())) - .map(count -> new LuceneSearchResult(TotalHitsCount.of(count, true), Flux.empty())); } @Override diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/CountedFlux.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/CountedFlux.java deleted file mode 100644 index 8a957ec..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/CountedFlux.java +++ /dev/null @@ -1,30 +0,0 @@ -package it.cavallium.dbengine.lucene.searcher; - -import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; -import org.jetbrains.annotations.NotNull; -import reactor.core.CoreSubscriber; -import reactor.core.publisher.Flux; - -public class CountedFlux extends Flux { - - private final TotalHitsCount totalHitsCount; - private final Flux flux; - - private CountedFlux(TotalHitsCount totalHitsCount, Flux flux) { - this.totalHitsCount = totalHitsCount; - this.flux = flux; - } - - public static CountedFlux of(TotalHitsCount totalHitsCount, Flux flux) { - return new CountedFlux<>(totalHitsCount, flux); - } - - public TotalHitsCount totalHitsCount() { - return totalHitsCount; - } - - @Override - public void subscribe(@NotNull CoreSubscriber actual) { - flux.subscribe(actual); - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/CurrentPageInfo.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/CurrentPageInfo.java index 61c318d..dc991f3 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/CurrentPageInfo.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/CurrentPageInfo.java @@ -1,8 +1,6 @@ package it.cavallium.dbengine.lucene.searcher; -import it.cavallium.dbengine.lucene.LuceneUtils; import java.util.Comparator; -import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.ScoreDoc; import org.jetbrains.annotations.Nullable; diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/DecimalBucketMultiSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/DecimalBucketMultiSearcher.java index 3719a63..a4dc8b7 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/DecimalBucketMultiSearcher.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/DecimalBucketMultiSearcher.java @@ -1,14 +1,10 @@ package 
it.cavallium.dbengine.lucene.searcher; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; - -import io.netty5.util.Send; -import it.cavallium.dbengine.database.LLUtils; +import com.google.common.collect.Streams; import it.cavallium.dbengine.database.disk.LLIndexSearchers; -import it.cavallium.dbengine.lucene.LuceneUtils; import it.cavallium.dbengine.lucene.collector.Buckets; import it.cavallium.dbengine.lucene.collector.DecimalBucketMultiCollectorManager; +import java.io.IOException; import java.util.List; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; @@ -16,52 +12,43 @@ import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.Query; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; public class DecimalBucketMultiSearcher { protected static final Logger logger = LogManager.getLogger(DecimalBucketMultiSearcher.class); - public Mono collectMulti(Mono indexSearchersMono, + public Buckets collectMulti(LLIndexSearchers indexSearchers, BucketParams bucketParams, @NotNull List queries, @Nullable Query normalizationQuery) { - - return Mono.usingWhen(indexSearchersMono, indexSearchers -> this - // Search results - .search(indexSearchers.shards(), bucketParams, queries, normalizationQuery) - // Ensure that one result is always returned - .single(), indexSearchers -> Mono.fromCallable(() -> { + try { + // Search results + return this.search(indexSearchers.shards(), bucketParams, queries, normalizationQuery); + } finally { indexSearchers.close(); - return null; - }).transform(LuceneUtils::scheduleLucene)); + } } - private Mono search(Iterable indexSearchers, + private Buckets search(Iterable indexSearchers, BucketParams bucketParams, @NotNull List queries, @Nullable Query normalizationQuery) { - return Mono.defer(() -> { - var cmm = new DecimalBucketMultiCollectorManager(bucketParams.min(), - bucketParams.max(), - bucketParams.buckets(), - bucketParams.bucketFieldName(), - bucketParams.valueSource(), - queries, - normalizationQuery, - bucketParams.collectionRate(), - bucketParams.sampleSize() - ); - return Flux - .fromIterable(indexSearchers) - .flatMap(shard -> Mono.fromCallable(() -> { - LLUtils.ensureBlocking(); - return cmm.search(shard); - }).subscribeOn(luceneScheduler())) - .collectList() - .flatMap(results -> Mono.fromSupplier(() -> cmm.reduce(results)).subscribeOn(luceneScheduler())); - }); + var cmm = new DecimalBucketMultiCollectorManager(bucketParams.min(), + bucketParams.max(), + bucketParams.buckets(), + bucketParams.bucketFieldName(), + bucketParams.valueSource(), + queries, + normalizationQuery, + bucketParams.collectionRate(), + bucketParams.sampleSize() + ); + return cmm.reduce(Streams.stream(indexSearchers).parallel().map(shard -> { + try { + return cmm.search(shard); + } catch (IOException e) { + throw new RuntimeException(e); + } + }).toList()); } } diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/FirstPageResults.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/FirstPageResults.java index 365233e..4c10ce3 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/FirstPageResults.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/FirstPageResults.java @@ -2,7 +2,7 @@ package it.cavallium.dbengine.lucene.searcher; 
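The DecimalBucketMultiSearcher rewrite above shows the two recurring moves of this migration: Mono.usingWhen(resource, use, cleanup) becomes try/finally, and the per-shard Flux fan-out becomes a parallel stream reduced after every shard finishes. The same shape in isolation; IoFunction and the reduce parameter are stand-ins for the collector manager, not APIs from this codebase, and UncheckedIOException stands in for the diff's DBException:

    import java.io.IOException;
    import java.io.UncheckedIOException;
    import java.util.List;
    import java.util.function.Function;
    import org.apache.lucene.search.IndexSearcher;

    final class ShardFanOut {

        /** A per-shard search step that may throw Lucene's checked IOException. */
        interface IoFunction<A, B> {
            B apply(A a) throws IOException;
        }

        /**
         * Runs searchShard on every shard in parallel, gathers the per-shard
         * results, then reduces them into a single value. Checked exceptions
         * are wrapped because Stream lambdas cannot declare them.
         */
        static <R, T> T searchAll(List<IndexSearcher> shards,
                IoFunction<IndexSearcher, R> searchShard,
                Function<List<R>, T> reduce) {
            List<R> partials = shards.parallelStream().map(shard -> {
                try {
                    return searchShard.apply(shard);
                } catch (IOException e) {
                    throw new UncheckedIOException(e);
                }
            }).toList();
            return reduce.apply(partials);
        }
    }
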
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; import it.cavallium.dbengine.database.LLKeyScore; -import reactor.core.publisher.Flux; +import java.util.stream.Stream; -record FirstPageResults(TotalHitsCount totalHitsCount, Flux firstPageHitsFlux, +record FirstPageResults(TotalHitsCount totalHitsCount, Stream firstPageHitsStream, CurrentPageInfo nextPageInfo) {} diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/GlobalQueryRewrite.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/GlobalQueryRewrite.java index fb7bb63..1fba95a 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/GlobalQueryRewrite.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/GlobalQueryRewrite.java @@ -2,11 +2,10 @@ package it.cavallium.dbengine.lucene.searcher; import it.cavallium.dbengine.database.disk.LLIndexSearchers; import java.io.IOException; -import reactor.core.publisher.Mono; public interface GlobalQueryRewrite { GlobalQueryRewrite NO_REWRITE = (indexSearchers, queryParamsMono) -> queryParamsMono; - LocalQueryParams rewrite(LLIndexSearchers indexSearchers, LocalQueryParams localQueryParams) throws IOException; + LocalQueryParams rewrite(LLIndexSearchers indexSearchers, LocalQueryParams localQueryParams); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/LocalSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/LocalSearcher.java index 52dd9fc..0dcde76 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/LocalSearcher.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/LocalSearcher.java @@ -1,19 +1,17 @@ package it.cavallium.dbengine.lucene.searcher; -import io.netty5.util.Send; import it.cavallium.dbengine.database.disk.LLIndexSearcher; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public interface LocalSearcher { /** - * @param indexSearcherMono Lucene index searcher + * @param indexSearcher Lucene index searcher * @param queryParams the query parameters * @param keyFieldName the name of the key field * @param transformer the search query transformer */ - Mono collect(Mono indexSearcherMono, + LuceneSearchResult collect(LLIndexSearcher indexSearcher, LocalQueryParams queryParams, @Nullable String keyFieldName, GlobalQueryRewrite transformer); diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/LuceneGenerator.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/LuceneGenerator.java index 67a7e28..785a50b 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/LuceneGenerator.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/LuceneGenerator.java @@ -1,13 +1,12 @@ package it.cavallium.dbengine.lucene.searcher; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; - -import it.cavallium.dbengine.lucene.LuceneUtils; import java.io.IOException; -import java.io.UncheckedIOException; +import it.cavallium.dbengine.utils.DBException; import java.util.Iterator; import java.util.List; +import java.util.Objects; import java.util.function.Supplier; +import java.util.stream.Stream; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; import org.apache.lucene.search.DocIdSetIterator; @@ -19,13 +18,9 @@ import org.apache.lucene.search.Scorer; import org.apache.lucene.search.Weight; import org.apache.lucene.util.Bits; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.scheduler.Scheduler; -import 
reactor.core.scheduler.Schedulers; public class LuceneGenerator implements Supplier { - private static final Scheduler SCHED = LuceneUtils.luceneScheduler(); private final IndexSearcher shard; private final int shardIndex; private final Query query; @@ -51,23 +46,12 @@ public class LuceneGenerator implements Supplier { this.leavesIterator = leaves.iterator(); } - public static Flux reactive(IndexSearcher shard, LocalQueryParams localQueryParams, int shardIndex) { + public static Stream reactive(IndexSearcher shard, LocalQueryParams localQueryParams, int shardIndex) { if (localQueryParams.sort() != null) { - return Flux.error(new IllegalArgumentException("Sorting is not allowed")); + throw new IllegalArgumentException("Sorting is not allowed"); } - return Flux - .generate(() -> new LuceneGenerator(shard, localQueryParams, shardIndex), - (s, sink) -> { - ScoreDoc val = s.get(); - if (val == null) { - sink.complete(); - } else { - sink.next(val); - } - return s; - } - ) - .subscribeOn(SCHED); + var lg = new LuceneGenerator(shard, localQueryParams, shardIndex); + return Stream.generate(lg).takeWhile(Objects::nonNull); } @Override @@ -88,7 +72,7 @@ public class LuceneGenerator implements Supplier { remainingOffset--; } - private Weight createWeight() throws IOException { + private Weight createWeight() { ScoreMode scoreMode = computeScores ? ScoreMode.COMPLETE : ScoreMode.COMPLETE_NO_SCORES; return shard.createWeight(shard.rewrite(query), scoreMode, 1f); } @@ -98,18 +82,18 @@ public class LuceneGenerator implements Supplier { try { weight = createWeight(); } catch (IOException e) { - throw new UncheckedIOException(e); + throw new DBException(e); } } try { return getWeightedNext(); } catch (IOException e) { - throw new UncheckedIOException(e); + throw new DBException(e); } } - private ScoreDoc getWeightedNext() throws IOException { + private ScoreDoc getWeightedNext() { while (tryAdvanceDocIdSetIterator()) { LeafReader reader = leaf.reader(); Bits liveDocs = reader.getLiveDocs(); @@ -125,7 +109,7 @@ public class LuceneGenerator implements Supplier { clearState(); return null; } - private boolean tryAdvanceDocIdSetIterator() throws IOException { + private boolean tryAdvanceDocIdSetIterator() { if (docIdSetIterator != null) { return true; } @@ -143,7 +127,7 @@ public class LuceneGenerator implements Supplier { return false; } - private ScoreDoc transformDoc(int doc) throws IOException { + private ScoreDoc transformDoc(int doc) { return new ScoreDoc(leaf.docBase + doc, scorer.score(), shardIndex); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/LuceneSearchResult.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/LuceneSearchResult.java index 9a39158..bcbbc15 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/LuceneSearchResult.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/LuceneSearchResult.java @@ -1,36 +1,31 @@ package it.cavallium.dbengine.lucene.searcher; -import io.netty5.buffer.Drop; -import io.netty5.buffer.Owned; import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; import it.cavallium.dbengine.database.DiscardingCloseable; import it.cavallium.dbengine.database.LLKeyScore; -import io.netty5.buffer.internal.ResourceSupport; import it.cavallium.dbengine.utils.SimpleResource; import java.util.Objects; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; -import reactor.core.publisher.Flux; public class LuceneSearchResult extends SimpleResource implements 
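LuceneGenerator.reactive() above distills the Flux.generate + SynchronousSink state machine into Stream.generate(supplier).takeWhile(Objects::nonNull): the supplier signals exhaustion by returning null, and takeWhile turns that sentinel back into end-of-stream. A self-contained sketch of the idiom; note that the elements themselves must never be null, or the stream ends early:

    import java.util.Iterator;
    import java.util.List;
    import java.util.Objects;
    import java.util.function.Supplier;
    import java.util.stream.Stream;

    final class GeneratorToStream {

        /** Drains a stateful source as a Stream, using null as the end-of-data sentinel. */
        static <T> Stream<T> fromSupplier(Iterator<T> source) {
            Supplier<T> next = () -> source.hasNext() ? source.next() : null;
            // takeWhile converts the null sentinel into stream termination
            return Stream.generate(next).takeWhile(Objects::nonNull);
        }

        public static void main(String[] args) {
            fromSupplier(List.of(1, 2, 3).iterator()).forEach(System.out::println); // 1 2 3
        }
    }
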
DiscardingCloseable { private static final Logger logger = LogManager.getLogger(LuceneSearchResult.class); private final TotalHitsCount totalHitsCount; - private final Flux results; + private final Stream results; - public LuceneSearchResult(TotalHitsCount totalHitsCount, Flux results) { + public LuceneSearchResult(TotalHitsCount totalHitsCount, Stream results) { this.totalHitsCount = totalHitsCount; this.results = results; } public TotalHitsCount totalHitsCount() { - ensureOpen(); return totalHitsCount; } - public Flux results() { - ensureOpen(); + public Stream results() { return results; } @@ -56,5 +51,6 @@ public class LuceneSearchResult extends SimpleResource implements DiscardingClos @Override protected void onClose() { + results.close(); } } diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/MultiSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/MultiSearcher.java index ef54278..ecc72f6 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/MultiSearcher.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/MultiSearcher.java @@ -1,12 +1,9 @@ package it.cavallium.dbengine.lucene.searcher; -import static it.cavallium.dbengine.database.LLUtils.singleOrClose; - -import io.netty5.util.Send; import it.cavallium.dbengine.database.disk.LLIndexSearcher; import it.cavallium.dbengine.database.disk.LLIndexSearchers; +import java.io.IOException; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Mono; public interface MultiSearcher extends LocalSearcher { @@ -16,23 +13,23 @@ public interface MultiSearcher extends LocalSearcher { * @param keyFieldName the name of the key field * @param transformer the search query transformer */ - Mono collectMulti(Mono indexSearchersMono, + LuceneSearchResult collectMulti(LLIndexSearchers indexSearchersMono, LocalQueryParams queryParams, @Nullable String keyFieldName, GlobalQueryRewrite transformer); /** - * @param indexSearcherMono Lucene index searcher + * @param indexSearcher Lucene index searcher * @param queryParams the query parameters * @param keyFieldName the name of the key field * @param transformer the search query transformer */ @Override - default Mono collect(Mono indexSearcherMono, + default LuceneSearchResult collect(LLIndexSearcher indexSearcher, LocalQueryParams queryParams, @Nullable String keyFieldName, GlobalQueryRewrite transformer) { - Mono searchers = indexSearcherMono.map(indexSearcher -> LLIndexSearchers.unsharded(indexSearcher)); + LLIndexSearchers searchers = LLIndexSearchers.unsharded(indexSearcher); return this.collectMulti(searchers, queryParams, keyFieldName, transformer); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/PageIterationStepResult.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/PageIterationStepResult.java new file mode 100644 index 0000000..37950bb --- /dev/null +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/PageIterationStepResult.java @@ -0,0 +1,5 @@ +package it.cavallium.dbengine.lucene.searcher; + +import org.jetbrains.annotations.Nullable; + +record PageIterationStepResult(CurrentPageInfo nextPageToIterate, @Nullable PageData pageData) {} diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/PagedLocalSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/PagedLocalSearcher.java index e67a0b3..d355cd6 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/PagedLocalSearcher.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/PagedLocalSearcher.java @@ 
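Since LuceneSearchResult now wraps a Stream and releases it in onClose(), callers are expected to treat the whole result as a resource. A hypothetical usage sketch, assuming DiscardingCloseable extends AutoCloseable (which the explicit result.close() calls elsewhere in this diff suggest); the method and field names are illustrative only:

    import it.cavallium.dbengine.database.disk.LLIndexSearchers;

    final class SearchResultUsage {

        // Hypothetical caller: collectMulti's arguments are stand-ins.
        static void printHits(MultiSearcher searcher, LLIndexSearchers shards, LocalQueryParams params) {
            try (LuceneSearchResult result = searcher.collectMulti(shards, params, "id", GlobalQueryRewrite.NO_REWRITE)) {
                System.out.println("total hits: " + result.totalHitsCount());
                result.results().forEach(System.out::println); // stream is released by result.close()
            }
        }
    }
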
-1,13 +1,8 @@ package it.cavallium.dbengine.lucene.searcher; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; -import static it.cavallium.dbengine.database.LLUtils.singleOrClose; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; import static it.cavallium.dbengine.lucene.searcher.CurrentPageInfo.EMPTY_STATUS; import static it.cavallium.dbengine.lucene.searcher.PaginationInfo.MAX_SINGLE_SEARCH_LIMIT; -import io.netty5.util.Send; -import io.netty5.buffer.internal.ResourceSupport; import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; import it.cavallium.dbengine.database.LLKeyScore; import it.cavallium.dbengine.database.LLUtils; @@ -16,10 +11,12 @@ import it.cavallium.dbengine.database.disk.LLIndexSearchers; import it.cavallium.dbengine.lucene.LuceneCloseable; import it.cavallium.dbengine.lucene.LuceneUtils; import it.cavallium.dbengine.lucene.collector.TopDocsCollectorMultiManager; +import it.cavallium.dbengine.utils.DBException; import java.io.IOException; -import java.io.UncheckedIOException; -import java.util.Arrays; import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.search.IndexSearcher; @@ -28,45 +25,37 @@ import org.apache.lucene.search.TopDocs; import org.apache.lucene.search.TotalHits; import org.apache.lucene.search.TotalHits.Relation; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.publisher.SynchronousSink; -import reactor.core.scheduler.Schedulers; public class PagedLocalSearcher implements LocalSearcher { private static final Logger LOG = LogManager.getLogger(PagedLocalSearcher.class); @Override - public Mono collect(Mono indexSearcherMono, + public LuceneSearchResult collect(LLIndexSearcher indexSearcher, LocalQueryParams queryParams, @Nullable String keyFieldName, GlobalQueryRewrite transformer) { if (transformer != GlobalQueryRewrite.NO_REWRITE) { - return LuceneUtils.rewrite(this, indexSearcherMono, queryParams, keyFieldName, transformer); + return LuceneUtils.rewrite(this, indexSearcher, queryParams, keyFieldName, transformer); } PaginationInfo paginationInfo = getPaginationInfo(queryParams); - var indexSearchersMono = indexSearcherMono.map(indexSearcher -> LLIndexSearchers.unsharded(indexSearcher)); + var indexSearchers = LLIndexSearchers.unsharded(indexSearcher); - return singleOrClose(indexSearchersMono, indexSearchers -> this - // Search first page results - .searchFirstPage(indexSearchers.shards(), queryParams, paginationInfo) - // Compute the results of the first page - .transform(firstPageTopDocsMono -> this.computeFirstPageResults(firstPageTopDocsMono, - indexSearchers.shards(), - keyFieldName, - queryParams - )) - // Compute other results - .transform(firstResult -> this.computeOtherResults(firstResult, - indexSearchers.shards(), - queryParams, - keyFieldName, - () -> indexSearchers.close() - )) - // Ensure that one LuceneSearchResult is always returned - .single()); + // Search first page results + var firstPageTopDocs = this.searchFirstPage(indexSearchers.shards(), queryParams, paginationInfo); + // Compute the results of the first page + var firstResult = this.computeFirstPageResults(firstPageTopDocs, + indexSearchers.shards(), + keyFieldName, + queryParams + ); + return 
this.computeOtherResults(firstResult, + indexSearchers.shards(), + queryParams, + keyFieldName, + () -> indexSearchers.close() + ); } @Override @@ -88,76 +77,68 @@ public class PagedLocalSearcher implements LocalSearcher { /** * Search effectively the raw results of the first page */ - private Mono searchFirstPage(List indexSearchers, + private PageData searchFirstPage(List indexSearchers, LocalQueryParams queryParams, PaginationInfo paginationInfo) { var limit = paginationInfo.totalLimit(); var pagination = !paginationInfo.forceSinglePage(); var resultsOffset = LuceneUtils.safeLongToInt(paginationInfo.firstPageOffset()); var currentPageInfo = new CurrentPageInfo(null, limit, 0); - return Mono - .just(currentPageInfo) - .handle((s, sink) -> this.searchPageSync(queryParams, indexSearchers, pagination, resultsOffset, s, sink)) - //defaultIfEmpty(new PageData(new TopDocs(new TotalHits(0, Relation.EQUAL_TO), new ScoreDoc[0]), currentPageInfo)) - .single() - .transform(LuceneUtils::scheduleLucene); + return this.searchPageSync(queryParams, indexSearchers, pagination, resultsOffset, currentPageInfo).pageData(); } /** * Compute the results of the first page, extracting useful data */ - private Mono computeFirstPageResults(Mono firstPageDataMono, + private FirstPageResults computeFirstPageResults(PageData firstPageData, List indexSearchers, String keyFieldName, LocalQueryParams queryParams) { - return firstPageDataMono.map(firstPageData -> { - var totalHitsCount = LuceneUtils.convertTotalHitsCount(firstPageData.topDocs().totalHits); - var scoreDocs = firstPageData.topDocs().scoreDocs; - assert LLUtils.isSet(scoreDocs); + var totalHitsCount = LuceneUtils.convertTotalHitsCount(firstPageData.topDocs().totalHits); + var scoreDocs = firstPageData.topDocs().scoreDocs; + assert LLUtils.isSet(scoreDocs); - Flux firstPageHitsFlux = LuceneUtils.convertHits(Flux.fromArray(scoreDocs), - indexSearchers, keyFieldName, true) - .take(queryParams.limitInt(), true); + Stream firstPageHitsFlux = LuceneUtils.convertHits(Stream.of(scoreDocs), + indexSearchers, keyFieldName + ) + .limit(queryParams.limitLong()); - CurrentPageInfo nextPageInfo = firstPageData.nextPageInfo(); + CurrentPageInfo nextPageInfo = firstPageData.nextPageInfo(); - return new FirstPageResults(totalHitsCount, firstPageHitsFlux, nextPageInfo); - }).single(); + return new FirstPageResults(totalHitsCount, firstPageHitsFlux, nextPageInfo); } - private Mono computeOtherResults(Mono firstResultMono, + private LuceneSearchResult computeOtherResults(FirstPageResults firstResult, List indexSearchers, LocalQueryParams queryParams, String keyFieldName, Runnable onClose) { - return firstResultMono.map(firstResult -> { - var totalHitsCount = firstResult.totalHitsCount(); - var firstPageHitsFlux = firstResult.firstPageHitsFlux(); - var secondPageInfo = firstResult.nextPageInfo(); + var totalHitsCount = firstResult.totalHitsCount(); + var firstPageHitsStream = firstResult.firstPageHitsStream(); + var secondPageInfo = firstResult.nextPageInfo(); - Flux nextHitsFlux = searchOtherPages(indexSearchers, queryParams, keyFieldName, secondPageInfo); + Stream nextHitsFlux = searchOtherPages(indexSearchers, queryParams, keyFieldName, secondPageInfo); - Flux combinedFlux = firstPageHitsFlux.concatWith(nextHitsFlux); - return new MyLuceneSearchResult(totalHitsCount, combinedFlux, onClose); - }).single(); + Stream combinedFlux = Stream.concat(firstPageHitsStream, nextHitsFlux); + return new MyLuceneSearchResult(totalHitsCount, combinedFlux, onClose); } /** * Search 
effectively the merged raw results of the next pages */ - private Flux searchOtherPages(List indexSearchers, + private Stream searchOtherPages(List indexSearchers, LocalQueryParams queryParams, String keyFieldName, CurrentPageInfo secondPageInfo) { - return Flux - .generate( - () -> secondPageInfo, - (s, sink) -> searchPageSync(queryParams, indexSearchers, true, 0, s, sink), - s -> {} - ) - .subscribeOn(luceneScheduler()) - .map(pageData -> pageData.topDocs()) - .flatMapIterable(topDocs -> Arrays.asList(topDocs.scoreDocs)) - .transform(topFieldDocFlux -> LuceneUtils.convertHits(topFieldDocFlux, indexSearchers, keyFieldName, true)) - .publishOn(Schedulers.parallel()); + AtomicReference pageInfo = new AtomicReference<>(secondPageInfo); + Object lock = new Object(); + Stream topFieldDocFlux = Stream.generate(() -> { + synchronized (lock) { + var currentPageInfo = pageInfo.getPlain(); + var result = searchPageSync(queryParams, indexSearchers, true, 0, currentPageInfo); + pageInfo.setPlain(result.nextPageToIterate()); + return result.pageData(); + } + }).takeWhile(Objects::nonNull).flatMap(pd -> Stream.of(pd.topDocs().scoreDocs)); + return LuceneUtils.convertHits(topFieldDocFlux, indexSearchers, keyFieldName); } /** @@ -165,28 +146,25 @@ public class PagedLocalSearcher implements LocalSearcher { * @param resultsOffset offset of the resulting topDocs. Useful if you want to * skip the first n results in the first page */ - private CurrentPageInfo searchPageSync(LocalQueryParams queryParams, + private PageIterationStepResult searchPageSync(LocalQueryParams queryParams, List indexSearchers, boolean allowPagination, int resultsOffset, - CurrentPageInfo s, - SynchronousSink sink) { - LLUtils.ensureBlocking(); + CurrentPageInfo s) { if (resultsOffset < 0) { throw new IndexOutOfBoundsException(resultsOffset); } + PageData result = null; var currentPageLimit = queryParams.pageLimits().getPageLimit(s.pageIndex()); if (s.pageIndex() == 0 && s.remainingLimit() == 0) { int count; try { count = indexSearchers.get(0).count(queryParams.query()); } catch (IOException e) { - sink.error(e); - return EMPTY_STATUS; + throw new DBException(e); } var nextPageInfo = new CurrentPageInfo(null, 0, 1); - sink.next(new PageData(new TopDocs(new TotalHits(count, Relation.EQUAL_TO), new ScoreDoc[0]), nextPageInfo)); - return EMPTY_STATUS; + return new PageIterationStepResult(EMPTY_STATUS, new PageData(new TopDocs(new TotalHits(count, Relation.EQUAL_TO), new ScoreDoc[0]), nextPageInfo)); } else if (s.pageIndex() == 0 || (s.last() != null && s.remainingLimit() > 0)) { TopDocs pageTopDocs; try { @@ -198,8 +176,7 @@ public class PagedLocalSearcher implements LocalSearcher { .get(0) .search(queryParams.query(), cmm.get(queryParams.query(), indexSearchers.get(0))))); } catch (IOException e) { - sink.error(e); - return EMPTY_STATUS; + throw new DBException(e); } var pageLastDoc = LuceneUtils.getLastScoreDoc(pageTopDocs.scoreDocs); long nextRemainingLimit; @@ -210,11 +187,9 @@ public class PagedLocalSearcher implements LocalSearcher { } var nextPageIndex = s.pageIndex() + 1; var nextPageInfo = new CurrentPageInfo(pageLastDoc, nextRemainingLimit, nextPageIndex); - sink.next(new PageData(pageTopDocs, nextPageInfo)); - return nextPageInfo; + return new PageIterationStepResult(nextPageInfo, new PageData(pageTopDocs, nextPageInfo)); } else { - sink.complete(); - return EMPTY_STATUS; + return new PageIterationStepResult(EMPTY_STATUS, null); } } @@ -222,8 +197,8 @@ public class PagedLocalSearcher implements LocalSearcher { private final 
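searchOtherPages above replaces the page-by-page Flux.generate loop with Stream.generate over a mutable cursor; the lock keeps the read-fetch-update step atomic in case the stream is ever pulled from more than one thread, which is also why the cheaper getPlain/setPlain accessors are enough. The pattern reduced to its essentials, with Page and PageFetcher as assumed stand-ins for PageData and searchPageSync:

    import java.util.List;
    import java.util.Objects;
    import java.util.concurrent.atomic.AtomicReference;
    import java.util.stream.Stream;

    final class PageStream {

        /** One pagination step: this page's items plus the cursor of the next page (null cursor = done). */
        record Page<C, T>(C nextCursor, List<T> items) {}

        interface PageFetcher<C, T> {
            /** Returns null when there are no more pages. */
            Page<C, T> fetch(C cursor);
        }

        static <C, T> Stream<T> pages(C firstCursor, PageFetcher<C, T> fetcher) {
            AtomicReference<C> cursor = new AtomicReference<>(firstCursor);
            Object lock = new Object();
            return Stream.generate(() -> {
                        synchronized (lock) { // keep read-fetch-update atomic
                            C current = cursor.getPlain();
                            if (current == null) {
                                return null;
                            }
                            Page<C, T> page = fetcher.fetch(current);
                            cursor.setPlain(page == null ? null : page.nextCursor());
                            return page;
                        }
                    })
                    .takeWhile(Objects::nonNull)
                    .flatMap(page -> page.items().stream());
        }
    }
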
Runnable onClose; - public MyLuceneSearchResult(TotalHitsCount totalHitsCount, Flux combinedFlux, Runnable onClose) { - super(totalHitsCount, combinedFlux); + public MyLuceneSearchResult(TotalHitsCount totalHitsCount, Stream combinedStream, Runnable onClose) { + super(totalHitsCount, combinedStream); this.onClose = onClose; } diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/ScoredPagedMultiSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/ScoredPagedMultiSearcher.java index 3c9f786..bc0ff8f 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/ScoredPagedMultiSearcher.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/ScoredPagedMultiSearcher.java @@ -1,12 +1,8 @@ package it.cavallium.dbengine.lucene.searcher; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; -import static it.cavallium.dbengine.database.LLUtils.singleOrClose; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; -import static it.cavallium.dbengine.lucene.searcher.GlobalQueryRewrite.NO_REWRITE; import static it.cavallium.dbengine.lucene.searcher.PaginationInfo.MAX_SINGLE_SEARCH_LIMIT; -import io.netty5.util.Send; +import com.google.common.collect.Streams; import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; import it.cavallium.dbengine.database.LLKeyScore; import it.cavallium.dbengine.database.LLUtils; @@ -15,20 +11,22 @@ import it.cavallium.dbengine.lucene.LuceneCloseable; import it.cavallium.dbengine.lucene.LuceneUtils; import it.cavallium.dbengine.lucene.PageLimits; import it.cavallium.dbengine.lucene.collector.ScoringShardsCollectorMultiManager; +import it.cavallium.dbengine.utils.DBException; import java.io.IOException; -import java.io.UncheckedIOException; import java.util.Arrays; +import java.util.Collection; import java.util.List; +import java.util.Objects; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.lucene.search.FieldDoc; import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.Sort; +import org.apache.lucene.search.TopDocs; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; public class ScoredPagedMultiSearcher implements MultiSearcher { @@ -38,32 +36,26 @@ public class ScoredPagedMultiSearcher implements MultiSearcher { } @Override - public Mono collectMulti(Mono indexSearchersMono, + public LuceneSearchResult collectMulti(LLIndexSearchers indexSearchers, LocalQueryParams queryParams, @Nullable String keyFieldName, GlobalQueryRewrite transformer) { if (transformer != GlobalQueryRewrite.NO_REWRITE) { - return LuceneUtils.rewriteMulti(this, indexSearchersMono, queryParams, keyFieldName, transformer); + return LuceneUtils.rewriteMulti(this, indexSearchers, queryParams, keyFieldName, transformer); } PaginationInfo paginationInfo = getPaginationInfo(queryParams); - - return singleOrClose(indexSearchersMono, indexSearchers -> this - // Search first page results - .searchFirstPage(indexSearchers.shards(), queryParams, paginationInfo) + // Search first page results + var firstPageTopDocs = this.searchFirstPage(indexSearchers.shards(), queryParams, paginationInfo); // Compute the results of the first page - .transform(firstPageTopDocsMono -> - 
this.computeFirstPageResults(firstPageTopDocsMono, indexSearchers, keyFieldName, queryParams - )) + var firstResult = this.computeFirstPageResults(firstPageTopDocs, indexSearchers, keyFieldName, queryParams); // Compute other results - .map(firstResult -> this.computeOtherResults(firstResult, + return this.computeOtherResults(firstResult, indexSearchers.shards(), queryParams, keyFieldName, () -> indexSearchers.close() - )) - // Ensure that one LuceneSearchResult is always returned - .single()); + ); } private Sort getSort(LocalQueryParams queryParams) { @@ -84,38 +76,40 @@ public class ScoredPagedMultiSearcher implements MultiSearcher { /** * Search effectively the raw results of the first page */ - private Mono searchFirstPage(List indexSearchers, + private PageData searchFirstPage(List indexSearchers, LocalQueryParams queryParams, PaginationInfo paginationInfo) { var limit = paginationInfo.totalLimit(); var pageLimits = paginationInfo.pageLimits(); var pagination = !paginationInfo.forceSinglePage(); var resultsOffset = LuceneUtils.safeLongToInt(paginationInfo.firstPageOffset()); - return Mono - .fromSupplier(() -> new CurrentPageInfo(null, limit, 0)) - .flatMap(s -> this.searchPage(queryParams, indexSearchers, pagination, pageLimits, resultsOffset, s)); + return this.searchPage(queryParams, + indexSearchers, + pagination, + pageLimits, + resultsOffset, + new CurrentPageInfo(null, limit, 0) + ); } /** * Compute the results of the first page, extracting useful data */ - private Mono computeFirstPageResults(Mono firstPageDataMono, + private FirstPageResults computeFirstPageResults(PageData firstPageData, LLIndexSearchers indexSearchers, String keyFieldName, LocalQueryParams queryParams) { - return firstPageDataMono.map(firstPageData -> { - var totalHitsCount = LuceneUtils.convertTotalHitsCount(firstPageData.topDocs().totalHits); - var scoreDocs = firstPageData.topDocs().scoreDocs; - assert LLUtils.isSet(scoreDocs); + var totalHitsCount = LuceneUtils.convertTotalHitsCount(firstPageData.topDocs().totalHits); + var scoreDocs = firstPageData.topDocs().scoreDocs; + assert LLUtils.isSet(scoreDocs); - Flux firstPageHitsFlux = LuceneUtils.convertHits(Flux.fromArray(scoreDocs), - indexSearchers.shards(), keyFieldName, true) - .take(queryParams.limitInt(), true); + Stream firstPageHitsFlux = LuceneUtils + .convertHits(Stream.of(scoreDocs), indexSearchers.shards(), keyFieldName) + .limit(queryParams.limitInt()); - CurrentPageInfo nextPageInfo = firstPageData.nextPageInfo(); + CurrentPageInfo nextPageInfo = firstPageData.nextPageInfo(); - return new FirstPageResults(totalHitsCount, firstPageHitsFlux, nextPageInfo); - }); + return new FirstPageResults(totalHitsCount, firstPageHitsFlux, nextPageInfo); } private LuceneSearchResult computeOtherResults(FirstPageResults firstResult, @@ -124,37 +118,36 @@ public class ScoredPagedMultiSearcher implements MultiSearcher { String keyFieldName, Runnable onClose) { var totalHitsCount = firstResult.totalHitsCount(); - var firstPageHitsFlux = firstResult.firstPageHitsFlux(); + var firstPageHitsStream = firstResult.firstPageHitsStream(); var secondPageInfo = firstResult.nextPageInfo(); - Flux nextHitsFlux = searchOtherPages(indexSearchers, queryParams, keyFieldName, secondPageInfo); + Stream nextHitsFlux = searchOtherPages(indexSearchers, queryParams, keyFieldName, secondPageInfo); - Flux combinedFlux = firstPageHitsFlux.concatWith(nextHitsFlux); - return new MyLuceneSearchResult(totalHitsCount, combinedFlux, onClose); + Stream combinedStream = 
Stream.concat(firstPageHitsStream, nextHitsFlux); + return new MyLuceneSearchResult(totalHitsCount, combinedStream, onClose); } /** * Search effectively the merged raw results of the next pages */ - private Flux searchOtherPages(List indexSearchers, + private Stream searchOtherPages(List indexSearchers, LocalQueryParams queryParams, String keyFieldName, CurrentPageInfo secondPageInfo) { - return Flux - .defer(() -> { - AtomicReference currentPageInfoRef = new AtomicReference<>(secondPageInfo); - return Mono - .fromSupplier(currentPageInfoRef::get) - .doOnNext(s -> LOG.trace("Current page info: {}", s)) - .flatMap(currentPageInfo -> this.searchPage(queryParams, indexSearchers, true, - queryParams.pageLimits(), 0, currentPageInfo)) - .doOnNext(s -> LOG.trace("Next page info: {}", s.nextPageInfo())) - .doOnNext(s -> currentPageInfoRef.set(s.nextPageInfo())) - .repeatWhen(s -> s.takeWhile(n -> n > 0)); - }) - .transform(LuceneUtils::scheduleLucene) - .map(pageData -> pageData.topDocs()) - .flatMapIterable(topDocs -> Arrays.asList(topDocs.scoreDocs)) - .transform(topFieldDocFlux -> LuceneUtils.convertHits(topFieldDocFlux, indexSearchers, - keyFieldName, true)); + AtomicReference currentPageInfoRef = new AtomicReference<>(secondPageInfo); + Stream topFieldDocStream = Stream.generate(() -> { + var currentPageInfo = currentPageInfoRef.getPlain(); + if (currentPageInfo == null) return null; + LOG.trace("Current page info: {}", currentPageInfo); + var result = this.searchPage(queryParams, indexSearchers, true, queryParams.pageLimits(), 0, currentPageInfo); + LOG.trace("Next page info: {}", result != null ? result.nextPageInfo() : null); + currentPageInfoRef.setPlain(result != null ? result.nextPageInfo() : null); + if (result == null || result.topDocs().scoreDocs.length == 0) { + return null; + } else { + return Arrays.asList(result.topDocs().scoreDocs); + } + }).takeWhile(Objects::nonNull).flatMap(Collection::stream); + + return LuceneUtils.convertHits(topFieldDocStream, indexSearchers, keyFieldName); } /** @@ -162,61 +155,56 @@ public class ScoredPagedMultiSearcher implements MultiSearcher { * @param resultsOffset offset of the resulting topDocs. 
Useful if you want to * skip the first n results in the first page */ - private Mono searchPage(LocalQueryParams queryParams, + private PageData searchPage(LocalQueryParams queryParams, List indexSearchers, boolean allowPagination, PageLimits pageLimits, int resultsOffset, CurrentPageInfo s) { - return Mono - .fromCallable(() -> { - LLUtils.ensureBlocking(); - if (resultsOffset < 0) { - throw new IndexOutOfBoundsException(resultsOffset); - } - if (s.pageIndex() == 0 || (s.last() != null && s.remainingLimit() > 0)) { - var query = queryParams.query(); - @Nullable var sort = getSort(queryParams); - var pageLimit = pageLimits.getPageLimit(s.pageIndex()); - var after = (FieldDoc) s.last(); - var totalHitsThreshold = queryParams.getTotalHitsThresholdInt(); - return new ScoringShardsCollectorMultiManager(query, sort, pageLimit, after, totalHitsThreshold, - resultsOffset, pageLimit); - } else { - return null; + if (resultsOffset < 0) { + throw new IndexOutOfBoundsException(resultsOffset); + } + ScoringShardsCollectorMultiManager cmm; + if (s.pageIndex() == 0 || (s.last() != null && s.remainingLimit() > 0)) { + var query = queryParams.query(); + @Nullable var sort = getSort(queryParams); + var pageLimit = pageLimits.getPageLimit(s.pageIndex()); + var after = (FieldDoc) s.last(); + var totalHitsThreshold = queryParams.getTotalHitsThresholdInt(); + cmm = new ScoringShardsCollectorMultiManager(query, sort, pageLimit, after, totalHitsThreshold, + resultsOffset, pageLimit); + } else { + return null; + }; + record IndexedShard(IndexSearcher indexSearcher, long shardIndex) {} + List shardResults = Streams + .mapWithIndex(indexSearchers.stream(), IndexedShard::new) + .map(shardWithIndex -> { + var index = (int) shardWithIndex.shardIndex(); + var shard = shardWithIndex.indexSearcher(); + + var cm = cmm.get(shard, index); + + try { + return shard.search(queryParams.query(), cm); + } catch (IOException e) { + throw new DBException(e); } }) - .subscribeOn(luceneScheduler()) - .flatMap(cmm -> Flux - .fromIterable(indexSearchers) - .index() - .flatMap(shardWithIndex -> Mono.fromCallable(() -> { - LLUtils.ensureBlocking(); + .toList(); - var index = (int) (long) shardWithIndex.getT1(); - var shard = shardWithIndex.getT2(); + var pageTopDocs = cmm.reduce(shardResults); - var cm = cmm.get(shard, index); - - return shard.search(queryParams.query(), cm); - }).subscribeOn(luceneScheduler())) - .collectList() - .flatMap(results -> Mono.fromCallable(() -> { - LLUtils.ensureBlocking(); - var pageTopDocs = cmm.reduce(results); - - var pageLastDoc = LuceneUtils.getLastScoreDoc(pageTopDocs.scoreDocs); - long nextRemainingLimit; - if (allowPagination) { - nextRemainingLimit = s.remainingLimit() - pageLimits.getPageLimit(s.pageIndex()); - } else { - nextRemainingLimit = 0L; - } - var nextPageIndex = s.pageIndex() + 1; - var nextPageInfo = new CurrentPageInfo(pageLastDoc, nextRemainingLimit, nextPageIndex); - return new PageData(pageTopDocs, nextPageInfo); - }).subscribeOn(luceneScheduler())) - ).publishOn(Schedulers.parallel()); + var pageLastDoc = LuceneUtils.getLastScoreDoc(pageTopDocs.scoreDocs); + long nextRemainingLimit; + if (allowPagination) { + nextRemainingLimit = s.remainingLimit() - pageLimits.getPageLimit(s.pageIndex()); + } else { + nextRemainingLimit = 0L; + } + var nextPageIndex = s.pageIndex() + 1; + var nextPageInfo = new CurrentPageInfo(pageLastDoc, nextRemainingLimit, nextPageIndex); + return new PageData(pageTopDocs, nextPageInfo); } @Override @@ -229,7 +217,7 @@ public class ScoredPagedMultiSearcher 
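Reactor's Flux.index() has no direct java.util.stream counterpart, so searchPage above pairs each shard with its ordinal through Guava's Streams.mapWithIndex and a local record, mirroring the IndexedShard record in the diff. The idiom in isolation:

    import com.google.common.collect.Streams;
    import java.util.List;
    import java.util.stream.Stream;

    final class IndexedShards {

        record Indexed<T>(T value, long index) {}

        /** Pairs every element with its position, like Reactor's Flux.index(). */
        static <T> Stream<Indexed<T>> withIndex(List<T> values) {
            return Streams.mapWithIndex(values.stream(), Indexed::new);
        }

        public static void main(String[] args) {
            withIndex(List.of("a", "b", "c"))
                    .forEach(e -> System.out.println(e.index() + " -> " + e.value())); // 0 -> a, 1 -> b, 2 -> c
        }
    }
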
implements MultiSearcher { private final Runnable onClose; - public MyLuceneSearchResult(TotalHitsCount totalHitsCount, Flux combinedFlux, Runnable onClose) { + public MyLuceneSearchResult(TotalHitsCount totalHitsCount, Stream combinedFlux, Runnable onClose) { super(totalHitsCount, combinedFlux); this.onClose = onClose; } diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/ShardIndexSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/ShardIndexSearcher.java index b22c8a5..a0feb6b 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/ShardIndexSearcher.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/ShardIndexSearcher.java @@ -8,7 +8,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; -import java.util.Optional; import java.util.Set; import org.apache.lucene.index.Term; import org.apache.lucene.index.TermStates; @@ -78,7 +77,7 @@ public class ShardIndexSearcher extends IndexSearcher { } @Override - public Query rewrite(Query original) throws IOException { + public Query rewrite(Query original) { final IndexSearcher localSearcher = new IndexSearcher(getIndexReader()); original = localSearcher.rewrite(original); final Set terms = new HashSet<>(); @@ -113,7 +112,7 @@ public class ShardIndexSearcher extends IndexSearcher { // Mock: in a real env, this would hit the wire and get // term stats from remote node - Map getNodeTermStats(Set terms, int nodeID) throws IOException { + Map getNodeTermStats(Set terms, int nodeID) { var s = searchers[nodeID]; final Map stats = new HashMap<>(); if (s == null) { @@ -158,7 +157,7 @@ public class ShardIndexSearcher extends IndexSearcher { } @Override - public CollectionStatistics collectionStatistics(String field) throws IOException { + public CollectionStatistics collectionStatistics(String field) { // TODO: we could compute this on init and cache, // since we are re-inited whenever any nodes have a // new reader @@ -205,7 +204,7 @@ public class ShardIndexSearcher extends IndexSearcher { } } - private CollectionStatistics computeNodeCollectionStatistics(FieldAndShar fieldAndShard) throws IOException { + private CollectionStatistics computeNodeCollectionStatistics(FieldAndShar fieldAndShard) { var searcher = searchers[fieldAndShard.nodeID]; return searcher.collectionStatistics(fieldAndShard.field); } diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/SharedShardStatistics.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/SharedShardStatistics.java index bfb9d75..523fbe0 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/SharedShardStatistics.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/SharedShardStatistics.java @@ -4,9 +4,7 @@ import it.cavallium.dbengine.lucene.searcher.ShardIndexSearcher.CachedCollection import it.cavallium.dbengine.lucene.searcher.ShardIndexSearcher.FieldAndShar; import it.cavallium.dbengine.lucene.searcher.ShardIndexSearcher.TermAndShard; import java.util.Map; -import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; -import org.apache.lucene.search.CollectionStatistics; import org.apache.lucene.search.TermStatistics; public class SharedShardStatistics { diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/SortedByScoreFullMultiSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/SortedByScoreFullMultiSearcher.java deleted file mode 100644 index d35e866..0000000 --- 
a/src/main/java/it/cavallium/dbengine/lucene/searcher/SortedByScoreFullMultiSearcher.java +++ /dev/null @@ -1,150 +0,0 @@ -package it.cavallium.dbengine.lucene.searcher; - -import static it.cavallium.dbengine.database.LLUtils.singleOrClose; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; - -import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; -import it.cavallium.dbengine.database.LLKeyScore; -import it.cavallium.dbengine.database.LLUtils; -import it.cavallium.dbengine.database.disk.LLIndexSearchers; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.LuceneCloseable; -import it.cavallium.dbengine.lucene.LuceneUtils; -import it.cavallium.dbengine.lucene.FullDocs; -import it.cavallium.dbengine.lucene.LLScoreDoc; -import it.cavallium.dbengine.lucene.hugepq.search.HugePqFullScoreDocCollector; -import java.io.UncheckedIOException; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.search.IndexSearcher; -import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; - -public class SortedByScoreFullMultiSearcher implements MultiSearcher { - - protected static final Logger LOG = LogManager.getLogger(SortedByScoreFullMultiSearcher.class); - - private final LLTempHugePqEnv env; - - public SortedByScoreFullMultiSearcher(LLTempHugePqEnv env) { - this.env = env; - } - - @Override - public Mono collectMulti(Mono indexSearchersMono, - LocalQueryParams queryParams, - @Nullable String keyFieldName, - GlobalQueryRewrite transformer) { - if (transformer != GlobalQueryRewrite.NO_REWRITE) { - return LuceneUtils.rewriteMulti(this, indexSearchersMono, queryParams, keyFieldName, transformer); - } - if (queryParams.isSorted() && !queryParams.isSortedByScore()) { - throw new IllegalArgumentException(SortedByScoreFullMultiSearcher.this.getClass().getSimpleName() - + " doesn't support sorted queries"); - } - return singleOrClose(indexSearchersMono, indexSearchers -> this - // Search results - .search(indexSearchers.shards(), queryParams) - // Compute the results - .transform(fullDocsMono -> this.computeResults(fullDocsMono, indexSearchers, keyFieldName, queryParams)) - // Ensure that one LuceneSearchResult is always returned - .single()); - } - - /** - * Search effectively the raw results - */ - private Mono> search(Iterable indexSearchers, - LocalQueryParams queryParams) { - return Mono - .fromCallable(() -> { - var totalHitsThreshold = queryParams.getTotalHitsThresholdLong(); - return HugePqFullScoreDocCollector.createSharedManager(env, queryParams.limitLong(), totalHitsThreshold); - }) - .flatMap(sharedManager -> Flux - .fromIterable(indexSearchers) - .flatMap(shard -> Mono.fromCallable(() -> { - LLUtils.ensureBlocking(); - - var collector = sharedManager.newCollector(); - try { - assert queryParams.computePreciseHitsCount() == null || - queryParams.computePreciseHitsCount() == collector.scoreMode().isExhaustive(); - - shard.search(queryParams.query(), collector); - return collector; - } catch (Throwable ex) { - collector.close(); - throw ex; - } - }).subscribeOn(luceneScheduler())) - .collectList() - .flatMap(collectors -> Mono.fromCallable(() -> { - try { - LLUtils.ensureBlocking(); - return sharedManager.reduce(collectors); - } catch (Throwable ex) { - for (HugePqFullScoreDocCollector collector : collectors) { - collector.close(); - } - throw ex; - } - 
}).subscribeOn(luceneScheduler())) - ).publishOn(Schedulers.parallel()); - } - - /** - * Compute the results, extracting useful data - */ - private Mono computeResults(Mono> dataMono, - LLIndexSearchers indexSearchers, - String keyFieldName, - LocalQueryParams queryParams) { - return dataMono.map(data -> { - var totalHitsCount = LuceneUtils.convertTotalHitsCount(data.totalHits()); - - Flux hitsFlux = LuceneUtils - .convertHits(data.iterate(queryParams.offsetLong()).map(LLScoreDoc::toScoreDoc), - indexSearchers.shards(), keyFieldName, true) - .take(queryParams.limitLong(), true); - - return new MyLuceneSearchResult(totalHitsCount, hitsFlux, indexSearchers, data); - }); - } - - @Override - public String getName() { - return "sorted by score full multi"; - } - - private static class MyLuceneSearchResult extends LuceneSearchResult implements LuceneCloseable { - - private final LLIndexSearchers indexSearchers; - private final FullDocs data; - - public MyLuceneSearchResult(TotalHitsCount totalHitsCount, Flux hitsFlux, - LLIndexSearchers indexSearchers, - FullDocs data) { - super(totalHitsCount, hitsFlux); - this.indexSearchers = indexSearchers; - this.data = data; - } - - @Override - protected void onClose() { - try { - indexSearchers.close(); - } catch (Throwable e) { - LOG.error("Can't close index searchers", e); - } - try { - data.close(); - } catch (Throwable e) { - LOG.error("Failed to discard data", e); - } - super.onClose(); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/SortedScoredFullMultiSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/SortedScoredFullMultiSearcher.java deleted file mode 100644 index ab2965e..0000000 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/SortedScoredFullMultiSearcher.java +++ /dev/null @@ -1,153 +0,0 @@ -package it.cavallium.dbengine.lucene.searcher; - -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; -import static it.cavallium.dbengine.database.LLUtils.singleOrClose; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; - -import io.netty5.util.Send; -import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; -import it.cavallium.dbengine.database.LLKeyScore; -import it.cavallium.dbengine.database.LLUtils; -import it.cavallium.dbengine.database.disk.LLIndexSearchers; -import it.cavallium.dbengine.database.disk.LLTempHugePqEnv; -import it.cavallium.dbengine.lucene.FullDocs; -import it.cavallium.dbengine.lucene.LLFieldDoc; -import it.cavallium.dbengine.lucene.LuceneCloseable; -import it.cavallium.dbengine.lucene.LuceneUtils; -import it.cavallium.dbengine.lucene.hugepq.search.HugePqFullFieldDocCollector; -import java.io.IOException; -import java.io.UncheckedIOException; -import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.Logger; -import org.apache.lucene.search.IndexSearcher; -import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; - -public class SortedScoredFullMultiSearcher implements MultiSearcher { - - protected static final Logger LOG = LogManager.getLogger(SortedScoredFullMultiSearcher.class); - - private final LLTempHugePqEnv env; - - public SortedScoredFullMultiSearcher(LLTempHugePqEnv env) { - this.env = env; - } - - @Override - public Mono collectMulti(Mono indexSearchersMono, - LocalQueryParams queryParams, - @Nullable String keyFieldName, - GlobalQueryRewrite transformer) { - if 
(transformer != GlobalQueryRewrite.NO_REWRITE) { - return LuceneUtils.rewriteMulti(this, indexSearchersMono, queryParams, keyFieldName, transformer); - } - return singleOrClose(indexSearchersMono, indexSearchers -> this - // Search results - .search(indexSearchers.shards(), queryParams) - // Compute the results - .transform(fullDocsMono -> this.computeResults(fullDocsMono, indexSearchers, keyFieldName, queryParams)) - // Ensure that one LuceneSearchResult is always returned - .single()); - } - - /** - * Search effectively the raw results - */ - private Mono> search(Iterable indexSearchers, - LocalQueryParams queryParams) { - return Mono - .fromCallable(() -> { - LLUtils.ensureBlocking(); - var totalHitsThreshold = queryParams.getTotalHitsThresholdLong(); - return HugePqFullFieldDocCollector.createSharedManager(env, queryParams.sort(), queryParams.limitInt(), - totalHitsThreshold); - }) - .subscribeOn(luceneScheduler()) - .>flatMap(sharedManager -> Flux - .fromIterable(indexSearchers) - .flatMap(shard -> Mono.fromCallable(() -> { - LLUtils.ensureBlocking(); - - var collector = sharedManager.newCollector(); - try { - assert queryParams.computePreciseHitsCount() == null - || queryParams.computePreciseHitsCount() == collector.scoreMode().isExhaustive(); - - shard.search(queryParams.query(), collector); - return collector; - } catch (Throwable ex) { - collector.close(); - throw ex; - } - }).subscribeOn(luceneScheduler())) - .collectList() - .flatMap(collectors -> Mono.fromCallable(() -> { - try { - LLUtils.ensureBlocking(); - return sharedManager.reduce(collectors); - } catch (Throwable ex) { - for (HugePqFullFieldDocCollector collector : collectors) { - collector.close(); - } - throw ex; - } - }).subscribeOn(luceneScheduler())) - ); - } - - /** - * Compute the results, extracting useful data - */ - private Mono computeResults(Mono> dataMono, - LLIndexSearchers indexSearchers, - String keyFieldName, - LocalQueryParams queryParams) { - return dataMono.map(data -> { - var totalHitsCount = LuceneUtils.convertTotalHitsCount(data.totalHits()); - - Flux hitsFlux = LuceneUtils - .convertHits(data.iterate(queryParams.offsetLong()).map(LLFieldDoc::toFieldDoc), - indexSearchers.shards(), keyFieldName, true) - .take(queryParams.limitLong(), true); - - return new MyLuceneSearchResult(totalHitsCount, hitsFlux, indexSearchers, data); - }); - } - - @Override - public String getName() { - return "sorted scored full multi"; - } - - private static class MyLuceneSearchResult extends LuceneSearchResult implements LuceneCloseable { - - private final LLIndexSearchers indexSearchers; - private final FullDocs data; - - public MyLuceneSearchResult(TotalHitsCount totalHitsCount, - Flux hitsFlux, - LLIndexSearchers indexSearchers, - FullDocs data) { - super(totalHitsCount, hitsFlux); - this.indexSearchers = indexSearchers; - this.data = data; - } - - @Override - protected void onClose() { - try { - indexSearchers.close(); - } catch (Throwable e) { - LOG.error("Can't close index searchers", e); - } - try { - data.close(); - } catch (Throwable e) { - LOG.error("Failed to discard data", e); - } - super.onClose(); - } - } -} diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/StandardSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/StandardSearcher.java index 18821d0..de9058c 100644 --- a/src/main/java/it/cavallium/dbengine/lucene/searcher/StandardSearcher.java +++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/StandardSearcher.java @@ -1,9 +1,5 @@ package 
it.cavallium.dbengine.lucene.searcher; -import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler; -import static it.cavallium.dbengine.database.LLUtils.singleOrClose; -import static it.cavallium.dbengine.lucene.LuceneUtils.luceneScheduler; -import static it.cavallium.dbengine.lucene.LuceneUtils.sum; import static java.util.Objects.requireNonNull; import it.cavallium.dbengine.client.query.current.data.TotalHitsCount; @@ -12,11 +8,14 @@ import it.cavallium.dbengine.database.LLUtils; import it.cavallium.dbengine.database.disk.LLIndexSearchers; import it.cavallium.dbengine.lucene.LuceneCloseable; import it.cavallium.dbengine.lucene.LuceneUtils; +import it.cavallium.dbengine.utils.DBException; import java.io.IOException; -import java.io.UncheckedIOException; +import java.util.Collection; import java.util.List; +import java.util.stream.Stream; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.search.CollectorManager; import org.apache.lucene.search.IndexSearcher; import org.apache.lucene.search.ScoreDoc; import org.apache.lucene.search.TopDocs; @@ -25,9 +24,6 @@ import org.apache.lucene.search.TopFieldCollector; import org.apache.lucene.search.TopFieldDocs; import org.apache.lucene.search.TopScoreDocCollector; import org.jetbrains.annotations.Nullable; -import reactor.core.publisher.Flux; -import reactor.core.publisher.Mono; -import reactor.core.scheduler.Schedulers; public class StandardSearcher implements MultiSearcher { @@ -37,96 +33,96 @@ public class StandardSearcher implements MultiSearcher { } @Override - public Mono collectMulti(Mono indexSearchersMono, + public LuceneSearchResult collectMulti(LLIndexSearchers indexSearchers, LocalQueryParams queryParams, @Nullable String keyFieldName, GlobalQueryRewrite transformer) { if (transformer != GlobalQueryRewrite.NO_REWRITE) { - return LuceneUtils.rewriteMulti(this, indexSearchersMono, queryParams, keyFieldName, transformer); + return LuceneUtils.rewriteMulti(this, indexSearchers, queryParams, keyFieldName, transformer); } - return singleOrClose(indexSearchersMono, indexSearchers -> this - // Search results - .search(indexSearchers.shards(), queryParams) - // Compute the results - .transform(fullDocsMono -> this.computeResults(fullDocsMono, indexSearchers, keyFieldName, queryParams)) - // Ensure that one LuceneSearchResult is always returned - .single()); + // Search results + var fullDocs = this.search(indexSearchers.shards(), queryParams); + // Compute the results + return this.computeResults(fullDocs, indexSearchers, keyFieldName, queryParams); } /** * Search effectively the raw results */ - @SuppressWarnings({"unchecked", "rawtypes"}) - private Mono search(Iterable indexSearchers, - LocalQueryParams queryParams) { - return Mono - .fromCallable(() -> { - LLUtils.ensureBlocking(); - var totalHitsThreshold = queryParams.getTotalHitsThresholdInt(); - if (queryParams.isSorted() && !queryParams.isSortedByScore()) { - return TopFieldCollector.createSharedManager(queryParams.sort(), - queryParams.limitInt(), null, totalHitsThreshold); - } else { - return TopScoreDocCollector.createSharedManager(queryParams.limitInt(), null, totalHitsThreshold); - } - }) - .transform(LuceneUtils::scheduleLucene) - .flatMap(sharedManager -> Flux.fromIterable(indexSearchers).flatMapSequential(shard -> Mono.fromCallable(() -> { - LLUtils.ensureBlocking(); - var collector = sharedManager.newCollector(); - assert queryParams.computePreciseHitsCount() == null || 
(queryParams.computePreciseHitsCount() == collector - .scoreMode().isExhaustive()); + @SuppressWarnings({"rawtypes"}) + private TopDocs search(Collection indexSearchers, LocalQueryParams queryParams) { + var totalHitsThreshold = queryParams.getTotalHitsThresholdInt(); + CollectorManager, ? extends TopDocs> sharedManager; + if (queryParams.isSorted() && !queryParams.isSortedByScore()) { + sharedManager = TopFieldCollector.createSharedManager(queryParams.sort(), + queryParams.limitInt(), null, totalHitsThreshold); + } else { + sharedManager = TopScoreDocCollector.createSharedManager(queryParams.limitInt(), null, totalHitsThreshold); + }; + var collectors = indexSearchers.stream().map(shard -> { + try { + TopDocsCollector collector; + collector = sharedManager.newCollector(); + assert queryParams.computePreciseHitsCount() == null || (queryParams.computePreciseHitsCount() == collector + .scoreMode() + .isExhaustive()); - shard.search(queryParams.query(), LuceneUtils.withTimeout(collector, queryParams.timeout())); - return collector; - }).subscribeOn(luceneScheduler())).collectList().flatMap(collectors -> Mono.fromCallable(() -> { - LLUtils.ensureBlocking(); - if (collectors.size() <= 1) { - return sharedManager.reduce((List) collectors); - } else if (queryParams.isSorted() && !queryParams.isSortedByScore()) { - final TopFieldDocs[] topDocs = new TopFieldDocs[collectors.size()]; - int i = 0; - for (var collector : collectors) { - var topFieldDocs = ((TopFieldCollector) collector).topDocs(); - for (ScoreDoc scoreDoc : topFieldDocs.scoreDocs) { - scoreDoc.shardIndex = i; - } - topDocs[i++] = topFieldDocs; - } - return TopDocs.merge(requireNonNull(queryParams.sort()), 0, queryParams.limitInt(), topDocs); - } else { - final TopDocs[] topDocs = new TopDocs[collectors.size()]; - int i = 0; - for (var collector : collectors) { - var topScoreDocs = collector.topDocs(); - for (ScoreDoc scoreDoc : topScoreDocs.scoreDocs) { - scoreDoc.shardIndex = i; - } - topDocs[i++] = topScoreDocs; - } - return TopDocs.merge(0, queryParams.limitInt(), topDocs); + shard.search(queryParams.query(), LuceneUtils.withTimeout(collector, queryParams.timeout())); + return collector; + } catch (IOException e) { + throw new DBException(e); + } + }).toList(); + + try { + if (collectors.size() <= 1) { + //noinspection unchecked + return sharedManager.reduce((List) collectors); + } else if (queryParams.isSorted() && !queryParams.isSortedByScore()) { + final TopFieldDocs[] topDocs = new TopFieldDocs[collectors.size()]; + int i = 0; + for (var collector : collectors) { + var topFieldDocs = ((TopFieldCollector) collector).topDocs(); + for (ScoreDoc scoreDoc : topFieldDocs.scoreDocs) { + scoreDoc.shardIndex = i; } - }).subscribeOn(luceneScheduler()))); + topDocs[i++] = topFieldDocs; + } + return TopDocs.merge(requireNonNull(queryParams.sort()), 0, queryParams.limitInt(), topDocs); + } else { + final TopDocs[] topDocs = new TopDocs[collectors.size()]; + int i = 0; + for (var collector : collectors) { + var topScoreDocs = collector.topDocs(); + for (ScoreDoc scoreDoc : topScoreDocs.scoreDocs) { + scoreDoc.shardIndex = i; + } + topDocs[i++] = topScoreDocs; + } + return TopDocs.merge(0, queryParams.limitInt(), topDocs); + } + } catch (IOException ex) { + throw new DBException(ex); + } } /** * Compute the results, extracting useful data */ - private Mono computeResults(Mono dataMono, + private LuceneSearchResult computeResults(TopDocs data, LLIndexSearchers indexSearchers, String keyFieldName, LocalQueryParams queryParams) { - return 
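The StandardSearcher change above keeps Lucene's standard multi-shard recipe, now synchronous: one collector per shard from a shared manager, shardIndex stamped on every ScoreDoc so merged hits stay attributable to their shard, then TopDocs.merge to interleave the per-shard top hits. A condensed sketch of the unsorted branch, using the same pre-9.7 createSharedManager API as the diff:

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;
    import org.apache.lucene.search.IndexSearcher;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.ScoreDoc;
    import org.apache.lucene.search.TopDocs;
    import org.apache.lucene.search.TopScoreDocCollector;

    final class MergedTopHits {

        static TopDocs searchShards(List<IndexSearcher> shards, Query query, int limit) throws IOException {
            var manager = TopScoreDocCollector.createSharedManager(limit, null, limit);
            List<TopDocs> perShard = new ArrayList<>();
            int shardIndex = 0;
            for (IndexSearcher shard : shards) {
                var collector = manager.newCollector();
                shard.search(query, collector);
                TopDocs topDocs = collector.topDocs();
                for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                    scoreDoc.shardIndex = shardIndex; // record which shard each hit came from
                }
                perShard.add(topDocs);
                shardIndex++;
            }
            return TopDocs.merge(0, limit, perShard.toArray(TopDocs[]::new));
        }
    }
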
dataMono.map(data -> {
-			var totalHitsCount = LuceneUtils.convertTotalHitsCount(data.totalHits);
+		var totalHitsCount = LuceneUtils.convertTotalHitsCount(data.totalHits);
 
-			Flux<LLKeyScore> hitsFlux = LuceneUtils
-					.convertHits(Flux.fromArray(data.scoreDocs),
-							indexSearchers.shards(), keyFieldName, true)
-					.skip(queryParams.offsetLong())
-					.take(queryParams.limitLong(), true);
+		Stream<LLKeyScore> hitsStream = LuceneUtils
+				.convertHits(Stream.of(data.scoreDocs),
+						indexSearchers.shards(), keyFieldName
+				)
+				.skip(queryParams.offsetLong())
+				.limit(queryParams.limitLong());
 
-			return new MyLuceneSearchResult(totalHitsCount, hitsFlux, indexSearchers);
-		});
+		return new MyLuceneSearchResult(totalHitsCount, hitsStream, indexSearchers);
 	}
 
@@ -139,9 +135,9 @@ public class StandardSearcher implements MultiSearcher {
 		private final LLIndexSearchers indexSearchers;
 
 		public MyLuceneSearchResult(TotalHitsCount totalHitsCount,
-				Flux<LLKeyScore> hitsFlux,
+				Stream<LLKeyScore> hitsStream,
 				LLIndexSearchers indexSearchers) {
-			super(totalHitsCount, hitsFlux);
+			super(totalHitsCount, hitsStream);
 			this.indexSearchers = indexSearchers;
 		}
 
diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/TimeoutUtil.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/TimeoutUtil.java
deleted file mode 100644
index e906530..0000000
--- a/src/main/java/it/cavallium/dbengine/lucene/searcher/TimeoutUtil.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package it.cavallium.dbengine.lucene.searcher;
-
-import java.time.Duration;
-import java.util.function.Function;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
-public class TimeoutUtil {
-
-	private static final Duration INFINITE = Duration.ofDays(360);
-
-	public static <T> Function<Mono<T>, Mono<T>> timeoutMono(Duration timeout) {
-		return query -> {
-			if (timeout.isZero() || timeout.isNegative() || timeout.compareTo(INFINITE) > 0) {
-				return query;
-			} else {
-				return query.timeout(timeout);
-			}
-		};
-	}
-
-	public static <T> Function<Flux<T>, Flux<T>> timeoutFlux(Duration timeout) {
-		return query -> {
-			if (timeout.compareTo(INFINITE) > 0) {
-				return query;
-			} else {
-				return query.timeout(timeout);
-			}
-		};
-	}
-}
diff --git a/src/main/java/it/cavallium/dbengine/lucene/searcher/UnsortedStreamingMultiSearcher.java b/src/main/java/it/cavallium/dbengine/lucene/searcher/UnsortedStreamingMultiSearcher.java
index b250537..b004d79 100644
--- a/src/main/java/it/cavallium/dbengine/lucene/searcher/UnsortedStreamingMultiSearcher.java
+++ b/src/main/java/it/cavallium/dbengine/lucene/searcher/UnsortedStreamingMultiSearcher.java
@@ -1,28 +1,21 @@
 package it.cavallium.dbengine.lucene.searcher;
 
-import static it.cavallium.dbengine.client.UninterruptibleScheduler.uninterruptibleScheduler;
-import static it.cavallium.dbengine.database.LLUtils.singleOrClose;
+import static com.google.common.collect.Streams.mapWithIndex;
 
-import io.netty5.util.Send;
 import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
 import it.cavallium.dbengine.database.LLKeyScore;
-import it.cavallium.dbengine.database.LLUtils;
 import it.cavallium.dbengine.database.disk.LLIndexSearchers;
 import it.cavallium.dbengine.lucene.LuceneCloseable;
 import it.cavallium.dbengine.lucene.LuceneUtils;
-import it.cavallium.dbengine.lucene.MaxScoreAccumulator;
 import java.io.IOException;
-import java.io.UncheckedIOException;
 import java.util.List;
-import it.cavallium.dbengine.lucene.hugepq.search.CustomHitsThresholdChecker;
+import java.util.function.Function;
+import java.util.stream.Stream;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.lucene.search.IndexSearcher;
 import org.apache.lucene.search.ScoreDoc;
 import org.jetbrains.annotations.Nullable;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-import reactor.core.scheduler.Schedulers;
 
 public class UnsortedStreamingMultiSearcher implements MultiSearcher {
 
@@ -30,41 +23,35 @@ public class UnsortedStreamingMultiSearcher implements MultiSearcher {
 	protected static final Logger LOG = LogManager.getLogger(UnsortedStreamingMultiSearcher.class);
 
 	@Override
-	public Mono<LuceneSearchResult> collectMulti(Mono<LLIndexSearchers> indexSearchersMono,
+	public LuceneSearchResult collectMulti(LLIndexSearchers indexSearchers,
 			LocalQueryParams queryParams,
 			@Nullable String keyFieldName,
 			GlobalQueryRewrite transformer) {
 		if (transformer != GlobalQueryRewrite.NO_REWRITE) {
-			return LuceneUtils.rewriteMulti(this, indexSearchersMono, queryParams, keyFieldName, transformer);
+			return LuceneUtils.rewriteMulti(this, indexSearchers, queryParams, keyFieldName, transformer);
 		}
 		if (queryParams.isSorted() && queryParams.limitLong() > 0) {
 			throw new UnsupportedOperationException("Sorted queries are not supported"
 					+ " by UnsortedStreamingMultiSearcher");
 		}
 		var localQueryParams = getLocalQueryParams(queryParams);
-		return singleOrClose(indexSearchersMono, indexSearchers -> Mono.fromCallable(() -> {
-			var shards = indexSearchers.shards();
+		var shards = indexSearchers.shards();
 
-			Flux<ScoreDoc> scoreDocsFlux = getScoreDocs(localQueryParams, shards);
+		Stream<ScoreDoc> scoreDocsFlux = getScoreDocs(localQueryParams, shards);
 
-			Flux<LLKeyScore> resultsFlux = LuceneUtils.convertHits(scoreDocsFlux, shards, keyFieldName, false);
+		Stream<LLKeyScore> resultsFlux = LuceneUtils.convertHits(scoreDocsFlux, shards, keyFieldName);
 
-			var totalHitsCount = new TotalHitsCount(0, false);
-			Flux<LLKeyScore> mergedFluxes = resultsFlux.skip(queryParams.offsetLong()).take(queryParams.limitLong(), true);
+		var totalHitsCount = new TotalHitsCount(0, false);
+		Stream<LLKeyScore> mergedFluxes = resultsFlux.skip(queryParams.offsetLong()).limit(queryParams.limitLong());
 
-			return new MyLuceneSearchResult(totalHitsCount, mergedFluxes, indexSearchers);
-		}));
+		return new MyLuceneSearchResult(totalHitsCount, mergedFluxes, indexSearchers);
 	}
 
-	private Flux<ScoreDoc> getScoreDocs(LocalQueryParams localQueryParams, List<IndexSearcher> shards) {
-		return Flux.defer(() -> {
-			var hitsThreshold = CustomHitsThresholdChecker.createShared(localQueryParams.getTotalHitsThresholdLong());
-			MaxScoreAccumulator maxScoreAccumulator = new MaxScoreAccumulator();
-			return Flux.fromIterable(shards).index().flatMap(tuple -> {
-				var shardIndex = (int) (long) tuple.getT1();
-				var shard = tuple.getT2();
-				return LuceneGenerator.reactive(shard, localQueryParams, shardIndex);
-			});
-		});
+	private Stream<ScoreDoc> getScoreDocs(LocalQueryParams localQueryParams, List<IndexSearcher> shards) {
+		return mapWithIndex(shards.stream(),
+				(shard, shardIndex) -> LuceneGenerator.reactive(shard, localQueryParams, (int) shardIndex))
+				.parallel()
+				.flatMap(Function.identity());
 	}
 
 	private LocalQueryParams getLocalQueryParams(LocalQueryParams queryParams) {
@@ -88,7 +75,7 @@ public class UnsortedStreamingMultiSearcher implements MultiSearcher {
 		private final LLIndexSearchers indexSearchers;
 
 		public MyLuceneSearchResult(TotalHitsCount totalHitsCount,
-				Flux<LLKeyScore> hitsFlux,
+				Stream<LLKeyScore> hitsFlux,
 				LLIndexSearchers indexSearchers) {
 			super(totalHitsCount, hitsFlux);
 			this.indexSearchers = indexSearchers;
diff --git a/src/main/java/it/cavallium/dbengine/netty/NettyMetrics.java b/src/main/java/it/cavallium/dbengine/netty/NettyMetrics.java
deleted file mode
100644 index 171ca57..0000000 --- a/src/main/java/it/cavallium/dbengine/netty/NettyMetrics.java +++ /dev/null @@ -1,121 +0,0 @@ -package it.cavallium.dbengine.netty; - -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.core.instrument.Tag; -import io.micrometer.core.instrument.Tags; -import io.micrometer.core.instrument.binder.MeterBinder; -import io.netty5.buffer.BufferAllocator; -import io.netty5.buffer.pool.BufferAllocatorMetric; -import io.netty5.buffer.pool.BufferAllocatorMetricProvider; -import io.netty5.buffer.pool.PoolArenaMetric; -import io.netty5.buffer.pool.PoolChunkListMetric; -import io.netty5.buffer.pool.PoolSubpageMetric; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import org.jetbrains.annotations.NotNull; - -public class NettyMetrics implements MeterBinder { - - private final BufferAllocator allocator; - private final String resourceName; - private final String allocatorName; - private final Map extraTags; - - public NettyMetrics(BufferAllocator allocator, - String resourceName, - String allocatorName, - Map extraTags) { - this.allocator = allocator; - this.resourceName = resourceName; - this.allocatorName = allocatorName; - this.extraTags = extraTags; - } - - @Override - public void bindTo(@NotNull MeterRegistry registry) { - var direct = allocator.getAllocationType().isDirect(); - var pooling = allocator.isPooling(); - List tags = new ArrayList<>(); - tags.add(Tag.of("resource", resourceName)); - tags.add(Tag.of("allocator", allocatorName)); - tags.add(Tag.of("type", direct ? "direct" : "heap")); - tags.add(Tag.of("pooling-mode", pooling ? "pooled" : "unpooled")); - extraTags.forEach((key, value) -> tags.add(Tag.of(key, value))); - - if (allocator instanceof BufferAllocatorMetricProvider metricProvider) { - var metric = metricProvider.metric(); - registry.gauge("netty.num.arenas", tags, metric, BufferAllocatorMetric::numArenas); - int arenaId = 0; - for (PoolArenaMetric arenaMetric : metric.arenaMetrics()) { - var currentArenaId = arenaId; - var arenaTags = new ArrayList<>(tags); - arenaTags.add(Tag.of("arena.offset", String.valueOf(currentArenaId))); - registry.gauge("netty.arena.thread.caches.num", arenaTags, arenaMetric, PoolArenaMetric::numThreadCaches); - registry.gauge("netty.arena.subpage.num", tagsWithSize(arenaTags, "small"), arenaMetric, PoolArenaMetric::numSmallSubpages); - registry.gauge("netty.arena.chunk.lists.num", arenaTags, arenaMetric, PoolArenaMetric::numChunkLists); - registerSubpageMetrics(registry, arenaTags, arenaMetric.smallSubpages(), "small"); - registerPoolChunkMetrics(registry, arenaTags, arenaMetric.chunkLists()); - registry.gauge("netty.arena.allocations.num", tagsWithSize(arenaTags, "small"), arenaMetric, PoolArenaMetric::numSmallAllocations); - registry.gauge("netty.arena.allocations.num", tagsWithSize(arenaTags, "normal"), arenaMetric, PoolArenaMetric::numNormalAllocations); - registry.gauge("netty.arena.allocations.num", tagsWithSize(arenaTags, "huge"), arenaMetric, PoolArenaMetric::numHugeAllocations); - registry.gauge("netty.arena.deallocations.num", tagsWithSize(arenaTags, "small"), arenaMetric, PoolArenaMetric::numSmallDeallocations); - registry.gauge("netty.arena.deallocations.num", tagsWithSize(arenaTags, "normal"), arenaMetric, PoolArenaMetric::numNormalDeallocations); - registry.gauge("netty.arena.deallocations.num", tagsWithSize(arenaTags, "huge"), arenaMetric, PoolArenaMetric::numHugeDeallocations); - registry.gauge("netty.arena.allocations.active.num", 
tagsWithSize(arenaTags, "small"), arenaMetric, PoolArenaMetric::numActiveSmallAllocations); - registry.gauge("netty.arena.allocations.active.num", tagsWithSize(arenaTags, "normal"), arenaMetric, PoolArenaMetric::numActiveNormalAllocations); - registry.gauge("netty.arena.allocations.active.num", tagsWithSize(arenaTags, "huge"), arenaMetric, PoolArenaMetric::numActiveHugeAllocations); - registry.gauge("netty.arena.bytes.active.num", arenaTags, arenaMetric, PoolArenaMetric::numActiveBytes); - - arenaId++; - } - registry.gauge("netty.num.thread.local.caches", tags, metric, BufferAllocatorMetric::numThreadLocalCaches); - registry.gauge("netty.cache.size", tagsWithSize(tags, "small"), metric, BufferAllocatorMetric::smallCacheSize); - registry.gauge("netty.cache.size", tagsWithSize(tags, "normal"), metric, BufferAllocatorMetric::normalCacheSize); - registry.gauge("netty.chunk.size", tags, metric, BufferAllocatorMetric::chunkSize); - registry.gauge("netty.used.memory", tags, metric, BufferAllocatorMetric::usedMemory); - } - } - - private List tagsWithSize(List inputTags, String size) { - var tags = new ArrayList<>(inputTags); - tags.add(Tag.of("data.size", size)); - return tags; - } - - private void registerPoolChunkMetrics(MeterRegistry registry, - ArrayList arenaTags, - List chunkMetricList) { - int chunkId = 0; - for (var chunkMetrics : chunkMetricList) { - var currentChunkId = chunkId; - var chunkTags = new ArrayList<>(arenaTags); - chunkTags.add(Tag.of("chunk.offset", String.valueOf(currentChunkId))); - registry.gauge("netty.chunk.usage.min", chunkTags, chunkMetrics, PoolChunkListMetric::minUsage); - registry.gauge("netty.chunk.usage.max", chunkTags, chunkMetrics, PoolChunkListMetric::maxUsage); - - chunkId++; - } - } - - public void registerSubpageMetrics(MeterRegistry registry, - List arenaTags, - List subpageMetricList, - String subpageType) { - - int subpageId = 0; - for (PoolSubpageMetric subpageMetric : subpageMetricList) { - var currentSubpageId = subpageId; - var subpageTags = new ArrayList<>(arenaTags); - subpageTags.add(Tag.of("subpage.offset", String.valueOf(currentSubpageId))); - subpageTags.add(Tag.of("subpage.type", subpageType)); - registry.gauge("netty.subpage.elements.max.num", subpageTags, subpageMetric, PoolSubpageMetric::maxNumElements); - registry.gauge("netty.subpage.available.num", subpageTags, subpageMetric, PoolSubpageMetric::numAvailable); - registry.gauge("netty.subpage.elements.size", subpageTags, subpageMetric, PoolSubpageMetric::elementSize); - registry.gauge("netty.subpage.page.size", subpageTags, subpageMetric, PoolSubpageMetric::pageSize); - - subpageId++; - } - } - -} diff --git a/src/main/java/it/cavallium/dbengine/utils/BooleanListJsonAdapter.java b/src/main/java/it/cavallium/dbengine/utils/BooleanListJsonAdapter.java index 55b3913..9d5a9aa 100644 --- a/src/main/java/it/cavallium/dbengine/utils/BooleanListJsonAdapter.java +++ b/src/main/java/it/cavallium/dbengine/utils/BooleanListJsonAdapter.java @@ -6,7 +6,6 @@ import com.squareup.moshi.JsonWriter; import it.unimi.dsi.fastutil.booleans.BooleanArrayList; import it.unimi.dsi.fastutil.booleans.BooleanList; import it.unimi.dsi.fastutil.booleans.BooleanLists; -import it.unimi.dsi.fastutil.bytes.ByteArrayList; import java.io.IOException; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -14,7 +13,7 @@ import org.jetbrains.annotations.Nullable; public class BooleanListJsonAdapter extends JsonAdapter { @Override - public @NotNull BooleanList fromJson(@NotNull JsonReader 
reader) throws IOException {
+	public @NotNull BooleanList fromJson(@NotNull JsonReader reader) {
 		reader.beginArray();
 		BooleanArrayList modifiableOutput = new BooleanArrayList();
 		while (reader.hasNext()) {
@@ -25,7 +24,7 @@
 	}
 
 	@Override
-	public void toJson(@NotNull JsonWriter writer, @Nullable BooleanList value) throws IOException {
+	public void toJson(@NotNull JsonWriter writer, @Nullable BooleanList value) {
 		if (value == null) {
 			writer.nullValue();
 			return;
diff --git a/src/main/java/it/cavallium/dbengine/utils/ByteListJsonAdapter.java b/src/main/java/it/cavallium/dbengine/utils/ByteListJsonAdapter.java
index 134b981..8f5a820 100644
--- a/src/main/java/it/cavallium/dbengine/utils/ByteListJsonAdapter.java
+++ b/src/main/java/it/cavallium/dbengine/utils/ByteListJsonAdapter.java
@@ -3,28 +3,26 @@ package it.cavallium.dbengine.utils;
 import com.squareup.moshi.JsonAdapter;
 import com.squareup.moshi.JsonReader;
 import com.squareup.moshi.JsonWriter;
-import it.unimi.dsi.fastutil.bytes.ByteArrayList;
-import it.unimi.dsi.fastutil.bytes.ByteList;
-import it.unimi.dsi.fastutil.bytes.ByteLists;
+import it.cavallium.dbengine.buffers.Buf;
 import java.io.IOException;
 import org.jetbrains.annotations.NotNull;
 import org.jetbrains.annotations.Nullable;
 
-public class ByteListJsonAdapter extends JsonAdapter<ByteList> {
+public class ByteListJsonAdapter extends JsonAdapter<Buf> {
 
 	@Override
-	public @NotNull ByteList fromJson(@NotNull JsonReader reader) throws IOException {
+	public @NotNull Buf fromJson(@NotNull JsonReader reader) {
 		reader.beginArray();
-		ByteArrayList modifiableOutput = new ByteArrayList();
+		var modifiableOutput = Buf.create();
 		while (reader.hasNext()) {
 			modifiableOutput.add((byte) reader.nextInt());
 		}
 		reader.endArray();
-		return ByteLists.unmodifiable(modifiableOutput);
+		return modifiableOutput;
 	}
 
 	@Override
-	public void toJson(@NotNull JsonWriter writer, @Nullable ByteList value) throws IOException {
+	public void toJson(@NotNull JsonWriter writer, @Nullable Buf value) {
 		if (value == null) {
 			writer.nullValue();
 			return;
diff --git a/src/main/java/it/cavallium/dbengine/utils/CharListJsonAdapter.java b/src/main/java/it/cavallium/dbengine/utils/CharListJsonAdapter.java
index 56ad070..13f24d6 100644
--- a/src/main/java/it/cavallium/dbengine/utils/CharListJsonAdapter.java
+++ b/src/main/java/it/cavallium/dbengine/utils/CharListJsonAdapter.java
@@ -13,7 +13,7 @@ import org.jetbrains.annotations.Nullable;
 public class CharListJsonAdapter extends JsonAdapter<CharList> {
 
 	@Override
-	public @NotNull CharList fromJson(@NotNull JsonReader reader) throws IOException {
+	public @NotNull CharList fromJson(@NotNull JsonReader reader) {
 		reader.beginArray();
 		CharArrayList modifiableOutput = new CharArrayList();
 		while (reader.hasNext()) {
@@ -24,7 +24,7 @@
 	}
 
 	@Override
-	public void toJson(@NotNull JsonWriter writer, @Nullable CharList value) throws IOException {
+	public void toJson(@NotNull JsonWriter writer, @Nullable CharList value) {
 		if (value == null) {
 			writer.nullValue();
 			return;
diff --git a/src/main/java/it/cavallium/dbengine/utils/DBException.java b/src/main/java/it/cavallium/dbengine/utils/DBException.java
new file mode 100644
index 0000000..1e3d2d1
--- /dev/null
+++ b/src/main/java/it/cavallium/dbengine/utils/DBException.java
@@ -0,0 +1,22 @@
+package it.cavallium.dbengine.utils;
+
+import java.io.IOException;
+
+public class DBException extends RuntimeException {
+
+	public DBException(String message) {
+		super(message);
+	}
+
+	public DBException(String message, Exception cause) {
+		super(message, cause);
+	}
+
+	public DBException(Exception cause) {
+		super(cause);
+	}
+
+	public DBException() {
+		super();
+	}
+}
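
This unchecked wrapper is what lets the patch drop `throws IOException` clauses elsewhere; the recurring call-site pattern, sketched with illustrative names:

    // Illustrative only: wrap a checked I/O failure in the new unchecked DBException,
    // as done in StandardSearcher.search above.
    try {
        shard.search(query, collector);
    } catch (IOException e) {
        throw new DBException(e);
    }
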
diff --git a/src/main/java/it/cavallium/dbengine/utils/IntListJsonAdapter.java b/src/main/java/it/cavallium/dbengine/utils/IntListJsonAdapter.java
index dfd6933..b77600e 100644
--- a/src/main/java/it/cavallium/dbengine/utils/IntListJsonAdapter.java
+++ b/src/main/java/it/cavallium/dbengine/utils/IntListJsonAdapter.java
@@ -13,7 +13,7 @@ import org.jetbrains.annotations.Nullable;
 public class IntListJsonAdapter extends JsonAdapter<IntList> {
 
 	@Override
-	public @NotNull IntList fromJson(@NotNull JsonReader reader) throws IOException {
+	public @NotNull IntList fromJson(@NotNull JsonReader reader) {
 		reader.beginArray();
 		IntArrayList modifiableOutput = new IntArrayList();
 		while (reader.hasNext()) {
@@ -24,7 +24,7 @@
 	}
 
 	@Override
-	public void toJson(@NotNull JsonWriter writer, @Nullable IntList value) throws IOException {
+	public void toJson(@NotNull JsonWriter writer, @Nullable IntList value) {
 		if (value == null) {
 			writer.nullValue();
 			return;
diff --git a/src/main/java/it/cavallium/dbengine/utils/InternalMonoUtils.java b/src/main/java/it/cavallium/dbengine/utils/InternalMonoUtils.java
deleted file mode 100644
index a828afb..0000000
--- a/src/main/java/it/cavallium/dbengine/utils/InternalMonoUtils.java
+++ /dev/null
@@ -1,33 +0,0 @@
-package it.cavallium.dbengine.utils;
-
-import org.reactivestreams.Publisher;
-import reactor.core.publisher.Flux;
-import reactor.core.publisher.Mono;
-
-public class InternalMonoUtils {
-
-
-	@SuppressWarnings("unchecked")
-	public static Mono toAny(Mono request) {
-		return (Mono) request;
-	}
-
-	@SuppressWarnings("unchecked")
-	public static Mono toAny(Flux request) {
-		return (Mono) Mono.ignoreElements(request);
-	}
-
-	@SuppressWarnings("unchecked")
-	public static Mono toAny(Publisher request) {
-		if (request instanceof Mono mono) {
-			return (Mono) mono;
-		} else {
-			return (Mono) Mono.ignoreElements(request);
-		}
-	}
-
-	@SuppressWarnings("unchecked")
-	public static Mono ignoreElements(Publisher flux) {
-		return (Mono) Mono.ignoreElements(flux);
-	}
-}
diff --git a/src/main/java/it/cavallium/dbengine/utils/LongListJsonAdapter.java b/src/main/java/it/cavallium/dbengine/utils/LongListJsonAdapter.java
index cd2f8f2..c18570b 100644
--- a/src/main/java/it/cavallium/dbengine/utils/LongListJsonAdapter.java
+++ b/src/main/java/it/cavallium/dbengine/utils/LongListJsonAdapter.java
@@ -13,7 +13,7 @@ import org.jetbrains.annotations.Nullable;
 public class LongListJsonAdapter extends JsonAdapter<LongList> {
 
 	@Override
-	public @NotNull LongList fromJson(@NotNull JsonReader reader) throws IOException {
+	public @NotNull LongList fromJson(@NotNull JsonReader reader) {
 		reader.beginArray();
 		LongArrayList modifiableOutput = new LongArrayList();
 		while (reader.hasNext()) {
@@ -24,7 +24,7 @@
 	}
 
 	@Override
-	public void toJson(@NotNull JsonWriter writer, @Nullable LongList value) throws IOException {
+	public void toJson(@NotNull JsonWriter writer, @Nullable LongList value) {
 		if (value == null) {
 			writer.nullValue();
 			return;
diff --git a/src/main/java/it/cavallium/dbengine/utils/MoshiPolymorphic.java b/src/main/java/it/cavallium/dbengine/utils/MoshiPolymorphic.java
index 2c67e0c..9318468 100644
--- a/src/main/java/it/cavallium/dbengine/utils/MoshiPolymorphic.java
+++
b/src/main/java/it/cavallium/dbengine/utils/MoshiPolymorphic.java @@ -132,7 +132,7 @@ public abstract class MoshiPolymorphic { @Nullable @Override - public T fromJson(@NotNull JsonReader jsonReader) throws IOException { + public T fromJson(@NotNull JsonReader jsonReader) { String type = null; jsonReader.beginObject(); @@ -165,7 +165,7 @@ public abstract class MoshiPolymorphic { } @Override - public void toJson(@NotNull JsonWriter jsonWriter, @Nullable T t) throws IOException { + public void toJson(@NotNull JsonWriter jsonWriter, @Nullable T t) { if (t == null) { jsonWriter.nullValue(); } else { @@ -259,7 +259,7 @@ public abstract class MoshiPolymorphic { @Nullable @Override - public T fromJson(@NotNull JsonReader jsonReader) throws IOException { + public T fromJson(@NotNull JsonReader jsonReader) { try { Object instance; Object[] fields; @@ -304,7 +304,7 @@ public abstract class MoshiPolymorphic { } @Override - public void toJson(@NotNull JsonWriter jsonWriter, @Nullable T t) throws IOException { + public void toJson(@NotNull JsonWriter jsonWriter, @Nullable T t) { if (t == null) { jsonWriter.nullValue(); } else { @@ -339,7 +339,7 @@ public abstract class MoshiPolymorphic { @Nullable @Override - public List fromJson(@NotNull JsonReader jsonReader) throws IOException { + public List fromJson(@NotNull JsonReader jsonReader) { jsonReader.beginArray(); var result = new ArrayList(); while (jsonReader.hasNext()) { @@ -350,7 +350,7 @@ public abstract class MoshiPolymorphic { } @Override - public void toJson(@NotNull JsonWriter jsonWriter, @Nullable List ts) throws IOException { + public void toJson(@NotNull JsonWriter jsonWriter, @Nullable List ts) { if (ts == null) { jsonWriter.nullValue(); } else { diff --git a/src/main/java/it/cavallium/dbengine/utils/ShortListJsonAdapter.java b/src/main/java/it/cavallium/dbengine/utils/ShortListJsonAdapter.java index a0f30f3..cd5b12e 100644 --- a/src/main/java/it/cavallium/dbengine/utils/ShortListJsonAdapter.java +++ b/src/main/java/it/cavallium/dbengine/utils/ShortListJsonAdapter.java @@ -13,7 +13,7 @@ import org.jetbrains.annotations.Nullable; public class ShortListJsonAdapter extends JsonAdapter { @Override - public @NotNull ShortList fromJson(@NotNull JsonReader reader) throws IOException { + public @NotNull ShortList fromJson(@NotNull JsonReader reader) { reader.beginArray(); ShortArrayList modifiableOutput = new ShortArrayList(); while (reader.hasNext()) { @@ -24,7 +24,7 @@ public class ShortListJsonAdapter extends JsonAdapter { } @Override - public void toJson(@NotNull JsonWriter writer, @Nullable ShortList value) throws IOException { + public void toJson(@NotNull JsonWriter writer, @Nullable ShortList value) { if (value == null) { writer.nullValue(); return; diff --git a/src/main/java/it/cavallium/dbengine/utils/SimpleResource.java b/src/main/java/it/cavallium/dbengine/utils/SimpleResource.java index 7638074..86a60b1 100644 --- a/src/main/java/it/cavallium/dbengine/utils/SimpleResource.java +++ b/src/main/java/it/cavallium/dbengine/utils/SimpleResource.java @@ -1,6 +1,5 @@ package it.cavallium.dbengine.utils; -import it.cavallium.dbengine.MetricUtils; import it.cavallium.dbengine.database.SafeCloseable; import java.lang.ref.Cleaner; import java.util.concurrent.atomic.AtomicBoolean; diff --git a/src/main/java/it/cavallium/dbengine/utils/StreamUtils.java b/src/main/java/it/cavallium/dbengine/utils/StreamUtils.java new file mode 100644 index 0000000..ac5b2b8 --- /dev/null +++ b/src/main/java/it/cavallium/dbengine/utils/StreamUtils.java @@ -0,0 +1,88 @@ 
+package it.cavallium.dbengine.utils;
+
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Streams;
+import java.util.ArrayList;
+import java.util.Comparator;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Spliterator;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+import java.util.stream.StreamSupport;
+
+public class StreamUtils {
+
+	@SafeVarargs
+	@SuppressWarnings("UnstableApiUsage")
+	public static <T> Stream<T> mergeComparing(Comparator<? super T> comparator, Stream<T>... streams) {
+		List<Iterator<T>> iterators = new ArrayList<>(streams.length);
+		for (Stream<T> stream : streams) {
+			var it = stream.iterator();
+			if (it.hasNext()) {
+				iterators.add(it);
+			}
+		}
+
+		Stream<T> resultStream;
+
+		if (iterators.isEmpty()) {
+			resultStream = Stream.empty();
+		} else if (iterators.size() == 1) {
+			resultStream = Streams.stream(iterators.get(0));
+		} else {
+			resultStream = Streams.stream(Iterators.mergeSorted(iterators, comparator));
+		}
+
+		return resultStream.onClose(() -> {
+			for (Stream<T> stream : streams) {
+				stream.close();
+			}
+		});
+	}
+
+	public static <T> Stream<List<T>> batches(Stream<T> stream, int batchSize) {
+		return batchSize <= 0
+				? Stream.of(stream.collect(Collectors.toList()))
+				: StreamSupport.stream(new BatchSpliterator<>(stream.spliterator(), batchSize), stream.isParallel());
+	}
+
+	private record BatchSpliterator<T>(Spliterator<T> base, int batchSize) implements Spliterator<List<T>> {
+
+		@Override
+		public boolean tryAdvance(Consumer<? super List<T>> action) {
+			final List<T> batch = new ArrayList<>(batchSize);
+			//noinspection StatementWithEmptyBody
+			for (int i = 0; i < batchSize && base.tryAdvance(batch::add); i++) {
+
+			}
+			if (batch.isEmpty()) {
+				return false;
+			}
+			action.accept(batch);
+			return true;
+		}
+
+		@Override
+		public Spliterator<List<T>> trySplit() {
+			if (base.estimateSize() <= batchSize) {
+				return null;
+			}
+			final Spliterator<T> splitBase = this.base.trySplit();
+			return splitBase == null ? null : new BatchSpliterator<>(splitBase, batchSize);
+		}
+
+		@Override
+		public long estimateSize() {
+			final double baseSize = base.estimateSize();
+			return baseSize == 0 ? 0 : (long) Math.ceil(baseSize / (double) batchSize);
+		}
+
+		@Override
+		public int characteristics() {
+			return base.characteristics();
+		}
+
+	}
+}
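
For orientation, a usage sketch of the two helpers above (values are illustrative; mergeComparing assumes each input stream is already sorted by the given comparator):

    // Illustrative only, not part of the patch.
    Stream<Integer> merged = StreamUtils.mergeComparing(Comparator.naturalOrder(),
        Stream.of(1, 3, 5), Stream.of(2, 4, 6)); // yields 1, 2, 3, 4, 5, 6
    Stream<List<Integer>> batched = StreamUtils.batches(Stream.of(1, 2, 3, 4, 5), 2); // yields [1, 2], [3, 4], [5]
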
diff --git a/src/main/java/it/cavallium/dbengine/utils/UTFUtils.java b/src/main/java/it/cavallium/dbengine/utils/UTFUtils.java
index 999fb3b..f3e919e 100644
--- a/src/main/java/it/cavallium/dbengine/utils/UTFUtils.java
+++ b/src/main/java/it/cavallium/dbengine/utils/UTFUtils.java
@@ -6,13 +6,13 @@ import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 
 public class UTFUtils {
-	public static void writeUTF(DataOutput out, String utf) throws IOException {
+	public static void writeUTF(DataOutput out, String utf) {
 		byte[] bytes = utf.getBytes(StandardCharsets.UTF_8);
 		out.writeInt(bytes.length);
 		out.write(bytes);
 	}
 
-	public static String readUTF(DataInput in) throws IOException {
+	public static String readUTF(DataInput in) {
 		int len = in.readInt();
 		byte[] data = new byte[len];
 		in.readFully(data, 0, len);
diff --git a/src/main/java/module-info.java b/src/main/java/module-info.java
index 5c346b3..409eeee 100644
--- a/src/main/java/module-info.java
+++ b/src/main/java/module-info.java
@@ -1,5 +1,5 @@
 module dbengine {
-	uses io.netty5.buffer.pool.BufferAllocatorMetricProvider;
+	exports org.warp.commonutils.stream;
 	exports it.cavallium.dbengine.lucene;
 	exports it.cavallium.dbengine.database;
 	exports it.cavallium.dbengine.rpc.current.data;
@@ -15,21 +15,16 @@
 	exports it.cavallium.dbengine.lucene.analyzer;
 	exports it.cavallium.dbengine.client.query;
 	exports it.cavallium.dbengine.database.memory;
-	exports it.cavallium.dbengine.netty;
 	opens it.cavallium.dbengine.database.remote;
-	exports it.cavallium.dbengine;
 	exports it.cavallium.dbengine.utils;
 	exports it.cavallium.dbengine.database.disk.rocksdb;
+	exports it.cavallium.dbengine.buffers;
 	requires org.jetbrains.annotations;
-	requires reactor.core;
 	requires com.google.common;
 	requires micrometer.core;
-	requires io.netty5.buffer;
 	requires rocksdbjni;
-	requires org.reactivestreams;
 	requires org.apache.logging.log4j;
 	requires static io.soabase.recordbuilder.core;
-	requires io.netty5.common;
 	requires it.unimi.dsi.fastutil;
 	requires data.generator.runtime;
 	requires java.logging;
@@ -46,14 +41,11 @@
 	requires io.netty.codec;
 	requires org.apache.lucene.facet;
 	requires java.management;
-	requires reactor.netty.core;
 	requires com.ibm.icu;
 	requires org.apache.lucene.analysis.icu;
 	requires io.netty.handler;
-	requires io.netty.incubator.codec.classes.quic;
 	requires io.netty.common;
 	requires org.apache.lucene.queryparser;
-	requires reactor.netty.incubator.quic;
 	requires okio;
 	requires moshi.records.reflect;
 	requires moshi;
diff --git a/src/main/java/org/warp/commonutils/stream/ByteBufferBackedInputStream.java b/src/main/java/org/warp/commonutils/stream/ByteBufferBackedInputStream.java
new file mode 100644
index 0000000..3efa9ae
--- /dev/null
+++ b/src/main/java/org/warp/commonutils/stream/ByteBufferBackedInputStream.java
@@ -0,0 +1,29 @@
+package org.warp.commonutils.stream;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.ByteBuffer;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * Simple {@link InputStream} implementation that exposes currently
+ * available content of a {@link ByteBuffer}.
+ */
+public class ByteBufferBackedInputStream extends InputStream {
+	protected final ByteBuffer _b;
+
+	public ByteBufferBackedInputStream(ByteBuffer buf) { _b = buf; }
+
+	@Override public int available() { return _b.remaining(); }
+
+	@Override
+	public int read() { return _b.hasRemaining() ? (_b.get() & 0xFF) : -1; }
+
+	@Override
+	public int read(byte @NotNull [] bytes, int off, int len) {
+		if (!_b.hasRemaining()) return -1;
+		len = Math.min(len, _b.remaining());
+		_b.get(bytes, off, len);
+		return len;
+	}
+}
\ No newline at end of file
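
For orientation, an assumed usage of the buffer-backed stream above (reads advance the wrapped buffer's position; illustrative only):

    // Illustrative only, not part of the patch.
    ByteBuffer buffer = ByteBuffer.wrap(new byte[] {1, 2, 3});
    var in = new ByteBufferBackedInputStream(buffer);
    int first = in.read();     // 1
    int left = in.available(); // 2, mirrors buffer.remaining()
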
diff --git a/src/main/java/org/warp/commonutils/stream/DataInputOutput.java b/src/main/java/org/warp/commonutils/stream/DataInputOutput.java
new file mode 100644
index 0000000..7435511
--- /dev/null
+++ b/src/main/java/org/warp/commonutils/stream/DataInputOutput.java
@@ -0,0 +1,11 @@
+package org.warp.commonutils.stream;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+
+public interface DataInputOutput extends DataInput, DataOutput {
+
+	DataInput getIn();
+
+	DataOutput getOut();
+}
diff --git a/src/main/java/org/warp/commonutils/stream/DataInputOutputImpl.java b/src/main/java/org/warp/commonutils/stream/DataInputOutputImpl.java
new file mode 100644
index 0000000..b45e564
--- /dev/null
+++ b/src/main/java/org/warp/commonutils/stream/DataInputOutputImpl.java
@@ -0,0 +1,173 @@
+package org.warp.commonutils.stream;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import org.jetbrains.annotations.NotNull;
+
+public class DataInputOutputImpl implements DataInputOutput {
+
+	private final DataInput in;
+	private final DataOutput out;
+
+	public DataInputOutputImpl(DataInput in, DataOutput out) {
+		this.in = in;
+		this.out = out;
+	}
+
+	@Override
+	public DataInput getIn() {
+		return this;
+	}
+
+	@Override
+	public DataOutput getOut() {
+		return this;
+	}
+
+	@Override
+	public void readFully(byte @NotNull [] bytes) {
+		in.readFully(bytes);
+	}
+
+	@Override
+	public void readFully(byte @NotNull [] bytes, int i, int i1) {
+		in.readFully(bytes, i, i1);
+	}
+
+	@Override
+	public int skipBytes(int i) {
+		return in.skipBytes(i);
+	}
+
+	@Override
+	public boolean readBoolean() {
+		return in.readBoolean();
+	}
+
+	@Override
+	public byte readByte() {
+		return in.readByte();
+	}
+
+	@Override
+	public int readUnsignedByte() {
+		return in.readUnsignedByte();
+	}
+
+	@Override
+	public short readShort() {
+		return in.readShort();
+	}
+
+	@Override
+	public int readUnsignedShort() {
+		return in.readUnsignedShort();
+	}
+
+	@Override
+	public char readChar() {
+		return in.readChar();
+	}
+
+	@Override
+	public int readInt() {
+		return in.readInt();
+	}
+
+	@Override
+	public long readLong() {
+		return in.readLong();
+	}
+
+	@Override
+	public float readFloat() {
+		return in.readFloat();
+	}
+
+	@Override
+	public double readDouble() {
+		return in.readDouble();
+	}
+
+	@Override
+	public String readLine() {
+		return in.readLine();
+	}
+
+	@NotNull
+	@Override
+	public String readUTF() {
+		return in.readUTF();
+	}
+
+	@Override
+	public void write(int i) {
+		out.write(i);
+	}
+
+	@Override
+	public void write(byte @NotNull [] bytes) {
+		out.write(bytes);
+	}
+
+	@Override
+	public void write(byte @NotNull [] bytes, int i, int i1) {
+		out.write(bytes, i, i1);
+	}
+
+	@Override
+	public void writeBoolean(boolean b) {
+		out.writeBoolean(b);
+	}
+
+	@Override
+	public void writeByte(int i) {
+		out.writeByte(i);
+	}
+
+	@Override
+	public void writeShort(int i) {
+		out.writeShort(i);
+	}
+
+	@Override
+	public void
writeChar(int i) { + out.writeChar(i); + } + + @Override + public void writeInt(int i) { + out.writeInt(i); + } + + @Override + public void writeLong(long l) { + out.writeLong(l); + } + + @Override + public void writeFloat(float v) { + out.writeFloat(v); + } + + @Override + public void writeDouble(double v) { + out.writeDouble(v); + } + + @Override + public void writeBytes(@NotNull String s) { + out.writeBytes(s); + } + + @Override + public void writeChars(@NotNull String s) { + out.writeChars(s); + } + + @Override + public void writeUTF(@NotNull String s) { + out.writeUTF(s); + } +} diff --git a/src/main/java/org/warp/commonutils/stream/DataInputOutputStream.java b/src/main/java/org/warp/commonutils/stream/DataInputOutputStream.java new file mode 100644 index 0000000..4895ff7 --- /dev/null +++ b/src/main/java/org/warp/commonutils/stream/DataInputOutputStream.java @@ -0,0 +1,103 @@ +package org.warp.commonutils.stream; + +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import org.jetbrains.annotations.NotNull; + +public class DataInputOutputStream extends DataOutputStream implements DataInputOutput { + + private final DataInputStream in; + + public DataInputOutputStream(DataInputStream in, DataOutputStream out) { + super(out); + this.in = in; + } + + @Override + public DataInputStream getIn() { + return in; + } + + @Override + public DataOutputStream getOut() { + return this; + } + + @Override + public void readFully(byte @NotNull [] bytes) { + in.readFully(bytes); + } + + @Override + public void readFully(byte @NotNull [] bytes, int i, int i1) { + in.readFully(bytes, i, i1); + } + + @Override + public int skipBytes(int i) { + return in.skipBytes(i); + } + + @Override + public boolean readBoolean() { + return in.readBoolean(); + } + + @Override + public byte readByte() { + return in.readByte(); + } + + @Override + public int readUnsignedByte() { + return in.readUnsignedByte(); + } + + @Override + public short readShort() { + return in.readShort(); + } + + @Override + public int readUnsignedShort() { + return in.readUnsignedShort(); + } + + @Override + public char readChar() { + return in.readChar(); + } + + @Override + public int readInt() { + return in.readInt(); + } + + @Override + public long readLong() { + return in.readLong(); + } + + @Override + public float readFloat() { + return in.readFloat(); + } + + @Override + public double readDouble() { + return in.readDouble(); + } + + @Deprecated + @Override + public String readLine() { + return in.readLine(); + } + + @NotNull + @Override + public String readUTF() { + return in.readUTF(); + } +} diff --git a/src/main/java/org/warp/commonutils/stream/SafeByteArrayInputStream.java b/src/main/java/org/warp/commonutils/stream/SafeByteArrayInputStream.java new file mode 100644 index 0000000..b2975dd --- /dev/null +++ b/src/main/java/org/warp/commonutils/stream/SafeByteArrayInputStream.java @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2005-2022 Sebastiano Vigna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.warp.commonutils.stream; + +/** Simple, fast and repositionable byte-array input stream. + * + *
Warning: this class implements the correct semantics + * of {@link #read(byte[], int, int)} as described in {@link java.io.InputStream}. + * The implementation given in {@link java.io.ByteArrayInputStream} is broken, + * but it will never be fixed because it's too late. + * + * @author Sebastiano Vigna + */ + +public class SafeByteArrayInputStream extends SafeMeasurableInputStream implements SafeRepositionableStream { + + /** The array backing the input stream. */ + public byte[] array; + + /** The first valid entry. */ + public int offset; + + /** The number of valid bytes in {@link #array} starting from {@link #offset}. */ + public int length; + + /** The current position as a distance from {@link #offset}. */ + private int position; + + /** The current mark as a position, or -1 if no mark exists. */ + private int mark; + + /** Creates a new array input stream using a given array fragment. + * + * @param array the backing array. + * @param offset the first valid entry of the array. + * @param length the number of valid bytes. + */ + public SafeByteArrayInputStream(final byte[] array, final int offset, final int length) { + this.array = array; + this.offset = offset; + this.length = length; + } + + /** Creates a new array input stream using a given array. + * + * @param array the backing array. + */ + public SafeByteArrayInputStream(final byte[] array) { + this(array, 0, array.length); + } + + @Override + public boolean markSupported() { + return true; + } + + @Override + public void reset() { + position = mark; + } + + /** Closing a fast byte array input stream has no effect. */ + @Override + public void close() {} + + @Override + public void mark(final int dummy) { + mark = position; + } + + @Override + public int available() { + return length - position; + } + + @Override + public long skip(long n) { + if (n <= length - position) { + position += (int)n; + return n; + } + n = length - position; + position = length; + return n; + } + + @Override + public int read() { + if (length == position) return -1; + return array[offset + position++] & 0xFF; + } + + /** Reads bytes from this byte-array input stream as + * specified in {@link java.io.InputStream#read(byte[], int, int)}. + * Note that the implementation given in {@link java.io.ByteArrayInputStream#read(byte[], int, int)} + * will return -1 on a zero-length read at EOF, contrarily to the specification. We won't. + */ + + @Override + public int read(final byte b[], final int offset, final int length) { + if (this.length == this.position) return length == 0 ? 0 : -1; + final int n = Math.min(length, this.length - this.position); + System.arraycopy(array, this.offset + this.position, b, offset, n); + this.position += n; + return n; + } + + @Override + public long position() { + return position; + } + + @Override + public void position(final long newPosition) { + position = (int)Math.min(newPosition, length); + } + + @Override + public long length() { + return length; + } +} diff --git a/src/main/java/org/warp/commonutils/stream/SafeByteArrayOutputStream.java b/src/main/java/org/warp/commonutils/stream/SafeByteArrayOutputStream.java new file mode 100644 index 0000000..760f709 --- /dev/null +++ b/src/main/java/org/warp/commonutils/stream/SafeByteArrayOutputStream.java @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2005-2022 Sebastiano Vigna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.warp.commonutils.stream; + +import it.unimi.dsi.fastutil.Arrays; +import it.unimi.dsi.fastutil.bytes.ByteArrays; + +/** Simple, fast byte-array output stream that exposes the backing array. + * + *
{@link java.io.ByteArrayOutputStream} is nice, but to get its content you + * must generate each time a new object. This doesn't happen here. + * + *
This class will automatically enlarge the backing array, doubling its + * size whenever new space is needed. The {@link #reset()} method will + * mark the content as empty, but will not decrease the capacity: use + * {@link #trim()} for that purpose. + * + * @author Sebastiano Vigna + */ + +public class SafeByteArrayOutputStream extends SafeMeasurableOutputStream implements SafeRepositionableStream { + + /** The array backing the output stream. */ + public static final int DEFAULT_INITIAL_CAPACITY = 16; + private final boolean wrapped; + private final int initialPosition; + private final int initialLength; + + /** The array backing the output stream. */ + public byte[] array; + + /** The number of valid bytes in {@link #array}. */ + public int length; + + /** The current writing position. */ + private int position; + + /** Creates a new array output stream with an initial capacity of {@link #DEFAULT_INITIAL_CAPACITY} bytes. */ + public SafeByteArrayOutputStream() { + this(DEFAULT_INITIAL_CAPACITY); + } + + /** Creates a new array output stream with a given initial capacity. + * + * @param initialCapacity the initial length of the backing array. + */ + public SafeByteArrayOutputStream(final int initialCapacity) { + array = new byte[initialCapacity]; + wrapped = false; + initialPosition = 0; + initialLength = 0; + } + + /** Creates a new array output stream wrapping a given byte array. + * + * @param a the byte array to wrap. + */ + public SafeByteArrayOutputStream(final byte[] a) { + array = a; + wrapped = true; + initialPosition = 0; + initialLength = a.length; + } + + /** Creates a new array output stream wrapping a given byte array. + * + * @param a the byte array to wrap. + */ + public SafeByteArrayOutputStream(final byte[] a, int from, int to) { + Arrays.ensureFromTo(a.length, from, to); + wrapped = true; + array = a; + initialPosition = from; + initialLength = to; + position = from; + length = to - from; + } + + /** Marks this array output stream as empty. */ + public void reset() { + length = initialLength; + position = initialPosition; + } + + /** Ensures that the length of the backing array is equal to {@link #length}. 
*/ + public void trim() { + if (!wrapped) { + array = ByteArrays.trim(array, length); + } + } + + public void ensureWritable(int size) { + growBy(size); + } + + @Override + public void write(final int b) { + if (position >= array.length) { + if (wrapped) { + throw new ArrayIndexOutOfBoundsException(position); + } else { + array = ByteArrays.grow(array, position + 1, length); + } + } + array[position++] = (byte)b; + if (length < position) length = position; + } + + @Override + public void write(final byte[] b, final int off, final int len) { + ByteArrays.ensureOffsetLength(b, off, len); + growBy(len); + System.arraycopy(b, off, array, position, len); + if (position + len > length) length = position += len; + } + + private void growBy(int len) { + if (position + len > array.length) { + if (wrapped) { + throw new ArrayIndexOutOfBoundsException(position + len - 1); + } else { + array = ByteArrays.grow(array, position + len, position); + } + } + } + + @Override + public void position(final long newPosition) { + position = (int)newPosition; + } + + @Override + public long position() { + return position; + } + + @Override + public long length() { + return length; + } +} diff --git a/src/main/java/org/warp/commonutils/stream/SafeDataInputStream.java b/src/main/java/org/warp/commonutils/stream/SafeDataInputStream.java new file mode 100644 index 0000000..c5bd5e2 --- /dev/null +++ b/src/main/java/org/warp/commonutils/stream/SafeDataInputStream.java @@ -0,0 +1,580 @@ +/* + * Copyright (c) 1994, 2019, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package org.warp.commonutils.stream; + +import java.io.DataInput; +import org.jetbrains.annotations.NotNull; + +/** + * A data input stream lets an application read primitive Java data + * types from an underlying input stream in a machine-independent + * way. An application uses a data output stream to write data that + * can later be read by a data input stream. + *
+ * DataInputStream is not necessarily safe for multithreaded access. + * Thread safety is optional and is the responsibility of users of + * methods in this class. + * + * @author Arthur van Hoff + * @see java.io.DataOutputStream + * @since 1.0 + */ +public class SafeDataInputStream extends SafeFilterInputStream implements DataInput { + + /** + * Creates a DataInputStream that uses the specified + * underlying InputStream. + * + * @param in the specified input stream + */ + public SafeDataInputStream(SafeInputStream in) { + super(in); + } + + /** + * working arrays initialized on demand by readUTF + */ + private byte[] bytearr = new byte[80]; + private char[] chararr = new char[80]; + + /** + * Reads some number of bytes from the contained input stream and + * stores them into the buffer array {@code b}. The number of + * bytes actually read is returned as an integer. This method blocks + * until input data is available, end of file is detected, or an + * exception is thrown. + * + *
If {@code b} is null, a {@code NullPointerException} is + * thrown. If the length of {@code b} is zero, then no bytes are + * read and {@code 0} is returned; otherwise, there is an attempt + * to read at least one byte. If no byte is available because the + * stream is at end of file, the value {@code -1} is returned; + * otherwise, at least one byte is read and stored into {@code b}. + * + *
The first byte read is stored into element {@code b[0]}, the + * next one into {@code b[1]}, and so on. The number of bytes read + * is, at most, equal to the length of {@code b}. Let {@code k} + * be the number of bytes actually read; these bytes will be stored in + * elements {@code b[0]} through {@code b[k-1]}, leaving + * elements {@code b[k]} through {@code b[b.length-1]} + * unaffected. + * + *
The {@code read(b)} method has the same effect as: + *
+	 * read(b, 0, b.length)
+	 * 
+ * + * @param b the buffer into which the data is read. + * @return the total number of bytes read into the buffer, or + * {@code -1} if there is no more data because the end + * of the stream has been reached. + * @see SafeFilterInputStream#in + * @see java.io.InputStream#read(byte[], int, int) + */ + public final int read(byte[] b) { + return in.read(b, 0, b.length); + } + + /** + * Reads up to {@code len} bytes of data from the contained + * input stream into an array of bytes. An attempt is made to read + * as many as {@code len} bytes, but a smaller number may be read, + * possibly zero. The number of bytes actually read is returned as an + * integer. + * + *
This method blocks until input data is available, end of file is + * detected, or an exception is thrown. + * + *
If {@code len} is zero, then no bytes are read and + * {@code 0} is returned; otherwise, there is an attempt to read at + * least one byte. If no byte is available because the stream is at end of + * file, the value {@code -1} is returned; otherwise, at least one + * byte is read and stored into {@code b}. + * + *
The first byte read is stored into element {@code b[off]}, the + * next one into {@code b[off+1]}, and so on. The number of bytes read + * is, at most, equal to {@code len}. Let k be the number of + * bytes actually read; these bytes will be stored in elements + * {@code b[off]} through {@code b[off+}k{@code -1]}, + * leaving elements {@code b[off+}k{@code ]} through + * {@code b[off+len-1]} unaffected. + * + *
In every case, elements {@code b[0]} through + * {@code b[off]} and elements {@code b[off+len]} through + * {@code b[b.length-1]} are unaffected. + * + * @param b the buffer into which the data is read. + * @param off the start offset in the destination array {@code b} + * @param len the maximum number of bytes read. + * @return the total number of bytes read into the buffer, or + * {@code -1} if there is no more data because the end + * of the stream has been reached. + * @throws NullPointerException If {@code b} is {@code null}. + * @throws IndexOutOfBoundsException If {@code off} is negative, + * {@code len} is negative, or {@code len} is greater than + * {@code b.length - off} + * @see SafeFilterInputStream#in + * @see java.io.InputStream#read(byte[], int, int) + */ + public final int read(byte[] b, int off, int len) { + return in.read(b, off, len); + } + + /** + * See the general contract of the {@code readFully} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @param b the buffer into which the data is read. + * @throws NullPointerException if {@code b} is {@code null}. + * @see SafeFilterInputStream#in + */ + public final void readFully(byte @NotNull [] b) { + readFully(b, 0, b.length); + } + + /** + * See the general contract of the {@code readFully} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @param b the buffer into which the data is read. + * @param off the start offset in the data array {@code b}. + * @param len the number of bytes to read. + * @throws NullPointerException if {@code b} is {@code null}. + * @throws IndexOutOfBoundsException if {@code off} is negative, + * {@code len} is negative, or {@code len} is greater than + * {@code b.length - off}. + * @see SafeFilterInputStream#in + */ + public final void readFully(byte @NotNull [] b, int off, int len) { + if (len < 0) + throw new IndexOutOfBoundsException(); + int n = 0; + while (n < len) { + int count = in.read(b, off + n, len - n); + if (count < 0) + throw new IndexOutOfBoundsException(); + n += count; + } + } + + /** + * See the general contract of the {@code skipBytes} + * method of {@code DataInput}. + *
+	 * Bytes for this operation are read from the contained
+	 * input stream.
+	 *
+	 * @param n the number of bytes to be skipped.
+	 * @return the actual number of bytes skipped.
+	 */
+	public final int skipBytes(int n) {
+		int total = 0;
+		int cur;
+
+		while ((total < n) && ((cur = (int) in.skip(n - total)) > 0)) {
+			total += cur;
+		}
+
+		return total;
+	}
+
+	/**
+	 * See the general contract of the {@code readBoolean}
+	 * method of {@code DataInput}.
+	 *
+ * Bytes for this operation are read from the contained + * input stream. + * + * @return the {@code boolean} value read. + * @see SafeFilterInputStream#in + */ + public final boolean readBoolean() { + int ch = in.read(); + if (ch < 0) + throw new IndexOutOfBoundsException(); + return (ch != 0); + } + + /** + * See the general contract of the {@code readByte} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @return the next byte of this input stream as a signed 8-bit + * {@code byte}. + * @see SafeFilterInputStream#in + */ + public final byte readByte() { + int ch = in.read(); + if (ch < 0) + throw new IndexOutOfBoundsException(); + return (byte)(ch); + } + + /** + * See the general contract of the {@code readUnsignedByte} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @return the next byte of this input stream, interpreted as an + * unsigned 8-bit number. + * @see SafeFilterInputStream#in + */ + public final int readUnsignedByte() { + int ch = in.read(); + if (ch < 0) + throw new IndexOutOfBoundsException(); + return ch; + } + + /** + * See the general contract of the {@code readShort} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @return the next two bytes of this input stream, interpreted as a + * signed 16-bit number. + * @see SafeFilterInputStream#in + */ + public final short readShort() { + int ch1 = in.read(); + int ch2 = in.read(); + if ((ch1 | ch2) < 0) + throw new IndexOutOfBoundsException(); + return (short)((ch1 << 8) + (ch2 << 0)); + } + + /** + * See the general contract of the {@code readUnsignedShort} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @return the next two bytes of this input stream, interpreted as an + * unsigned 16-bit integer. + * @see SafeFilterInputStream#in + */ + public final int readUnsignedShort() { + int ch1 = in.read(); + int ch2 = in.read(); + if ((ch1 | ch2) < 0) + throw new IndexOutOfBoundsException(); + return (ch1 << 8) + (ch2 << 0); + } + + /** + * See the general contract of the {@code readChar} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @return the next two bytes of this input stream, interpreted as a + * {@code char}. + * @see SafeFilterInputStream#in + */ + public final char readChar() { + int ch1 = in.read(); + int ch2 = in.read(); + if ((ch1 | ch2) < 0) + throw new IndexOutOfBoundsException(); + return (char)((ch1 << 8) + (ch2 << 0)); + } + + /** + * See the general contract of the {@code readInt} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @return the next four bytes of this input stream, interpreted as an + * {@code int}. + * @see SafeFilterInputStream#in + */ + public final int readInt() { + int ch1 = in.read(); + int ch2 = in.read(); + int ch3 = in.read(); + int ch4 = in.read(); + if ((ch1 | ch2 | ch3 | ch4) < 0) + throw new IndexOutOfBoundsException(); + return ((ch1 << 24) + (ch2 << 16) + (ch3 << 8) + (ch4 << 0)); + } + + private final byte[] readBuffer = new byte[8]; + + /** + * See the general contract of the {@code readLong} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @return the next eight bytes of this input stream, interpreted as a + * {@code long}. + * @see SafeFilterInputStream#in + */ + public final long readLong() { + readFully(readBuffer, 0, 8); + return (((long)readBuffer[0] << 56) + + ((long)(readBuffer[1] & 255) << 48) + + ((long)(readBuffer[2] & 255) << 40) + + ((long)(readBuffer[3] & 255) << 32) + + ((long)(readBuffer[4] & 255) << 24) + + ((readBuffer[5] & 255) << 16) + + ((readBuffer[6] & 255) << 8) + + ((readBuffer[7] & 255) << 0)); + } + + /** + * See the general contract of the {@code readFloat} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @return the next four bytes of this input stream, interpreted as a + * {@code float}. + * @see SafeDataInputStream#readInt() + * @see Float#intBitsToFloat(int) + */ + public final float readFloat() { + return Float.intBitsToFloat(readInt()); + } + + /** + * See the general contract of the {@code readDouble} + * method of {@code DataInput}. + *
+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @return the next eight bytes of this input stream, interpreted as a + * {@code double}. + * @see SafeDataInputStream#readLong() + * @see Double#longBitsToDouble(long) + */ + public final double readDouble() { + return Double.longBitsToDouble(readLong()); + } + + private char[] lineBuffer; + + /** + * See the general contract of the {@code readLine} + * method of {@code DataInput}. + *

+	 * Bytes
+	 * for this operation are read from the contained
+	 * input stream.
+	 *
+	 * @deprecated This method does not properly convert bytes to characters.
+	 * As of JDK 1.1, the preferred way to read lines of text is via the
+	 * {@code BufferedReader.readLine()} method. Programs that use the
+	 * {@code DataInputStream} class to read lines can be converted to use
+	 * the {@code BufferedReader} class by replacing code of the form:
+	 * <blockquote><pre>

+	 *     DataInputStream d = new DataInputStream(in);
+	 * </pre></blockquote>
+	 * with:
+	 * <blockquote><pre>
+	 *     BufferedReader d
+	 *          = new BufferedReader(new InputStreamReader(in));
+	 * </pre></blockquote>
+ * + * @return the next line of text from this input stream. + * @see java.io.BufferedReader#readLine() + * @see SafeFilterInputStream#in + */ + @Deprecated + public final String readLine() { + char[] buf = lineBuffer; + + if (buf == null) { + buf = lineBuffer = new char[128]; + } + + int room = buf.length; + int offset = 0; + int c; + + loop: while (true) { + switch (c = in.read()) { + case -1: + case '\n': + break loop; + + case '\r': + int c2 = in.read(); + if ((c2 != '\n') && (c2 != -1)) { + if (!(in instanceof SafePushbackInputStream)) { + this.in = new SafePushbackInputStream(in); + } + ((SafePushbackInputStream)in).unread(c2); + } + break loop; + + default: + if (--room < 0) { + buf = new char[offset + 128]; + room = buf.length - offset - 1; + System.arraycopy(lineBuffer, 0, buf, 0, offset); + lineBuffer = buf; + } + buf[offset++] = (char) c; + break; + } + } + if ((c == -1) && (offset == 0)) { + return null; + } + return String.copyValueOf(buf, 0, offset); + } + + /** + * See the general contract of the {@code readUTF} + * method of {@code DataInput}. + *

+ * Bytes + * for this operation are read from the contained + * input stream. + * + * @return a Unicode string. + * @see SafeDataInputStream#readUTF(SafeDataInputStream) + */ + public final @NotNull String readUTF() { + return readUTF(this); + } + + /** + * Reads from the + * stream {@code in} a representation + * of a Unicode character string encoded in + * modified UTF-8 format; + * this string of characters is then returned as a {@code String}. + * The details of the modified UTF-8 representation + * are exactly the same as for the {@code readUTF} + * method of {@code DataInput}. + * + * @param in a data input stream. + * @return a Unicode string. + * @see SafeDataInputStream#readUnsignedShort() + */ + public static String readUTF(SafeDataInputStream in) { + int utflen = in.readUnsignedShort(); + byte[] bytearr; + char[] chararr; + if (in.bytearr.length < utflen){ + in.bytearr = new byte[utflen*2]; + in.chararr = new char[utflen*2]; + } + chararr = in.chararr; + bytearr = in.bytearr; + + int c, char2, char3; + int count = 0; + int chararr_count=0; + + in.readFully(bytearr, 0, utflen); + + while (count < utflen) { + c = (int) bytearr[count] & 0xff; + if (c > 127) break; + count++; + chararr[chararr_count++]=(char)c; + } + + while (count < utflen) { + c = (int) bytearr[count] & 0xff; + switch (c >> 4) { + case 0: case 1: case 2: case 3: case 4: case 5: case 6: case 7: + /* 0xxxxxxx*/ + count++; + chararr[chararr_count++]=(char)c; + break; + case 12: case 13: + /* 110x xxxx 10xx xxxx*/ + count += 2; + if (count > utflen) + throw new IllegalArgumentException( + "malformed input: partial character at end"); + char2 = bytearr[count-1]; + if ((char2 & 0xC0) != 0x80) + throw new IllegalArgumentException( + "malformed input around byte " + count); + chararr[chararr_count++]=(char)(((c & 0x1F) << 6) | + (char2 & 0x3F)); + break; + case 14: + /* 1110 xxxx 10xx xxxx 10xx xxxx */ + count += 3; + if (count > utflen) + throw new IllegalArgumentException( + "malformed input: partial character at end"); + char2 = bytearr[count-2]; + char3 = bytearr[count-1]; + if (((char2 & 0xC0) != 0x80) || ((char3 & 0xC0) != 0x80)) + throw new IllegalArgumentException( + "malformed input around byte " + (count-1)); + chararr[chararr_count++]=(char)(((c & 0x0F) << 12) | + ((char2 & 0x3F) << 6) | + ((char3 & 0x3F) << 0)); + break; + default: + /* 10xx xxxx, 1111 xxxx */ + throw new IllegalArgumentException( + "malformed input around byte " + count); + } + } + // The number of chars produced may be less than utflen + return new String(chararr, 0, chararr_count); + } +} diff --git a/src/main/java/org/warp/commonutils/stream/SafeDataOutput.java b/src/main/java/org/warp/commonutils/stream/SafeDataOutput.java new file mode 100644 index 0000000..a48aa34 --- /dev/null +++ b/src/main/java/org/warp/commonutils/stream/SafeDataOutput.java @@ -0,0 +1,339 @@ +/* + * Copyright (c) 1995, 2020, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package org.warp.commonutils.stream; + +/** + * The {@code SafeDataOutput} interface provides + * for converting data from any of the Java + * primitive types to a series of bytes and + * writing these bytes to a binary stream. + * There is also a facility for converting + * a {@code String} into + * modified UTF-8 + * format and writing the resulting series + * of bytes. + *

+ * For all the methods in this interface that
+ * write bytes, it is generally true that if
+ * a byte cannot be written for any reason,
+ * an unchecked exception is thrown rather
+ * than an {@code IOException}.
+ *
+ * @author Frank Yellin
+ * @see java.io.DataInput
+ * @see java.io.DataOutputStream
+ * @since 1.0
+ */
+public interface SafeDataOutput {
+	/**
+	 * Writes to the output stream the eight
+	 * low-order bits of the argument {@code b}.
+	 * The 24 high-order bits of {@code b}
+	 * are ignored.
+	 *
+	 * @param b the byte to be written.
+	 */
+	void write(int b);
+
+	/**
+	 * Writes to the output stream all the bytes in array {@code b}.
+	 * If {@code b} is {@code null},
+	 * a {@code NullPointerException} is thrown.
+	 * If {@code b.length} is zero, then
+	 * no bytes are written. Otherwise, the byte
+	 * {@code b[0]} is written first, then
+	 * {@code b[1]}, and so on; the last byte
+	 * written is {@code b[b.length-1]}.
+	 *
+	 * @param b the data.
+	 */
+	void write(byte b[]);
+
+	/**
+	 * Writes {@code len} bytes from array
+	 * {@code b}, in order, to
+	 * the output stream. If {@code b}
+	 * is {@code null}, a {@code NullPointerException}
+	 * is thrown. If {@code off} is negative,
+	 * or {@code len} is negative, or {@code off+len}
+	 * is greater than the length of the array
+	 * {@code b}, then an {@code IndexOutOfBoundsException}
+	 * is thrown. If {@code len} is zero,
+	 * then no bytes are written. Otherwise, the
+	 * byte {@code b[off]} is written first,
+	 * then {@code b[off+1]}, and so on; the
+	 * last byte written is {@code b[off+len-1]}.
+	 *
+	 * @param b the data.
+	 * @param off the start offset in the data.
+	 * @param len the number of bytes to write.
+	 */
+	void write(byte b[], int off, int len);
+
+	/**
+	 * Writes a {@code boolean} value to this output stream.
+	 * If the argument {@code v}
+	 * is {@code true}, the value {@code (byte)1}
+	 * is written; if {@code v} is {@code false},
+	 * the value {@code (byte)0} is written.
+	 * The byte written by this method may
+	 * be read by the {@code readBoolean}
+	 * method of interface {@code DataInput},
+	 * which will then return a {@code boolean}
+	 * equal to {@code v}.
+	 *
+	 * @param v the boolean to be written.
+	 */
+	void writeBoolean(boolean v);
+
+	/**
+	 * Writes to the output stream the eight low-order
+	 * bits of the argument {@code v}.
+	 * The 24 high-order bits of {@code v}
+	 * are ignored. (This means that {@code writeByte}
+	 * does exactly the same thing as {@code write}
+	 * for an integer argument.) The byte written
+	 * by this method may be read by the {@code readByte}
+	 * method of interface {@code DataInput},
+	 * which will then return a {@code byte}
+	 * equal to {@code (byte)v}.
+	 *
+	 * @param v the byte value to be written.
+	 */
+	void writeByte(int v);
+
+	/**
+	 * Writes two bytes to the output
+	 * stream to represent the value of the argument.
+	 * The byte values to be written, in the order
+	 * shown, are:
+	 *

+	 * <pre>{@code
+	 * (byte)(0xff & (v >> 8))
+	 * (byte)(0xff & v)
+	 * }</pre>
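+	 * As a minimal sketch (where {@code sink} is any concrete
+	 * {@code SafeOutputStream} implementation, assumed here):
+	 * <pre>{@code
+	 * SafeDataOutputStream dout = new SafeDataOutputStream(sink);
+	 * dout.writeShort(0x1234);   // emits 0x12, then 0x34 (big-endian)
+	 * }</pre>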

+ * The bytes written by this method may be + * read by the {@code readShort} method + * of interface {@code DataInput}, which + * will then return a {@code short} equal + * to {@code (short)v}. + * + * @param v the {@code short} value to be written. + */ + void writeShort(int v); + + /** + * Writes a {@code char} value, which + * is comprised of two bytes, to the + * output stream. + * The byte values to be written, in the order + * shown, are: + *

+	 * <pre>{@code
+	 * (byte)(0xff & (v >> 8))
+	 * (byte)(0xff & v)
+	 * }</pre>

+ * The bytes written by this method may be + * read by the {@code readChar} method + * of interface {@code DataInput}, which + * will then return a {@code char} equal + * to {@code (char)v}. + * + * @param v the {@code char} value to be written. + */ + void writeChar(int v); + + /** + * Writes an {@code int} value, which is + * comprised of four bytes, to the output stream. + * The byte values to be written, in the order + * shown, are: + *

+	 * <pre>{@code
+	 * (byte)(0xff & (v >> 24))
+	 * (byte)(0xff & (v >> 16))
+	 * (byte)(0xff & (v >>  8))
+	 * (byte)(0xff & v)
+	 * }</pre>

+ * The bytes written by this method may be read + * by the {@code readInt} method of interface + * {@code DataInput}, which will then + * return an {@code int} equal to {@code v}. + * + * @param v the {@code int} value to be written. + */ + void writeInt(int v); + + /** + * Writes a {@code long} value, which is + * comprised of eight bytes, to the output stream. + * The byte values to be written, in the order + * shown, are: + *

+	 * <pre>{@code
+	 * (byte)(0xff & (v >> 56))
+	 * (byte)(0xff & (v >> 48))
+	 * (byte)(0xff & (v >> 40))
+	 * (byte)(0xff & (v >> 32))
+	 * (byte)(0xff & (v >> 24))
+	 * (byte)(0xff & (v >> 16))
+	 * (byte)(0xff & (v >>  8))
+	 * (byte)(0xff & v)
+	 * }</pre>

+ * The bytes written by this method may be + * read by the {@code readLong} method + * of interface {@code DataInput}, which + * will then return a {@code long} equal + * to {@code v}. + * + * @param v the {@code long} value to be written. + */ + void writeLong(long v); + + /** + * Writes a {@code float} value, + * which is comprised of four bytes, to the output stream. + * It does this as if it first converts this + * {@code float} value to an {@code int} + * in exactly the manner of the {@code Float.floatToIntBits} + * method and then writes the {@code int} + * value in exactly the manner of the {@code writeInt} + * method. The bytes written by this method + * may be read by the {@code readFloat} + * method of interface {@code DataInput}, + * which will then return a {@code float} + * equal to {@code v}. + * + * @param v the {@code float} value to be written. + */ + void writeFloat(float v); + + /** + * Writes a {@code double} value, + * which is comprised of eight bytes, to the output stream. + * It does this as if it first converts this + * {@code double} value to a {@code long} + * in exactly the manner of the {@code Double.doubleToLongBits} + * method and then writes the {@code long} + * value in exactly the manner of the {@code writeLong} + * method. The bytes written by this method + * may be read by the {@code readDouble} + * method of interface {@code DataInput}, + * which will then return a {@code double} + * equal to {@code v}. + * + * @param v the {@code double} value to be written. + */ + void writeDouble(double v); + + /** + * Writes a string to the output stream. + * For every character in the string + * {@code s}, taken in order, one byte + * is written to the output stream. If + * {@code s} is {@code null}, a {@code NullPointerException} + * is thrown.

+	 * If {@code s.length}
+	 * is zero, then no bytes are written. Otherwise,
+	 * the character {@code s[0]} is written
+	 * first, then {@code s[1]}, and so on;
+	 * the last character written is {@code s[s.length-1]}.
+	 * For each character, one byte is written,
+	 * the low-order byte, in exactly the manner
+	 * of the {@code writeByte} method. The
+	 * high-order eight bits of each character
+	 * in the string are ignored.
+	 *
+	 * @param s the string of bytes to be written.
+	 */
+	void writeBytes(String s);
+
+	/**
+	 * Writes every character in the string {@code s}
+	 * to the output stream, in order,
+	 * two bytes per character. If {@code s}
+	 * is {@code null}, a {@code NullPointerException}
+	 * is thrown. If {@code s.length}
+	 * is zero, then no characters are written.
+	 * Otherwise, the character {@code s[0]}
+	 * is written first, then {@code s[1]},
+	 * and so on; the last character written is
+	 * {@code s[s.length-1]}. For each character,
+	 * two bytes are actually written, high-order
+	 * byte first, in exactly the manner of the
+	 * {@code writeChar} method.
+	 *
+	 * @param s the string value to be written.
+	 */
+	void writeChars(String s);
+
+	/**
+	 * Writes two bytes of length information
+	 * to the output stream, followed
+	 * by the
+	 * modified UTF-8
+	 * representation
+	 * of every character in the string {@code s}.
+	 * If {@code s} is {@code null},
+	 * a {@code NullPointerException} is thrown.
+	 * Each character in the string {@code s}
+	 * is converted to a group of one, two, or
+	 * three bytes, depending on the value of the
+	 * character.

+ * If a character {@code c} + * is in the range \u0001 through + * \u007f, it is represented + * by one byte: + *

+	 * <pre>(byte)c</pre>

+ * If a character {@code c} is \u0000 + * or is in the range \u0080 + * through \u07ff, then it is + * represented by two bytes, to be written + * in the order shown:

+	 * <pre>{@code
+	 * (byte)(0xc0 | (0x1f & (c >> 6)))
+	 * (byte)(0x80 | (0x3f & c))
+	 * }</pre>
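+	 * (In particular, the null character \u0000 is therefore encoded as the
+	 * two bytes {@code 0xC0 0x80}, never as a bare zero byte.)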

+	 * If a character
+	 * {@code c} is in the range \u0800
+	 * through \uffff, then it is
+	 * represented by three bytes, to be written
+	 * in the order shown:

+	 * <pre>{@code
+	 * (byte)(0xe0 | (0x0f & (c >> 12)))
+	 * (byte)(0x80 | (0x3f & (c >>  6)))
+	 * (byte)(0x80 | (0x3f & c))
+	 * }</pre>
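+	 * A short worked example (illustrative): writing the euro sign
+	 * {@code '\u20ac'} produces five bytes in total:
+	 * <pre>{@code
+	 * out.writeUTF("\u20ac");   // emits 0x00 0x03 (length), then 0xE2 0x82 0xAC
+	 * }</pre>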

+	 * First,
+	 * the total number of bytes needed to represent
+	 * all the characters of {@code s} is
+	 * calculated. If this number is larger than
+	 * {@code 65535}, then an {@code IllegalArgumentException}
+	 * is thrown. Otherwise, this length is written
+	 * to the output stream in exactly the manner
+	 * of the {@code writeShort} method;
+	 * after this, the one-, two-, or three-byte
+	 * representation of each character in the
+	 * string {@code s} is written.

The + * bytes written by this method may be read + * by the {@code readUTF} method of interface + * {@code DataInput}, which will then + * return a {@code String} equal to {@code s}. + * + * @param s the string value to be written. + */ + void writeUTF(String s); +} diff --git a/src/main/java/org/warp/commonutils/stream/SafeDataOutputStream.java b/src/main/java/org/warp/commonutils/stream/SafeDataOutputStream.java new file mode 100644 index 0000000..be6b509 --- /dev/null +++ b/src/main/java/org/warp/commonutils/stream/SafeDataOutputStream.java @@ -0,0 +1,408 @@ +/* + * Copyright (c) 1994, 2019, Oracle and/or its affiliates. All rights reserved. + * ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + */ + +package org.warp.commonutils.stream; + +import java.io.DataOutputStream; + +/** + * A data output stream lets an application write primitive Java data + * types to an output stream in a portable way. An application can + * then use a data input stream to read the data back in. + * + * @author unascribed + * @see java.io.DataInputStream + * @since 1.0 + */ +public class SafeDataOutputStream extends SafeFilterOutputStream implements SafeDataOutput { + /** + * The number of bytes written to the data output stream so far. + * If this counter overflows, it will be wrapped to Integer.MAX_VALUE. + */ + protected int written; + + /** + * bytearr is initialized on demand by writeUTF + */ + private byte[] bytearr = null; + + /** + * Creates a new data output stream to write data to the specified + * underlying output stream. The counter {@code written} is + * set to zero. + * + * @param out the underlying output stream, to be saved for later + * use. + * @see SafeFilterOutputStream#out + */ + public SafeDataOutputStream(SafeOutputStream out) { + super(out); + } + + /** + * Increases the written counter by the specified value + * until it reaches Integer.MAX_VALUE. + */ + private void incCount(int value) { + int temp = written + value; + if (temp < 0) { + temp = Integer.MAX_VALUE; + } + written = temp; + } + + /** + * Writes the specified byte (the low eight bits of the argument + * {@code b}) to the underlying output stream. If no exception + * is thrown, the counter {@code written} is incremented by + * {@code 1}. + *

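+	 * (A sketch of the counter's behaviour: on a fresh stream, after
+	 * {@code dos.writeInt(1)} followed by {@code dos.writeShort(2)},
+	 * {@code dos.size()} returns {@code 6}.)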
+ * Implements the {@code write} method of {@code OutputStream}. + * + * @param b the {@code byte} to be written. + * @see SafeFilterOutputStream#out + */ + public void write(int b) { + out.write(b); + incCount(1); + } + + /** + * Writes {@code len} bytes from the specified byte array + * starting at offset {@code off} to the underlying output stream. + * If no exception is thrown, the counter {@code written} is + * incremented by {@code len}. + * + * @param b the data. + * @param off the start offset in the data. + * @param len the number of bytes to write. + * @see SafeFilterOutputStream#out + */ + public void write(byte b[], int off, int len) + { + out.write(b, off, len); + incCount(len); + } + + /** + * Flushes this data output stream. This forces any buffered output + * bytes to be written out to the stream. + *

+	 * The {@code flush} method of {@code SafeDataOutputStream}
+	 * calls the {@code flush} method of its underlying output stream.
+	 *
+	 * @see SafeFilterOutputStream#out
+	 * @see java.io.OutputStream#flush()
+	 */
+	public void flush() {
+		out.flush();
+	}
+
+	/**
+	 * Writes a {@code boolean} to the underlying output stream as
+	 * a 1-byte value. The value {@code true} is written out as the
+	 * value {@code (byte)1}; the value {@code false} is
+	 * written out as the value {@code (byte)0}. If no exception is
+	 * thrown, the counter {@code written} is incremented by
+	 * {@code 1}.
+	 *
+	 * @param v a {@code boolean} value to be written.
+	 * @see SafeFilterOutputStream#out
+	 */
+	public final void writeBoolean(boolean v) {
+		out.write(v ? 1 : 0);
+		incCount(1);
+	}
+
+	/**
+	 * Writes out a {@code byte} to the underlying output stream as
+	 * a 1-byte value. If no exception is thrown, the counter
+	 * {@code written} is incremented by {@code 1}.
+	 *
+	 * @param v a {@code byte} value to be written.
+	 * @see SafeFilterOutputStream#out
+	 */
+	public final void writeByte(int v) {
+		out.write(v);
+		incCount(1);
+	}
+
+	/**
+	 * Writes a {@code short} to the underlying output stream as two
+	 * bytes, high byte first. If no exception is thrown, the counter
+	 * {@code written} is incremented by {@code 2}.
+	 *
+	 * @param v a {@code short} to be written.
+	 * @see SafeFilterOutputStream#out
+	 */
+	public final void writeShort(int v) {
+		out.write((v >>> 8) & 0xFF);
+		out.write((v >>> 0) & 0xFF);
+		incCount(2);
+	}
+
+	/**
+	 * Writes a {@code char} to the underlying output stream as a
+	 * 2-byte value, high byte first. If no exception is thrown, the
+	 * counter {@code written} is incremented by {@code 2}.
+	 *
+	 * @param v a {@code char} value to be written.
+	 * @see SafeFilterOutputStream#out
+	 */
+	public final void writeChar(int v) {
+		out.write((v >>> 8) & 0xFF);
+		out.write((v >>> 0) & 0xFF);
+		incCount(2);
+	}
+
+	/**
+	 * Writes an {@code int} to the underlying output stream as four
+	 * bytes, high byte first. If no exception is thrown, the counter
+	 * {@code written} is incremented by {@code 4}.
+	 *
+	 * @param v an {@code int} to be written.
+	 * @see SafeFilterOutputStream#out
+	 */
+	public final void writeInt(int v) {
+		out.write((v >>> 24) & 0xFF);
+		out.write((v >>> 16) & 0xFF);
+		out.write((v >>> 8) & 0xFF);
+		out.write((v >>> 0) & 0xFF);
+		incCount(4);
+	}
+
+	private byte writeBuffer[] = new byte[8];
+
+	/**
+	 * Writes a {@code long} to the underlying output stream as eight
+	 * bytes, high byte first. If no exception is thrown, the counter
+	 * {@code written} is incremented by {@code 8}.
+	 *
+	 * @param v a {@code long} to be written.
+	 * @see SafeFilterOutputStream#out
+	 */
+	public final void writeLong(long v) {
+		writeBuffer[0] = (byte)(v >>> 56);
+		writeBuffer[1] = (byte)(v >>> 48);
+		writeBuffer[2] = (byte)(v >>> 40);
+		writeBuffer[3] = (byte)(v >>> 32);
+		writeBuffer[4] = (byte)(v >>> 24);
+		writeBuffer[5] = (byte)(v >>> 16);
+		writeBuffer[6] = (byte)(v >>> 8);
+		writeBuffer[7] = (byte)(v >>> 0);
+		out.write(writeBuffer, 0, 8);
+		incCount(8);
+	}
+
+	/**
+	 * Converts the float argument to an {@code int} using the
+	 * {@code floatToIntBits} method in class {@code Float},
+	 * and then writes that {@code int} value to the underlying
+	 * output stream as a 4-byte quantity, high byte first. If no
+	 * exception is thrown, the counter {@code written} is
+	 * incremented by {@code 4}.
+	 *
+	 * @param v a {@code float} value to be written.
+ * @see SafeFilterOutputStream#out + * @see Float#floatToIntBits(float) + */ + public final void writeFloat(float v) { + writeInt(Float.floatToIntBits(v)); + } + + /** + * Converts the double argument to a {@code long} using the + * {@code doubleToLongBits} method in class {@code Double}, + * and then writes that {@code long} value to the underlying + * output stream as an 8-byte quantity, high byte first. If no + * exception is thrown, the counter {@code written} is + * incremented by {@code 8}. + * + * @param v a {@code double} value to be written. + * @see SafeFilterOutputStream#out + * @see Double#doubleToLongBits(double) + */ + public final void writeDouble(double v) { + writeLong(Double.doubleToLongBits(v)); + } + + /** + * Writes out the string to the underlying output stream as a + * sequence of bytes. Each character in the string is written out, in + * sequence, by discarding its high eight bits. If no exception is + * thrown, the counter {@code written} is incremented by the + * length of {@code s}. + * + * @param s a string of bytes to be written. + * @see SafeFilterOutputStream#out + */ + public final void writeBytes(String s) { + int len = s.length(); + for (int i = 0 ; i < len ; i++) { + out.write((byte)s.charAt(i)); + } + incCount(len); + } + + /** + * Writes a string to the underlying output stream as a sequence of + * characters. Each character is written to the data output stream as + * if by the {@code writeChar} method. If no exception is + * thrown, the counter {@code written} is incremented by twice + * the length of {@code s}. + * + * @param s a {@code String} value to be written. + * @see SafeDataOutputStream#writeChar(int) + * @see SafeFilterOutputStream#out + */ + public final void writeChars(String s) { + int len = s.length(); + for (int i = 0 ; i < len ; i++) { + int v = s.charAt(i); + out.write((v >>> 8) & 0xFF); + out.write((v >>> 0) & 0xFF); + } + incCount(len * 2); + } + + /** + * Writes a string to the underlying output stream using + * modified UTF-8 + * encoding in a machine-independent manner. + *

+ * First, two bytes are written to the output stream as if by the + * {@code writeShort} method giving the number of bytes to + * follow. This value is the number of bytes actually written out, + * not the length of the string. Following the length, each character + * of the string is output, in sequence, using the modified UTF-8 encoding + * for the character. If no exception is thrown, the counter + * {@code written} is incremented by the total number of + * bytes written to the output stream. This will be at least two + * plus the length of {@code str}, and at most two plus + * thrice the length of {@code str}. + * + * @param str a string to be written. + * @see #writeChars(String) + */ + public final void writeUTF(String str) { + writeUTF(str, this); + } + + /** + * Writes a string to the specified DataOutput using + * modified UTF-8 + * encoding in a machine-independent manner. + *

+ * First, two bytes are written to out as if by the {@code writeShort} + * method giving the number of bytes to follow. This value is the number of + * bytes actually written out, not the length of the string. Following the + * length, each character of the string is output, in sequence, using the + * modified UTF-8 encoding for the character. If no exception is thrown, the + * counter {@code written} is incremented by the total number of + * bytes written to the output stream. This will be at least two + * plus the length of {@code str}, and at most two plus + * thrice the length of {@code str}. + * + * @param str a string to be written. + * @param out destination to write to + * @return The number of bytes written out. + */ + static int writeUTF(String str, SafeDataOutput out) { + final int strlen = str.length(); + int utflen = strlen; // optimized for ASCII + + for (int i = 0; i < strlen; i++) { + int c = str.charAt(i); + if (c >= 0x80 || c == 0) + utflen += (c >= 0x800) ? 2 : 1; + } + + if (utflen > 65535 || /* overflow */ utflen < strlen) + throw new IllegalArgumentException(tooLongMsg(str, utflen)); + + final byte[] bytearr; + if (out instanceof SafeDataOutputStream) { + SafeDataOutputStream dos = (SafeDataOutputStream)out; + if (dos.bytearr == null || (dos.bytearr.length < (utflen + 2))) + dos.bytearr = new byte[(utflen*2) + 2]; + bytearr = dos.bytearr; + } else { + bytearr = new byte[utflen + 2]; + } + + int count = 0; + bytearr[count++] = (byte) ((utflen >>> 8) & 0xFF); + bytearr[count++] = (byte) ((utflen >>> 0) & 0xFF); + + int i = 0; + for (i = 0; i < strlen; i++) { // optimized for initial run of ASCII + int c = str.charAt(i); + if (c >= 0x80 || c == 0) break; + bytearr[count++] = (byte) c; + } + + for (; i < strlen; i++) { + int c = str.charAt(i); + if (c < 0x80 && c != 0) { + bytearr[count++] = (byte) c; + } else if (c >= 0x800) { + bytearr[count++] = (byte) (0xE0 | ((c >> 12) & 0x0F)); + bytearr[count++] = (byte) (0x80 | ((c >> 6) & 0x3F)); + bytearr[count++] = (byte) (0x80 | ((c >> 0) & 0x3F)); + } else { + bytearr[count++] = (byte) (0xC0 | ((c >> 6) & 0x1F)); + bytearr[count++] = (byte) (0x80 | ((c >> 0) & 0x3F)); + } + } + out.write(bytearr, 0, utflen + 2); + return utflen + 2; + } + + private static String tooLongMsg(String s, int bits32) { + int slen = s.length(); + String head = s.substring(0, 8); + String tail = s.substring(slen - 8, slen); + // handle int overflow with max 3x expansion + long actualLength = (long)slen + Integer.toUnsignedLong(bits32 - slen); + return "encoded string (" + head + "..." + tail + ") too long: " + + actualLength + " bytes"; + } + + /** + * Returns the current value of the counter {@code written}, + * the number of bytes written to this data output stream so far. + * If the counter overflows, it will be wrapped to Integer.MAX_VALUE. + * + * @return the value of the {@code written} field. 
+ * @see SafeDataOutputStream#written + */ + public final int size() { + return written; + } + + public DataOutputStream asDataOutputStream() { + return new DataOutputStream(this.out); + } +} diff --git a/src/main/java/org/warp/commonutils/stream/SafeFilterInputStream.java b/src/main/java/org/warp/commonutils/stream/SafeFilterInputStream.java new file mode 100644 index 0000000..7e8247b --- /dev/null +++ b/src/main/java/org/warp/commonutils/stream/SafeFilterInputStream.java @@ -0,0 +1,210 @@ +package org.warp.commonutils.stream; + +/** + * A {@code FilterInputStream} contains + * some other input stream, which it uses as + * its basic source of data, possibly transforming + * the data along the way or providing additional + * functionality. The class {@code FilterInputStream} + * itself simply overrides all methods of + * {@code InputStream} with versions that + * pass all requests to the contained input + * stream. Subclasses of {@code FilterInputStream} + * may further override some of these methods + * and may also provide additional methods + * and fields. + * + * @author Jonathan Payne + * @since 1.0 + */ +public class SafeFilterInputStream extends SafeInputStream { + /** + * The input stream to be filtered. + */ + protected volatile SafeInputStream in; + + /** + * Creates a {@code FilterInputStream} + * by assigning the argument {@code in} + * to the field {@code this.in} so as + * to remember it for later use. + * + * @param in the underlying input stream, or {@code null} if + * this instance is to be created without an underlying stream. + */ + protected SafeFilterInputStream(SafeInputStream in) { + this.in = in; + } + + /** + * Reads the next byte of data from this input stream. The value + * byte is returned as an {@code int} in the range + * {@code 0} to {@code 255}. If no byte is available + * because the end of the stream has been reached, the value + * {@code -1} is returned. This method blocks until input data + * is available, the end of the stream is detected, or an exception + * is thrown. + *

+ * This method + * simply performs {@code in.read()} and returns the result. + * + * @return the next byte of data, or {@code -1} if the end of the + * stream is reached. + * @see SafeFilterInputStream#in + */ + public int read() { + return in.read(); + } + + /** + * Reads up to {@code b.length} bytes of data from this + * input stream into an array of bytes. This method blocks until some + * input is available. + *

+ * This method simply performs the call + * {@code read(b, 0, b.length)} and returns + * the result. It is important that it does + * not do {@code in.read(b)} instead; + * certain subclasses of {@code FilterInputStream} + * depend on the implementation strategy actually + * used. + * + * @param b the buffer into which the data is read. + * @return the total number of bytes read into the buffer, or + * {@code -1} if there is no more data because the end of + * the stream has been reached. + * @see SafeFilterInputStream#read(byte[], int, int) + */ + public int read(byte b[]) { + return read(b, 0, b.length); + } + + /** + * Reads up to {@code len} bytes of data from this input stream + * into an array of bytes. If {@code len} is not zero, the method + * blocks until some input is available; otherwise, no + * bytes are read and {@code 0} is returned. + *

+ * This method simply performs {@code in.read(b, off, len)} + * and returns the result. + * + * @param b the buffer into which the data is read. + * @param off the start offset in the destination array {@code b} + * @param len the maximum number of bytes read. + * @return the total number of bytes read into the buffer, or + * {@code -1} if there is no more data because the end of + * the stream has been reached. + * @throws NullPointerException If {@code b} is {@code null}. + * @throws IndexOutOfBoundsException If {@code off} is negative, + * {@code len} is negative, or {@code len} is greater than + * {@code b.length - off} + * @see SafeFilterInputStream#in + */ + public int read(byte b[], int off, int len) { + return in.read(b, off, len); + } + + /** + * Skips over and discards {@code n} bytes of data from the + * input stream. The {@code skip} method may, for a variety of + * reasons, end up skipping over some smaller number of bytes, + * possibly {@code 0}. The actual number of bytes skipped is + * returned. + *

+ * This method simply performs {@code in.skip(n)}. + * + * @param n the number of bytes to be skipped. + * @return the actual number of bytes skipped. + */ + public long skip(long n) { + return in.skip(n); + } + + /** + * Returns an estimate of the number of bytes that can be read (or + * skipped over) from this input stream without blocking by the next + * caller of a method for this input stream. The next caller might be + * the same thread or another thread. A single read or skip of this + * many bytes will not block, but may read or skip fewer bytes. + *

+ * This method returns the result of {@link #in in}.available(). + * + * @return an estimate of the number of bytes that can be read (or skipped + * over) from this input stream without blocking. + */ + public int available() { + return in.available(); + } + + /** + * Closes this input stream and releases any system resources + * associated with the stream. + * This + * method simply performs {@code in.close()}. + * + * @see SafeFilterInputStream#in + */ + public void close() { + in.close(); + } + + /** + * Marks the current position in this input stream. A subsequent + * call to the {@code reset} method repositions this stream at + * the last marked position so that subsequent reads re-read the same bytes. + *

+ * The {@code readlimit} argument tells this input stream to + * allow that many bytes to be read before the mark position gets + * invalidated. + *

+ * This method simply performs {@code in.mark(readlimit)}. + * + * @param readlimit the maximum limit of bytes that can be read before + * the mark position becomes invalid. + * @see SafeFilterInputStream#in + * @see SafeFilterInputStream#reset() + */ + public void mark(int readlimit) { + in.mark(readlimit); + } + + /** + * Repositions this stream to the position at the time the + * {@code mark} method was last called on this input stream. + *

+ * This method + * simply performs {@code in.reset()}. + *

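+	 * A minimal sketch (assuming the underlying stream supports marking;
+	 * {@code fin} is any {@code SafeFilterInputStream}):
+	 * <pre>{@code
+	 * fin.mark(16);        // remember this position; valid for the next 16 bytes
+	 * int first = fin.read();
+	 * fin.reset();         // rewind: the next read() returns first again
+	 * }</pre>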
+	 * Stream marks are intended to be used in
+	 * situations where you need to read ahead a little to see what's in
+	 * the stream. Often this is most easily done by invoking some
+	 * general parser. If the stream is of the type handled by the
+	 * parser, it just chugs along happily. If the stream is not of
+	 * that type, the parser should toss an exception when it fails.
+	 * If this happens within readlimit bytes, it allows the outer
+	 * code to reset the stream and try another parser.
+	 *
+	 * @see SafeFilterInputStream#in
+	 * @see SafeFilterInputStream#mark(int)
+	 */
+	public void reset() {
+		in.reset();
+	}
+
+	/**
+	 * Tests if this input stream supports the {@code mark}
+	 * and {@code reset} methods.
+	 * This method
+	 * simply performs {@code in.markSupported()}.
+	 *
+	 * @return {@code true} if this stream type supports the
+	 * {@code mark} and {@code reset} methods;
+	 * {@code false} otherwise.
+	 * @see SafeFilterInputStream#in
+	 * @see java.io.InputStream#mark(int)
+	 * @see java.io.InputStream#reset()
+	 */
+	public boolean markSupported() {
+		return in.markSupported();
+	}
+}
diff --git a/src/main/java/org/warp/commonutils/stream/SafeFilterOutputStream.java b/src/main/java/org/warp/commonutils/stream/SafeFilterOutputStream.java
new file mode 100644
index 0000000..f4fd0e5
--- /dev/null
+++ b/src/main/java/org/warp/commonutils/stream/SafeFilterOutputStream.java
@@ -0,0 +1,180 @@
+package org.warp.commonutils.stream;
+
+
+/**
+ * This class is the superclass of all classes that filter output
+ * streams. These streams sit on top of an already existing output
+ * stream (the underlying output stream), which they use as their
+ * basic sink of data, possibly transforming the data along the
+ * way or providing additional functionality.
+ *

+ * The class {@code FilterOutputStream} itself simply overrides + * all methods of {@code SafeOutputStream} with versions that pass + * all requests to the underlying output stream. Subclasses of + * {@code FilterOutputStream} may further override some of these + * methods as well as provide additional methods and fields. + * + * @author Jonathan Payne + * @since 1.0 + */ +public class SafeFilterOutputStream extends SafeOutputStream { + /** + * The underlying output stream to be filtered. + */ + protected SafeOutputStream out; + + /** + * Whether the stream is closed; implicitly initialized to false. + */ + private volatile boolean closed; + + /** + * Object used to prevent a race on the 'closed' instance variable. + */ + private final Object closeLock = new Object(); + + /** + * Creates an output stream filter built on top of the specified + * underlying output stream. + * + * @param out the underlying output stream to be assigned to + * the field {@code this.out} for later use, or + * {@code null} if this instance is to be + * created without an underlying stream. + */ + public SafeFilterOutputStream(SafeOutputStream out) { + this.out = out; + } + + /** + * Writes the specified {@code byte} to this output stream. + *

+ * The {@code write} method of {@code FilterOutputStream} + * calls the {@code write} method of its underlying output stream, + * that is, it performs {@code out.write(b)}. + *

+ * Implements the abstract {@code write} method of {@code SafeOutputStream}. + * + * @param b the {@code byte}. + */ + @Override + public void write(int b) { + out.write(b); + } + + /** + * Writes {@code b.length} bytes to this output stream. + *

+ * The {@code write} method of {@code FilterOutputStream} + * calls its {@code write} method of three arguments with the + * arguments {@code b}, {@code 0}, and + * {@code b.length}. + *

+ * Note that this method does not call the one-argument + * {@code write} method of its underlying output stream with + * the single argument {@code b}. + * + * @param b the data to be written. + * @see SafeFilterOutputStream#write(byte[], int, int) + */ + @Override + public void write(byte b[]) { + write(b, 0, b.length); + } + + /** + * Writes {@code len} bytes from the specified + * {@code byte} array starting at offset {@code off} to + * this output stream. + *

+ * The {@code write} method of {@code FilterOutputStream} + * calls the {@code write} method of one argument on each + * {@code byte} to output. + *

+ * Note that this method does not call the {@code write} method + * of its underlying output stream with the same arguments. Subclasses + * of {@code FilterOutputStream} should provide a more efficient + * implementation of this method. + * + * @param b the data. + * @param off the start offset in the data. + * @param len the number of bytes to write. + * @see SafeFilterOutputStream#write(int) + */ + @Override + public void write(byte b[], int off, int len) { + if ((off | len | (b.length - (len + off)) | (off + len)) < 0) + throw new IndexOutOfBoundsException(); + + for (int i = 0 ; i < len ; i++) { + write(b[off + i]); + } + } + + /** + * Flushes this output stream and forces any buffered output bytes + * to be written out to the stream. + *

+ * The {@code flush} method of {@code FilterOutputStream} + * calls the {@code flush} method of its underlying output stream. + * + * @see SafeFilterOutputStream#out + */ + @Override + public void flush() { + out.flush(); + } + + /** + * Closes this output stream and releases any system resources + * associated with the stream. + *

+	 * When not already closed, the {@code close} method of {@code
+	 * FilterOutputStream} calls its {@code flush} method, and then
+	 * calls the {@code close} method of its underlying output stream.
+	 *
+	 * @see SafeFilterOutputStream#flush()
+	 * @see SafeFilterOutputStream#out
+	 */
+	@Override
+	public void close() {
+		if (closed) {
+			return;
+		}
+		synchronized (closeLock) {
+			if (closed) {
+				return;
+			}
+			closed = true;
+		}
+
+		Throwable flushException = null;
+		try {
+			flush();
+		} catch (Throwable e) {
+			flushException = e;
+			throw e;
+		} finally {
+			if (flushException == null) {
+				out.close();
+			} else {
+				try {
+					out.close();
+				} catch (Throwable closeException) {
+					// evaluate possible precedence of flushException over closeException
+					if ((flushException instanceof ThreadDeath) &&
+							!(closeException instanceof ThreadDeath)) {
+						flushException.addSuppressed(closeException);
+						throw (ThreadDeath) flushException;
+					}
+
+					if (flushException != closeException) {
+						closeException.addSuppressed(flushException);
+					}
+
+					throw closeException;
+				}
+			}
+		}
+	}
+}
diff --git a/src/main/java/org/warp/commonutils/stream/SafeInputStream.java b/src/main/java/org/warp/commonutils/stream/SafeInputStream.java
new file mode 100644
index 0000000..89020b0
--- /dev/null
+++ b/src/main/java/org/warp/commonutils/stream/SafeInputStream.java
@@ -0,0 +1,201 @@
+package org.warp.commonutils.stream;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Objects;
+
+public abstract class SafeInputStream extends InputStream {
+
+	// MAX_SKIP_BUFFER_SIZE is used to determine the maximum buffer size to
+	// use when skipping.
+	private static final int MAX_SKIP_BUFFER_SIZE = 2048;
+
+	private static final int DEFAULT_BUFFER_SIZE = 8192;
+
+	@Override
+	public abstract int read();
+
+	public int read(byte b[]) {
+		return read(b, 0, b.length);
+	}
+
+	public int read(byte b[], int off, int len) {
+		Objects.checkFromIndexSize(off, len, b.length);
+		if (len == 0) {
+			return 0;
+		}
+
+		int c = read();
+		if (c == -1) {
+			return -1;
+		}
+		b[off] = (byte)c;
+
+		int i = 1;
+		for (; i < len ; i++) {
+			c = read();
+			if (c == -1) {
+				break;
+			}
+			b[off + i] = (byte)c;
+		}
+		return i;
+	}
+
+	public byte[] readAllBytes() {
+		return readNBytes(Integer.MAX_VALUE);
+	}
+
+	private static final int MAX_BUFFER_SIZE = Integer.MAX_VALUE - 8;
+
+	public byte[] readNBytes(int len) {
+		if (len < 0) {
+			throw new IllegalArgumentException("len < 0");
+		}
+
+		List<byte[]> bufs = null;
+		byte[] result = null;
+		int total = 0;
+		int remaining = len;
+		int n;
+		do {
+			byte[] buf = new byte[Math.min(remaining, DEFAULT_BUFFER_SIZE)];
+			int nread = 0;
+
+			// read to EOF which may read more or less than buffer size
+			while ((n = read(buf, nread,
+					Math.min(buf.length - nread, remaining))) > 0) {
+				nread += n;
+				remaining -= n;
+			}
+
+			if (nread > 0) {
+				if (MAX_BUFFER_SIZE - total < nread) {
+					throw new OutOfMemoryError("Required array size too large");
+				}
+				total += nread;
+				if (result == null) {
+					result = buf;
+				} else {
+					if (bufs == null) {
+						bufs = new ArrayList<>();
+						bufs.add(result);
+					}
+					bufs.add(buf);
+				}
+			}
+			// if the last call to read returned -1 or the number of bytes
+			// requested have been read then break
+		} while (n >= 0 && remaining > 0);
+
+		if (bufs == null) {
+			if (result == null) {
+				return new byte[0];
+			}
+			return result.length == total ?
+ result : Arrays.copyOf(result, total); + } + + result = new byte[total]; + int offset = 0; + remaining = total; + for (byte[] b : bufs) { + int count = Math.min(b.length, remaining); + System.arraycopy(b, 0, result, offset, count); + offset += count; + remaining -= count; + } + + return result; + } + + public int readNBytes(byte[] b, int off, int len) { + Objects.checkFromIndexSize(off, len, b.length); + + int n = 0; + while (n < len) { + int count = read(b, off + n, len - n); + if (count < 0) + break; + n += count; + } + return n; + } + + public long skip(long n) { + long remaining = n; + int nr; + + if (n <= 0) { + return 0; + } + + int size = (int)Math.min(MAX_SKIP_BUFFER_SIZE, remaining); + byte[] skipBuffer = new byte[size]; + while (remaining > 0) { + nr = read(skipBuffer, 0, (int)Math.min(size, remaining)); + if (nr < 0) { + break; + } + remaining -= nr; + } + + return n - remaining; + } + + public void skipNBytes(long n) { + if (n > 0) { + long ns = skip(n); + if (ns >= 0 && ns < n) { // skipped too few bytes + // adjust number to skip + n -= ns; + // read until requested number skipped or EOS reached + while (n > 0 && read() != -1) { + n--; + } + // if not enough skipped, then EOFE + if (n != 0) { + throw new IndexOutOfBoundsException(); + } + } else if (ns != n) { // skipped negative or too many bytes + throw new IllegalArgumentException("Unable to skip exactly"); + } + } + } + + public int available() { + return 0; + } + + public void close() {} + + public void mark(int readlimit) {} + + public void reset() { + throw new UnsupportedOperationException("mark/reset not supported"); + } + + public boolean markSupported() { + return false; + } + + public long transferTo(OutputStream out) { + Objects.requireNonNull(out, "out"); + long transferred = 0; + byte[] buffer = new byte[DEFAULT_BUFFER_SIZE]; + int read; + while ((read = this.read(buffer, 0, DEFAULT_BUFFER_SIZE)) >= 0) { + try { + out.write(buffer, 0, read); + } catch (IOException e) { + throw new IllegalStateException(e); + } + transferred += read; + } + return transferred; + } +} diff --git a/src/main/java/org/warp/commonutils/stream/SafeMeasurableInputStream.java b/src/main/java/org/warp/commonutils/stream/SafeMeasurableInputStream.java new file mode 100644 index 0000000..547f847 --- /dev/null +++ b/src/main/java/org/warp/commonutils/stream/SafeMeasurableInputStream.java @@ -0,0 +1,28 @@ +package org.warp.commonutils.stream; + +/* + * Copyright (C) 2005-2020 Sebastiano Vigna + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +import java.io.InputStream; + +/** An {@link InputStream} that implements also the {@link SafeMeasurableStream} interface. 
+ *
+ * @since 5.0.4
+ */
+
+public abstract class SafeMeasurableInputStream extends SafeInputStream implements SafeMeasurableStream {
+}
diff --git a/src/main/java/org/warp/commonutils/stream/SafeMeasurableOutputStream.java b/src/main/java/org/warp/commonutils/stream/SafeMeasurableOutputStream.java
new file mode 100644
index 0000000..fb67530
--- /dev/null
+++ b/src/main/java/org/warp/commonutils/stream/SafeMeasurableOutputStream.java
@@ -0,0 +1,27 @@
+package org.warp.commonutils.stream;
+
+/*
+ * Copyright (C) 2005-2020 Sebastiano Vigna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.OutputStream;
+
+/** An {@link OutputStream} that implements also the {@link SafeMeasurableStream} interface.
+ *
+ * @since 6.0.0
+ */
+
+public abstract class SafeMeasurableOutputStream extends SafeOutputStream implements SafeMeasurableStream {
+}
diff --git a/src/main/java/org/warp/commonutils/stream/SafeMeasurableStream.java b/src/main/java/org/warp/commonutils/stream/SafeMeasurableStream.java
new file mode 100644
index 0000000..a5db872
--- /dev/null
+++ b/src/main/java/org/warp/commonutils/stream/SafeMeasurableStream.java
@@ -0,0 +1,53 @@
+package org.warp.commonutils.stream;
+
+/*
+ * Copyright (C) 2005-2020 Sebastiano Vigna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+import it.unimi.dsi.fastutil.io.FastBufferedInputStream;
+import it.unimi.dsi.fastutil.io.FastBufferedOutputStream;
+import it.unimi.dsi.fastutil.io.MeasurableInputStream;
+
+/** A stream that provides eager access to its length,
+ * and keeps track of the current position (e.g., the number of bytes read so far, or the current
+ * position of the file pointer).
+ *
+ *

+ * This class has two methods, both specified as optional. This apparently bizarre
+ * behaviour is necessary because of wrapper classes which use reflection
+ * to support those methods (see, e.g., {@link MeasurableInputStream}, {@link FastBufferedInputStream} and {@link FastBufferedOutputStream}).
+ *
+ * @since 6.0.0
+ */
+
+public interface SafeMeasurableStream {
+
+	/** Returns the overall length of this stream (optional operation). In most cases, this will require the
+	 * stream to perform some extra action, possibly changing the state of the input stream itself (typically, reading
+	 * all the bytes up to the end, or flushing an output stream).
+	 * Implementing classes should always document what state the input stream will be in
+	 * after calling this method, and which kind of exception could be thrown.
+	 */
+	long length();
+
+	/** Returns the current position in this stream (optional operation).
+	 *
+	 *

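+	 * For example (a sketch): for a stream over a 100-byte file, {@code length()}
+	 * would return {@code 100}, and after ten bytes have been read {@code position()}
+	 * would typically return {@code 10}.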
+	 * Usually, the position is just the number of bytes read or written
+	 * since the stream was opened, but in the case of a
+	 * {@link it.unimi.dsi.fastutil.io.RepositionableStream} it
+	 * represents the current position.
+	 */
+	long position();
+}
diff --git a/src/main/java/org/warp/commonutils/stream/SafeOutputStream.java b/src/main/java/org/warp/commonutils/stream/SafeOutputStream.java
new file mode 100644
index 0000000..bc67d8e
--- /dev/null
+++ b/src/main/java/org/warp/commonutils/stream/SafeOutputStream.java
@@ -0,0 +1,169 @@
+package org.warp.commonutils.stream;
+
+import java.io.Closeable;
+import java.io.Flushable;
+import java.io.OutputStream;
+import java.util.Objects;
+import org.jetbrains.annotations.NotNull;
+
+/**
+ * This abstract class is the superclass of all classes representing
+ * an output stream of bytes. An output stream accepts output bytes
+ * and sends them to some sink.
+ *

+ * Applications that need to define a subclass of + * {@code OutputStream} must always provide at least a method + * that writes one byte of output. + * + * @author Arthur van Hoff + * @see java.io.BufferedOutputStream + * @see java.io.ByteArrayOutputStream + * @see java.io.DataOutputStream + * @see java.io.FilterOutputStream + * @see java.io.InputStream + * @see OutputStream#write(int) + * @since 1.0 + */ +public abstract class SafeOutputStream extends OutputStream implements Closeable, Flushable { + /** + * Constructor for subclasses to call. + */ + public SafeOutputStream() {} + + /** + * Returns a new {@code OutputStream} which discards all bytes. The + * returned stream is initially open. The stream is closed by calling + * the {@code close()} method. Subsequent calls to {@code close()} have + * no effect. + * + *

+	 * While the stream is open, the {@code write(int)}, {@code
+	 * write(byte[])}, and {@code write(byte[], int, int)} methods do nothing.
+	 * After the stream has been closed, these methods all throw an
+	 * {@code IllegalStateException} in this implementation.
+	 *
+	 *

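+	 * A usage sketch (the returned stream is typed as a plain
+	 * {@code java.io.OutputStream}, so the checked {@code IOException} it
+	 * declares must still be handled by the caller):
+	 * <pre>{@code
+	 * OutputStream sink = SafeOutputStream.nullOutputStream();
+	 * sink.write(new byte[512], 0, 512);   // accepted and silently discarded
+	 * sink.close();
+	 * }</pre>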
The {@code flush()} method does nothing. + * + * @return an {@code OutputStream} which discards all bytes + * + * @since 11 + */ + public static OutputStream nullOutputStream() { + return new OutputStream() { + private volatile boolean closed; + + private void ensureOpen() { + if (closed) { + throw new IllegalStateException("Stream closed"); + } + } + + @Override + public void write(int b) { + ensureOpen(); + } + + @Override + public void write(byte @NotNull [] b, int off, int len) { + Objects.checkFromIndexSize(off, len, b.length); + ensureOpen(); + } + + @Override + public void close() { + closed = true; + } + }; + } + + /** + * Writes the specified byte to this output stream. The general + * contract for {@code write} is that one byte is written + * to the output stream. The byte to be written is the eight + * low-order bits of the argument {@code b}. The 24 + * high-order bits of {@code b} are ignored. + *

+ * Subclasses of {@code OutputStream} must provide an + * implementation for this method. + * + * @param b the {@code byte}. + */ + public abstract void write(int b); + + /** + * Writes {@code b.length} bytes from the specified byte array + * to this output stream. The general contract for {@code write(b)} + * is that it should have exactly the same effect as the call + * {@code write(b, 0, b.length)}. + * + * @param b the data. + * @see OutputStream#write(byte[], int, int) + */ + public void write(byte @NotNull [] b) { + write(b, 0, b.length); + } + + /** + * Writes {@code len} bytes from the specified byte array + * starting at offset {@code off} to this output stream. + * The general contract for {@code write(b, off, len)} is that + * some of the bytes in the array {@code b} are written to the + * output stream in order; element {@code b[off]} is the first + * byte written and {@code b[off+len-1]} is the last byte written + * by this operation. + *

+ * The {@code write} method of {@code OutputStream} calls + * the write method of one argument on each of the bytes to be + * written out. Subclasses are encouraged to override this method and + * provide a more efficient implementation. + *

+ * If {@code b} is {@code null}, a + * {@code NullPointerException} is thrown. + *

+ * If {@code off} is negative, or {@code len} is negative, or + * {@code off+len} is greater than the length of the array + * {@code b}, then an {@code IndexOutOfBoundsException} is thrown. + * + * @param b the data. + * @param off the start offset in the data. + * @param len the number of bytes to write. + */ + public void write(byte[] b, int off, int len) { + Objects.checkFromIndexSize(off, len, b.length); + // len == 0 condition implicitly handled by loop bounds + for (int i = 0 ; i < len ; i++) { + write(b[off + i]); + } + } + + /** + * Flushes this output stream and forces any buffered output bytes + * to be written out. The general contract of {@code flush} is + * that calling it is an indication that, if any bytes previously + * written have been buffered by the implementation of the output + * stream, such bytes should immediately be written to their + * intended destination. + *

+ * If the intended destination of this stream is an abstraction provided by + * the underlying operating system, for example a file, then flushing the + * stream guarantees only that bytes previously written to the stream are + * passed to the operating system for writing; it does not guarantee that + * they are actually written to a physical device such as a disk drive. + *

+ * The {@code flush} method of {@code OutputStream} does nothing. + * + */ + public void flush() { + } + + /** + * Closes this output stream and releases any system resources + * associated with this stream. The general contract of {@code close} + * is that it closes the output stream. A closed stream cannot perform + * output operations and cannot be reopened. + *

+ * The {@code close} method of {@code OutputStream} does nothing. + * + */ + public void close() { + } +} diff --git a/src/main/java/org/warp/commonutils/stream/SafePushbackInputStream.java b/src/main/java/org/warp/commonutils/stream/SafePushbackInputStream.java new file mode 100644 index 0000000..ab8c929 --- /dev/null +++ b/src/main/java/org/warp/commonutils/stream/SafePushbackInputStream.java @@ -0,0 +1,332 @@ +package org.warp.commonutils.stream; + +/** + * A {@code PushbackInputStream} adds + * functionality to another input stream, namely + * the ability to "push back" or "unread" bytes, + * by storing pushed-back bytes in an internal buffer. + * This is useful in situations where + * it is convenient for a fragment of code + * to read an indefinite number of data bytes + * that are delimited by a particular byte + * value; after reading the terminating byte, + * the code fragment can "unread" it, so that + * the next read operation on the input stream + * will reread the byte that was pushed back. + * For example, bytes representing the characters + * constituting an identifier might be terminated + * by a byte representing an operator character; + * a method whose job is to read just an identifier + * can read until it sees the operator and + * then push the operator back to be re-read. + * + * @author David Connelly + * @author Jonathan Payne + * @since 1.0 + */ +public class SafePushbackInputStream extends SafeFilterInputStream { + /** + * The pushback buffer. + * @since 1.1 + */ + protected byte[] buf; + + /** + * The position within the pushback buffer from which the next byte will + * be read. When the buffer is empty, {@code pos} is equal to + * {@code buf.length}; when the buffer is full, {@code pos} is + * equal to zero. + * + * @since 1.1 + */ + protected int pos; + + /** + * Check to make sure that this stream has not been closed + */ + private void ensureOpen() { + if (in == null) + throw new IllegalStateException("Stream closed"); + } + + /** + * Creates a {@code PushbackInputStream} + * with a pushback buffer of the specified {@code size}, + * and saves its argument, the input stream + * {@code in}, for later use. Initially, + * the pushback buffer is empty. + * + * @param in the input stream from which bytes will be read. + * @param size the size of the pushback buffer. + * @throws IllegalArgumentException if {@code size <= 0} + * @since 1.1 + */ + public SafePushbackInputStream(SafeInputStream in, int size) { + super(in); + if (size <= 0) { + throw new IllegalArgumentException("size <= 0"); + } + this.buf = new byte[size]; + this.pos = size; + } + + /** + * Creates a {@code PushbackInputStream} + * with a 1-byte pushback buffer, and saves its argument, the input stream + * {@code in}, for later use. Initially, + * the pushback buffer is empty. + * + * @param in the input stream from which bytes will be read. + */ + public SafePushbackInputStream(SafeInputStream in) { + this(in, 1); + } + + /** + * Reads the next byte of data from this input stream. The value + * byte is returned as an {@code int} in the range + * {@code 0} to {@code 255}. If no byte is available + * because the end of the stream has been reached, the value + * {@code -1} is returned. This method blocks until input data + * is available, the end of the stream is detected, or an exception + * is thrown. + * + *

+    /**
+     * Reads the next byte of data from this input stream. The value
+     * byte is returned as an {@code int} in the range
+     * {@code 0} to {@code 255}. If no byte is available
+     * because the end of the stream has been reached, the value
+     * {@code -1} is returned. This method blocks until input data
+     * is available, the end of the stream is detected, or an exception
+     * is thrown.
+     *
+     * <p> This method returns the most recently pushed-back byte, if there is
+     * one, and otherwise calls the {@code read} method of its underlying
+     * input stream and returns whatever value that method returns.
+     *
+     * @return the next byte of data, or {@code -1} if the end of the
+     *         stream has been reached.
+     * @throws IllegalStateException if this stream has been closed by
+     *         invoking its {@link #close()} method.
+     * @see java.io.InputStream#read()
+     */
+    public int read() {
+        ensureOpen();
+        if (pos < buf.length) {
+            return buf[pos++] & 0xff;
+        }
+        return super.read();
+    }
+
+    /**
+     * Reads up to {@code len} bytes of data from this input stream into
+     * an array of bytes. This method first reads any pushed-back bytes; after
+     * that, if fewer than {@code len} bytes have been read then it
+     * reads from the underlying input stream. If {@code len} is not zero, the method
+     * blocks until at least 1 byte of input is available; otherwise, no
+     * bytes are read and {@code 0} is returned.
+     *
+     * @param b   the buffer into which the data is read.
+     * @param off the start offset in the destination array {@code b}
+     * @param len the maximum number of bytes read.
+     * @return the total number of bytes read into the buffer, or
+     *         {@code -1} if there is no more data because the end of
+     *         the stream has been reached.
+     * @throws NullPointerException If {@code b} is {@code null}.
+     * @throws IndexOutOfBoundsException If {@code off} is negative,
+     *         {@code len} is negative, or {@code len} is greater than
+     *         {@code b.length - off}
+     * @throws IllegalStateException if this stream has been closed by
+     *         invoking its {@link #close()} method.
+     * @see java.io.InputStream#read(byte[], int, int)
+     */
+    public int read(byte[] b, int off, int len) {
+        ensureOpen();
+        if (b == null) {
+            throw new NullPointerException();
+        } else if (off < 0 || len < 0 || len > b.length - off) {
+            throw new IndexOutOfBoundsException();
+        } else if (len == 0) {
+            return 0;
+        }
+
+        int avail = buf.length - pos;
+        if (avail > 0) {
+            if (len < avail) {
+                avail = len;
+            }
+            System.arraycopy(buf, pos, b, off, avail);
+            pos += avail;
+            off += avail;
+            len -= avail;
+        }
+        if (len > 0) {
+            len = super.read(b, off, len);
+            if (len == -1) {
+                return avail == 0 ? -1 : avail;
+            }
+            return avail + len;
+        }
+        return avail;
+    }
+
+    /**
+     * Pushes back a byte by copying it to the front of the pushback buffer.
+     * After this method returns, the next byte to be read will have the value
+     * {@code (byte)b}.
+     *
+     * @param b the {@code int} value whose low-order
+     *          byte is to be pushed back.
+     * @throws IllegalStateException if the pushback buffer is full,
+     *         or if this stream has been closed.
+     */
+    public void unread(int b) {
+        ensureOpen();
+        if (pos == 0) {
+            throw new IllegalStateException("Push back buffer is full");
+        }
+        buf[--pos] = (byte)b;
+    }
+
+    /**
+     * Pushes back a portion of an array of bytes by copying it to the front
+     * of the pushback buffer. After this method returns, the next byte to be
+     * read will have the value {@code b[off]}, the byte after that will
+     * have the value {@code b[off+1]}, and so forth.
+     *
+     * @param b   the byte array to push back.
+     * @param off the start offset of the data.
+     * @param len the number of bytes to push back.
+     * @throws NullPointerException If {@code b} is {@code null}.
+     * @throws IllegalStateException if there is not enough room in the
+     *         pushback buffer for the specified number of bytes,
+     *         or if this stream has been closed.
+     * @since 1.1
+     */
+    public void unread(byte[] b, int off, int len) {
+        ensureOpen();
+        if (len > pos) {
+            throw new IllegalStateException("Push back buffer is full");
+        }
+        pos -= len;
+        System.arraycopy(b, off, buf, pos, len);
+    }
+
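Note: `SafePushbackInputStream` is a port of `java.io.PushbackInputStream` with the checked `IOException`s replaced by unchecked exceptions, so the JDK class can be used to sketch the delimiter-scanning use case described in the class javadoc: read a run of digits, then unread the terminating operator so the next read sees it again. A minimal, self-contained sketch (class name and input are illustrative only):

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.PushbackInputStream;

public class PushbackDemo {
    public static void main(String[] args) throws IOException {
        // Scan "123+456": consume the digits of the first number,
        // then push the operator back so the next reader sees it.
        PushbackInputStream in =
                new PushbackInputStream(new ByteArrayInputStream("123+456".getBytes()));
        StringBuilder number = new StringBuilder();
        int b;
        while ((b = in.read()) != -1 && Character.isDigit(b)) {
            number.append((char) b);
        }
        if (b != -1) {
            in.unread(b); // not a digit: unread it so it can be re-read
        }
        System.out.println(number);           // 123
        System.out.println((char) in.read()); // +
    }
}
```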
+    /**
+     * Pushes back an array of bytes by copying it to the front of the
+     * pushback buffer. After this method returns, the next byte to be read
+     * will have the value {@code b[0]}, the byte after that will have the
+     * value {@code b[1]}, and so forth.
+     *
+     * @param b the byte array to push back
+     * @throws NullPointerException If {@code b} is {@code null}.
+     * @throws IllegalStateException if there is not enough room in the
+     *         pushback buffer for the whole array,
+     *         or if this stream has been closed.
+     * @since 1.1
+     */
+    public void unread(byte[] b) {
+        unread(b, 0, b.length);
+    }
+

+    /**
+     * Returns an estimate of the number of bytes that can be read (or
+     * skipped over) from this input stream without blocking by the next
+     * invocation of a method for this input stream. The next invocation might be
+     * the same thread or another thread. A single read or skip of this
+     * many bytes will not block, but may read or skip fewer bytes.
+     *
+     * <p> The method returns the sum of the number of bytes that have been
+     * pushed back and the value returned by {@link
+     * SafeFilterInputStream#available available}.
+     *
+     * @return the number of bytes that can be read (or skipped over) from
+     *         the input stream without blocking.
+     * @see SafeFilterInputStream#in
+     * @see java.io.InputStream#available()
+     */
+    public int available() {
+        ensureOpen();
+        int n = buf.length - pos;
+        int avail = super.available();
+        return n > (Integer.MAX_VALUE - avail)
+                ? Integer.MAX_VALUE
+                : n + avail;
+    }
+
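Note: `available()` reports the pushed-back byte count plus the delegate's own estimate, saturating at `Integer.MAX_VALUE` instead of overflowing. The accounting is easy to verify with the equivalent JDK class (a sketch, not part of this patch):

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.PushbackInputStream;

public class AvailableDemo {
    public static void main(String[] args) throws IOException {
        PushbackInputStream in = new PushbackInputStream(
                new ByteArrayInputStream(new byte[]{10, 20, 30}), 4);
        System.out.println(in.available()); // 3: nothing pushed back yet
        in.unread(new byte[]{1, 2});        // two bytes now in the pushback buffer
        System.out.println(in.available()); // 5 = 2 pushed back + 3 underlying
    }
}
```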

+    /**
+     * Skips over and discards {@code n} bytes of data from this
+     * input stream. The {@code skip} method may, for a variety of
+     * reasons, end up skipping over some smaller number of bytes,
+     * possibly zero. If {@code n} is negative, no bytes are skipped.
+     *
+     * <p> The {@code skip} method of {@code SafePushbackInputStream}
+     * first skips over the bytes in the pushback buffer, if any. It then
+     * calls the {@code skip} method of the underlying input stream if
+     * more bytes need to be skipped. The actual number of bytes skipped
+     * is returned.
+     *
+     * @param n {@inheritDoc}
+     * @return {@inheritDoc}
+     * @see SafeFilterInputStream#in
+     * @see java.io.InputStream#skip(long n)
+     * @since 1.2
+     */
+    public long skip(long n) {
+        ensureOpen();
+        if (n <= 0) {
+            return 0;
+        }
+
+        long pskip = buf.length - pos;
+        if (pskip > 0) {
+            if (n < pskip) {
+                pskip = n;
+            }
+            pos += pskip;
+            n -= pskip;
+        }
+        if (n > 0) {
+            pskip += super.skip(n);
+        }
+        return pskip;
+    }
+
+    /**
+     * Tests if this input stream supports the {@code mark} and
+     * {@code reset} methods, which it does not.
+     *
+     * @return {@code false}, since this class does not support the
+     *         {@code mark} and {@code reset} methods.
+     * @see java.io.InputStream#mark(int)
+     * @see java.io.InputStream#reset()
+     */
+    public boolean markSupported() {
+        return false;
+    }
+
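Note: `skip()` drains the pushback buffer before delegating to the underlying stream, so a single call can span both. A sketch with the JDK equivalent:

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.PushbackInputStream;

public class SkipDemo {
    public static void main(String[] args) throws IOException {
        PushbackInputStream in = new PushbackInputStream(
                new ByteArrayInputStream(new byte[]{'c', 'd'}), 2);
        in.unread(new byte[]{'a', 'b'});      // pushback buffer now holds "ab"
        System.out.println(in.skip(3));       // 3: drains 'a', 'b', then skips 'c'
        System.out.println((char) in.read()); // d
    }
}
```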

+    /**
+     * Marks the current position in this input stream.
+     *
+     * <p> The {@code mark} method of {@code SafePushbackInputStream}
+     * does nothing.
+     *
+     * @param readlimit the maximum limit of bytes that can be read before
+     *                  the mark position becomes invalid.
+     * @see java.io.InputStream#reset()
+     */
+    public void mark(int readlimit) {
+    }
+
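Note: because mark/reset are unsupported, callers that need one byte of lookahead should branch on `markSupported()` and fall back to `unread()`. A hedged sketch of that guard; the `peek` helper below is illustrative, not part of this patch:

```java
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.PushbackInputStream;

public class PeekDemo {
    // Returns the next byte without consuming it, choosing a strategy
    // based on what the stream actually supports.
    static int peek(InputStream in) throws IOException {
        if (in.markSupported()) {                  // e.g. BufferedInputStream
            in.mark(1);
            int b = in.read();
            in.reset();
            return b;
        }
        if (in instanceof PushbackInputStream p) { // pushback streams: unread instead
            int b = p.read();
            if (b != -1) {
                p.unread(b);
            }
            return b;
        }
        throw new IOException("stream supports neither mark/reset nor unread");
    }

    public static void main(String[] args) throws IOException {
        InputStream a = new BufferedInputStream(new ByteArrayInputStream("x".getBytes()));
        InputStream b = new PushbackInputStream(new ByteArrayInputStream("y".getBytes()));
        System.out.println((char) peek(a) + " then " + (char) a.read()); // x then x
        System.out.println((char) peek(b) + " then " + (char) b.read()); // y then y
    }
}
```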

+    /**
+     * Repositions this stream to the position at the time the
+     * {@code mark} method was last called on this input stream.
+     *
+     * <p> The method {@code reset} for class
+     * {@code SafePushbackInputStream} does nothing except throw an
+     * {@code UnsupportedOperationException}.
+     *
+     * @see java.io.InputStream#mark(int)
+     */
+    public void reset() {
+        throw new UnsupportedOperationException("mark/reset not supported");
+    }
+
+    /**
+     * Closes this input stream and releases any system resources
+     * associated with the stream.
+     * Once the stream has been closed, further {@code read()}, {@code unread()},
+     * {@code available()}, or {@code skip()} invocations will throw an
+     * {@code IllegalStateException}. Closing a previously closed stream
+     * has no effect.
+     */
+    public void close() {
+        if (in == null)
+            return;
+        in.close();
+        in = null;
+        buf = null;
+    }
+}
diff --git a/src/main/java/org/warp/commonutils/stream/SafeRepositionableStream.java b/src/main/java/org/warp/commonutils/stream/SafeRepositionableStream.java
new file mode 100644
index 0000000..e79f2df
--- /dev/null
+++ b/src/main/java/org/warp/commonutils/stream/SafeRepositionableStream.java
@@ -0,0 +1,40 @@
+package org.warp.commonutils.stream;
+
+/*
+ * Copyright (C) 2005-2020 Sebastiano Vigna
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+/** A basic interface specifying positioning methods for a byte stream.
+ *
+ * @author Sebastiano Vigna
+ * @since 4.4
+ */
+
+public interface SafeRepositionableStream {
+
+	/** Sets the current stream position.
+	 *
+	 * @param newPosition the new stream position.
+	 */
+	void position(long newPosition);
+
+	/** Returns the current stream position.
+	 *
+	 * @return the current stream position.
+	 */
+	long position();
+
+}
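Note: `SafeRepositionableStream` only fixes the positioning contract; a minimal in-memory implementation sketch (the `ByteArrayCursor` class is hypothetical, not part of this patch) shows how `position(long)` and `position()` are expected to pair up:

```java
import org.warp.commonutils.stream.SafeRepositionableStream;

// A cursor over a byte array implementing the positioning contract above.
public class ByteArrayCursor implements SafeRepositionableStream {
    private final byte[] data;
    private long position;

    public ByteArrayCursor(byte[] data) {
        this.data = data;
    }

    @Override
    public void position(long newPosition) {
        if (newPosition < 0 || newPosition > data.length) {
            throw new IndexOutOfBoundsException("position: " + newPosition);
        }
        this.position = newPosition;
    }

    @Override
    public long position() {
        return position;
    }

    // Reads one byte at the cursor, or -1 at end of data, like an InputStream.
    public int read() {
        return position < data.length ? data[(int) position++] & 0xff : -1;
    }
}
```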