(unfinished) Netty 5 refactoring

This commit is contained in:
parent 9984cfff73
commit 3b55e8bd24

pom.xml (9 changed lines)
@@ -32,6 +32,13 @@
 			<releases><enabled>false</enabled></releases>
 			<snapshots><enabled>true</enabled></snapshots>
 		</repository>
+		<repository>
+			<id>netty5-snapshots</id>
+			<name>Netty 5 snapshots</name>
+			<url>https://oss.sonatype.org/content/repositories/snapshots</url>
+			<releases><enabled>true</enabled></releases>
+			<snapshots><enabled>true</enabled></snapshots>
+		</repository>
 	</repositories>
 	<pluginRepositories>
 		<pluginRepository>
@@ -245,7 +252,7 @@
 		<dependency>
 			<groupId>io.netty</groupId>
 			<artifactId>netty-buffer</artifactId>
-			<version>4.1.63.Final</version>
+			<version>5.0.0.Final-SNAPSHOT</version>
 		</dependency>
 		<dependency>
 			<groupId>javax.xml.bind</groupId>
CompositeDatabase.java
@@ -1,6 +1,6 @@
 package it.cavallium.dbengine.client;
 
-import io.netty.buffer.ByteBufAllocator;
+import io.netty.buffer.api.BufferAllocator;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 
@@ -18,7 +18,7 @@ public interface CompositeDatabase {
 	 */
 	Mono<Void> releaseSnapshot(CompositeSnapshot snapshot);
 
-	ByteBufAllocator getAllocator();
+	BufferAllocator getAllocator();
 
 	/**
 	 * Find corrupted items
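Both hunks above swap io.netty.buffer.ByteBufAllocator for the Netty 5 io.netty.buffer.api.BufferAllocator, which also changes the buffer lifecycle for every caller: buffers are single-owner resources that are closed, not reference-counted. A minimal lifecycle sketch using only calls that appear elsewhere in this commit (the class and method names of the sketch itself are illustrative):

	import io.netty.buffer.api.Buffer;
	import io.netty.buffer.api.BufferAllocator;

	class AllocatorUsageSketch {
		// Old API: alloc.buffer() returned a reference-counted ByteBuf that had
		// to be release()d. New API: Buffer is a closeable single-owner resource.
		static byte[] writeAndRead(BufferAllocator alloc) {
			try (Buffer buf = alloc.allocate(2)) { // freed when the try block exits
				buf.writeByte((byte) 1);
				buf.writeByte((byte) 2);
				byte[] out = new byte[buf.readableBytes()];
				buf.copyInto(buf.readerOffset(), out, 0, out.length);
				return out;
			}
		}
	}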
MappedSerializer.java
@@ -1,23 +1,23 @@
 package it.cavallium.dbengine.client;
 
-import io.netty.buffer.ByteBuf;
+import io.netty.buffer.api.Buffer;
 import it.cavallium.dbengine.database.serialization.SerializationException;
 import it.cavallium.dbengine.database.serialization.Serializer;
 import org.jetbrains.annotations.NotNull;
 
-public class MappedSerializer<A, B> implements Serializer<B, ByteBuf> {
+public class MappedSerializer<A, B> implements Serializer<B, Buffer> {
 
-	private final Serializer<A, ByteBuf> serializer;
+	private final Serializer<A, Buffer> serializer;
 	private final Mapper<A, B> keyMapper;
 
-	public MappedSerializer(Serializer<A, ByteBuf> serializer,
+	public MappedSerializer(Serializer<A, Buffer> serializer,
 			Mapper<A, B> keyMapper) {
 		this.serializer = serializer;
 		this.keyMapper = keyMapper;
 	}
 
 	@Override
-	public @NotNull B deserialize(@NotNull ByteBuf serialized) throws SerializationException {
+	public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
 		try {
 			return keyMapper.map(serializer.deserialize(serialized.retain()));
 		} finally {
@@ -26,7 +26,7 @@ public class MappedSerializer<A, B> implements Serializer<B, ByteBuf> {
 	}
 
 	@Override
-	public @NotNull ByteBuf serialize(@NotNull B deserialized) throws SerializationException {
+	public @NotNull Buffer serialize(@NotNull B deserialized) throws SerializationException {
 		return serializer.serialize(keyMapper.unmap(deserialized));
 	}
 }
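Note that serialized.retain() survives as unchanged context inside deserialize, but the Netty 5 Buffer has no retain(); as committed this class cannot compile, which is part of what the "(unfinished)" title refers to. The same applies to MappedSerializerFixedLength below. One possible shape for the finished method, shown only as a hypothetical sketch (not part of the commit):

	@Override
	public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
		// Hypothetical: take ownership of the buffer and close it after use,
		// instead of the old reference-counted retain()/release() pair.
		try (serialized) {
			return keyMapper.map(serializer.deserialize(serialized));
		}
	}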
MappedSerializerFixedLength.java
@@ -1,23 +1,23 @@
 package it.cavallium.dbengine.client;
 
-import io.netty.buffer.ByteBuf;
+import io.netty.buffer.api.Buffer;
 import it.cavallium.dbengine.database.serialization.SerializationException;
 import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
 import org.jetbrains.annotations.NotNull;
 
-public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryLength<B, ByteBuf> {
+public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryLength<B, Buffer> {
 
-	private final SerializerFixedBinaryLength<A, ByteBuf> fixedLengthSerializer;
+	private final SerializerFixedBinaryLength<A, Buffer> fixedLengthSerializer;
 	private final Mapper<A, B> keyMapper;
 
-	public MappedSerializerFixedLength(SerializerFixedBinaryLength<A, ByteBuf> fixedLengthSerializer,
+	public MappedSerializerFixedLength(SerializerFixedBinaryLength<A, Buffer> fixedLengthSerializer,
 			Mapper<A, B> keyMapper) {
 		this.fixedLengthSerializer = fixedLengthSerializer;
 		this.keyMapper = keyMapper;
 	}
 
 	@Override
-	public @NotNull B deserialize(@NotNull ByteBuf serialized) throws SerializationException {
+	public @NotNull B deserialize(@NotNull Buffer serialized) throws SerializationException {
 		try {
 			return keyMapper.map(fixedLengthSerializer.deserialize(serialized.retain()));
 		} finally {
@@ -26,7 +26,7 @@ public class MappedSerializerFixedLength<A, B> implements SerializerFixedBinaryL
 	}
 
 	@Override
-	public @NotNull ByteBuf serialize(@NotNull B deserialized) throws SerializationException {
+	public @NotNull Buffer serialize(@NotNull B deserialized) throws SerializationException {
 		return fixedLengthSerializer.serialize(keyMapper.unmap(deserialized));
 	}
 }
Delta.java
@@ -3,9 +3,46 @@ package it.cavallium.dbengine.database;
 import java.util.Objects;
 import org.jetbrains.annotations.Nullable;
 
-public record Delta<T>(@Nullable T previous, @Nullable T current) {
+public class Delta<T> {
+
+	private final @Nullable T previous;
+	private final @Nullable T current;
+
+	public Delta(@Nullable T previous, @Nullable T current) {
+		this.previous = previous;
+		this.current = current;
+	}
 
 	public boolean isModified() {
 		return !Objects.equals(previous, current);
 	}
+
+	public @Nullable T previous() {
+		return previous;
+	}
+
+	public @Nullable T current() {
+		return current;
+	}
+
+	@Override
+	public boolean equals(Object obj) {
+		if (obj == this)
+			return true;
+		if (obj == null || obj.getClass() != this.getClass())
+			return false;
+		var that = (Delta) obj;
+		return Objects.equals(this.previous, that.previous) && Objects.equals(this.current, that.current);
+	}
+
+	@Override
+	public int hashCode() {
+		return Objects.hash(previous, current);
+	}
+
+	@Override
+	public String toString() {
+		return "Delta[" + "previous=" + previous + ", " + "current=" + current + ']';
+	}
+
 }
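Delta is converted from a record to an equivalent plain class; the hand-written constructor, accessors, equals, hashCode and toString reproduce the forms a record would generate, so existing callers should see no behavioral change. An illustrative check of the preserved contract (not part of the commit; assumes it runs in the same package as Delta):

	class DeltaContractCheck {
		public static void main(String[] args) {
			var unchanged = new Delta<>("a", "a");
			var changed = new Delta<>("a", "b");
			assert !unchanged.isModified();
			assert changed.isModified();
			// equals/hashCode/toString mirror the old record-generated forms
			assert changed.equals(new Delta<>("a", "b"));
			assert changed.toString().equals("Delta[previous=a, current=b]");
		}
	}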
LLDatabaseConnection.java
@@ -1,6 +1,6 @@
 package it.cavallium.dbengine.database;
 
-import io.netty.buffer.ByteBufAllocator;
+import io.netty.buffer.api.BufferAllocator;
 import it.cavallium.dbengine.client.DatabaseOptions;
 import it.cavallium.dbengine.client.IndicizerAnalyzers;
 import it.cavallium.dbengine.client.IndicizerSimilarities;
@@ -11,7 +11,7 @@ import reactor.core.publisher.Mono;
 @SuppressWarnings("UnusedReturnValue")
 public interface LLDatabaseConnection {
 
-	ByteBufAllocator getAllocator();
+	BufferAllocator getAllocator();
 
 	Mono<? extends LLDatabaseConnection> connect();
 
src/main/java/it/cavallium/dbengine/database/LLDelta.java (new file, 129 lines)
@@ -0,0 +1,129 @@
+package it.cavallium.dbengine.database;
+
+import io.netty.buffer.api.Buffer;
+import io.netty.buffer.api.Drop;
+import io.netty.buffer.api.Owned;
+import io.netty.buffer.api.Send;
+import io.netty.buffer.api.internal.ResourceSupport;
+import java.util.StringJoiner;
+import org.jetbrains.annotations.Nullable;
+
+public class LLDelta extends ResourceSupport<LLDelta, LLDelta> {
+	@Nullable
+	private final Buffer previous;
+	@Nullable
+	private final Buffer current;
+
+	private LLDelta(@Nullable Send<Buffer> previous, @Nullable Send<Buffer> current, Drop<LLDelta> drop) {
+		super(new LLDelta.CloseOnDrop(drop));
+		assert isAllAccessible();
+		this.previous = previous != null ? previous.receive().makeReadOnly() : null;
+		this.current = current != null ? current.receive().makeReadOnly() : null;
+	}
+
+	private boolean isAllAccessible() {
+		assert previous == null || previous.isAccessible();
+		assert current == null || current.isAccessible();
+		assert this.isAccessible();
+		assert this.isOwned();
+		return true;
+	}
+
+	public static LLDelta of(Send<Buffer> min, Send<Buffer> max) {
+		return new LLDelta(min, max, d -> {});
+	}
+
+	public Send<Buffer> previous() {
+		ensureOwned();
+		return previous != null ? previous.copy().send() : null;
+	}
+
+	public Send<Buffer> current() {
+		ensureOwned();
+		return current != null ? current.copy().send() : null;
+	}
+
+	public boolean isModified() {
+		return !LLUtils.equals(previous, current);
+	}
+
+	private void ensureOwned() {
+		assert isAllAccessible();
+		if (!isOwned()) {
+			if (!isAccessible()) {
+				throw this.createResourceClosedException();
+			} else {
+				throw new IllegalStateException("Resource not owned");
+			}
+		}
+	}
+
+	@Override
+	public boolean equals(Object o) {
+		if (this == o) {
+			return true;
+		}
+		if (o == null || getClass() != o.getClass()) {
+			return false;
+		}
+		LLDelta LLDelta = (LLDelta) o;
+		return LLUtils.equals(previous, LLDelta.previous) && LLUtils.equals(current, LLDelta.current);
+	}
+
+	@Override
+	public int hashCode() {
+		int result = LLUtils.hashCode(previous);
+		result = 31 * result + LLUtils.hashCode(current);
+		return result;
+	}
+
+	@Override
+	public String toString() {
+		return new StringJoiner(", ", LLDelta.class.getSimpleName() + "[", "]")
+				.add("min=" + LLUtils.toString(previous))
+				.add("max=" + LLUtils.toString(current))
+				.toString();
+	}
+
+	public LLDelta copy() {
+		ensureOwned();
+		return new LLDelta(previous != null ? previous.copy().send() : null,
+				current != null ? current.copy().send() : null,
+				d -> {}
+		);
+	}
+
+	@Override
+	protected RuntimeException createResourceClosedException() {
+		return new IllegalStateException("Closed");
+	}
+
+	@Override
+	protected Owned<LLDelta> prepareSend() {
+		Send<Buffer> minSend;
+		Send<Buffer> maxSend;
+		minSend = this.previous != null ? this.previous.send() : null;
+		maxSend = this.current != null ? this.current.send() : null;
+		return drop -> new LLDelta(minSend, maxSend, drop);
+	}
+
+	private static class CloseOnDrop implements Drop<LLDelta> {
+
+		private final Drop<LLDelta> delegate;
+
+		public CloseOnDrop(Drop<LLDelta> drop) {
+			this.delegate = drop;
+		}
+
+		@Override
+		public void drop(LLDelta obj) {
+			if (obj.previous != null) {
+				obj.previous.close();
+			}
+			if (obj.current != null) {
+				obj.current.close();
+			}
+			delegate.drop(obj);
+		}
+	}
+}
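LLDelta carries the previous/current buffer pair under the new ownership model: the constructor receives buffers through Send and makes them read-only, the accessors hand out defensive copies, and CloseOnDrop closes both buffers when the resource is dropped (the min=/max= labels in toString look carried over from LLRange). A usage sketch with illustrative names:

	import io.netty.buffer.api.Buffer;
	import io.netty.buffer.api.BufferAllocator;
	import io.netty.buffer.api.Send;
	import it.cavallium.dbengine.database.LLDelta;

	class LLDeltaUsageSketch {
		static boolean wasModified(BufferAllocator alloc) {
			Send<Buffer> prev = alloc.allocate(1).writeByte((byte) 1).send();
			Send<Buffer> curr = alloc.allocate(1).writeByte((byte) 2).send();
			LLDelta delta = LLDelta.of(prev, curr); // takes ownership of both sends
			try {
				return delta.isModified(); // true: the contents differ
			} finally {
				delta.close(); // CloseOnDrop closes previous and current
			}
		}
	}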
LLDictionary.java
@@ -1,7 +1,8 @@
 package it.cavallium.dbengine.database;
 
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufAllocator;
+import io.netty.buffer.api.Buffer;
+import io.netty.buffer.api.BufferAllocator;
+import io.netty.buffer.api.Send;
 import it.cavallium.dbengine.client.BadBlock;
 import it.cavallium.dbengine.database.serialization.BiSerializationFunction;
 import it.cavallium.dbengine.database.serialization.SerializationFunction;
@@ -23,89 +24,90 @@ public interface LLDictionary extends LLKeyValueDatabaseStructure {
 
 	String getColumnName();
 
-	ByteBufAllocator getAllocator();
+	BufferAllocator getAllocator();
 
-	Mono<ByteBuf> get(@Nullable LLSnapshot snapshot, Mono<ByteBuf> key, boolean existsAlmostCertainly);
+	Mono<Send<Buffer>> get(@Nullable LLSnapshot snapshot, Mono<Send<Buffer>> key, boolean existsAlmostCertainly);
 
-	default Mono<ByteBuf> get(@Nullable LLSnapshot snapshot, Mono<ByteBuf> key) {
+	default Mono<Send<Buffer>> get(@Nullable LLSnapshot snapshot, Mono<Send<Buffer>> key) {
 		return get(snapshot, key, false);
 	}
 
-	Mono<ByteBuf> put(Mono<ByteBuf> key, Mono<ByteBuf> value, LLDictionaryResultType resultType);
+	Mono<Send<Buffer>> put(Mono<Send<Buffer>> key, Mono<Send<Buffer>> value, LLDictionaryResultType resultType);
 
 	Mono<UpdateMode> getUpdateMode();
 
-	default Mono<ByteBuf> update(Mono<ByteBuf> key,
-			SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater,
+	default Mono<Send<Buffer>> update(Mono<Send<Buffer>> key,
+			SerializationFunction<@Nullable Send<Buffer>, @Nullable Send<Buffer>> updater,
 			UpdateReturnMode updateReturnMode,
 			boolean existsAlmostCertainly) {
 		return this
 				.updateAndGetDelta(key, updater, existsAlmostCertainly)
-				.transform(prev -> LLUtils.resolveDelta(prev, updateReturnMode));
+				.transform(prev -> LLUtils.resolveLLDelta(prev, updateReturnMode));
 	}
 
-	default Mono<ByteBuf> update(Mono<ByteBuf> key,
-			SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater,
+	default Mono<Send<Buffer>> update(Mono<Send<Buffer>> key,
+			SerializationFunction<@Nullable Send<Buffer>, @Nullable Send<Buffer>> updater,
 			UpdateReturnMode returnMode) {
 		return update(key, updater, returnMode, false);
 	}
 
-	Mono<Delta<ByteBuf>> updateAndGetDelta(Mono<ByteBuf> key,
-			SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater,
+	Mono<LLDelta> updateAndGetDelta(Mono<Send<Buffer>> key,
+			SerializationFunction<@Nullable Send<Buffer>, @Nullable Send<Buffer>> updater,
 			boolean existsAlmostCertainly);
 
-	default Mono<Delta<ByteBuf>> updateAndGetDelta(Mono<ByteBuf> key,
-			SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater) {
+	default Mono<LLDelta> updateAndGetDelta(Mono<Send<Buffer>> key,
+			SerializationFunction<@Nullable Send<Buffer>, @Nullable Send<Buffer>> updater) {
 		return updateAndGetDelta(key, updater, false);
 	}
 
 	Mono<Void> clear();
 
-	Mono<ByteBuf> remove(Mono<ByteBuf> key, LLDictionaryResultType resultType);
+	Mono<Send<Buffer>> remove(Mono<Send<Buffer>> key, LLDictionaryResultType resultType);
 
-	<K> Flux<Tuple3<K, ByteBuf, Optional<ByteBuf>>> getMulti(@Nullable LLSnapshot snapshot,
-			Flux<Tuple2<K, ByteBuf>> keys,
+	<K> Flux<Tuple3<K, Send<Buffer>, Optional<Send<Buffer>>>> getMulti(@Nullable LLSnapshot snapshot,
+			Flux<Tuple2<K, Send<Buffer>>> keys,
 			boolean existsAlmostCertainly);
 
-	default <K> Flux<Tuple3<K, ByteBuf, Optional<ByteBuf>>> getMulti(@Nullable LLSnapshot snapshot, Flux<Tuple2<K, ByteBuf>> keys) {
+	default <K> Flux<Tuple3<K, Send<Buffer>, Optional<Send<Buffer>>>> getMulti(@Nullable LLSnapshot snapshot,
+			Flux<Tuple2<K, Send<Buffer>>> keys) {
 		return getMulti(snapshot, keys, false);
 	}
 
-	Flux<LLEntry> putMulti(Flux<LLEntry> entries, boolean getOldValues);
+	Flux<Send<LLEntry>> putMulti(Flux<Send<LLEntry>> entries, boolean getOldValues);
 
-	<X> Flux<ExtraKeyOperationResult<ByteBuf, X>> updateMulti(Flux<Tuple2<ByteBuf, X>> entries,
-			BiSerializationFunction<ByteBuf, X, ByteBuf> updateFunction);
+	<X> Flux<ExtraKeyOperationResult<Send<Buffer>, X>> updateMulti(Flux<Tuple2<Send<Buffer>, X>> entries,
+			BiSerializationFunction<Send<Buffer>, X, Send<Buffer>> updateFunction);
 
-	Flux<LLEntry> getRange(@Nullable LLSnapshot snapshot, Mono<LLRange> range, boolean existsAlmostCertainly);
+	Flux<Send<LLEntry>> getRange(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range, boolean existsAlmostCertainly);
 
-	default Flux<LLEntry> getRange(@Nullable LLSnapshot snapshot, Mono<LLRange> range) {
+	default Flux<Send<LLEntry>> getRange(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range) {
 		return getRange(snapshot, range, false);
 	}
 
-	Flux<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
-			Mono<LLRange> range,
+	Flux<List<Send<LLEntry>>> getRangeGrouped(@Nullable LLSnapshot snapshot,
+			Mono<Send<LLRange>> range,
 			int prefixLength,
 			boolean existsAlmostCertainly);
 
-	default Flux<List<LLEntry>> getRangeGrouped(@Nullable LLSnapshot snapshot,
-			Mono<LLRange> range,
+	default Flux<List<Send<LLEntry>>> getRangeGrouped(@Nullable LLSnapshot snapshot,
+			Mono<Send<LLRange>> range,
 			int prefixLength) {
 		return getRangeGrouped(snapshot, range, prefixLength, false);
 	}
 
-	Flux<ByteBuf> getRangeKeys(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
+	Flux<Send<Buffer>> getRangeKeys(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range);
 
-	Flux<List<ByteBuf>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, Mono<LLRange> range, int prefixLength);
+	Flux<List<Send<Buffer>>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range, int prefixLength);
 
-	Flux<ByteBuf> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono<LLRange> range, int prefixLength);
+	Flux<Send<Buffer>> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range, int prefixLength);
 
-	Flux<BadBlock> badBlocks(Mono<LLRange> range);
+	Flux<BadBlock> badBlocks(Mono<Send<LLRange>> range);
 
-	Mono<Void> setRange(Mono<LLRange> range, Flux<LLEntry> entries);
+	Mono<Void> setRange(Mono<Send<LLRange>> range, Flux<Send<LLEntry>> entries);
 
-	default Mono<Void> replaceRange(Mono<LLRange> range,
+	default Mono<Void> replaceRange(Mono<Send<LLRange>> range,
 			boolean canKeysChange,
-			Function<LLEntry, Mono<LLEntry>> entriesReplacer,
+			Function<Send<LLEntry>, Mono<Send<LLEntry>>> entriesReplacer,
 			boolean existsAlmostCertainly) {
 		return Mono.defer(() -> {
 			if (canKeysChange) {
@@ -124,19 +126,19 @@ public interface LLDictionary extends LLKeyValueDatabaseStructure {
 		});
 	}
 
-	default Mono<Void> replaceRange(Mono<LLRange> range,
+	default Mono<Void> replaceRange(Mono<Send<LLRange>> range,
 			boolean canKeysChange,
-			Function<LLEntry, Mono<LLEntry>> entriesReplacer) {
+			Function<Send<LLEntry>, Mono<Send<LLEntry>>> entriesReplacer) {
 		return replaceRange(range, canKeysChange, entriesReplacer, false);
 	}
 
-	Mono<Boolean> isRangeEmpty(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
+	Mono<Boolean> isRangeEmpty(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range);
 
-	Mono<Long> sizeRange(@Nullable LLSnapshot snapshot, Mono<LLRange> range, boolean fast);
+	Mono<Long> sizeRange(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range, boolean fast);
 
-	Mono<LLEntry> getOne(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
+	Mono<Send<LLEntry>> getOne(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range);
 
-	Mono<ByteBuf> getOneKey(@Nullable LLSnapshot snapshot, Mono<LLRange> range);
+	Mono<Send<Buffer>> getOneKey(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> range);
 
-	Mono<LLEntry> removeOne(Mono<LLRange> range);
+	Mono<Send<LLEntry>> removeOne(Mono<Send<LLRange>> range);
 }
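Every ByteBuf in this interface becomes Send<Buffer>, and LLEntry/LLRange are likewise wrapped in Send, so keys, values and ranges can hop between reactive pipeline threads with single-owner transfer semantics instead of reference counts. A caller-side sketch for the new get (illustrative; assumes an LLDictionary instance):

	import io.netty.buffer.api.Buffer;
	import io.netty.buffer.api.BufferAllocator;
	import io.netty.buffer.api.Send;
	import it.cavallium.dbengine.database.LLDictionary;
	import reactor.core.publisher.Mono;

	class DictionaryGetSketch {
		static Mono<byte[]> readValue(LLDictionary dict, BufferAllocator alloc, byte[] key) {
			Mono<Send<Buffer>> keyMono = Mono.fromCallable(() ->
					alloc.allocate(key.length).writeBytes(key).send()); // ownership moves into the pipeline
			return dict.get(null, keyMono).map(send -> {
				try (Buffer value = send.receive()) { // claim ownership, auto-close
					byte[] out = new byte[value.readableBytes()];
					value.copyInto(value.readerOffset(), out, 0, out.length);
					return out;
				}
			});
		}
	}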
LLEntry.java
@@ -1,74 +1,127 @@
 package it.cavallium.dbengine.database;
 
-import io.netty.buffer.ByteBuf;
-import io.netty.util.IllegalReferenceCountException;
-import java.util.Map;
-import java.util.concurrent.atomic.AtomicInteger;
-import org.warp.commonutils.log.Logger;
-import org.warp.commonutils.log.LoggerFactory;
+import io.netty.buffer.api.Buffer;
+import io.netty.buffer.api.Drop;
+import io.netty.buffer.api.Owned;
+import io.netty.buffer.api.Send;
+import io.netty.buffer.api.internal.ResourceSupport;
+import java.util.StringJoiner;
+import org.jetbrains.annotations.NotNull;
 
-public class LLEntry {
-
-	private static final Logger logger = LoggerFactory.getLogger(LLEntry.class);
-
-	private final AtomicInteger refCnt = new AtomicInteger(1);
-
-	private final ByteBuf key;
-	private final ByteBuf value;
-
-	public LLEntry(ByteBuf key, ByteBuf value) {
-		try {
-			this.key = key.retain();
-			this.value = value.retain();
-		} finally {
-			key.release();
-			value.release();
-		}
-	}
-
-	public ByteBuf getKey() {
-		if (refCnt.get() <= 0) {
-			throw new IllegalReferenceCountException(refCnt.get());
-		}
-	}
-
-	public ByteBuf getValue() {
-		if (refCnt.get() <= 0) {
-			throw new IllegalReferenceCountException(refCnt.get());
-		}
-	}
-
-	public void retain() {
-		if (refCnt.getAndIncrement() <= 0) {
-			throw new IllegalReferenceCountException(refCnt.get(), 1);
-		}
-		key.retain();
-		value.retain();
-	}
-
-	public void release() {
-		if (refCnt.decrementAndGet() < 0) {
-			throw new IllegalReferenceCountException(refCnt.get(), -1);
-		}
-		if (key.refCnt() > 0) {
-			key.release();
-		}
-		if (value.refCnt() > 0) {
-			value.release();
-		}
-	}
-
-	public boolean isReleased() {
-		return refCnt.get() <= 0;
-	}
-
-	@Override
-	protected void finalize() throws Throwable {
-		if (refCnt.get() > 0) {
-			logger.warn(this.getClass().getName() + "::release has not been called!");
-		}
-		super.finalize();
-	}
-}
+public class LLEntry extends ResourceSupport<LLEntry, LLEntry> {
+	@NotNull
+	private final Buffer key;
+	@NotNull
+	private final Buffer value;
+
+	private LLEntry(Send<Buffer> key, Send<Buffer> value, Drop<LLEntry> drop) {
+		super(new LLEntry.CloseOnDrop(drop));
+		assert isAllAccessible();
+		this.key = key.receive().makeReadOnly();
+		this.value = value.receive().makeReadOnly();
+	}
+
+	private boolean isAllAccessible() {
+		assert key.isAccessible();
+		assert value.isAccessible();
+		assert this.isAccessible();
+		assert this.isOwned();
+		return true;
+	}
+
+	public static LLEntry of(Send<Buffer> key, Send<Buffer> value) {
+		return new LLEntry(key, value, d -> {});
+	}
+
+	public Send<Buffer> getKey() {
+		ensureOwned();
+		return key.copy().send();
+	}
+
+	public Buffer getKeyUnsafe() {
+		return key;
+	}
+
+	public Send<Buffer> getValue() {
+		ensureOwned();
+		return value.copy().send();
+	}
+
+	public Buffer getValueUnsafe() {
+		return value;
+	}
+
+	private void ensureOwned() {
+		assert isAllAccessible();
+		if (!isOwned()) {
+			if (!isAccessible()) {
+				throw this.createResourceClosedException();
+			} else {
+				throw new IllegalStateException("Resource not owned");
+			}
+		}
+	}
+
+	@Override
+	public boolean equals(Object o) {
+		if (this == o) {
+			return true;
+		}
+		if (o == null || getClass() != o.getClass()) {
+			return false;
+		}
+		LLEntry LLEntry = (LLEntry) o;
+		return LLUtils.equals(key, LLEntry.key) && LLUtils.equals(value, LLEntry.value);
+	}
+
+	@Override
+	public int hashCode() {
+		int result = LLUtils.hashCode(key);
+		result = 31 * result + LLUtils.hashCode(value);
+		return result;
+	}
+
+	@Override
+	public String toString() {
+		return new StringJoiner(", ", LLEntry.class.getSimpleName() + "[", "]")
+				.add("key=" + LLUtils.toString(key))
+				.add("value=" + LLUtils.toString(value))
+				.toString();
+	}
+
+	public LLEntry copy() {
+		ensureOwned();
+		return new LLEntry(key.copy().send(), value.copy().send(), d -> {});
+	}
+
+	@Override
+	protected RuntimeException createResourceClosedException() {
+		return new IllegalStateException("Closed");
+	}
+
+	@Override
+	protected Owned<LLEntry> prepareSend() {
+		Send<Buffer> keySend;
+		Send<Buffer> valueSend;
+		keySend = this.key.send();
+		valueSend = this.value.send();
+		return drop -> new LLEntry(keySend, valueSend, drop);
+	}
+
+	private static class CloseOnDrop implements Drop<LLEntry> {
+
+		private final Drop<LLEntry> delegate;
+
+		public CloseOnDrop(Drop<LLEntry> drop) {
+			this.delegate = drop;
+		}
+
+		@Override
+		public void drop(LLEntry obj) {
+			obj.key.close();
+			obj.value.close();
+			delegate.drop(obj);
+		}
+	}
+}
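LLEntry follows the same ResourceSupport pattern as LLDelta: construction consumes two Send<Buffer>, getKey/getValue return copies, and the Unsafe getters expose the internal read-only buffers without transferring ownership. A transfer sketch (illustrative):

	import io.netty.buffer.api.Buffer;
	import io.netty.buffer.api.BufferAllocator;
	import io.netty.buffer.api.Send;
	import it.cavallium.dbengine.database.LLEntry;

	class LLEntrySketch {
		static void roundTrip(BufferAllocator alloc) {
			LLEntry entry = LLEntry.of(
					alloc.allocate(1).writeByte((byte) 'k').send(),
					alloc.allocate(1).writeByte((byte) 'v').send());
			// send() invokes prepareSend(): ownership moves, the original becomes unusable
			Send<LLEntry> transfer = entry.send();
			try (LLEntry received = transfer.receive()) {
				Buffer key = received.getKeyUnsafe(); // read-only view, no transfer
				assert key.readableBytes() == 1;
			} // drop closes both key and value
		}
	}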
LLKeyValueDatabase.java
@@ -2,7 +2,7 @@ package it.cavallium.dbengine.database;
 
 import com.google.common.primitives.Ints;
 import com.google.common.primitives.Longs;
-import io.netty.buffer.ByteBufAllocator;
+import io.netty.buffer.api.BufferAllocator;
 import it.cavallium.dbengine.database.collections.DatabaseInt;
 import it.cavallium.dbengine.database.collections.DatabaseLong;
 import java.nio.charset.StandardCharsets;
@@ -46,7 +46,7 @@ public interface LLKeyValueDatabase extends LLSnapshottable, LLKeyValueDatabaseS
 
 	Mono<Void> verifyChecksum();
 
-	ByteBufAllocator getAllocator();
+	BufferAllocator getAllocator();
 
 	Mono<Void> close();
 }
LLRange.java
@@ -1,117 +1,146 @@
 package it.cavallium.dbengine.database;
 
-import static io.netty.buffer.Unpooled.wrappedBuffer;
-import static io.netty.buffer.Unpooled.wrappedUnmodifiableBuffer;
-
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufUtil;
-import io.netty.util.IllegalReferenceCountException;
-import java.util.Arrays;
+import io.netty.buffer.api.Buffer;
+import io.netty.buffer.api.Drop;
+import io.netty.buffer.api.Owned;
+import io.netty.buffer.api.Send;
+import io.netty.buffer.api.internal.ResourceSupport;
 import java.util.StringJoiner;
-import java.util.concurrent.atomic.AtomicInteger;
 
 /**
  * Range of data, from min (inclusive) to max (exclusive)
  */
-public class LLRange {
+public class LLRange extends ResourceSupport<LLRange, LLRange> {
 
-	private static final LLRange RANGE_ALL = new LLRange(null, null, false);
-	private final ByteBuf min;
-	private final ByteBuf max;
-	private final boolean releasable;
-	private final AtomicInteger refCnt = new AtomicInteger(1);
+	private static final LLRange RANGE_ALL = new LLRange(null, null, null, d -> {});
+	private Buffer min;
+	private Buffer max;
+	private Buffer single;
 
-	private LLRange(ByteBuf min, ByteBuf max, boolean releasable) {
-		assert min == null || min.refCnt() > 0;
-		assert max == null || max.refCnt() > 0;
-		this.min = min;
-		this.max = max;
-		this.releasable = releasable;
+	private LLRange(Send<Buffer> min, Send<Buffer> max, Send<Buffer> single, Drop<LLRange> drop) {
+		super(new CloseOnDrop(drop));
+		assert isAllAccessible();
+		assert single == null || (min == null && max == null);
+		this.min = min != null ? min.receive().makeReadOnly() : null;
+		this.max = max != null ? max.receive().makeReadOnly() : null;
+		this.single = single != null ? single.receive().makeReadOnly() : null;
 	}
 
+	private boolean isAllAccessible() {
+		assert min == null || min.isAccessible();
+		assert max == null || max.isAccessible();
+		assert single == null || single.isAccessible();
+		assert this.isAccessible();
+		assert this.isOwned();
+		return true;
+	}
+
 	public static LLRange all() {
-		return RANGE_ALL;
+		return RANGE_ALL.copy();
 	}
 
-	public static LLRange from(ByteBuf min) {
-		return new LLRange(min, null, true);
+	public static LLRange from(Send<Buffer> min) {
+		return new LLRange(min, null, null, d -> {});
 	}
 
-	public static LLRange to(ByteBuf max) {
-		return new LLRange(null, max, true);
+	public static LLRange to(Send<Buffer> max) {
+		return new LLRange(null, max, null, d -> {});
 	}
 
-	public static LLRange single(ByteBuf single) {
-		try {
-			return new LLRange(single.retain(), single.retain(), true);
-		} finally {
-			single.release();
-		}
+	public static LLRange single(Send<Buffer> single) {
+		return new LLRange(null, null, single, d -> {});
 	}
 
-	public static LLRange of(ByteBuf min, ByteBuf max) {
-		return new LLRange(min, max, true);
+	public static LLRange of(Send<Buffer> min, Send<Buffer> max) {
+		return new LLRange(min, max, null, d -> {});
 	}
 
 	public boolean isAll() {
-		checkReleased();
-		assert min == null || min.refCnt() > 0;
-		assert max == null || max.refCnt() > 0;
-		return min == null && max == null;
+		ensureOwned();
+		return min == null && max == null && single == null;
 	}
 
 	public boolean isSingle() {
-		checkReleased();
-		assert min == null || min.refCnt() > 0;
-		assert max == null || max.refCnt() > 0;
-		if (min == null || max == null) return false;
-		return LLUtils.equals(min, max);
+		ensureOwned();
+		return single != null;
 	}
 
 	public boolean hasMin() {
-		checkReleased();
-		assert min == null || min.refCnt() > 0;
-		assert max == null || max.refCnt() > 0;
-		return min != null;
+		ensureOwned();
+		return min != null || single != null;
 	}
 
-	public ByteBuf getMin() {
-		checkReleased();
-		assert min == null || min.refCnt() > 0;
-		assert max == null || max.refCnt() > 0;
-		assert min != null;
-		return min;
+	public Send<Buffer> getMin() {
+		ensureOwned();
+		if (min != null) {
+			return min.copy().send();
+		} else if (single != null) {
+			return single.copy().send();
+		} else {
+			return null;
+		}
+	}
+
+	public Buffer getMinUnsafe() {
+		ensureOwned();
+		if (min != null) {
+			return min;
+		} else if (single != null) {
+			return single;
+		} else {
+			return null;
+		}
 	}
 
 	public boolean hasMax() {
-		checkReleased();
-		assert min == null || min.refCnt() > 0;
-		assert max == null || max.refCnt() > 0;
-		return max != null;
+		ensureOwned();
+		return max != null || single != null;
 	}
 
-	public ByteBuf getMax() {
-		checkReleased();
-		assert min == null || min.refCnt() > 0;
-		assert max == null || max.refCnt() > 0;
-		assert max != null;
-		return max;
+	public Send<Buffer> getMax() {
+		ensureOwned();
+		if (max != null) {
+			return max.copy().send();
+		} else if (single != null) {
+			return single.copy().send();
+		} else {
+			return null;
+		}
+	}
+
+	public Buffer getMaxUnsafe() {
+		ensureOwned();
+		if (max != null) {
+			return max;
+		} else if (single != null) {
+			return single;
+		} else {
+			return null;
+		}
 	}
 
-	public ByteBuf getSingle() {
-		checkReleased();
-		assert min == null || min.refCnt() > 0;
-		assert max == null || max.refCnt() > 0;
+	public Send<Buffer> getSingle() {
+		ensureOwned();
 		assert isSingle();
-		return min;
+		return single != null ? single.copy().send() : null;
 	}
 
-	private void checkReleased() {
-		if (!releasable) {
-			return;
-		}
-		if (refCnt.get() <= 0) {
-			throw new IllegalReferenceCountException(0);
-		}
+	public Buffer getSingleUnsafe() {
+		ensureOwned();
+		assert isSingle();
+		return single;
+	}
+
+	private void ensureOwned() {
+		assert isAllAccessible();
+		if (!isOwned()) {
+			if (!isAccessible()) {
+				throw this.createResourceClosedException();
+			} else {
+				throw new IllegalStateException("Resource not owned");
+			}
+		}
 	}
 
@@ -142,34 +171,53 @@ public class LLRange {
 				.toString();
 	}
 
-	public LLRange retain() {
-		if (!releasable) {
-			return this;
-		}
-		if (refCnt.updateAndGet(refCnt -> refCnt <= 0 ? 0 : (refCnt + 1)) <= 0) {
-			throw new IllegalReferenceCountException(0, 1);
-		}
-		if (min != null) {
-			min.retain();
-		}
-		if (max != null) {
-			max.retain();
-		}
-		return this;
-	}
-
-	public void release() {
-		if (!releasable) {
-			return;
-		}
-		if (refCnt.decrementAndGet() < 0) {
-			throw new IllegalReferenceCountException(0, -1);
-		}
-		if (min != null) {
-			min.release();
-		}
-		if (max != null) {
-			max.release();
-		}
+	public LLRange copy() {
+		ensureOwned();
+		return new LLRange(min != null ? min.copy().send() : null,
+				max != null ? max.copy().send() : null,
+				single != null ? single.copy().send() : null,
+				d -> {}
+		);
+	}
+
+	@Override
+	protected RuntimeException createResourceClosedException() {
+		return new IllegalStateException("Closed");
+	}
+
+	@Override
+	protected Owned<LLRange> prepareSend() {
+		Send<Buffer> minSend;
+		Send<Buffer> maxSend;
+		Send<Buffer> singleSend;
+		minSend = this.min != null ? this.min.send() : null;
+		maxSend = this.max != null ? this.max.send() : null;
+		singleSend = this.single != null ? this.single.send() : null;
+		this.makeInaccessible();
+		return drop -> new LLRange(minSend, maxSend, singleSend, drop);
+	}
+
+	private void makeInaccessible() {
+		this.min = null;
+		this.max = null;
+		this.single = null;
+	}
+
+	private static class CloseOnDrop implements Drop<LLRange> {
+
+		private final Drop<LLRange> delegate;
+
+		public CloseOnDrop(Drop<LLRange> drop) {
+			this.delegate = drop;
+		}
+
+		@Override
+		public void drop(LLRange obj) {
+			if (obj.min != null) obj.min.close();
+			if (obj.max != null) obj.max.close();
+			if (obj.single != null) obj.single.close();
+			obj.makeInaccessible();
+			delegate.drop(obj);
+		}
+	}
 }
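The dedicated single field replaces the old trick of storing the same buffer twice in min and max, so isSingle() is now a null check instead of a byte-wise comparison, and point ranges report hasMin()/hasMax() through it. Sketch (illustrative):

	import io.netty.buffer.api.BufferAllocator;
	import it.cavallium.dbengine.database.LLRange;

	class LLRangeSketch {
		static void describeRanges(BufferAllocator alloc) {
			try (LLRange all = LLRange.all()) { // a copy of RANGE_ALL, safe to close
				assert all.isAll();
			}
			try (LLRange one = LLRange.single(alloc.allocate(1).writeByte((byte) 7).send())) {
				assert one.isSingle();
				assert one.hasMin() && one.hasMax(); // a point range bounds itself
			}
		}
	}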
LLUtils.java
@@ -2,21 +2,17 @@ package it.cavallium.dbengine.database;
 
 import com.google.common.primitives.Ints;
 import com.google.common.primitives.Longs;
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.ByteBufAllocator;
-import io.netty.buffer.ByteBufUtil;
-import io.netty.buffer.CompositeByteBuf;
-import io.netty.buffer.Unpooled;
-import io.netty.util.AbstractReferenceCounted;
+import io.netty.buffer.api.Buffer;
+import io.netty.buffer.api.BufferAllocator;
+import io.netty.buffer.api.CompositeBuffer;
+import io.netty.buffer.api.Send;
 import io.netty.util.IllegalReferenceCountException;
-import io.netty.util.ReferenceCounted;
 import it.cavallium.dbengine.database.disk.ReleasableSlice;
 import it.cavallium.dbengine.database.serialization.SerializationException;
 import it.cavallium.dbengine.database.serialization.SerializationFunction;
 import it.cavallium.dbengine.lucene.RandomSortField;
 import java.nio.ByteBuffer;
 import java.nio.charset.Charset;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -24,7 +20,8 @@ import java.util.Map.Entry;
 import java.util.Objects;
 import java.util.Optional;
 import java.util.concurrent.Callable;
-import java.util.function.Function;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.ToIntFunction;
 import org.apache.lucene.document.Document;
 import org.apache.lucene.document.Field;
@@ -45,7 +42,6 @@ import org.jetbrains.annotations.Nullable;
 import org.rocksdb.RocksDB;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.warp.commonutils.functional.IOFunction;
 import reactor.core.publisher.Flux;
 import reactor.core.publisher.Mono;
 import reactor.util.function.Tuple2;
@@ -56,6 +52,7 @@ public class LLUtils {
 
 	private static final Logger logger = LoggerFactory.getLogger(LLUtils.class);
 
+	private static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.allocateDirect(0);
 	private static final byte[] RESPONSE_TRUE = new byte[]{1};
 	private static final byte[] RESPONSE_FALSE = new byte[]{0};
 	private static final byte[] RESPONSE_TRUE_BUF = new byte[]{1};
@@ -73,12 +70,10 @@ public class LLUtils {
 		return response[0] == 1;
 	}
 
-	public static boolean responseToBoolean(ByteBuf response) {
-		try {
+	public static boolean responseToBoolean(Buffer response) {
+		try (response) {
 			assert response.readableBytes() == 1;
-			return response.getByte(response.readerIndex()) == 1;
-		} finally {
-			response.release();
+			return response.getByte(response.readerOffset()) == 1;
 		}
 	}
 
@@ -86,8 +81,8 @@ public class LLUtils {
 		return bool ? RESPONSE_TRUE : RESPONSE_FALSE;
 	}
 
-	public static ByteBuf booleanToResponseByteBuffer(boolean bool) {
-		return Unpooled.wrappedBuffer(booleanToResponse(bool));
+	public static Buffer booleanToResponseByteBuffer(BufferAllocator alloc, boolean bool) {
+		return alloc.allocate(1).writeByte(bool ? (byte) 1 : 0);
 	}
 
 	@Nullable
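responseToBoolean switches from an explicit finally { response.release(); } to try (response), Java's try-with-resources on an effectively final variable: the Buffer is closed on exit on every path. The same pattern in isolation (class and method names are illustrative):

	import io.netty.buffer.api.Buffer;
	import io.netty.buffer.api.BufferAllocator;

	class TryWithResourcesPattern {
		static byte firstByte(BufferAllocator alloc) {
			Buffer buf = alloc.allocate(1).writeByte((byte) 42);
			try (buf) { // closes buf on exit, replacing try/finally + release()
				return buf.getByte(buf.readerOffset());
			}
		}
	}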
@@ -171,9 +166,9 @@ public class LLUtils {
 		return new it.cavallium.dbengine.database.LLKeyScore(hit.docId(), hit.score(), hit.key());
 	}
 
-	public static String toStringSafe(ByteBuf key) {
+	public static String toStringSafe(Buffer key) {
 		try {
-			if (key.refCnt() > 0) {
+			if (key.isAccessible()) {
 				return toString(key);
 			} else {
 				return "(released)";
@@ -183,11 +178,11 @@ public class LLUtils {
 		}
 	}
 
-	public static String toString(ByteBuf key) {
+	public static String toString(Buffer key) {
 		if (key == null) {
 			return "null";
 		} else {
-			int startIndex = key.readerIndex();
+			int startIndex = key.readerOffset();
 			int iMax = key.readableBytes() - 1;
 			int iLimit = 128;
 			if (iMax <= -1) {
@@ -213,111 +208,117 @@ public class LLUtils {
 		}
 	}
 
-	public static boolean equals(ByteBuf a, ByteBuf b) {
+	public static boolean equals(Buffer a, Buffer b) {
 		if (a == null && b == null) {
 			return true;
 		} else if (a != null && b != null) {
-			return ByteBufUtil.equals(a, b);
+			var aCur = a.openCursor();
+			var bCur = b.openCursor();
+			if (aCur.bytesLeft() != bCur.bytesLeft()) {
+				return false;
+			}
+			while (aCur.readByte() && bCur.readByte()) {
+				if (aCur.getByte() != bCur.getByte()) {
+					return false;
+				}
+			}
+			return true;
 		} else {
 			return false;
 		}
 	}
 
-	public static byte[] toArray(ByteBuf key) {
-		if (key.hasArray()) {
-			return Arrays.copyOfRange(key.array(), key.arrayOffset() + key.readerIndex(), key.arrayOffset() + key.writerIndex());
-		} else {
-			byte[] keyBytes = new byte[key.readableBytes()];
-			key.getBytes(key.readerIndex(), keyBytes, 0, key.readableBytes());
-			return keyBytes;
-		}
+	public static byte[] toArray(Buffer key) {
+		byte[] array = new byte[key.readableBytes()];
+		key.copyInto(key.readerOffset(), array, 0, key.readableBytes());
+		return array;
 	}
 
-	public static List<byte[]> toArray(List<ByteBuf> input) {
+	public static List<byte[]> toArray(List<Buffer> input) {
 		List<byte[]> result = new ArrayList<>(input.size());
-		for (ByteBuf byteBuf : input) {
+		for (Buffer byteBuf : input) {
 			result.add(toArray(byteBuf));
 		}
 		return result;
 	}
 
-	public static int hashCode(ByteBuf buf) {
-		return buf == null ? 0 : buf.hashCode();
+	public static int hashCode(Buffer buf) {
+		if (buf == null)
+			return 0;
+
+		int result = 1;
+		var cur = buf.openCursor();
+		while (cur.readByte()) {
+			var element = cur.getByte();
+			result = 31 * result + element;
+		}
+
+		return result;
 	}
 
 	/**
 	 *
 	 * @return null if size is equal to RocksDB.NOT_FOUND
 	 */
 	@Nullable
-	public static ByteBuf readNullableDirectNioBuffer(ByteBufAllocator alloc, ToIntFunction<ByteBuffer> reader) {
-		ByteBuf buffer = alloc.directBuffer();
-		ByteBuf directBuffer = null;
+	public static Buffer readNullableDirectNioBuffer(BufferAllocator alloc, ToIntFunction<ByteBuffer> reader) {
+		Buffer buffer = alloc.allocate(4096);
 		ByteBuffer nioBuffer;
 		int size;
-		Boolean mustBeCopied = null;
 		do {
-			if (mustBeCopied == null || !mustBeCopied) {
-				nioBuffer = LLUtils.toDirectFast(buffer);
-				if (nioBuffer != null) {
-					nioBuffer.limit(nioBuffer.capacity());
-				}
-			} else {
-				nioBuffer = null;
-			}
-			if ((mustBeCopied != null && mustBeCopied) || nioBuffer == null) {
-				directBuffer = buffer;
-				nioBuffer = directBuffer.nioBuffer(0, directBuffer.capacity());
-				mustBeCopied = true;
-			} else {
-				mustBeCopied = false;
-			}
-			try {
-				assert nioBuffer.isDirect();
-				size = reader.applyAsInt(nioBuffer);
-				if (size != RocksDB.NOT_FOUND) {
-					if (mustBeCopied) {
-						buffer.writerIndex(0).writeBytes(nioBuffer);
-					}
-					if (size == nioBuffer.limit()) {
-						buffer.setIndex(0, size);
-						return buffer;
-					} else {
-						assert size > nioBuffer.limit();
-						assert nioBuffer.limit() > 0;
-						buffer.capacity(size);
-					}
-				}
-			} finally {
-				if (nioBuffer != null) {
-					nioBuffer = null;
-				}
-				if(directBuffer != null) {
-					directBuffer.release();
-					directBuffer = null;
-				}
-			}
+			nioBuffer = LLUtils.toDirect(buffer);
+			nioBuffer.limit(nioBuffer.capacity());
+			assert nioBuffer.isDirect();
+			size = reader.applyAsInt(nioBuffer);
+			if (size != RocksDB.NOT_FOUND) {
+				if (size == nioBuffer.limit()) {
+					buffer.readerOffset(0).writerOffset(size);
+					return buffer;
+				} else {
+					assert size > nioBuffer.limit();
+					assert nioBuffer.limit() > 0;
+					buffer.ensureWritable(size);
+				}
+			}
 		} while (size != RocksDB.NOT_FOUND);
 
 		// Return null if size is equal to RocksDB.NOT_FOUND
 		return null;
 	}
 
 	@Nullable
-	public static ByteBuffer toDirectFast(ByteBuf buffer) {
-		ByteBuffer result = buffer.nioBuffer(0, buffer.capacity());
-		if (result.isDirect()) {
-			result.limit(buffer.writerIndex());
-
-			assert result.isDirect();
-			assert result.capacity() == buffer.capacity();
-			assert buffer.readerIndex() == result.position();
-			assert result.limit() - result.position() == buffer.readableBytes();
-
-			return result;
-		} else {
-			return null;
-		}
-	}
+	public static ByteBuffer toDirectFast(Buffer buffer) {
+		int readableComponents = buffer.countReadableComponents();
+		if (readableComponents > 0) {
+			AtomicReference<ByteBuffer> byteBufferReference = new AtomicReference<>(null);
+			buffer.forEachReadable(0, (index, component) -> {
+				byteBufferReference.setPlain(component.readableBuffer());
+				return false;
+			});
+			ByteBuffer byteBuffer = byteBufferReference.getPlain();
+			if (byteBuffer != null && byteBuffer.isDirect()) {
+				byteBuffer.limit(buffer.writerOffset());
+
+				assert byteBuffer.isDirect();
+				assert byteBuffer.capacity() == buffer.capacity();
+				assert buffer.readerOffset() == byteBuffer.position();
+				assert byteBuffer.limit() - byteBuffer.position() == buffer.readableBytes();
+
+				return byteBuffer;
+			} else {
+				return null;
+			}
+		} else if (readableComponents == 0) {
+			return EMPTY_BYTE_BUFFER;
+		} else {
+			return null;
+		}
+	}
 
-	public static ByteBuffer toDirect(ByteBuf buffer) {
+	public static ByteBuffer toDirect(Buffer buffer) {
 		ByteBuffer result = toDirectFast(buffer);
 		if (result == null) {
-			throw new IllegalArgumentException("The supplied ByteBuf is not direct "
+			throw new IllegalArgumentException("The supplied Buffer is not direct "
 					+ "(if it's a CompositeByteBuf it must be consolidated before)");
 		}
 		assert result.isDirect();
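equals and hashCode now walk the buffers through the cursor returned by openCursor, since the new Buffer exposes neither array() nor the old ByteBufUtil helpers, and hashCode is computed byte-by-byte rather than delegating to the buffer's own hashCode. An illustrative check:

	import io.netty.buffer.api.Buffer;
	import io.netty.buffer.api.BufferAllocator;
	import it.cavallium.dbengine.database.LLUtils;

	class BufferEqualitySketch {
		static void check(BufferAllocator alloc) {
			try (Buffer a = alloc.allocate(2).writeByte((byte) 1).writeByte((byte) 2);
					Buffer b = alloc.allocate(2).writeByte((byte) 1).writeByte((byte) 2)) {
				assert LLUtils.equals(a, b); // same content, cursor comparison
				assert LLUtils.hashCode(a) == LLUtils.hashCode(b); // content-based hash
			}
		}
	}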
@@ -325,9 +326,9 @@ public class LLUtils {
 	}
 
 	/*
-	public static ByteBuf toDirectCopy(ByteBuf buffer) {
+	public static Buffer toDirectCopy(Buffer buffer) {
 		try {
-			ByteBuf directCopyBuf = buffer.alloc().buffer(buffer.capacity(), buffer.maxCapacity());
+			Buffer directCopyBuf = buffer.alloc().buffer(buffer.capacity(), buffer.maxCapacity());
 			directCopyBuf.writeBytes(buffer, 0, buffer.writerIndex());
 			return directCopyBuf;
 		} finally {
@@ -336,26 +337,14 @@ public class LLUtils {
 	}
 	*/
 
-	public static ByteBuf convertToDirectByteBuf(ByteBufAllocator alloc, ByteBuf buffer) {
-		ByteBuf result;
-		ByteBuf directCopyBuf = alloc.buffer(buffer.capacity(), buffer.maxCapacity());
-		directCopyBuf.writeBytes(buffer, 0, buffer.writerIndex());
-		directCopyBuf.readerIndex(buffer.readerIndex());
-		result = directCopyBuf;
-		assert result.isDirect();
-		assert result.capacity() == buffer.capacity();
-		assert buffer.readerIndex() == result.readerIndex();
-		return result;
-	}
-
-	public static ByteBuf fromByteArray(ByteBufAllocator alloc, byte[] array) {
-		ByteBuf result = alloc.buffer(array.length);
+	public static Buffer fromByteArray(BufferAllocator alloc, byte[] array) {
+		Buffer result = alloc.allocate(array.length);
 		result.writeBytes(array);
 		return result;
 	}
 
 	@NotNull
-	public static ByteBuf readDirectNioBuffer(ByteBufAllocator alloc, ToIntFunction<ByteBuffer> reader) {
+	public static Buffer readDirectNioBuffer(BufferAllocator alloc, ToIntFunction<ByteBuffer> reader) {
 		var buffer = readNullableDirectNioBuffer(alloc, reader);
 		if (buffer == null) {
 			throw new IllegalStateException("A non-nullable buffer read operation tried to return a \"not found\" element");
@@ -363,81 +352,54 @@ public class LLUtils {
 		return buffer;
 	}
 
-	public static ByteBuf compositeBuffer(ByteBufAllocator alloc, ByteBuf buffer) {
-		return buffer;
-	}
-
-	public static ByteBuf compositeBuffer(ByteBufAllocator alloc, ByteBuf buffer1, ByteBuf buffer2) {
-		try {
-			if (buffer1.readableBytes() == 0) {
-				return compositeBuffer(alloc, buffer2.retain());
-			} else if (buffer2.readableBytes() == 0) {
-				return compositeBuffer(alloc, buffer1.retain());
-			}
-			CompositeByteBuf result = alloc.compositeBuffer(2);
-			try {
-				result.addComponent(true, buffer1.retain());
-				result.addComponent(true, buffer2.retain());
-				return result.consolidate().retain();
-			} finally {
-				result.release();
-			}
-		} finally {
-			buffer1.release();
-			buffer2.release();
-		}
+	public static Send<Buffer> compositeBuffer(BufferAllocator alloc, Send<Buffer> buffer) {
+		try (var composite = buffer.receive().compact()) {
+			assert composite.countReadableComponents() == 1 || composite.countReadableComponents() == 0;
+			return composite.send();
+		}
 	}
 
-	public static ByteBuf compositeBuffer(ByteBufAllocator alloc, ByteBuf buffer1, ByteBuf buffer2, ByteBuf buffer3) {
-		try {
-			if (buffer1.readableBytes() == 0) {
-				return compositeBuffer(alloc, buffer2.retain(), buffer3.retain());
-			} else if (buffer2.readableBytes() == 0) {
-				return compositeBuffer(alloc, buffer1.retain(), buffer3.retain());
-			} else if (buffer3.readableBytes() == 0) {
-				return compositeBuffer(alloc, buffer1.retain(), buffer2.retain());
-			}
-			CompositeByteBuf result = alloc.compositeBuffer(3);
-			try {
-				result.addComponent(true, buffer1.retain());
-				result.addComponent(true, buffer2.retain());
-				result.addComponent(true, buffer3.retain());
-				return result.consolidate().retain();
-			} finally {
-				result.release();
-			}
-		} finally {
-			buffer1.release();
-			buffer2.release();
-			buffer3.release();
-		}
+	public static Send<Buffer> compositeBuffer(BufferAllocator alloc, Send<Buffer> buffer1, Send<Buffer> buffer2) {
+		try (buffer1) {
+			try (buffer2) {
+				try (var composite = CompositeBuffer.compose(alloc, buffer1, buffer2).compact()) {
+					assert composite.countReadableComponents() == 1 || composite.countReadableComponents() == 0;
+					return composite.send();
+				}
+			}
+		}
 	}
 
-	public static ByteBuf compositeBuffer(ByteBufAllocator alloc, ByteBuf... buffers) {
+	public static Send<Buffer> compositeBuffer(BufferAllocator alloc, Send<Buffer> buffer1, Send<Buffer> buffer2, Send<Buffer> buffer3) {
+		try (buffer1) {
+			try (buffer2) {
+				try (buffer3) {
+					try (var composite = CompositeBuffer.compose(alloc, buffer1, buffer2, buffer3).compact()) {
+						assert composite.countReadableComponents() == 1 || composite.countReadableComponents() == 0;
+						return composite.send();
+					}
+				}
+			}
+		}
+	}
+
+	public static Send<Buffer> compositeBuffer(BufferAllocator alloc, Send<Buffer>... buffers) {
 		try {
-			switch (buffers.length) {
-				case 0:
-					return alloc.buffer(0);
-				case 1:
-					return compositeBuffer(alloc, buffers[0].retain().retain());
-				case 2:
-					return compositeBuffer(alloc, buffers[0].retain(), buffers[1].retain());
-				case 3:
-					return compositeBuffer(alloc, buffers[0].retain(), buffers[1].retain(), buffers[2].retain());
-				default:
-					CompositeByteBuf result = alloc.compositeBuffer(buffers.length);
-					try {
-						for (ByteBuf buffer : buffers) {
-							result.addComponent(true, buffer.retain());
-						}
-						return result.consolidate().retain();
-					} finally {
-						result.release();
-					}
-			}
+			return switch (buffers.length) {
+				case 0 -> alloc.allocate(0).send();
+				case 1 -> compositeBuffer(alloc, buffers[0]);
+				case 2 -> compositeBuffer(alloc, buffers[0], buffers[1]);
+				case 3 -> compositeBuffer(alloc, buffers[0], buffers[1], buffers[2]);
+				default -> {
+					try (var composite = CompositeBuffer.compose(alloc, buffers).compact()) {
+						assert composite.countReadableComponents() == 1 || composite.countReadableComponents() == 0;
+						yield composite.send();
+					}
+				}
+			};
 		} finally {
-			for (ByteBuf buffer : buffers) {
-				buffer.release();
+			for (Send<Buffer> buffer : buffers) {
+				buffer.close();
 			}
 		}
 	}
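The composite helpers now take Send<Buffer> handles, build the composite with CompositeBuffer.compose, and immediately compact() it so the result has at most one readable component (the closest analogue of the old consolidate()). Usage sketch (illustrative):

	import io.netty.buffer.api.Buffer;
	import io.netty.buffer.api.BufferAllocator;
	import io.netty.buffer.api.Send;
	import it.cavallium.dbengine.database.LLUtils;

	class CompositeSketch {
		static byte[] concat(BufferAllocator alloc, byte[] a, byte[] b) {
			Send<Buffer> first = LLUtils.fromByteArray(alloc, a).send();
			Send<Buffer> second = LLUtils.fromByteArray(alloc, b).send();
			// compose + compact: one contiguous readable region
			try (Buffer joined = LLUtils.compositeBuffer(alloc, first, second).receive()) {
				return LLUtils.toArray(joined);
			}
		}
	}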
@@ -467,6 +429,33 @@ public class LLUtils {
 		});
 	}
 
+	public static Mono<Send<Buffer>> resolveLLDelta(Mono<LLDelta> prev, UpdateReturnMode updateReturnMode) {
+		return prev.handle((delta, sink) -> {
+			try (delta) {
+				switch (updateReturnMode) {
+					case GET_NEW_VALUE -> {
+						var current = delta.current();
+						if (current != null) {
+							sink.next(current);
+						} else {
+							sink.complete();
+						}
+					}
+					case GET_OLD_VALUE -> {
+						var previous = delta.previous();
+						if (previous != null) {
+							sink.next(previous);
+						} else {
+							sink.complete();
+						}
+					}
+					case NOTHING -> sink.complete();
+					default -> sink.error(new IllegalStateException());
+				}
+			}
+		});
+	}
+
 	public static <T, U> Mono<Delta<U>> mapDelta(Mono<Delta<T>> mono,
 			SerializationFunction<@NotNull T, @Nullable U> mapper) {
 		return mono.handle((delta, sink) -> {
@@ -492,38 +481,57 @@ public class LLUtils {
 		});
 	}
 
+	public static <U> Mono<Delta<U>> mapLLDelta(Mono<LLDelta> mono,
+			SerializationFunction<@NotNull Send<Buffer>, @Nullable U> mapper) {
+		return mono.handle((delta, sink) -> {
+			try {
+				try (Send<Buffer> prev = delta.previous()) {
+					try (Send<Buffer> curr = delta.current()) {
+						U newPrev;
+						U newCurr;
+						if (prev != null) {
+							newPrev = mapper.apply(prev);
+						} else {
+							newPrev = null;
+						}
+						if (curr != null) {
+							newCurr = mapper.apply(curr);
+						} else {
+							newCurr = null;
+						}
+						sink.next(new Delta<>(newPrev, newCurr));
+					}
+				}
+			} catch (SerializationException ex) {
+				sink.error(ex);
+			}
+		});
+	}
+
 	public static <R, V> boolean isDeltaChanged(Delta<V> delta) {
 		return !Objects.equals(delta.previous(), delta.current());
 	}
 
-	public static Mono<ByteBuf> lazyRetain(ByteBuf buf) {
-		return Mono.just(buf).map(ByteBuf::retain);
+	public static Mono<Send<Buffer>> lazyRetain(Buffer buf) {
+		return Mono.just(buf).map(b -> b.copy().send());
 	}
 
-	public static Mono<LLRange> lazyRetainRange(LLRange range) {
-		return Mono.just(range).map(LLRange::retain);
+	public static Mono<Send<LLRange>> lazyRetainRange(LLRange range) {
+		return Mono.just(range).map(r -> r.copy().send());
 	}
 
-	public static Mono<ByteBuf> lazyRetain(Callable<ByteBuf> bufCallable) {
-		return Mono.fromCallable(bufCallable).cacheInvalidateIf(byteBuf -> {
-			// Retain if the value has been cached previously
-			byteBuf.retain();
-			return false;
-		});
+	public static Mono<Send<Buffer>> lazyRetain(Callable<Send<Buffer>> bufCallable) {
+		return Mono.fromCallable(bufCallable);
 	}
 
-	public static Mono<LLRange> lazyRetainRange(Callable<LLRange> rangeCallable) {
-		return Mono.fromCallable(rangeCallable).cacheInvalidateIf(range -> {
-			// Retain if the value has been cached previously
-			range.retain();
-			return false;
-		});
+	public static Mono<Send<LLRange>> lazyRetainRange(Callable<Send<LLRange>> rangeCallable) {
+		return Mono.fromCallable(rangeCallable);
 	}
 
 	public static <T> Mono<T> handleDiscard(Mono<T> mono) {
 		return mono
 				.doOnDiscard(Object.class, obj -> {
-					if (obj instanceof ReferenceCounted o) {
+					if (obj instanceof SafeCloseable o) {
 						discardRefCounted(o);
 					} else if (obj instanceof Entry o) {
 						discardEntry(o);
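The lazyRetain helpers lose the cacheInvalidateIf/retain dance: with single-owner buffers there is no reference count to bump, so the Buffer variant copies the source on each subscription and the Callable variants become plain Mono.fromCallable. Call-site sketch (illustrative; the source buffer must stay open while subscribers copy it):

	import io.netty.buffer.api.Buffer;
	import io.netty.buffer.api.BufferAllocator;
	import io.netty.buffer.api.Send;
	import it.cavallium.dbengine.database.LLUtils;
	import reactor.core.publisher.Mono;

	class LazyRetainSketch {
		static void subscribeTwice(BufferAllocator alloc) {
			try (Buffer source = alloc.allocate(1).writeByte((byte) 9)) {
				Mono<Send<Buffer>> mono = LLUtils.lazyRetain(source);
				// each subscription copies source, so both receivers own independent buffers
				try (Buffer first = mono.block().receive();
						Buffer second = mono.block().receive()) {
					assert first.readableBytes() == second.readableBytes();
				}
			}
		}
	}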
@@ -539,13 +547,15 @@ public class LLUtils {
 					discardLLRange(o);
 				} else if (obj instanceof Delta o) {
 					discardDelta(o);
+				} else if (obj instanceof Send o) {
+					discardSend(o);
 				} else if (obj instanceof Map o) {
 					discardMap(o);
 				}
 			});
 		// todo: check if the single object discard hook is more performant
 		/*
-		.doOnDiscard(ReferenceCounted.class, LLUtils::discardRefCounted)
+		.doOnDiscard(SafeCloseable.class, LLUtils::discardRefCounted)
 		.doOnDiscard(Map.Entry.class, LLUtils::discardEntry)
 		.doOnDiscard(Collection.class, LLUtils::discardCollection)
 		.doOnDiscard(Tuple2.class, LLUtils::discardTuple2)
@@ -553,6 +563,7 @@ public class LLUtils {
 		.doOnDiscard(LLEntry.class, LLUtils::discardLLEntry)
 		.doOnDiscard(LLRange.class, LLUtils::discardLLRange)
 		.doOnDiscard(Delta.class, LLUtils::discardDelta)
+		.doOnDiscard(Send.class, LLUtils::discardSend)
 		.doOnDiscard(Map.class, LLUtils::discardMap);
 
 		*/
@@ -561,7 +572,7 @@ public class LLUtils {
 	public static <T> Flux<T> handleDiscard(Flux<T> mono) {
 		return mono
 				.doOnDiscard(Object.class, obj -> {
-					if (obj instanceof ReferenceCounted o) {
+					if (obj instanceof SafeCloseable o) {
 						discardRefCounted(o);
 					} else if (obj instanceof Entry o) {
 						discardEntry(o);
@@ -577,15 +588,15 @@ public class LLUtils {
 					discardLLRange(o);
 				} else if (obj instanceof Delta o) {
 					discardDelta(o);
+				} else if (obj instanceof Send o) {
+					discardSend(o);
 				} else if (obj instanceof Map o) {
 					discardMap(o);
 				} else {
 					System.err.println(obj.getClass().getName());
 				}
 			});
 		// todo: check if the single object discard hook is more performant
 		/*
-		.doOnDiscard(ReferenceCounted.class, LLUtils::discardRefCounted)
+		.doOnDiscard(SafeCloseable.class, LLUtils::discardRefCounted)
 		.doOnDiscard(Map.Entry.class, LLUtils::discardEntry)
 		.doOnDiscard(Collection.class, LLUtils::discardCollection)
 		.doOnDiscard(Tuple2.class, LLUtils::discardTuple2)
@@ -593,113 +604,78 @@ public class LLUtils {
 		.doOnDiscard(LLEntry.class, LLUtils::discardLLEntry)
 		.doOnDiscard(LLRange.class, LLUtils::discardLLRange)
 		.doOnDiscard(Delta.class, LLUtils::discardDelta)
+		.doOnDiscard(Send.class, LLUtils::discardSend)
 		.doOnDiscard(Map.class, LLUtils::discardMap);
 
 		*/
 	}
 
 	private static void discardLLEntry(LLEntry entry) {
-		logger.trace("Releasing discarded ByteBuf");
-		entry.release();
+		logger.trace("Releasing discarded Buffer");
+		entry.close();
 	}
 
 	private static void discardLLRange(LLRange range) {
-		logger.trace("Releasing discarded ByteBuf");
-		range.release();
+		logger.trace("Releasing discarded Buffer");
+		range.close();
 	}
 
 	private static void discardEntry(Map.Entry<?, ?> e) {
-		if (e.getKey() instanceof ByteBuf bb) {
-			if (bb.refCnt() > 0) {
-				logger.trace("Releasing discarded ByteBuf");
-				bb.release();
-			}
-		}
-		if (e.getValue() instanceof ByteBuf bb) {
-			if (bb.refCnt() > 0) {
-				logger.trace("Releasing discarded ByteBuf");
-				bb.release();
-			}
-		}
+		if (e.getKey() instanceof Buffer bb) {
+			bb.close();
+		}
+		if (e.getValue() instanceof Buffer bb) {
+			bb.close();
+		}
 	}
 
 	private static void discardTuple2(Tuple2<?, ?> e) {
-		if (e.getT1() instanceof ByteBuf bb) {
-			if (bb.refCnt() > 0) {
-				logger.trace("Releasing discarded ByteBuf");
-				bb.release();
-			}
-		}
-		if (e.getT2() instanceof ByteBuf bb) {
-			if (bb.refCnt() > 0) {
-				logger.trace("Releasing discarded ByteBuf");
-				bb.release();
-			}
-		}
+		if (e.getT1() instanceof Buffer bb) {
+			bb.close();
+		}
+		if (e.getT2() instanceof Buffer bb) {
+			bb.close();
+		}
 	}
 
 	private static void discardTuple3(Tuple3<?, ?, ?> e) {
-		if (e.getT1() instanceof ByteBuf bb) {
-			if (bb.refCnt() > 0) {
-				logger.trace("Releasing discarded ByteBuf");
-				bb.release();
-			}
-		} else if (e.getT1() instanceof Optional opt) {
-			if (opt.isPresent() && opt.get() instanceof ByteBuf bb) {
-				logger.trace("Releasing discarded ByteBuf");
-				bb.release();
-			}
-		}
+		if (e.getT1() instanceof Buffer bb) {
+			bb.close();
+		} else if (e.getT1() instanceof Optional opt) {
+			if (opt.isPresent() && opt.get() instanceof Buffer bb) {
+				bb.close();
+			}
+		}
-		if (e.getT2() instanceof ByteBuf bb) {
-			if (bb.refCnt() > 0) {
-				logger.trace("Releasing discarded ByteBuf");
-				bb.release();
-			}
+		if (e.getT2() instanceof Buffer bb) {
+			bb.close();
 		} else if (e.getT1() instanceof Optional opt) {
-			if (opt.isPresent() && opt.get() instanceof ByteBuf bb) {
-				logger.trace("Releasing discarded ByteBuf");
|
||||
bb.release();
|
||||
if (opt.isPresent() && opt.get() instanceof Buffer bb) {
|
||||
bb.close();
|
||||
}
|
||||
}
|
||||
if (e.getT3() instanceof ByteBuf bb) {
|
||||
if (bb.refCnt() > 0) {
|
||||
logger.trace("Releasing discarded ByteBuf");
|
||||
bb.release();
|
||||
}
|
||||
if (e.getT3() instanceof Buffer bb) {
|
||||
bb.close();
|
||||
} else if (e.getT1() instanceof Optional opt) {
|
||||
if (opt.isPresent() && opt.get() instanceof ByteBuf bb) {
|
||||
logger.trace("Releasing discarded ByteBuf");
|
||||
bb.release();
|
||||
if (opt.isPresent() && opt.get() instanceof Buffer bb) {
|
||||
bb.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private static void discardRefCounted(ReferenceCounted referenceCounted) {
|
||||
if (referenceCounted.refCnt() > 0) {
|
||||
logger.trace("Releasing discarded ByteBuf");
|
||||
referenceCounted.release();
|
||||
}
|
||||
private static void discardRefCounted(SafeCloseable safeCloseable) {
|
||||
safeCloseable.close();
|
||||
}
|
||||
|
||||
private static void discardCollection(Collection<?> collection) {
|
||||
for (Object o : collection) {
|
||||
if (o instanceof ReferenceCounted referenceCounted) {
|
||||
if (referenceCounted.refCnt() > 0) {
|
||||
logger.trace("Releasing discarded ByteBuf");
|
||||
referenceCounted.release();
|
||||
}
|
||||
if (o instanceof SafeCloseable safeCloseable) {
|
||||
safeCloseable.close();
|
||||
} else if (o instanceof Map.Entry entry) {
|
||||
if (entry.getKey() instanceof ReferenceCounted bb) {
|
||||
if (bb.refCnt() > 0) {
|
||||
logger.trace("Releasing discarded ByteBuf");
|
||||
bb.release();
|
||||
}
|
||||
}
|
||||
if (entry.getValue() instanceof ReferenceCounted bb) {
|
||||
if (bb.refCnt() > 0) {
|
||||
logger.trace("Releasing discarded ByteBuf");
|
||||
bb.release();
|
||||
if (entry.getKey() instanceof SafeCloseable bb) {
|
||||
bb.close();
|
||||
}
|
||||
if (entry.getValue() instanceof SafeCloseable bb) {
|
||||
bb.close();
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
@ -708,35 +684,27 @@ public class LLUtils {
|
||||
}
|
||||
|
||||
private static void discardDelta(Delta<?> delta) {
|
||||
if (delta.previous() instanceof ByteBuf bb) {
|
||||
if (bb.refCnt() > 0) {
|
||||
logger.trace("Releasing discarded ByteBuf");
|
||||
bb.release();
|
||||
}
|
||||
}
|
||||
if (delta.current() instanceof ByteBuf bb) {
|
||||
if (bb.refCnt() > 0) {
|
||||
logger.trace("Releasing discarded ByteBuf");
|
||||
bb.release();
|
||||
if (delta.previous() instanceof Buffer bb) {
|
||||
bb.close();
|
||||
}
|
||||
if (delta.current() instanceof Buffer bb) {
|
||||
bb.close();
|
||||
}
|
||||
}
|
||||
|
||||
private static void discardSend(Send<?> send) {
|
||||
send.close();
|
||||
}
|
||||
|
||||
private static void discardMap(Map<?, ?> map) {
|
||||
for (Entry<?, ?> entry : map.entrySet()) {
|
||||
boolean hasByteBuf = false;
|
||||
if (entry.getKey() instanceof ByteBuf bb) {
|
||||
if (bb.refCnt() > 0) {
|
||||
logger.trace("Releasing discarded ByteBuf");
|
||||
bb.release();
|
||||
}
|
||||
if (entry.getKey() instanceof Buffer bb) {
|
||||
bb.close();
|
||||
hasByteBuf = true;
|
||||
}
|
||||
if (entry.getValue() instanceof ByteBuf bb) {
|
||||
if (bb.refCnt() > 0) {
|
||||
logger.trace("Releasing discarded ByteBuf");
|
||||
bb.release();
|
||||
}
|
||||
if (entry.getValue() instanceof Buffer bb) {
|
||||
bb.close();
|
||||
hasByteBuf = true;
|
||||
}
|
||||
if (!hasByteBuf) {
|
||||
@ -744,4 +712,28 @@ public class LLUtils {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
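All of the discard helpers above funnel into Reactor's doOnDiscard hook: any element an operator drops (filtered out, cancelled, buffered past demand) gets closed instead of leaking. A sketch of the same idea reduced to its essentials, assuming only the SafeCloseable interface this commit introduces:

import it.cavallium.dbengine.database.SafeCloseable; // added by this commit
import reactor.core.publisher.Flux;

class DiscardHookSketch {

	// Dropped elements never reach the subscriber, so this hook is the
	// only chance to close them; under reference counting this used to
	// be a release() call guarded by refCnt() > 0.
	static <T> Flux<T> guard(Flux<T> flux) {
		return flux.doOnDiscard(SafeCloseable.class, SafeCloseable::close);
	}
}

The per-type hooks left commented out in the diff would register one doOnDiscard per class instead of a single Object-typed dispatch; the todo note marks that as an open performance question.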
public static boolean isDirect(Buffer key) {
if (key.countReadableComponents() == 1) {
return key.forEachReadable(0, (index, component) -> component.readableBuffer().isDirect()) >= 0;
} else {
return false;
}
}
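isDirect leans on a convention of the new API: forEachReadable returns the number of components it visited, or a negative index when the processor stops early by returning false. A non-negative result therefore means every readable component reported a direct (off-heap) backing buffer. The same check as a standalone sketch, under that assumption about the return convention:

static boolean allReadableComponentsDirect(io.netty.buffer.api.Buffer buf) {
	// The processor returns false as soon as one component is heap-backed,
	// which stops the loop and makes forEachReadable return a negative value.
	return buf.forEachReadable(0, (index, component) ->
			component.readableBuffer().isDirect()) >= 0;
}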
public static String deserializeString(Send<Buffer> bufferSend, int readerOffset, int length, Charset charset) {
try (var buffer = bufferSend.receive()) {
byte[] bytes = new byte[Math.min(length, buffer.readableBytes())];
buffer.copyInto(readerOffset, bytes, 0, length);
return new String(bytes, charset);
}
}

public static int utf8MaxBytes(String deserialized) {
return deserialized.length() * 3;
}
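utf8MaxBytes is a worst-case bound, not an exact size: a single UTF-16 code unit encodes to at most 3 UTF-8 bytes, and a surrogate pair (two units) encodes to 4, so length() * 3 always suffices. A small illustrative check:

import java.nio.charset.StandardCharsets;

class Utf8BoundSketch {
	static void demo() {
		// one char, three bytes: the per-code-unit worst case
		assert "€".getBytes(StandardCharsets.UTF_8).length == 3;
		// a surrogate pair is two chars but only four bytes, well under 2 * 3
		assert "𐍈".getBytes(StandardCharsets.UTF_8).length == 4;
	}
}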
public static void writeString(Buffer buf, String deserialized, Charset charset) {
buf.writeBytes(deserialized.getBytes(charset));
}
}

@ -0,0 +1,7 @@
package it.cavallium.dbengine.database;

public interface SafeCloseable extends AutoCloseable {

@Override
void close();
}
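The new SafeCloseable interface narrows AutoCloseable.close() so it no longer declares a checked exception, which is what lets call sites use try-with-resources and method references like SafeCloseable::close without catch blocks. A usage sketch with an illustrative implementation:

class PooledHandle implements SafeCloseable {

	@Override
	public void close() {
		// return the underlying resource to its pool; no checked exceptions
	}

	static void demo() {
		try (PooledHandle handle = new PooledHandle()) {
			// use the handle; close() runs automatically, no catch needed
		}
	}
}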
@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.serialization.Serializer;
import java.util.function.Function;
@ -11,9 +11,9 @@ public class DatabaseEmpty {

@SuppressWarnings({"unused", "InstantiationOfUtilityClass"})
public static final Nothing NOTHING = new Nothing();
public static final Serializer<Nothing, ByteBuf> NOTHING_SERIALIZER = new Serializer<>() {
public static final Serializer<Nothing, Buffer> NOTHING_SERIALIZER = new Serializer<>() {
@Override
public @NotNull Nothing deserialize(@NotNull ByteBuf serialized) {
public @NotNull Nothing deserialize(@NotNull Buffer serialized) {
try {
return NOTHING;
} finally {
@ -22,7 +22,7 @@ public class DatabaseEmpty {
}

@Override
public @NotNull ByteBuf serialize(@NotNull Nothing deserialized) {
public @NotNull Buffer serialize(@NotNull Nothing deserialized) {
return EMPTY_BUFFER;
}
};
@ -33,7 +33,7 @@ public class DatabaseEmpty {
private DatabaseEmpty() {
}

public static DatabaseStageEntry<Nothing> create(LLDictionary dictionary, ByteBuf key) {
public static DatabaseStageEntry<Nothing> create(LLDictionary dictionary, Buffer key) {
return new DatabaseSingle<>(dictionary, key, NOTHING_SERIALIZER);
}

@ -1,6 +1,7 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Send;
import io.netty.util.ReferenceCounted;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.Delta;
@ -38,40 +39,38 @@ import reactor.util.function.Tuples;
*/
public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U, DatabaseStageEntry<U>> {

private final Serializer<U, ByteBuf> valueSerializer;
private final Serializer<U, Send<Buffer>> valueSerializer;

protected DatabaseMapDictionary(LLDictionary dictionary,
ByteBuf prefixKey,
SerializerFixedBinaryLength<T, ByteBuf> keySuffixSerializer,
Serializer<U, ByteBuf> valueSerializer) {
Send<Buffer> prefixKey,
SerializerFixedBinaryLength<T, Send<Buffer>> keySuffixSerializer,
Serializer<U, Send<Buffer>> valueSerializer) {
// Do not retain or release or use the prefixKey here
super(dictionary, prefixKey, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0);
this.valueSerializer = valueSerializer;
}

public static <T, U> DatabaseMapDictionary<T, U> simple(LLDictionary dictionary,
SerializerFixedBinaryLength<T, ByteBuf> keySerializer,
Serializer<U, ByteBuf> valueSerializer) {
return new DatabaseMapDictionary<>(dictionary, dictionary.getAllocator().buffer(0), keySerializer, valueSerializer);
SerializerFixedBinaryLength<T, Send<Buffer>> keySerializer,
Serializer<U, Send<Buffer>> valueSerializer) {
return new DatabaseMapDictionary<>(dictionary, dictionary.getAllocator().allocate(0).send(), keySerializer, valueSerializer);
}

public static <T, U> DatabaseMapDictionary<T, U> tail(LLDictionary dictionary,
ByteBuf prefixKey,
SerializerFixedBinaryLength<T, ByteBuf> keySuffixSerializer,
Serializer<U, ByteBuf> valueSerializer) {
Send<Buffer> prefixKey,
SerializerFixedBinaryLength<T, Send<Buffer>> keySuffixSerializer,
Serializer<U, Send<Buffer>> valueSerializer) {
return new DatabaseMapDictionary<>(dictionary, prefixKey, keySuffixSerializer, valueSerializer);
}

private ByteBuf toKey(ByteBuf suffixKey) {
try {
private Send<Buffer> toKey(Send<Buffer> suffixKeyToSend) {
try (var suffixKey = suffixKeyToSend.receive()) {
assert suffixKeyConsistency(suffixKey.readableBytes());
return LLUtils.compositeBuffer(dictionary.getAllocator(), keyPrefix.retain(), suffixKey.retain());
} finally {
suffixKey.release();
return LLUtils.compositeBuffer(dictionary.getAllocator(), keyPrefix.copy().send(), suffixKey.send());
}
}

private void deserializeValue(ByteBuf value, SynchronousSink<U> sink) {
private void deserializeValue(Send<Buffer> value, SynchronousSink<U> sink) {
try {
sink.next(valueSerializer.deserialize(value));
} catch (SerializationException ex) {
@ -202,7 +201,7 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
);
}

public SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> getSerializedUpdater(SerializationFunction<@Nullable U, @Nullable U> updater) {
public SerializationFunction<@Nullable Buffer, @Nullable Buffer> getSerializedUpdater(SerializationFunction<@Nullable U, @Nullable U> updater) {
return oldSerialized -> {
try {
U result;
@ -224,7 +223,7 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
};
}

public <X> BiSerializationFunction<@Nullable ByteBuf, X, @Nullable ByteBuf> getSerializedUpdater(
public <X> BiSerializationFunction<@Nullable Buffer, X, @Nullable Buffer> getSerializedUpdater(
BiSerializationFunction<@Nullable U, X, @Nullable U> updater) {
return (oldSerialized, extra) -> {
try {
@ -336,7 +335,7 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
@Override
public Flux<Entry<T, Optional<U>>> getMulti(@Nullable CompositeSnapshot snapshot, Flux<T> keys, boolean existsAlmostCertainly) {
return dictionary.getMulti(resolveSnapshot(snapshot), keys.flatMap(keySuffix -> Mono.fromCallable(() -> {
ByteBuf keySuffixBuf = serializeSuffix(keySuffix);
Buffer keySuffixBuf = serializeSuffix(keySuffix);
try {
var key = toKey(keySuffixBuf.retain());
try {
@ -367,9 +366,9 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
}

private LLEntry serializeEntry(T key, U value) throws SerializationException {
ByteBuf serializedKey = toKey(serializeSuffix(key));
Buffer serializedKey = toKey(serializeSuffix(key));
try {
ByteBuf serializedValue = valueSerializer.serialize(value);
Buffer serializedValue = valueSerializer.serialize(value);
try {
return new LLEntry(serializedKey.retain(), serializedValue.retain());
} finally {
@ -403,15 +402,15 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
@Override
public <X> Flux<ExtraKeyOperationResult<T, X>> updateMulti(Flux<Tuple2<T, X>> entries,
BiSerializationFunction<@Nullable U, X, @Nullable U> updater) {
Flux<Tuple2<ByteBuf, X>> serializedEntries = entries
Flux<Tuple2<Buffer, X>> serializedEntries = entries
.flatMap(entry -> Mono
.fromCallable(() -> Tuples.of(serializeSuffix(entry.getT1()), entry.getT2()))
)
.doOnDiscard(Tuple2.class, uncastedEntry -> {
if (uncastedEntry.getT1() instanceof ByteBuf byteBuf) {
if (uncastedEntry.getT1() instanceof Buffer byteBuf) {
byteBuf.release();
}
if (uncastedEntry.getT2() instanceof ByteBuf byteBuf) {
if (uncastedEntry.getT2() instanceof Buffer byteBuf) {
byteBuf.release();
}
});
@ -435,7 +434,7 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
.getRangeKeys(resolveSnapshot(snapshot), rangeMono)
.handle((key, sink) -> {
try {
ByteBuf keySuffixWithExt = stripPrefix(key.retain(), false);
Buffer keySuffixWithExt = stripPrefix(key.retain(), false);
try {
sink.next(Map.entry(deserializeSuffix(keySuffixWithExt.retainedSlice()),
new DatabaseSingleMapped<>(new DatabaseSingle<>(dictionary,
@ -459,10 +458,10 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
return dictionary
.getRange(resolveSnapshot(snapshot), rangeMono)
.<Entry<T, U>>handle((serializedEntry, sink) -> {
ByteBuf key = serializedEntry.getKey();
ByteBuf value = serializedEntry.getValue();
Buffer key = serializedEntry.getKey();
Buffer value = serializedEntry.getValue();
try {
ByteBuf keySuffix = stripPrefix(key.retain(), false);
Buffer keySuffix = stripPrefix(key.retain(), false);
try {
sink.next(Map.entry(deserializeSuffix(keySuffix.retain()),
valueSerializer.deserialize(value.retain())));
@ -477,12 +476,12 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
}
})
.doOnDiscard(Entry.class, uncastedEntry -> {
if (uncastedEntry.getKey() instanceof ByteBuf byteBuf) {
if (uncastedEntry.getKey() instanceof Buffer byteBuf) {
if (byteBuf.refCnt() > 0) {
byteBuf.release();
}
}
if (uncastedEntry.getValue() instanceof ByteBuf byteBuf) {
if (uncastedEntry.getValue() instanceof Buffer byteBuf) {
if (byteBuf.refCnt() > 0) {
byteBuf.release();
}
@ -496,9 +495,9 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
this.getAllValues(null),
dictionary.setRange(rangeMono, entries.handle((entry, sink) -> {
try {
ByteBuf serializedKey = toKey(serializeSuffix(entry.getKey()));
Buffer serializedKey = toKey(serializeSuffix(entry.getKey()));
try {
ByteBuf serializedValue = valueSerializer.serialize(entry.getValue());
Buffer serializedValue = valueSerializer.serialize(entry.getValue());
try {
sink.next(new LLEntry(serializedKey.retain(), serializedValue.retain()));
} finally {

@ -1,7 +1,9 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Resource;
import io.netty.buffer.api.Send;
import io.netty.util.IllegalReferenceCountException;
import io.netty.util.ReferenceCounted;
import it.cavallium.dbengine.client.BadBlock;
@ -28,178 +30,155 @@ import reactor.util.function.Tuples;
public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implements DatabaseStageMap<T, U, US> {

protected final LLDictionary dictionary;
private final ByteBufAllocator alloc;
private final BufferAllocator alloc;
protected final SubStageGetter<U, US> subStageGetter;
protected final SerializerFixedBinaryLength<T, ByteBuf> keySuffixSerializer;
protected final ByteBuf keyPrefix;
protected final SerializerFixedBinaryLength<T, Send<Buffer>> keySuffixSerializer;
protected final Buffer keyPrefix;
protected final int keyPrefixLength;
protected final int keySuffixLength;
protected final int keyExtLength;
protected final LLRange range;
protected final Mono<LLRange> rangeMono;
protected final Mono<Send<LLRange>> rangeMono;
private volatile boolean released;

private static ByteBuf incrementPrefix(ByteBufAllocator alloc, ByteBuf originalKey, int prefixLength) {
try {
private static Send<Buffer> incrementPrefix(BufferAllocator alloc, Send<Buffer> originalKeySend, int prefixLength) {
try (var originalKey = originalKeySend.receive()) {
assert originalKey.readableBytes() >= prefixLength;
ByteBuf copiedBuf = alloc.buffer(originalKey.writerIndex(), originalKey.writerIndex() + 1);
try {
try (Buffer copiedBuf = alloc.allocate(originalKey.writerOffset())) {
boolean overflowed = true;
final int ff = 0xFF;
int writtenBytes = 0;
copiedBuf.writerIndex(prefixLength);
copiedBuf.writerOffset(prefixLength);
for (int i = prefixLength - 1; i >= 0; i--) {
int iByte = originalKey.getUnsignedByte(i);
if (iByte != ff) {
copiedBuf.setByte(i, iByte + 1);
copiedBuf.setUnsignedByte(i, iByte + 1);
writtenBytes++;
overflowed = false;
break;
} else {
copiedBuf.setByte(i, 0x00);
copiedBuf.setUnsignedByte(i, 0x00);
writtenBytes++;
overflowed = true;
}
}
assert prefixLength - writtenBytes >= 0;
if (prefixLength - writtenBytes > 0) {
copiedBuf.setBytes(0, originalKey, 0, (prefixLength - writtenBytes));
originalKey.copyInto(0, copiedBuf, 0, (prefixLength - writtenBytes));
}

copiedBuf.writerIndex(copiedBuf.capacity());
copiedBuf.writerOffset(copiedBuf.capacity());

if (originalKey.writerIndex() - prefixLength > 0) {
copiedBuf.setBytes(prefixLength, originalKey, prefixLength, originalKey.writerIndex() - prefixLength);
if (originalKey.writerOffset() - prefixLength > 0) {
originalKey.copyInto(prefixLength, copiedBuf, prefixLength, originalKey.writerOffset() - prefixLength);
}

if (overflowed) {
for (int i = 0; i < copiedBuf.writerIndex(); i++) {
copiedBuf.setByte(i, 0xFF);
for (int i = 0; i < copiedBuf.writerOffset(); i++) {
copiedBuf.setUnsignedByte(i, 0xFF);
}
copiedBuf.writeZero(1);
copiedBuf.writeByte((byte) 0x00);
}
return copiedBuf.retain();
} finally {
copiedBuf.release();
return copiedBuf.send();
}
} finally {
originalKey.release();
}
}
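incrementPrefix treats the first prefixLength bytes of the key as one big-endian unsigned integer and adds 1, carrying through 0xFF bytes; if every prefix byte overflows, it falls back to an all-0xFF key with a trailing zero byte so the result still sorts after every key sharing the prefix. The same logic on a plain byte array, as a standalone sketch:

static byte[] incrementPrefix(byte[] key, int prefixLength) {
	byte[] out = key.clone();
	for (int i = prefixLength - 1; i >= 0; i--) {
		if ((out[i] & 0xFF) != 0xFF) {
			out[i]++;              // no carry needed past this byte
			return out;
		}
		out[i] = 0x00;             // 0xFF wraps to 0x00, carry continues
	}
	// every prefix byte was 0xFF: saturate the whole key and append a zero
	byte[] overflowed = new byte[key.length + 1];
	java.util.Arrays.fill(overflowed, 0, key.length, (byte) 0xFF);
	overflowed[key.length] = 0x00;
	return overflowed;
}

For example, incrementPrefix(new byte[] {0x01, (byte) 0xFF}, 2) yields {0x02, 0x00}: the low byte wraps and the carry lands in the high byte.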
static ByteBuf firstRangeKey(ByteBufAllocator alloc,
ByteBuf prefixKey,
static Send<Buffer> firstRangeKey(BufferAllocator alloc,
Send<Buffer> prefixKey,
int prefixLength,
int suffixLength,
int extLength) {
return zeroFillKeySuffixAndExt(alloc, prefixKey, prefixLength, suffixLength, extLength);
}

static ByteBuf nextRangeKey(ByteBufAllocator alloc,
ByteBuf prefixKey,
static Send<Buffer> nextRangeKey(BufferAllocator alloc,
Send<Buffer> prefixKey,
int prefixLength,
int suffixLength,
int extLength) {
try {
ByteBuf nonIncremented = zeroFillKeySuffixAndExt(alloc, prefixKey.retain(), prefixLength, suffixLength, extLength);
try {
return incrementPrefix(alloc, nonIncremented.retain(), prefixLength);
} finally {
nonIncremented.release();
try (prefixKey) {
try (Send<Buffer> nonIncremented = zeroFillKeySuffixAndExt(alloc, prefixKey, prefixLength, suffixLength,
extLength)) {
return incrementPrefix(alloc, nonIncremented, prefixLength);
}
} finally {
prefixKey.release();
}
}

protected static ByteBuf zeroFillKeySuffixAndExt(ByteBufAllocator alloc,
ByteBuf prefixKey,
protected static Send<Buffer> zeroFillKeySuffixAndExt(BufferAllocator alloc,
Send<Buffer> prefixKeySend,
int prefixLength,
int suffixLength,
int extLength) {
try {
try (var prefixKey = prefixKeySend.receive()) {
assert prefixKey.readableBytes() == prefixLength;
assert suffixLength > 0;
assert extLength >= 0;
ByteBuf zeroSuffixAndExt = alloc.buffer(suffixLength + extLength, suffixLength + extLength);
try {
zeroSuffixAndExt.writeZero(suffixLength + extLength);
ByteBuf result = LLUtils.compositeBuffer(alloc, prefixKey.retain(), zeroSuffixAndExt.retain());
try {
return result.retain();
} finally {
result.release();
try (Buffer zeroSuffixAndExt = alloc.allocate(suffixLength + extLength)) {
for (int i = 0; i < suffixLength + extLength; i++) {
zeroSuffixAndExt.writeByte((byte) 0x0);
}
try (Send<Buffer> result = LLUtils.compositeBuffer(alloc, prefixKey.send(), zeroSuffixAndExt.send())) {
return result;
}
} finally {
zeroSuffixAndExt.release();
}
} finally {
prefixKey.release();
}
}

static ByteBuf firstRangeKey(
ByteBufAllocator alloc,
ByteBuf prefixKey,
ByteBuf suffixKey,
static Send<Buffer> firstRangeKey(
BufferAllocator alloc,
Send<Buffer> prefixKey,
Send<Buffer> suffixKey,
int prefixLength,
int suffixLength,
int extLength) {
return zeroFillKeyExt(alloc, prefixKey, suffixKey, prefixLength, suffixLength, extLength);
}

static ByteBuf nextRangeKey(
ByteBufAllocator alloc,
ByteBuf prefixKey,
ByteBuf suffixKey,
static Send<Buffer> nextRangeKey(
BufferAllocator alloc,
Send<Buffer> prefixKey,
Send<Buffer> suffixKey,
int prefixLength,
int suffixLength,
int extLength) {
try {
ByteBuf nonIncremented = zeroFillKeyExt(alloc,
prefixKey.retain(),
suffixKey.retain(),
try (Send<Buffer> nonIncremented = zeroFillKeyExt(alloc,
prefixKey,
suffixKey,
prefixLength,
suffixLength,
extLength
);
try {
return incrementPrefix(alloc, nonIncremented.retain(), prefixLength + suffixLength);
} finally {
nonIncremented.release();
}
} finally {
prefixKey.release();
suffixKey.release();
)) {
return incrementPrefix(alloc, nonIncremented, prefixLength + suffixLength);
}
}

protected static ByteBuf zeroFillKeyExt(
ByteBufAllocator alloc,
ByteBuf prefixKey,
ByteBuf suffixKey,
protected static Send<Buffer> zeroFillKeyExt(
BufferAllocator alloc,
Send<Buffer> prefixKeySend,
Send<Buffer> suffixKeySend,
int prefixLength,
int suffixLength,
int extLength) {
try {
try (var prefixKey = prefixKeySend.receive()) {
try (var suffixKey = suffixKeySend.receive()) {
assert prefixKey.readableBytes() == prefixLength;
assert suffixKey.readableBytes() == suffixLength;
assert suffixLength > 0;
assert extLength >= 0;
ByteBuf result = LLUtils.compositeBuffer(alloc,
prefixKey.retain(),
suffixKey.retain(),
alloc.buffer(extLength, extLength).writeZero(extLength)
);
try {
assert result.readableBytes() == prefixLength + suffixLength + extLength;
return result.retain();
} finally {
result.release();

try (var ext = alloc.allocate(extLength)) {
for (int i = 0; i < extLength; i++) {
ext.writeByte((byte) 0);
}

try (Buffer result = LLUtils.compositeBuffer(alloc, prefixKey.send(), suffixKey.send(), ext.send())
.receive()) {
assert result.readableBytes() == prefixLength + suffixLength + extLength;
return result.send();
}
}
}
} finally {
prefixKey.release();
suffixKey.release();
}
}

@ -208,22 +187,18 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
*/
@Deprecated
public static <T, U> DatabaseMapDictionaryDeep<T, U, DatabaseStageEntry<U>> simple(LLDictionary dictionary,
SerializerFixedBinaryLength<T, ByteBuf> keySerializer,
SerializerFixedBinaryLength<T, Send<Buffer>> keySerializer,
SubStageGetterSingle<U> subStageGetter) {
return new DatabaseMapDictionaryDeep<>(dictionary,
dictionary.getAllocator().buffer(0),
keySerializer,
subStageGetter,
0
);
return new DatabaseMapDictionaryDeep<>(dictionary, dictionary.getAllocator().allocate(0).send(),
keySerializer, subStageGetter, 0);
}

public static <T, U, US extends DatabaseStage<U>> DatabaseMapDictionaryDeep<T, U, US> deepTail(LLDictionary dictionary,
SerializerFixedBinaryLength<T, ByteBuf> keySerializer,
SerializerFixedBinaryLength<T, Send<Buffer>> keySerializer,
int keyExtLength,
SubStageGetter<U, US> subStageGetter) {
return new DatabaseMapDictionaryDeep<>(dictionary,
dictionary.getAllocator().buffer(0),
dictionary.getAllocator().allocate(0).send(),
keySerializer,
subStageGetter,
keyExtLength
@ -231,56 +206,45 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
}

public static <T, U, US extends DatabaseStage<U>> DatabaseMapDictionaryDeep<T, U, US> deepIntermediate(LLDictionary dictionary,
ByteBuf prefixKey,
SerializerFixedBinaryLength<T, ByteBuf> keySuffixSerializer,
Send<Buffer> prefixKey,
SerializerFixedBinaryLength<T, Send<Buffer>> keySuffixSerializer,
SubStageGetter<U, US> subStageGetter,
int keyExtLength) {
return new DatabaseMapDictionaryDeep<>(dictionary, prefixKey, keySuffixSerializer, subStageGetter, keyExtLength);
}

protected DatabaseMapDictionaryDeep(LLDictionary dictionary,
ByteBuf prefixKey,
SerializerFixedBinaryLength<T, ByteBuf> keySuffixSerializer,
Send<Buffer> prefixKey,
SerializerFixedBinaryLength<T, Send<Buffer>> keySuffixSerializer,
SubStageGetter<U, US> subStageGetter,
int keyExtLength) {
try {
this.dictionary = dictionary;
this.alloc = dictionary.getAllocator();
this.subStageGetter = subStageGetter;
this.keySuffixSerializer = keySuffixSerializer;
assert prefixKey.refCnt() > 0;
this.keyPrefix = prefixKey.retain();
assert keyPrefix.refCnt() > 0;
this.keyPrefix = prefixKey.receive();
assert keyPrefix.isAccessible();
this.keyPrefixLength = keyPrefix.readableBytes();
this.keySuffixLength = keySuffixSerializer.getSerializedBinaryLength();
this.keyExtLength = keyExtLength;
ByteBuf firstKey = firstRangeKey(alloc,
keyPrefix.retain(),
try (Buffer firstKey = firstRangeKey(alloc,
keyPrefix.copy().send(),
keyPrefixLength,
keySuffixLength,
keyExtLength
);
try {
ByteBuf nextRangeKey = nextRangeKey(alloc,
keyPrefix.retain(),
).receive()) {
try (Buffer nextRangeKey = nextRangeKey(alloc,
keyPrefix.copy().send(),
keyPrefixLength,
keySuffixLength,
keyExtLength
);
try {
assert keyPrefix.refCnt() > 0;
).receive()) {
assert keyPrefix.isAccessible();
assert keyPrefixLength == 0 || !LLUtils.equals(firstKey, nextRangeKey);
this.range = keyPrefixLength == 0 ? LLRange.all() : LLRange.of(firstKey.retain(), nextRangeKey.retain());
this.range = keyPrefixLength == 0 ? LLRange.all() : LLRange.of(firstKey.send(), nextRangeKey.send());
this.rangeMono = LLUtils.lazyRetainRange(this.range);
assert subStageKeysConsistency(keyPrefixLength + keySuffixLength + keyExtLength);
} finally {
nextRangeKey.release();
}
} finally {
firstKey.release();
}
} finally {
prefixKey.release();
}
}
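The constructor derives this sub-map's key range from the two helpers above: firstRangeKey is the prefix with a zero-filled suffix and ext, and nextRangeKey is the incremented prefix zero-filled the same way, so the half-open interval they bound covers exactly the keys that start with the prefix. A worked example with a 2-byte prefix, 1-byte suffix and 1-byte ext (values illustrative):

// prefix        = {0x01, 0x02}
// firstRangeKey = {0x01, 0x02, 0x00, 0x00}   prefix + zero suffix + zero ext
// nextRangeKey  = {0x01, 0x03, 0x00, 0x00}   incremented prefix, zero-filled
// Every key of the form 0x0102???? sorts at or after firstRangeKey and
// strictly before nextRangeKey, so LLRange.of(firstKey, nextRangeKey)
// selects exactly this sub-dictionary.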
@ -302,49 +266,31 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
/**
* Keep only suffix and ext
*/
protected ByteBuf stripPrefix(ByteBuf key, boolean slice) {
try {
if (slice) {
return key.retainedSlice(this.keyPrefixLength, key.readableBytes() - this.keyPrefixLength);
} else {
return key.retain().readerIndex(key.readerIndex() + keyPrefixLength);
}
} finally {
key.release();
protected Send<Buffer> stripPrefix(Send<Buffer> keyToReceive) {
try (var key = keyToReceive.receive()) {
return key.copy(this.keyPrefixLength, key.readableBytes() - this.keyPrefixLength).send();
}
}

/**
* Remove ext from full key
*/
protected ByteBuf removeExtFromFullKey(ByteBuf key, boolean slice) {
try {
if (slice) {
return key.retainedSlice(key.readerIndex(), keyPrefixLength + keySuffixLength);
} else {
return key.retain().writerIndex(key.writerIndex() - (keyPrefixLength + keySuffixLength));
}
} finally {
key.release();
protected Send<Buffer> removeExtFromFullKey(Send<Buffer> keyToReceive) {
try (var key = keyToReceive.receive()) {
return key.copy(key.readerOffset(), keyPrefixLength + keySuffixLength).send();
}
}

/**
* Add prefix to suffix
*/
protected ByteBuf toKeyWithoutExt(ByteBuf suffixKey) {
try {
protected Send<Buffer> toKeyWithoutExt(Send<Buffer> suffixKeyToReceive) {
try (var suffixKey = suffixKeyToReceive.receive()) {
assert suffixKey.readableBytes() == keySuffixLength;
ByteBuf result = LLUtils.compositeBuffer(alloc, keyPrefix.retain(), suffixKey.retain());
assert keyPrefix.refCnt() > 0;
try {
try (Buffer result = LLUtils.compositeBuffer(alloc, keyPrefix.copy().send(), suffixKey.send()).receive()) {
assert result.readableBytes() == keyPrefixLength + keySuffixLength;
return result.retain();
} finally {
result.release();
return result.send();
}
} finally {
suffixKey.release();
}
}

@ -356,26 +302,23 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
}
}

protected LLRange toExtRange(ByteBuf keySuffix) {
try {
ByteBuf first = firstRangeKey(alloc,
keyPrefix.retain(),
keySuffix.retain(),
protected Send<LLRange> toExtRange(Buffer keySuffix) {
try (Buffer first = firstRangeKey(alloc,
keyPrefix.copy().send(),
keySuffix.copy().send(),
keyPrefixLength,
keySuffixLength,
keyExtLength
);
ByteBuf end = nextRangeKey(alloc,
keyPrefix.retain(),
keySuffix.retain(),
).receive()) {
try (Buffer end = nextRangeKey(alloc,
keyPrefix.copy().send(),
keySuffix.copy().send(),
keyPrefixLength,
keySuffixLength,
keyExtLength
);
assert keyPrefix.refCnt() > 0;
return LLRange.of(first, end);
} finally {
keySuffix.release();
).receive()) {
return LLRange.of(first.send(), end.send()).send();
}
}
}

@ -392,16 +335,14 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
@Override
public Mono<US> at(@Nullable CompositeSnapshot snapshot, T keySuffix) {
return Mono.using(
() -> serializeSuffix(keySuffix),
keySuffixData -> {
return Mono.using(
() -> toKeyWithoutExt(keySuffixData.retain()),
() -> serializeSuffix(keySuffix).receive(),
keySuffixData -> Mono.using(
() -> toKeyWithoutExt(keySuffixData.send()).receive(),
keyWithoutExt -> this.subStageGetter
.subStage(dictionary, snapshot, LLUtils.lazyRetain(keyWithoutExt)),
ReferenceCounted::release
);
},
ReferenceCounted::release
Resource::close
),
Resource::close
).transform(LLUtils::handleDiscard).doOnDiscard(DatabaseStage.class, DatabaseStage::release);
}

@ -415,26 +356,21 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
return dictionary.badBlocks(rangeMono);
}

private static record GroupBuffers(ByteBuf groupKeyWithExt, ByteBuf groupKeyWithoutExt, ByteBuf groupSuffix) {}
private static record GroupBuffers(Buffer groupKeyWithExt, Buffer groupKeyWithoutExt, Buffer groupSuffix) {}

@Override
public Flux<Entry<T, US>> getAllStages(@Nullable CompositeSnapshot snapshot) {

return Flux
.defer(() -> dictionary.getRangeKeyPrefixes(resolveSnapshot(snapshot), rangeMono, keyPrefixLength + keySuffixLength))
.flatMapSequential(groupKeyWithoutExt -> Mono
.flatMapSequential(groupKeyWithoutExtSend -> Mono
.using(
() -> {
try {
var groupSuffix = this.stripPrefix(groupKeyWithoutExt.retain(), true);
try {
try (var groupKeyWithoutExt = groupKeyWithoutExtSend.receive()) {
try (var groupSuffix = this.stripPrefix(groupKeyWithoutExt.copy().send()).receive()) {
assert subStageKeysConsistency(groupKeyWithoutExt.readableBytes() + keyExtLength);
return Tuples.of(groupKeyWithoutExt.retain(), groupSuffix.retain());
} finally {
groupSuffix.release();
return Tuples.of(groupKeyWithoutExt, groupSuffix);
}
} finally {
groupKeyWithoutExt.release();
}
},
groupKeyWithoutExtAndGroupSuffix -> this.subStageGetter
@ -444,14 +380,15 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
)
.<Entry<T, US>>handle((us, sink) -> {
try {
sink.next(Map.entry(this.deserializeSuffix(groupKeyWithoutExtAndGroupSuffix.getT2().retain()), us));
sink.next(Map.entry(this.deserializeSuffix(groupKeyWithoutExtAndGroupSuffix.getT2().send()),
us));
} catch (SerializationException ex) {
sink.error(ex);
}
}),
entry -> {
entry.getT1().release();
entry.getT2().release();
entry.getT1().close();
entry.getT2().close();
}
)
)
@ -489,8 +426,8 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
return dictionary.clear();
} else if (range.isSingle()) {
return dictionary
.remove(LLUtils.lazyRetain(range.getSingle()), LLDictionaryResultType.VOID)
.doOnNext(ReferenceCounted::release)
.remove(LLUtils.lazyRetain(range::getSingle), LLDictionaryResultType.VOID)
.doOnNext(Send::close)
.then();
} else {
return dictionary.setRange(LLUtils.lazyRetainRange(range), Flux.empty());
@ -499,31 +436,30 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
}

//todo: temporary wrapper. convert the whole class to buffers
protected T deserializeSuffix(ByteBuf keySuffix) throws SerializationException {
try {
protected T deserializeSuffix(Send<Buffer> keySuffixToReceive) throws SerializationException {
try (var keySuffix = keySuffixToReceive.receive()) {
assert suffixKeyConsistency(keySuffix.readableBytes());
var result = keySuffixSerializer.deserialize(keySuffix.retain());
assert keyPrefix.refCnt() > 0;
var result = keySuffixSerializer.deserialize(keySuffix.send());
assert keyPrefix.isAccessible();
return result;
} finally {
keySuffix.release();
}
}

//todo: temporary wrapper. convert the whole class to buffers
protected ByteBuf serializeSuffix(T keySuffix) throws SerializationException {
ByteBuf suffixData = keySuffixSerializer.serialize(keySuffix);
protected Send<Buffer> serializeSuffix(T keySuffix) throws SerializationException {
try (Buffer suffixData = keySuffixSerializer.serialize(keySuffix).receive()) {
assert suffixKeyConsistency(suffixData.readableBytes());
assert keyPrefix.refCnt() > 0;
return suffixData;
assert keyPrefix.isAccessible();
return suffixData.send();
}
}

@Override
public void release() {
if (!released) {
released = true;
this.range.release();
this.keyPrefix.release();
this.range.close();
this.keyPrefix.close();
} else {
throw new IllegalReferenceCountException(0, -1);
}

@ -1,7 +1,8 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.Delta;
@ -33,17 +34,16 @@ import reactor.util.function.Tuples;
@SuppressWarnings("unused")
public class DatabaseMapDictionaryHashed<T, U, TH> implements DatabaseStageMap<T, U, DatabaseStageEntry<U>> {

private final ByteBufAllocator alloc;
private final BufferAllocator alloc;
private final DatabaseMapDictionary<TH, ObjectArraySet<Entry<T, U>>> subDictionary;
private final Function<T, TH> keySuffixHashFunction;

protected DatabaseMapDictionaryHashed(LLDictionary dictionary,
ByteBuf prefixKey,
Serializer<T, ByteBuf> keySuffixSerializer,
Serializer<U, ByteBuf> valueSerializer,
Send<Buffer> prefixKey,
Serializer<T, Send<Buffer>> keySuffixSerializer,
Serializer<U, Send<Buffer>> valueSerializer,
Function<T, TH> keySuffixHashFunction,
SerializerFixedBinaryLength<TH, ByteBuf> keySuffixHashSerializer) {
try {
SerializerFixedBinaryLength<TH, Buffer> keySuffixHashSerializer) {
if (dictionary.getUpdateMode().block() != UpdateMode.ALLOW) {
throw new IllegalArgumentException("Hashed maps only works when UpdateMode is ALLOW");
}
@ -53,21 +53,18 @@ public class DatabaseMapDictionaryHashed<T, U, TH> implements DatabaseStageMap<T
ValuesSetSerializer<Entry<T, U>> valuesSetSerializer
= new ValuesSetSerializer<>(alloc, valueWithHashSerializer);
this.subDictionary = DatabaseMapDictionary.tail(dictionary,
prefixKey.retain(),
prefixKey,
keySuffixHashSerializer,
valuesSetSerializer
);
this.keySuffixHashFunction = keySuffixHashFunction;
} finally {
prefixKey.release();
}
}

public static <T, U, UH> DatabaseMapDictionaryHashed<T, U, UH> simple(LLDictionary dictionary,
Serializer<T, ByteBuf> keySerializer,
Serializer<U, ByteBuf> valueSerializer,
Serializer<T, Buffer> keySerializer,
Serializer<U, Buffer> valueSerializer,
Function<T, UH> keyHashFunction,
SerializerFixedBinaryLength<UH, ByteBuf> keyHashSerializer) {
SerializerFixedBinaryLength<UH, Buffer> keyHashSerializer) {
return new DatabaseMapDictionaryHashed<>(
dictionary,
dictionary.getAllocator().buffer(0),
@ -79,11 +76,11 @@ public class DatabaseMapDictionaryHashed<T, U, TH> implements DatabaseStageMap<T
}

public static <T, U, UH> DatabaseMapDictionaryHashed<T, U, UH> tail(LLDictionary dictionary,
ByteBuf prefixKey,
Serializer<T, ByteBuf> keySuffixSerializer,
Serializer<U, ByteBuf> valueSerializer,
Buffer prefixKey,
Serializer<T, Buffer> keySuffixSerializer,
Serializer<U, Buffer> valueSerializer,
Function<T, UH> keySuffixHashFunction,
SerializerFixedBinaryLength<UH, ByteBuf> keySuffixHashSerializer) {
SerializerFixedBinaryLength<UH, Buffer> keySuffixHashSerializer) {
return new DatabaseMapDictionaryHashed<>(dictionary,
prefixKey,
keySuffixSerializer,

@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
@ -15,13 +15,13 @@ import reactor.core.publisher.Mono;
public class DatabaseSetDictionary<T> extends DatabaseMapDictionary<T, Nothing> {

protected DatabaseSetDictionary(LLDictionary dictionary,
ByteBuf prefixKey,
SerializerFixedBinaryLength<T, ByteBuf> keySuffixSerializer) {
Buffer prefixKey,
SerializerFixedBinaryLength<T, Buffer> keySuffixSerializer) {
super(dictionary, prefixKey, keySuffixSerializer, DatabaseEmpty.NOTHING_SERIALIZER);
}

public static <T> DatabaseSetDictionary<T> simple(LLDictionary dictionary,
SerializerFixedBinaryLength<T, ByteBuf> keySerializer) {
SerializerFixedBinaryLength<T, Buffer> keySerializer) {
var buf = dictionary.getAllocator().buffer(0);
try {
return new DatabaseSetDictionary<>(dictionary, buf, keySerializer);
@ -31,8 +31,8 @@ public class DatabaseSetDictionary<T> extends DatabaseMapDictionary<T, Nothing>
}

public static <T> DatabaseSetDictionary<T> tail(LLDictionary dictionary,
ByteBuf prefixKey,
SerializerFixedBinaryLength<T, ByteBuf> keySuffixSerializer) {
Buffer prefixKey,
SerializerFixedBinaryLength<T, Buffer> keySuffixSerializer) {
return new DatabaseSetDictionary<>(dictionary, prefixKey, keySuffixSerializer);
}

@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
@ -17,10 +17,10 @@ import reactor.core.publisher.Mono;
public class DatabaseSetDictionaryHashed<T, TH> extends DatabaseMapDictionaryHashed<T, Nothing, TH> {

protected DatabaseSetDictionaryHashed(LLDictionary dictionary,
ByteBuf prefixKey,
Serializer<T, ByteBuf> keySuffixSerializer,
Buffer prefixKey,
Serializer<T, Buffer> keySuffixSerializer,
Function<T, TH> keySuffixHashFunction,
SerializerFixedBinaryLength<TH, ByteBuf> keySuffixHashSerializer) {
SerializerFixedBinaryLength<TH, Buffer> keySuffixHashSerializer) {
super(dictionary,
prefixKey,
keySuffixSerializer,
@ -31,9 +31,9 @@ public class DatabaseSetDictionaryHashed<T, TH> extends DatabaseMapDictionaryHas
}

public static <T, TH> DatabaseSetDictionaryHashed<T, TH> simple(LLDictionary dictionary,
Serializer<T, ByteBuf> keySerializer,
Serializer<T, Buffer> keySerializer,
Function<T, TH> keyHashFunction,
SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer) {
SerializerFixedBinaryLength<TH, Buffer> keyHashSerializer) {
return new DatabaseSetDictionaryHashed<>(dictionary,
dictionary.getAllocator().buffer(0),
keySerializer,
@ -43,10 +43,10 @@ public class DatabaseSetDictionaryHashed<T, TH> extends DatabaseMapDictionaryHas
}

public static <T, TH> DatabaseSetDictionaryHashed<T, TH> tail(LLDictionary dictionary,
ByteBuf prefixKey,
Serializer<T, ByteBuf> keySuffixSerializer,
Buffer prefixKey,
Serializer<T, Buffer> keySuffixSerializer,
Function<T, TH> keyHashFunction,
SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer) {
SerializerFixedBinaryLength<TH, Buffer> keyHashSerializer) {
return new DatabaseSetDictionaryHashed<>(dictionary,
prefixKey,
keySuffixSerializer,

@ -1,6 +1,8 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Send;
import io.netty.buffer.api.internal.ResourceSupport;
import io.netty.util.ReferenceCounted;
import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.dbengine.client.CompositeSnapshot;
@ -23,18 +25,16 @@ import reactor.core.publisher.SynchronousSink;
public class DatabaseSingle<U> implements DatabaseStageEntry<U> {

private final LLDictionary dictionary;
private final ByteBuf key;
private final Mono<ByteBuf> keyMono;
private final Serializer<U, ByteBuf> serializer;
private final Buffer key;
private final Mono<Send<Buffer>> keyMono;
private final Serializer<U, Send<Buffer>> serializer;

public DatabaseSingle(LLDictionary dictionary, ByteBuf key, Serializer<U, ByteBuf> serializer) {
try {
public DatabaseSingle(LLDictionary dictionary, Send<Buffer> key, Serializer<U, Send<Buffer>> serializer) {
try (key) {
this.dictionary = dictionary;
this.key = key.retain();
this.key = key.receive();
this.keyMono = LLUtils.lazyRetain(this.key);
this.serializer = serializer;
} finally {
key.release();
}
}

@ -46,7 +46,7 @@ public class DatabaseSingle<U> implements DatabaseStageEntry<U> {
}
}

private void deserializeValue(ByteBuf value, SynchronousSink<U> sink) {
private void deserializeValue(Send<Buffer> value, SynchronousSink<U> sink) {
try {
sink.next(serializer.deserialize(value));
} catch (SerializationException ex) {
@ -63,13 +63,9 @@ public class DatabaseSingle<U> implements DatabaseStageEntry<U> {

@Override
public Mono<U> setAndGetPrevious(U value) {
return Mono
.using(() -> serializer.serialize(value),
valueByteBuf -> dictionary
.put(keyMono, LLUtils.lazyRetain(valueByteBuf), LLDictionaryResultType.PREVIOUS_VALUE)
.handle(this::deserializeValue),
ReferenceCounted::release
);
return dictionary
.put(keyMono, Mono.fromCallable(() -> serializer.serialize(value)), LLDictionaryResultType.PREVIOUS_VALUE)
.handle(this::deserializeValue);
}
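setAndGetPrevious shows the shape most call sites take after the refactor: instead of Mono.using with an explicit release finalizer, the serialized value is created lazily inside the pipeline and handed off as a Send<Buffer>, so the receiver owns (and closes) it. A reduced sketch of the pattern; ValueSerializer is an illustrative stand-in for the project's Serializer:

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Send;
import reactor.core.publisher.Mono;

class LazySerializeSketch {

	interface ValueSerializer<U> {
		Send<Buffer> serialize(U value);
	}

	// Serialization runs per subscription inside the Mono; a failure
	// surfaces as onError, and ownership of the buffer moves to whoever
	// consumes the Send, so no finally/release block is needed here.
	static <U> Mono<Send<Buffer>> lazySerialize(ValueSerializer<U> serializer, U value) {
		return Mono.fromCallable(() -> serializer.serialize(value));
	}
}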
@Override
|
||||
@ -99,7 +95,7 @@ public class DatabaseSingle<U> implements DatabaseStageEntry<U> {
|
||||
} else {
|
||||
return serializer.serialize(result);
|
||||
}
|
||||
}, existsAlmostCertainly).transform(mono -> LLUtils.mapDelta(mono, serializer::deserialize));
|
||||
}, existsAlmostCertainly).transform(mono -> LLUtils.mapLLDelta(mono, serializer::deserialize));
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -112,23 +108,23 @@ public class DatabaseSingle<U> implements DatabaseStageEntry<U> {
|
||||
@Override
|
||||
public Mono<Long> leavesCount(@Nullable CompositeSnapshot snapshot, boolean fast) {
|
||||
return dictionary
|
||||
.isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(LLRange::single))
|
||||
.isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(LLRange::single).map(ResourceSupport::send))
|
||||
.map(empty -> empty ? 0L : 1L);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Mono<Boolean> isEmpty(@Nullable CompositeSnapshot snapshot) {
|
||||
return dictionary
|
||||
.isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(LLRange::single));
|
||||
.isRangeEmpty(resolveSnapshot(snapshot), keyMono.map(LLRange::single).map(ResourceSupport::send));
|
||||
}
|
||||
|
||||
@Override
|
||||
public void release() {
|
||||
key.release();
|
||||
key.close();
|
||||
}
|
||||
|
||||
@Override
|
||||
public Flux<BadBlock> badBlocks() {
|
||||
return dictionary.badBlocks(keyMono.map(LLRange::single));
|
||||
return dictionary.badBlocks(keyMono.map(LLRange::single).map(ResourceSupport::send));
|
||||
}
|
||||
}
|
@ -1,6 +1,6 @@
|
||||
package it.cavallium.dbengine.database.collections;
|
||||
|
||||
import io.netty.buffer.ByteBuf;
|
||||
import io.netty.buffer.api.Buffer;
|
||||
import it.cavallium.dbengine.client.BadBlock;
|
||||
import it.cavallium.dbengine.client.CompositeSnapshot;
|
||||
import it.cavallium.dbengine.database.Delta;
|
||||
|
@ -1,6 +1,7 @@
|
||||
package it.cavallium.dbengine.database.collections;
|
||||
|
||||
import io.netty.buffer.ByteBuf;
|
||||
import io.netty.buffer.api.Buffer;
|
||||
import io.netty.buffer.api.Send;
|
||||
import it.cavallium.dbengine.client.CompositeSnapshot;
|
||||
import it.cavallium.dbengine.database.LLDictionary;
|
||||
import java.util.Collection;
|
||||
@ -13,7 +14,7 @@ public interface SubStageGetter<U, US extends DatabaseStage<U>> {
|
||||
|
||||
Mono<US> subStage(LLDictionary dictionary,
|
||||
@Nullable CompositeSnapshot snapshot,
|
||||
Mono<ByteBuf> prefixKey);
|
||||
Mono<Send<Buffer>> prefixKey);
|
||||
|
||||
boolean isMultiKey();
|
||||
}
|
||||
|
@ -1,6 +1,6 @@
|
||||
package it.cavallium.dbengine.database.collections;
|
||||
|
||||
import io.netty.buffer.ByteBuf;
|
||||
import io.netty.buffer.api.Buffer;
|
||||
import it.cavallium.dbengine.client.CompositeSnapshot;
|
||||
import it.cavallium.dbengine.database.LLDictionary;
|
||||
import it.cavallium.dbengine.database.serialization.Serializer;
|
||||
@ -16,15 +16,15 @@ import reactor.core.publisher.Mono;
|
||||
public class SubStageGetterHashMap<T, U, TH> implements
|
||||
SubStageGetter<Map<T, U>, DatabaseMapDictionaryHashed<T, U, TH>> {
|
||||
|
||||
private final Serializer<T, ByteBuf> keySerializer;
|
||||
private final Serializer<U, ByteBuf> valueSerializer;
|
||||
private final Serializer<T, Buffer> keySerializer;
|
||||
private final Serializer<U, Buffer> valueSerializer;
|
||||
private final Function<T, TH> keyHashFunction;
|
||||
private final SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer;
|
||||
private final SerializerFixedBinaryLength<TH, Buffer> keyHashSerializer;
|
||||
|
||||
public SubStageGetterHashMap(Serializer<T, ByteBuf> keySerializer,
|
||||
Serializer<U, ByteBuf> valueSerializer,
|
||||
public SubStageGetterHashMap(Serializer<T, Buffer> keySerializer,
|
||||
Serializer<U, Buffer> valueSerializer,
|
||||
Function<T, TH> keyHashFunction,
|
||||
SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer) {
|
||||
SerializerFixedBinaryLength<TH, Buffer> keyHashSerializer) {
|
||||
this.keySerializer = keySerializer;
|
||||
this.valueSerializer = valueSerializer;
|
||||
this.keyHashFunction = keyHashFunction;
|
||||
@ -34,7 +34,7 @@ public class SubStageGetterHashMap<T, U, TH> implements
|
||||
@Override
|
||||
public Mono<DatabaseMapDictionaryHashed<T, U, TH>> subStage(LLDictionary dictionary,
|
||||
@Nullable CompositeSnapshot snapshot,
|
||||
Mono<ByteBuf> prefixKeyMono) {
|
||||
Mono<Buffer> prefixKeyMono) {
|
||||
return Mono.usingWhen(
|
||||
prefixKeyMono,
|
||||
prefixKey -> Mono
|
||||
|
@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
@ -16,13 +16,13 @@ import reactor.core.publisher.Mono;
public class SubStageGetterHashSet<T, TH> implements
SubStageGetter<Map<T, Nothing>, DatabaseSetDictionaryHashed<T, TH>> {

private final Serializer<T, ByteBuf> keySerializer;
private final Serializer<T, Buffer> keySerializer;
private final Function<T, TH> keyHashFunction;
private final SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer;
private final SerializerFixedBinaryLength<TH, Buffer> keyHashSerializer;

public SubStageGetterHashSet(Serializer<T, ByteBuf> keySerializer,
public SubStageGetterHashSet(Serializer<T, Buffer> keySerializer,
Function<T, TH> keyHashFunction,
SerializerFixedBinaryLength<TH, ByteBuf> keyHashSerializer) {
SerializerFixedBinaryLength<TH, Buffer> keyHashSerializer) {
this.keySerializer = keySerializer;
this.keyHashFunction = keyHashFunction;
this.keyHashSerializer = keyHashSerializer;
@ -31,7 +31,7 @@ public class SubStageGetterHashSet<T, TH> implements
@Override
public Mono<DatabaseSetDictionaryHashed<T, TH>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> prefixKeyMono) {
Mono<Buffer> prefixKeyMono) {
return Mono.usingWhen(prefixKeyMono,
prefixKey -> Mono
.fromSupplier(() -> DatabaseSetDictionaryHashed
@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import io.netty.util.ReferenceCounted;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.LLDictionary;
@ -14,11 +14,11 @@ import reactor.core.publisher.Mono;

public class SubStageGetterMap<T, U> implements SubStageGetter<Map<T, U>, DatabaseMapDictionary<T, U>> {

private final SerializerFixedBinaryLength<T, ByteBuf> keySerializer;
private final Serializer<U, ByteBuf> valueSerializer;
private final SerializerFixedBinaryLength<T, Buffer> keySerializer;
private final Serializer<U, Buffer> valueSerializer;

public SubStageGetterMap(SerializerFixedBinaryLength<T, ByteBuf> keySerializer,
Serializer<U, ByteBuf> valueSerializer) {
public SubStageGetterMap(SerializerFixedBinaryLength<T, Buffer> keySerializer,
Serializer<U, Buffer> valueSerializer) {
this.keySerializer = keySerializer;
this.valueSerializer = valueSerializer;
}
@ -26,7 +26,7 @@ public class SubStageGetterMap<T, U> implements SubStageGetter<Map<T, U>, Databa
@Override
public Mono<DatabaseMapDictionary<T, U>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> prefixKeyMono) {
Mono<Buffer> prefixKeyMono) {
return Mono.usingWhen(prefixKeyMono,
prefixKey -> Mono
.fromSupplier(() -> DatabaseMapDictionary
@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import io.netty.util.ReferenceCounted;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.LLDictionary;
@ -15,11 +15,11 @@ public class SubStageGetterMapDeep<T, U, US extends DatabaseStage<U>> implements
SubStageGetter<Map<T, U>, DatabaseMapDictionaryDeep<T, U, US>> {

private final SubStageGetter<U, US> subStageGetter;
private final SerializerFixedBinaryLength<T, ByteBuf> keySerializer;
private final SerializerFixedBinaryLength<T, Buffer> keySerializer;
private final int keyExtLength;

public SubStageGetterMapDeep(SubStageGetter<U, US> subStageGetter,
SerializerFixedBinaryLength<T, ByteBuf> keySerializer,
SerializerFixedBinaryLength<T, Buffer> keySerializer,
int keyExtLength) {
this.subStageGetter = subStageGetter;
this.keySerializer = keySerializer;
@ -41,7 +41,7 @@ public class SubStageGetterMapDeep<T, U, US extends DatabaseStage<U>> implements
@Override
public Mono<DatabaseMapDictionaryDeep<T, U, US>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> prefixKeyMono) {
Mono<Buffer> prefixKeyMono) {
return Mono.usingWhen(prefixKeyMono,
prefixKey -> Mono
.fromSupplier(() -> DatabaseMapDictionaryDeep
@ -61,16 +61,16 @@ public class SubStageGetterMapDeep<T, U, US extends DatabaseStage<U>> implements
return true;
}

private Mono<Void> checkKeyFluxConsistency(ByteBuf prefixKey, List<ByteBuf> keys) {
private Mono<Void> checkKeyFluxConsistency(Buffer prefixKey, List<Buffer> keys) {
return Mono
.fromCallable(() -> {
try {
for (ByteBuf key : keys) {
for (Buffer key : keys) {
assert key.readableBytes() == prefixKey.readableBytes() + getKeyBinaryLength();
}
} finally {
prefixKey.release();
for (ByteBuf key : keys) {
for (Buffer key : keys) {
key.release();
}
}
@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import io.netty.util.ReferenceCounted;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.LLDictionary;
@ -14,16 +14,16 @@ import reactor.core.publisher.Mono;

public class SubStageGetterSet<T> implements SubStageGetter<Map<T, Nothing>, DatabaseSetDictionary<T>> {

private final SerializerFixedBinaryLength<T, ByteBuf> keySerializer;
private final SerializerFixedBinaryLength<T, Buffer> keySerializer;

public SubStageGetterSet(SerializerFixedBinaryLength<T, ByteBuf> keySerializer) {
public SubStageGetterSet(SerializerFixedBinaryLength<T, Buffer> keySerializer) {
this.keySerializer = keySerializer;
}

@Override
public Mono<DatabaseSetDictionary<T>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> prefixKeyMono) {
Mono<Buffer> prefixKeyMono) {
return Mono.usingWhen(prefixKeyMono,
prefixKey -> Mono
.fromSupplier(() -> DatabaseSetDictionary.tail(dictionary, prefixKey.retain(), keySerializer)),
@ -1,6 +1,7 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.LLDictionary;
import it.cavallium.dbengine.database.LLUtils;
@ -13,21 +14,21 @@ import reactor.core.publisher.Mono;

public class SubStageGetterSingle<T> implements SubStageGetter<T, DatabaseStageEntry<T>> {

private final Serializer<T, ByteBuf> serializer;
private final Serializer<T, Send<Buffer>> serializer;

public SubStageGetterSingle(Serializer<T, ByteBuf> serializer) {
public SubStageGetterSingle(Serializer<T, Send<Buffer>> serializer) {
this.serializer = serializer;
}

@Override
public Mono<DatabaseStageEntry<T>> subStage(LLDictionary dictionary,
@Nullable CompositeSnapshot snapshot,
Mono<ByteBuf> keyPrefixMono) {
Mono<Send<Buffer>> keyPrefixMono) {
return Mono.usingWhen(
keyPrefixMono,
keyPrefix -> Mono
.<DatabaseStageEntry<T>>fromSupplier(() -> new DatabaseSingle<>(dictionary, keyPrefix.retain(), serializer)),
keyPrefix -> Mono.fromRunnable(keyPrefix::release)
.<DatabaseStageEntry<T>>fromSupplier(() -> new DatabaseSingle<>(dictionary, keyPrefix, serializer)),
keyPrefix -> Mono.fromRunnable(keyPrefix::close)
);
}

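The single-stage getter above goes a step further: the prefix key now travels as Send<Buffer>, a one-shot ownership token that the receiver claims with receive() and frees with close(), replacing the retain()/release() pair of the old version. A hedged standalone sketch of the same handoff (method and variable names are illustrative, not from this commit):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Send;
import reactor.core.publisher.Mono;

class SendHandoffSketch {
    // Claim a transferred buffer, use it, and close it exactly once.
    static Mono<Integer> readFirstInt(Mono<Send<Buffer>> keyMono) {
        return Mono.usingWhen(
                keyMono.map(Send::receive),             // claim ownership of the buffer
                key -> Mono.fromSupplier(key::readInt), // use it inside the scope
                key -> Mono.fromRunnable(key::close));  // close() replaces release()
    }
}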
@ -1,9 +1,9 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import it.cavallium.dbengine.database.serialization.Serializer;

public class SubStageGetterSingleBytes extends SubStageGetterSingle<ByteBuf> {
public class SubStageGetterSingleBytes extends SubStageGetterSingle<Buffer> {

public SubStageGetterSingleBytes() {
super(Serializer.noop());
@ -1,7 +1,8 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.Serializer;
@ -9,43 +10,35 @@ import java.util.Map;
import java.util.Map.Entry;
import org.jetbrains.annotations.NotNull;

class ValueWithHashSerializer<X, Y> implements Serializer<Entry<X, Y>, ByteBuf> {
class ValueWithHashSerializer<X, Y> implements Serializer<Entry<X, Y>, Send<Buffer>> {

private final ByteBufAllocator allocator;
private final Serializer<X, ByteBuf> keySuffixSerializer;
private final Serializer<Y, ByteBuf> valueSerializer;
private final BufferAllocator allocator;
private final Serializer<X, Send<Buffer>> keySuffixSerializer;
private final Serializer<Y, Send<Buffer>> valueSerializer;

ValueWithHashSerializer(ByteBufAllocator allocator,
Serializer<X, ByteBuf> keySuffixSerializer,
Serializer<Y, ByteBuf> valueSerializer) {
ValueWithHashSerializer(BufferAllocator allocator,
Serializer<X, Send<Buffer>> keySuffixSerializer,
Serializer<Y, Send<Buffer>> valueSerializer) {
this.allocator = allocator;
this.keySuffixSerializer = keySuffixSerializer;
this.valueSerializer = valueSerializer;
}

@Override
public @NotNull Entry<X, Y> deserialize(@NotNull ByteBuf serialized) throws SerializationException {
try {
X deserializedKey = keySuffixSerializer.deserialize(serialized.retain());
Y deserializedValue = valueSerializer.deserialize(serialized.retain());
public @NotNull Entry<X, Y> deserialize(@NotNull Send<Buffer> serializedToReceive) throws SerializationException {
try (var serialized = serializedToReceive.receive()) {
X deserializedKey = keySuffixSerializer.deserialize(serialized.copy().send());
Y deserializedValue = valueSerializer.deserialize(serialized.send());
return Map.entry(deserializedKey, deserializedValue);
} finally {
serialized.release();
}
}

@Override
public @NotNull ByteBuf serialize(@NotNull Entry<X, Y> deserialized) throws SerializationException {
ByteBuf keySuffix = keySuffixSerializer.serialize(deserialized.getKey());
try {
ByteBuf value = valueSerializer.serialize(deserialized.getValue());
try {
return LLUtils.compositeBuffer(allocator, keySuffix.retain(), value.retain());
} finally {
value.release();
public @NotNull Send<Buffer> serialize(@NotNull Entry<X, Y> deserialized) throws SerializationException {
try (Buffer keySuffix = keySuffixSerializer.serialize(deserialized.getKey()).receive()) {
try (Buffer value = valueSerializer.serialize(deserialized.getValue()).receive()) {
return LLUtils.compositeBuffer(allocator, keySuffix.send(), value.send());
}
} finally {
keySuffix.release();
}
}
}
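The deserialize rewrite shows why the hunk calls serialized.copy().send() for the key: a Send can be received only once, so the first consumer gets an independent copy while the second takes the original. A sketch of that split under the same assumption the hunk itself makes, namely that letting try-with-resources close an already-sent buffer is safe (placeholder consumers, not code from this commit):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Send;

class SendSplitSketch {
    // Feed one incoming buffer to two independent consumers.
    static void consumeTwice(Send<Buffer> incoming) {
        try (Buffer serialized = incoming.receive()) {
            Send<Buffer> forKey = serialized.copy().send(); // independent copy, own lifetime
            Send<Buffer> forValue = serialized.send();      // moves the original out of this scope
            try (Buffer key = forKey.receive(); Buffer value = forValue.receive()) {
                // placeholder consumers: read from key and value here
            }
        }
    }
}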
@ -1,7 +1,8 @@
package it.cavallium.dbengine.database.collections;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.unimi.dsi.fastutil.objects.ObjectArraySet;
@ -13,47 +14,39 @@ import java.util.HashSet;
import java.util.Set;
import org.jetbrains.annotations.NotNull;

class ValuesSetSerializer<X> implements Serializer<ObjectArraySet<X>, ByteBuf> {
class ValuesSetSerializer<X> implements Serializer<ObjectArraySet<X>, Send<Buffer>> {

private final ByteBufAllocator allocator;
private final Serializer<X, ByteBuf> entrySerializer;
private final BufferAllocator allocator;
private final Serializer<X, Send<Buffer>> entrySerializer;

ValuesSetSerializer(ByteBufAllocator allocator, Serializer<X, ByteBuf> entrySerializer) {
ValuesSetSerializer(BufferAllocator allocator, Serializer<X, Send<Buffer>> entrySerializer) {
this.allocator = allocator;
this.entrySerializer = entrySerializer;
}

@Override
public @NotNull ObjectArraySet<X> deserialize(@NotNull ByteBuf serialized) throws SerializationException {
try {
public @NotNull ObjectArraySet<X> deserialize(@NotNull Send<Buffer> serializedToReceive) throws SerializationException {
try (var serialized = serializedToReceive.receive()) {
int entriesLength = serialized.readInt();
ArrayList<X> deserializedElements = new ArrayList<>(entriesLength);
for (int i = 0; i < entriesLength; i++) {
X entry = entrySerializer.deserialize(serialized.retain());
X entry = entrySerializer.deserialize(serialized.send());
deserializedElements.add(entry);
}
return new ObjectArraySet<>(deserializedElements);
} finally {
serialized.release();
}
}

@Override
public @NotNull ByteBuf serialize(@NotNull ObjectArraySet<X> deserialized) throws SerializationException {
ByteBuf output = allocator.buffer();
try {
public @NotNull Send<Buffer> serialize(@NotNull ObjectArraySet<X> deserialized) throws SerializationException {
try (Buffer output = allocator.allocate(64)) {
output.writeInt(deserialized.size());
for (X entry : deserialized) {
ByteBuf serialized = entrySerializer.serialize(entry);
try {
try (Buffer serialized = entrySerializer.serialize(entry).receive()) {
output.writeBytes(serialized);
} finally {
serialized.release();
}
}
return output.retain();
} finally {
output.release();
return output.send();
}
}
}
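ValuesSetSerializer frames a set as an int element count followed by each serialized element. One caveat in the rewrite: serialize() now allocates a fixed 64 bytes, which is only safe if the snapshot Buffer grows on write; the hedged sketch below sizes the buffer up front instead (illustrative, not code from this commit):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;

class CountPrefixedSketch {
    // Serialize values as: [count:int][value:int]...
    static Send<Buffer> serializeInts(BufferAllocator alloc, int[] values) {
        try (Buffer output = alloc.allocate(Integer.BYTES * (values.length + 1))) {
            output.writeInt(values.length); // count prefix, as in ValuesSetSerializer
            for (int v : values) {
                output.writeInt(v);
            }
            return output.send(); // transfer ownership to the caller
        }
    }
}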
@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.disk;

import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.BufferAllocator;
import it.cavallium.dbengine.client.IndicizerAnalyzers;
import it.cavallium.dbengine.client.IndicizerSimilarities;
import it.cavallium.dbengine.client.LuceneOptions;
@ -23,16 +23,16 @@ public class LLLocalDatabaseConnection implements LLDatabaseConnection {
JMXNettyMonitoringManager.initialize();
}

private final ByteBufAllocator allocator;
private final BufferAllocator allocator;
private final Path basePath;

public LLLocalDatabaseConnection(ByteBufAllocator allocator, Path basePath) {
public LLLocalDatabaseConnection(BufferAllocator allocator, Path basePath) {
this.allocator = allocator;
this.basePath = basePath;
}

@Override
public ByteBufAllocator getAllocator() {
public BufferAllocator getAllocator() {
return allocator;
}
File diff suppressed because it is too large
@ -1,7 +1,8 @@
package it.cavallium.dbengine.database.disk;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLRange;
import java.util.Map;
@ -10,12 +11,12 @@ import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;

public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksIterator<LLEntry> {
public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksIterator<Send<LLEntry>> {

public LLLocalEntryReactiveRocksIterator(RocksDB db,
ByteBufAllocator alloc,
BufferAllocator alloc,
ColumnFamilyHandle cfh,
LLRange range,
Send<LLRange> range,
boolean allowNettyDirect,
ReadOptions readOptions,
String debugName) {
@ -23,7 +24,7 @@ public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksItera
}

@Override
public LLEntry getEntry(ByteBuf key, ByteBuf value) {
return new LLEntry(key, value);
public Send<LLEntry> getEntry(Send<Buffer> key, Send<Buffer> value) {
return LLEntry.of(key, value).send();
}
}
@ -1,7 +1,8 @@
package it.cavallium.dbengine.database.disk;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLRange;
import java.util.Map;
@ -11,11 +12,11 @@ import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;

public class LLLocalGroupedEntryReactiveRocksIterator extends
LLLocalGroupedReactiveRocksIterator<LLEntry> {
LLLocalGroupedReactiveRocksIterator<Send<LLEntry>> {

public LLLocalGroupedEntryReactiveRocksIterator(RocksDB db, ByteBufAllocator alloc, ColumnFamilyHandle cfh,
public LLLocalGroupedEntryReactiveRocksIterator(RocksDB db, BufferAllocator alloc, ColumnFamilyHandle cfh,
int prefixLength,
LLRange range,
Send<LLRange> range,
boolean allowNettyDirect,
ReadOptions readOptions,
String debugName) {
@ -23,7 +24,7 @@ public class LLLocalGroupedEntryReactiveRocksIterator extends
}

@Override
public LLEntry getEntry(ByteBuf key, ByteBuf value) {
return new LLEntry(key, value);
public Send<LLEntry> getEntry(Send<Buffer> key, Send<Buffer> value) {
return LLEntry.of(key, value).send();
}
}
@ -1,19 +1,20 @@
package it.cavallium.dbengine.database.disk;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.LLRange;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;

public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator<ByteBuf> {
public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator<Send<Buffer>> {

public LLLocalGroupedKeyReactiveRocksIterator(RocksDB db,
ByteBufAllocator alloc,
BufferAllocator alloc,
ColumnFamilyHandle cfh,
int prefixLength,
LLRange range,
Send<LLRange> range,
boolean allowNettyDirect,
ReadOptions readOptions,
String debugName) {
@ -21,9 +22,9 @@ public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReacti
}

@Override
public ByteBuf getEntry(ByteBuf key, ByteBuf value) {
public Send<Buffer> getEntry(Send<Buffer> key, Send<Buffer> value) {
if (value != null) {
value.release();
value.close();
}
return key;
}
@ -1,8 +1,9 @@
package it.cavallium.dbengine.database.disk;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.BufferUtil;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.collections.DatabaseMapDictionaryDeep;
@ -20,7 +21,7 @@ import static io.netty.buffer.Unpooled.*;
public abstract class LLLocalGroupedReactiveRocksIterator<T> {

private final RocksDB db;
private final ByteBufAllocator alloc;
private final BufferAllocator alloc;
private final ColumnFamilyHandle cfh;
private final int prefixLength;
private final LLRange range;
@ -29,9 +30,9 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
private final boolean canFillCache;
private final boolean readValues;

public LLLocalGroupedReactiveRocksIterator(RocksDB db, ByteBufAllocator alloc, ColumnFamilyHandle cfh,
public LLLocalGroupedReactiveRocksIterator(RocksDB db, BufferAllocator alloc, ColumnFamilyHandle cfh,
int prefixLength,
LLRange range,
Send<LLRange> range,
boolean allowNettyDirect,
ReadOptions readOptions,
boolean canFillCache,
@ -59,18 +60,18 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
try {
var rocksIterator = tuple.getT1();
ObjectArrayList<T> values = new ObjectArrayList<>();
ByteBuf firstGroupKey = null;
Buffer firstGroupKey = null;
try {
rocksIterator.status();
while (rocksIterator.isValid()) {
ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key);
Buffer key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key);
try {
if (firstGroupKey == null) {
firstGroupKey = key.retain();
} else if (!ByteBufUtil.equals(firstGroupKey, firstGroupKey.readerIndex(), key, key.readerIndex(), prefixLength)) {
break;
}
ByteBuf value;
Buffer value;
if (readValues) {
value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value);
} else {
@ -112,7 +113,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> {
});
}

public abstract T getEntry(ByteBuf key, ByteBuf value);
public abstract T getEntry(Send<Buffer> key, Send<Buffer> value);

public void release() {
range.release();
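The grouped iterator stops emitting when the key prefix changes. The old code compared prefixes with ByteBufUtil.equals, and the hunk imports io.netty.buffer.api.BufferUtil as its presumed replacement; whether that helper exists in the snapshot is unclear, so here is a hedged fallback using absolute-offset reads (getByte is assumed to be the Buffer accessor that leaves readerOffset untouched):

import io.netty.buffer.api.Buffer;

class PrefixCompareSketch {
    // True if the first prefixLength readable bytes of a and b are equal.
    static boolean samePrefix(Buffer a, Buffer b, int prefixLength) {
        if (a.readableBytes() < prefixLength || b.readableBytes() < prefixLength) {
            return false;
        }
        for (int i = 0; i < prefixLength; i++) {
            if (a.getByte(a.readerOffset() + i) != b.getByte(b.readerOffset() + i)) {
                return false;
            }
        }
        return true;
    }
}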
@ -1,8 +1,9 @@
package it.cavallium.dbengine.database.disk;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.BufferUtil;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import java.util.Arrays;
@ -17,7 +18,7 @@ import static io.netty.buffer.Unpooled.*;
public class LLLocalKeyPrefixReactiveRocksIterator {

private final RocksDB db;
private final ByteBufAllocator alloc;
private final BufferAllocator alloc;
private final ColumnFamilyHandle cfh;
private final int prefixLength;
private final LLRange range;
@ -26,9 +27,9 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
private final boolean canFillCache;
private final String debugName;

public LLLocalKeyPrefixReactiveRocksIterator(RocksDB db, ByteBufAllocator alloc, ColumnFamilyHandle cfh,
public LLLocalKeyPrefixReactiveRocksIterator(RocksDB db, BufferAllocator alloc, ColumnFamilyHandle cfh,
int prefixLength,
LLRange range,
Send<LLRange> range,
boolean allowNettyDirect,
ReadOptions readOptions,
boolean canFillCache,
@ -45,7 +46,7 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
}

public Flux<ByteBuf> flux() {
public Flux<Send<Buffer>> flux() {
return Flux
.generate(() -> {
var readOptions = new ReadOptions(this.readOptions);
@ -59,10 +60,10 @@ public class LLLocalKeyPrefixReactiveRocksIterator {
try {
var rocksIterator = tuple.getT1();
rocksIterator.status();
ByteBuf firstGroupKey = null;
Buffer firstGroupKey = null;
try {
while (rocksIterator.isValid()) {
ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key);
Buffer key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key);
try {
if (firstGroupKey == null) {
firstGroupKey = key.retain();
@ -1,18 +1,19 @@
package it.cavallium.dbengine.database.disk;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.LLRange;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;

public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterator<ByteBuf> {
public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterator<Send<Buffer>> {

public LLLocalKeyReactiveRocksIterator(RocksDB db,
ByteBufAllocator alloc,
BufferAllocator alloc,
ColumnFamilyHandle cfh,
LLRange range,
Send<LLRange> range,
boolean allowNettyDirect,
ReadOptions readOptions,
String debugName) {
@ -20,9 +21,9 @@ public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterato
}

@Override
public ByteBuf getEntry(ByteBuf key, ByteBuf value) {
public Send<Buffer> getEntry(Send<Buffer> key, Send<Buffer> value) {
if (value != null) {
value.release();
value.close();
}
return key;
}
@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.disk;

import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.BufferAllocator;
import it.cavallium.dbengine.database.Column;
import it.cavallium.dbengine.client.DatabaseOptions;
import it.cavallium.dbengine.database.LLKeyValueDatabase;
@ -65,7 +65,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
private static final ColumnFamilyDescriptor DEFAULT_COLUMN_FAMILY = new ColumnFamilyDescriptor(
RocksDB.DEFAULT_COLUMN_FAMILY);

private final ByteBufAllocator allocator;
private final BufferAllocator allocator;
private final Scheduler dbScheduler;

// Configurations
@ -81,7 +81,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
private final AtomicLong nextSnapshotNumbers = new AtomicLong(1);

@SuppressWarnings("SwitchStatementWithTooFewBranches")
public LLLocalKeyValueDatabase(ByteBufAllocator allocator,
public LLLocalKeyValueDatabase(BufferAllocator allocator,
String name,
@Nullable Path path,
List<Column> columns,
@ -497,7 +497,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
}

@Override
public ByteBufAllocator getAllocator() {
public BufferAllocator getAllocator() {
return allocator;
}
@ -2,8 +2,9 @@ package it.cavallium.dbengine.database.disk;

import static it.cavallium.dbengine.database.disk.LLLocalDictionary.getRocksIterator;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import io.netty.util.IllegalReferenceCountException;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
@ -27,7 +28,7 @@ public abstract class LLLocalReactiveRocksIterator<T> {

private final AtomicBoolean released = new AtomicBoolean(false);
private final RocksDB db;
private final ByteBufAllocator alloc;
private final BufferAllocator alloc;
private final ColumnFamilyHandle cfh;
private final LLRange range;
private final boolean allowNettyDirect;
@ -36,9 +37,9 @@ public abstract class LLLocalReactiveRocksIterator<T> {
private final String debugName;

public LLLocalReactiveRocksIterator(RocksDB db,
ByteBufAllocator alloc,
BufferAllocator alloc,
ColumnFamilyHandle cfh,
LLRange range,
Send<LLRange> range,
boolean allowNettyDirect,
ReadOptions readOptions,
boolean readValues,
@ -46,7 +47,7 @@ public abstract class LLLocalReactiveRocksIterator<T> {
this.db = db;
this.alloc = alloc;
this.cfh = cfh;
this.range = range;
this.range = range.receive();
this.allowNettyDirect = allowNettyDirect;
this.readOptions = readOptions;
this.readValues = readValues;
@ -55,59 +56,53 @@ public abstract class LLLocalReactiveRocksIterator<T> {

public Flux<T> flux() {
return Flux
.<T, @NotNull Tuple3<RocksIterator, ReleasableSlice, ReleasableSlice>>generate(() -> {
.generate(() -> {
var readOptions = new ReadOptions(this.readOptions);
if (!range.hasMin() || !range.hasMax()) {
readOptions.setReadaheadSize(32 * 1024); // 32KiB
readOptions.setFillCache(false);
}
return getRocksIterator(allowNettyDirect, readOptions, range.retain(), db, cfh);
return getRocksIterator(allowNettyDirect, readOptions, range.copy().send(), db, cfh);
}, (tuple, sink) -> {
range.retain();
try {
var rocksIterator = tuple.getT1();
rocksIterator.status();
if (rocksIterator.isValid()) {
ByteBuf key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key);
try {
ByteBuf value;
try (Buffer key = LLUtils.readDirectNioBuffer(alloc, rocksIterator::key)) {
Buffer value;
if (readValues) {
value = LLUtils.readDirectNioBuffer(alloc, rocksIterator::value);
} else {
value = alloc.buffer(0);
value = alloc.allocate(0);
}
try {
rocksIterator.next();
rocksIterator.status();
sink.next(getEntry(key.retain(), value.retain()));
sink.next(getEntry(key.send(), value.send()));
} finally {
value.release();
value.close();
}
} finally {
key.release();
}
} else {
sink.complete();
}
} catch (RocksDBException ex) {
sink.error(ex);
} finally {
range.release();
}
return tuple;
}, tuple -> {
var rocksIterator = tuple.getT1();
rocksIterator.close();
tuple.getT2().release();
tuple.getT3().release();
tuple.getT2().close();
tuple.getT3().close();
});
}

public abstract T getEntry(ByteBuf key, ByteBuf value);
public abstract T getEntry(Send<Buffer> key, Send<Buffer> value);

public void release() {
if (released.compareAndSet(false, true)) {
range.release();
range.close();
} else {
throw new IllegalReferenceCountException(0, -1);
}
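After this rewrite the flux emits Send<Buffer> elements, which shifts the duty to receive and close each buffer onto every subscriber. A hedged sketch of a well-behaved consumer (names are illustrative, not from this commit):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Send;
import reactor.core.publisher.Flux;

class FluxConsumerSketch {
    // Each emitted Send must be received and closed exactly once.
    static Flux<Integer> firstInts(Flux<Send<Buffer>> keys) {
        return keys.map(send -> {
            try (Buffer key = send.receive()) {
                return key.readInt();
            }
        });
    }
}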
@ -1,17 +1,20 @@
package it.cavallium.dbengine.database.disk;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Resource;
import it.cavallium.dbengine.database.SafeCloseable;
import org.rocksdb.AbstractSlice;

public interface ReleasableSlice {
public interface ReleasableSlice extends SafeCloseable {

default void release() {
@Override
default void close() {

}

AbstractSlice<?> slice();

ByteBuf byteBuf();
Buffer byteBuf();

Object additionalData();
}
@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.memory;

import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.BufferAllocator;
import it.cavallium.dbengine.client.DatabaseOptions;
import it.cavallium.dbengine.client.IndicizerAnalyzers;
import it.cavallium.dbengine.client.IndicizerSimilarities;
@ -1,7 +1,7 @@
package it.cavallium.dbengine.database.memory;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.ExtraKeyOperationResult;
@ -81,7 +81,7 @@ public class LLMemoryDictionary implements LLDictionary {
}
}

private Mono<ByteBuf> transformResult(Mono<ByteList> result, LLDictionaryResultType resultType) {
private Mono<Buffer> transformResult(Mono<ByteList> result, LLDictionaryResultType resultType) {
if (resultType == LLDictionaryResultType.PREVIOUS_VALUE) {
// Don't retain the result because it has been removed from the skip list
return result.map(this::kk);
@ -95,11 +95,11 @@ public class LLMemoryDictionary implements LLDictionary {
}
}

private ByteList k(ByteBuf buf) {
private ByteList k(Buffer buf) {
return new BinaryLexicographicList(LLUtils.toArray(buf));
}

private ByteBuf kk(ByteList bytesList) {
private Buffer kk(ByteList bytesList) {
var buffer = getAllocator().buffer(bytesList.size());
buffer.writeBytes(bytesList.toByteArray());
return buffer;
@ -139,7 +139,7 @@ public class LLMemoryDictionary implements LLDictionary {
}

@Override
public Mono<ByteBuf> get(@Nullable LLSnapshot snapshot, Mono<ByteBuf> keyMono, boolean existsAlmostCertainly) {
public Mono<Buffer> get(@Nullable LLSnapshot snapshot, Mono<Buffer> keyMono, boolean existsAlmostCertainly) {
return Mono.usingWhen(keyMono,
key -> Mono
.fromCallable(() -> snapshots.get(resolveSnapshot(snapshot)).get(k(key)))
@ -150,7 +150,7 @@ public class LLMemoryDictionary implements LLDictionary {
}

@Override
public Mono<ByteBuf> put(Mono<ByteBuf> keyMono, Mono<ByteBuf> valueMono, LLDictionaryResultType resultType) {
public Mono<Buffer> put(Mono<Buffer> keyMono, Mono<Buffer> valueMono, LLDictionaryResultType resultType) {
return Mono.usingWhen(keyMono,
key -> Mono.usingWhen(valueMono,
value -> Mono
@ -169,17 +169,17 @@ public class LLMemoryDictionary implements LLDictionary {
}

@Override
public Mono<Delta<ByteBuf>> updateAndGetDelta(Mono<ByteBuf> keyMono,
SerializationFunction<@Nullable ByteBuf, @Nullable ByteBuf> updater,
public Mono<Delta<Buffer>> updateAndGetDelta(Mono<Buffer> keyMono,
SerializationFunction<@Nullable Buffer, @Nullable Buffer> updater,
boolean existsAlmostCertainly) {
return Mono.usingWhen(keyMono,
key -> Mono.fromCallable(() -> {
AtomicReference<ByteBuf> oldRef = new AtomicReference<>(null);
AtomicReference<Buffer> oldRef = new AtomicReference<>(null);
var newValue = mainDb.compute(k(key), (_unused, old) -> {
if (old != null) {
oldRef.set(kk(old));
}
ByteBuf v = null;
Buffer v = null;
try {
v = updater.apply(old != null ? kk(old) : null);
} catch (SerializationException e) {
@ -205,7 +205,7 @@ public class LLMemoryDictionary implements LLDictionary {
}

@Override
public Mono<ByteBuf> remove(Mono<ByteBuf> keyMono, LLDictionaryResultType resultType) {
public Mono<Buffer> remove(Mono<Buffer> keyMono, LLDictionaryResultType resultType) {
return Mono.usingWhen(keyMono,
key -> Mono
.fromCallable(() -> mainDb.remove(k(key)))
@ -228,8 +228,8 @@ public class LLMemoryDictionary implements LLDictionary {
}

@Override
public <K> Flux<Tuple3<K, ByteBuf, Optional<ByteBuf>>> getMulti(@Nullable LLSnapshot snapshot,
Flux<Tuple2<K, ByteBuf>> keys,
public <K> Flux<Tuple3<K, Buffer, Optional<Buffer>>> getMulti(@Nullable LLSnapshot snapshot,
Flux<Tuple2<K, Buffer>> keys,
boolean existsAlmostCertainly) {
return keys
.flatMapSequential(key -> {
@ -267,8 +267,8 @@ public class LLMemoryDictionary implements LLDictionary {
}

@Override
public <X> Flux<ExtraKeyOperationResult<ByteBuf, X>> updateMulti(Flux<Tuple2<ByteBuf, X>> entries,
BiSerializationFunction<ByteBuf, X, ByteBuf> updateFunction) {
public <X> Flux<ExtraKeyOperationResult<Buffer, X>> updateMulti(Flux<Tuple2<Buffer, X>> entries,
BiSerializationFunction<Buffer, X, Buffer> updateFunction) {
return Flux.error(new UnsupportedOperationException("Not implemented"));
}

@ -304,7 +304,7 @@ public class LLMemoryDictionary implements LLDictionary {
}

@Override
public Flux<ByteBuf> getRangeKeys(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono) {
public Flux<Buffer> getRangeKeys(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono) {
return Flux.usingWhen(rangeMono,
range -> {
if (range.isSingle()) {
@ -325,7 +325,7 @@ public class LLMemoryDictionary implements LLDictionary {
}

@Override
public Flux<List<ByteBuf>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot,
public Flux<List<Buffer>> getRangeKeysGrouped(@Nullable LLSnapshot snapshot,
Mono<LLRange> rangeMono,
int prefixLength) {
return getRangeKeys(snapshot, rangeMono)
@ -333,7 +333,7 @@ public class LLMemoryDictionary implements LLDictionary {
}

@Override
public Flux<ByteBuf> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono, int prefixLength) {
public Flux<Buffer> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono, int prefixLength) {
return getRangeKeys(snapshot, rangeMono)
.distinctUntilChanged(k -> k.slice(k.readerIndex(), prefixLength), (a, b) -> {
if (LLUtils.equals(a, b)) {
@ -376,7 +376,7 @@ public class LLMemoryDictionary implements LLDictionary {
}

@Override
public Mono<ByteBuf> getOneKey(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono) {
public Mono<Buffer> getOneKey(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono) {
return Mono.error(new UnsupportedOperationException("Not implemented"));
}
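One loose end that matches the "(unfinished)" commit title: kk() above still calls getAllocator().buffer(size), and LLMemorySingleton below still leans on Unpooled and Buffer::retain, Netty 4 idioms with no direct equivalent on the new Buffer. A hedged sketch of what the finished byte[]-to-Buffer bridge might look like (writeBytes(byte[]) is assumed to exist on the new Buffer):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;

class ByteListBridgeSketch {
    // Copy a byte[] value from the in-memory map into a fresh Netty 5 buffer.
    static Buffer wrap(BufferAllocator alloc, byte[] bytes) {
        Buffer buffer = alloc.allocate(bytes.length); // allocate() is the Netty 5 spelling of buffer()
        buffer.writeBytes(bytes); // assumed byte[] overload, mirroring kk() above
        return buffer; // the caller owns the buffer and must close() it
    }
}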
@ -1,7 +1,7 @@
package it.cavallium.dbengine.database.memory;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import it.cavallium.dbengine.client.DatabaseOptions;
import it.cavallium.dbengine.database.Column;
import it.cavallium.dbengine.database.LLDictionary;
@ -1,6 +1,6 @@
package it.cavallium.dbengine.database.memory;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.Unpooled;
import it.cavallium.dbengine.database.LLDictionaryResultType;
import it.cavallium.dbengine.database.LLSingleton;
@ -13,13 +13,13 @@ public class LLMemorySingleton implements LLSingleton {

private final LLMemoryDictionary dict;
private final byte[] singletonName;
private final Mono<ByteBuf> singletonNameBufMono;
private final Mono<Buffer> singletonNameBufMono;

public LLMemorySingleton(LLMemoryDictionary dict, byte[] singletonName) {
this.dict = dict;
this.singletonName = singletonName;
ByteBuf singletonNameBuf = Unpooled.wrappedBuffer(singletonName);
this.singletonNameBufMono = Mono.just(singletonNameBuf).map(ByteBuf::retain);
Buffer singletonNameBuf = Unpooled.wrappedBuffer(singletonName);
this.singletonNameBufMono = Mono.just(singletonNameBuf).map(Buffer::retain);
}

@Override
@ -42,8 +42,8 @@ public class LLMemorySingleton implements LLSingleton {

@Override
public Mono<Void> set(byte[] value) {
var bbKey = Mono.just(Unpooled.wrappedBuffer(singletonName)).map(ByteBuf::retain);
var bbVal = Mono.just(Unpooled.wrappedBuffer(value)).map(ByteBuf::retain);
var bbKey = Mono.just(Unpooled.wrappedBuffer(singletonName)).map(Buffer::retain);
var bbVal = Mono.just(Unpooled.wrappedBuffer(value)).map(Buffer::retain);
return dict
.put(bbKey, bbVal, LLDictionaryResultType.VOID)
.then();
@ -1,7 +1,7 @@
package it.cavallium.dbengine.database.serialization;

import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.ByteBufOutputStream;
import io.netty.buffer.api.BufferInputStream;
import io.netty.buffer.api.BufferOutputStream;
import java.io.IOException;
import org.jetbrains.annotations.NotNull;
@ -1,9 +1,9 @@
package it.cavallium.dbengine.database.serialization;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.ByteBufInputStream;
import io.netty.buffer.ByteBufOutputStream;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.BufferInputStream;
import io.netty.buffer.api.BufferOutputStream;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.Unpooled;
import java.io.IOError;
@ -11,7 +11,7 @@ import java.io.IOException;
import org.jetbrains.annotations.NotNull;
import org.warp.commonutils.error.IndexOutOfBoundsException;

public class CodecSerializer<A> implements Serializer<A, ByteBuf> {
public class CodecSerializer<A> implements Serializer<A, Buffer> {

private final ByteBufAllocator allocator;
private final Codecs<A> deserializationCodecs;
@ -40,7 +40,7 @@ public class CodecSerializer<A> implements Serializer<A, ByteBuf> {
}

@Override
public @NotNull A deserialize(@NotNull ByteBuf serialized) {
public @NotNull A deserialize(@NotNull Buffer serialized) {
try (var is = new ByteBufInputStream(serialized)) {
int codecId;
if (microCodecs) {
@ -59,8 +59,8 @@ public class CodecSerializer<A> implements Serializer<A, ByteBuf> {
}

@Override
public @NotNull ByteBuf serialize(@NotNull A deserialized) {
ByteBuf buf = allocator.buffer();
public @NotNull Buffer serialize(@NotNull A deserialized) {
Buffer buf = allocator.buffer();
try (var os = new ByteBufOutputStream(buf)) {
if (microCodecs) {
os.writeByte(serializationCodecId);
@ -1,9 +1,9 @@
package it.cavallium.dbengine.database.serialization;

import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.LLUtils;
import java.nio.charset.StandardCharsets;
import org.jetbrains.annotations.NotNull;

@ -13,52 +13,41 @@ public interface Serializer<A, B> {

@NotNull B serialize(@NotNull A deserialized) throws SerializationException;

Serializer<ByteBuf, ByteBuf> NOOP_SERIALIZER = new Serializer<>() {
Serializer<Send<Buffer>, Send<Buffer>> NOOP_SERIALIZER = new Serializer<>() {
@Override
public @NotNull ByteBuf deserialize(@NotNull ByteBuf serialized) {
try {
return serialized.retainedSlice();
} finally {
serialized.release();
}
public @NotNull Send<Buffer> deserialize(@NotNull Send<Buffer> serialized) {
return serialized;
}

@Override
public @NotNull ByteBuf serialize(@NotNull ByteBuf deserialized) {
try {
return deserialized.retainedSlice();
} finally {
deserialized.release();
}
public @NotNull Send<Buffer> serialize(@NotNull Send<Buffer> deserialized) {
return deserialized;
}
};

static Serializer<ByteBuf, ByteBuf> noop() {
static Serializer<Send<Buffer>, Send<Buffer>> noop() {
return NOOP_SERIALIZER;
}

static Serializer<String, ByteBuf> utf8(ByteBufAllocator allocator) {
static Serializer<String, Send<Buffer>> utf8(BufferAllocator allocator) {
return new Serializer<>() {
@Override
public @NotNull String deserialize(@NotNull ByteBuf serialized) {
try {
var length = serialized.readInt();
var result = serialized.toString(serialized.readerIndex(), length, StandardCharsets.UTF_8);
serialized.readerIndex(serialized.readerIndex() + length);
return result;
} finally {
serialized.release();
public @NotNull String deserialize(@NotNull Send<Buffer> serializedToReceive) {
try (Buffer serialized = serializedToReceive.receive()) {
int length = serialized.readInt();
return LLUtils.deserializeString(serialized.send(), serialized.readerOffset(), length, StandardCharsets.UTF_8);
}
}

@Override
public @NotNull ByteBuf serialize(@NotNull String deserialized) {
public @NotNull Send<Buffer> serialize(@NotNull String deserialized) {
// UTF-8 uses max. 3 bytes per char, so calculate the worst case.
int length = ByteBufUtil.utf8Bytes(deserialized);
ByteBuf buf = allocator.buffer(Integer.BYTES + length);
int length = LLUtils.utf8MaxBytes(deserialized);
try (Buffer buf = allocator.allocate(Integer.BYTES + length)) {
buf.writeInt(length);
ByteBufUtil.writeUtf8(buf, deserialized);
return buf;
LLUtils.writeString(buf, deserialized, StandardCharsets.UTF_8);
return buf.send();
}
}
};
}
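With Send<Buffer> as the wire type, noop() degenerates to passing the token through untouched, and utf8 writes an int length prefix followed by the encoded bytes via the LLUtils.writeString/deserializeString/utf8MaxBytes helpers added elsewhere in this refactor. A hedged round-trip sketch of the new interface shape (illustrative, not code from this commit):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.Serializer;

class Utf8RoundTripSketch {
    // Serialize then deserialize; each Send is consumed exactly once.
    static String roundTrip(BufferAllocator alloc, String text) throws SerializationException {
        Serializer<String, Send<Buffer>> utf8 = Serializer.utf8(alloc);
        Send<Buffer> wire = utf8.serialize(text); // [length:int][utf-8 bytes]
        return utf8.deserialize(wire);
    }
}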
@ -1,12 +1,9 @@
package it.cavallium.dbengine.database.serialization;

import com.google.common.primitives.Ints;
import com.google.common.primitives.Longs;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.ByteBufAllocator;
import io.netty.buffer.ByteBufUtil;
import io.netty.buffer.PooledByteBufAllocator;
import java.io.NotSerializableException;
import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.LLUtils;
import java.nio.charset.StandardCharsets;
import org.jetbrains.annotations.NotNull;

@ -15,31 +12,30 @@ public interface SerializerFixedBinaryLength<A, B> extends Serializer<A, B> {

int getSerializedBinaryLength();

static SerializerFixedBinaryLength<ByteBuf, ByteBuf> noop(int length) {
static SerializerFixedBinaryLength<Send<Buffer>, Send<Buffer>> noop(int length) {
return new SerializerFixedBinaryLength<>() {
@Override
public @NotNull ByteBuf deserialize(@NotNull ByteBuf serialized) {
try {
if (serialized.readableBytes() != getSerializedBinaryLength()) {
public @NotNull Send<Buffer> deserialize(@NotNull Send<Buffer> serialized) {
try (var buf = serialized.receive()) {
if (buf.readableBytes() != getSerializedBinaryLength()) {
throw new IllegalArgumentException(
"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with "
+ serialized.readableBytes() + " bytes instead");
+ buf.readableBytes() + " bytes instead");
}
return serialized.retain();
} finally {
serialized.release();
return buf.send();
}
}

@Override
public @NotNull ByteBuf serialize(@NotNull ByteBuf deserialized) {
ByteBuf buf = deserialized.retain();
public @NotNull Send<Buffer> serialize(@NotNull Send<Buffer> deserialized) {
try (Buffer buf = deserialized.receive()) {
if (buf.readableBytes() != getSerializedBinaryLength()) {
throw new IllegalArgumentException(
"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to serialize an element with "
+ buf.readableBytes() + " bytes instead");
}
return buf;
return buf.send();
}
}

@Override
@ -49,38 +45,32 @@ public interface SerializerFixedBinaryLength<A, B> extends Serializer<A, B> {
};
}

static SerializerFixedBinaryLength<String, ByteBuf> utf8(ByteBufAllocator allocator, int length) {
static SerializerFixedBinaryLength<String, Send<Buffer>> utf8(BufferAllocator allocator, int length) {
return new SerializerFixedBinaryLength<>() {
@Override
public @NotNull String deserialize(@NotNull ByteBuf serialized) throws SerializationException {
try {
public @NotNull String deserialize(@NotNull Send<Buffer> serializedToReceive) throws SerializationException {
try (var serialized = serializedToReceive.receive()) {
if (serialized.readableBytes() != getSerializedBinaryLength()) {
throw new SerializationException(
"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with "
+ serialized.readableBytes() + " bytes instead");
}
var result = serialized.toString(StandardCharsets.UTF_8);
serialized.readerIndex(serialized.writerIndex());
return result;
} finally {
serialized.release();
var readerOffset = serialized.readerOffset();
return LLUtils.deserializeString(serialized.send(), readerOffset, length, StandardCharsets.UTF_8);
}
}

@Override
public @NotNull ByteBuf serialize(@NotNull String deserialized) throws SerializationException {
public @NotNull Send<Buffer> serialize(@NotNull String deserialized) throws SerializationException {
// UTF-8 uses max. 3 bytes per char, so calculate the worst case.
ByteBuf buf = allocator.buffer(ByteBufUtil.utf8MaxBytes(deserialized));
try {
ByteBufUtil.writeUtf8(buf, deserialized);
try (Buffer buf = allocator.allocate(LLUtils.utf8MaxBytes(deserialized))) {
LLUtils.writeString(buf, deserialized, StandardCharsets.UTF_8);
if (buf.readableBytes() != getSerializedBinaryLength()) {
throw new SerializationException("Fixed serializer with " + getSerializedBinaryLength()
+ " bytes has tried to serialize an element with "
+ buf.readableBytes() + " bytes instead");
}
return buf.retain();
} finally {
buf.release();
return buf.send();
}
}

@ -91,26 +81,25 @@ public interface SerializerFixedBinaryLength<A, B> extends Serializer<A, B> {
};
}

static SerializerFixedBinaryLength<Integer, ByteBuf> intSerializer(ByteBufAllocator allocator) {
static SerializerFixedBinaryLength<Integer, Send<Buffer>> intSerializer(BufferAllocator allocator) {
return new SerializerFixedBinaryLength<>() {
@Override
public @NotNull Integer deserialize(@NotNull ByteBuf serialized) {
try {
public @NotNull Integer deserialize(@NotNull Send<Buffer> serializedToReceive) {
try (var serialized = serializedToReceive.receive()) {
if (serialized.readableBytes() != getSerializedBinaryLength()) {
throw new IllegalArgumentException(
"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with "
+ serialized.readableBytes() + " bytes instead");
}
return serialized.readInt();
} finally {
serialized.release();
}
}

@Override
public @NotNull ByteBuf serialize(@NotNull Integer deserialized) {
ByteBuf buf = allocator.buffer(Integer.BYTES);
return buf.writeInt(deserialized);
public @NotNull Send<Buffer> serialize(@NotNull Integer deserialized) {
try (Buffer buf = allocator.allocate(Integer.BYTES)) {
return buf.writeInt(deserialized).send();
}
}

@Override
@ -120,26 +109,25 @@ public interface SerializerFixedBinaryLength<A, B> extends Serializer<A, B> {
};
}

static SerializerFixedBinaryLength<Long, ByteBuf> longSerializer(ByteBufAllocator allocator) {
static SerializerFixedBinaryLength<Long, Send<Buffer>> longSerializer(BufferAllocator allocator) {
return new SerializerFixedBinaryLength<>() {
@Override
public @NotNull Long deserialize(@NotNull ByteBuf serialized) {
try {
public @NotNull Long deserialize(@NotNull Send<Buffer> serializedToReceive) {
try (var serialized = serializedToReceive.receive()) {
if (serialized.readableBytes() != getSerializedBinaryLength()) {
throw new IllegalArgumentException(
"Fixed serializer with " + getSerializedBinaryLength() + " bytes has tried to deserialize an element with "
+ serialized.readableBytes() + " bytes instead");
}
return serialized.readLong();
} finally {
serialized.release();
}
}

@Override
public @NotNull ByteBuf serialize(@NotNull Long deserialized) {
ByteBuf buf = allocator.buffer(Long.BYTES);
return buf.writeLong(deserialized);
public @NotNull Send<Buffer> serialize(@NotNull Long deserialized) {
try (Buffer buf = allocator.allocate(Long.BYTES)) {
return buf.writeLong(deserialized).send();
}
}

@Override
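The fixed-length variants keep the strict size checks but run them inside a receive() scope. Note that serialize() calls send() on a buffer the enclosing try-with-resources will also close, so this code assumes closing an already-sent buffer is harmless in the snapshot API. A hedged round-trip sketch of the int serializer (illustrative, not code from this commit):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;

class FixedLengthSketch {
    // 4-byte round trip through the fixed-length serializer.
    static int roundTrip(BufferAllocator alloc) throws SerializationException {
        SerializerFixedBinaryLength<Integer, Send<Buffer>> ints =
                SerializerFixedBinaryLength.intSerializer(alloc);
        return ints.deserialize(ints.serialize(12345)); // any size mismatch throws
    }
}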
@ -75,7 +75,7 @@ public class RandomFieldComparator extends FieldComparator<Float> implements Lea
}
};
if (!(scorer instanceof ScoreCachingWrappingScorer)) {
this.scorer = new ScoreCachingWrappingScorer(randomizedScorer);
this.scorer = ScoreCachingWrappingScorer.wrap(randomizedScorer);
} else {
this.scorer = randomizedScorer;
}
@ -13,7 +13,6 @@ import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.HitQueue;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.MultiCollectorManager.Collectors;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.ScoreDoc;
@ -1,13 +1,13 @@
package it.cavallium.dbengine.netty;

import io.netty.buffer.ByteBufAllocatorMetric;
import io.netty.buffer.api.BufferAllocatorMetric;

public class JMXNettyMonitoring implements JMXNettyMonitoringMBean {

private final String name;
private final ByteBufAllocatorMetric metric;

public JMXNettyMonitoring(String name, io.netty.buffer.ByteBufAllocatorMetric metric) {
public JMXNettyMonitoring(String name, io.netty.buffer.api.BufferAllocatorMetric metric) {
this.name = name;
this.metric = metric;
}
@ -1,6 +1,6 @@
package it.cavallium.dbengine.netty;

import io.netty.buffer.ByteBufAllocatorMetric;
import io.netty.buffer.api.BufferAllocatorMetric;
import io.netty.buffer.PoolArenaMetric;
import io.netty.buffer.PooledByteBufAllocator;
import io.netty.buffer.PooledByteBufAllocatorMetric;
@ -1,6 +1,6 @@
package it.cavallium.dbengine.netty;

import io.netty.buffer.ByteBufAllocatorMetric;
import io.netty.buffer.api.BufferAllocatorMetric;
import io.netty.buffer.PooledByteBufAllocatorMetric;

public class JMXPooledNettyMonitoring extends JMXNettyMonitoring implements JMXNettyMonitoringMBean {
@ -1,6 +1,9 @@
package org.rocksdb;

import io.netty.buffer.ByteBuf;
import static it.cavallium.dbengine.database.LLUtils.isDirect;

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.Send;
import it.cavallium.dbengine.database.LLUtils;
import java.nio.ByteBuffer;
import java.util.ArrayList;
@ -25,7 +28,7 @@ public class CappedWriteBatch extends WriteBatch {
private final int cap;
private final WriteOptions writeOptions;

private final List<ByteBuf> buffersToRelease;
private final List<Buffer> buffersToRelease;

/**
* @param cap The limit of operations
@ -53,9 +56,8 @@ public class CappedWriteBatch extends WriteBatch {

private synchronized void releaseAllBuffers() {
if (!buffersToRelease.isEmpty()) {
for (ByteBuf byteBuffer : buffersToRelease) {
assert byteBuffer.refCnt() > 0;
byteBuffer.release();
for (Buffer byteBuffer : buffersToRelease) {
byteBuffer.close();
}
buffersToRelease.clear();
}
@ -90,8 +92,12 @@ public class CappedWriteBatch extends WriteBatch {
flushIfNeeded(false);
}

public synchronized void put(ColumnFamilyHandle columnFamilyHandle, ByteBuf key, ByteBuf value) throws RocksDBException {
if (USE_FAST_DIRECT_BUFFERS && key.isDirect() && value.isDirect()) {
public synchronized void put(ColumnFamilyHandle columnFamilyHandle,
Send<Buffer> keyToReceive,
Send<Buffer> valueToReceive) throws RocksDBException {
var key = keyToReceive.receive();
var value = valueToReceive.receive();
if (USE_FAST_DIRECT_BUFFERS && isDirect(key) && isDirect(value)) {
buffersToRelease.add(key);
buffersToRelease.add(value);
ByteBuffer keyNioBuffer = LLUtils.toDirect(key);
@ -106,8 +112,8 @@ public class CappedWriteBatch extends WriteBatch {
byte[] valueArray = LLUtils.toArray(value);
super.put(columnFamilyHandle, keyArray, valueArray);
} finally {
key.release();
value.release();
key.close();
value.close();
}
}
flushIfNeeded(false);
@ -151,7 +157,8 @@ public class CappedWriteBatch extends WriteBatch {
flushIfNeeded(false);
}

public synchronized void delete(ColumnFamilyHandle columnFamilyHandle, ByteBuf key) throws RocksDBException {
public synchronized void delete(ColumnFamilyHandle columnFamilyHandle, Send<Buffer> keyToReceive) throws RocksDBException {
var key = keyToReceive.receive();
if (USE_FAST_DIRECT_BUFFERS) {
buffersToRelease.add(key);
ByteBuffer keyNioBuffer = LLUtils.toDirect(key);
@ -167,7 +174,7 @@ public class CappedWriteBatch extends WriteBatch {
try {
super.delete(columnFamilyHandle, LLUtils.toArray(key));
} finally {
key.release();
key.close();
}
}
flushIfNeeded(false);
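CappedWriteBatch now takes ownership of its arguments: put and delete accept Send<Buffer> tokens, claim them with receive(), and either park the buffers for deferred close (the direct-buffer path) or close them right after copying to arrays. A hedged caller-side sketch of the new signature (column handle and payload are illustrative, not from this commit):

import io.netty.buffer.api.Buffer;
import io.netty.buffer.api.BufferAllocator;
import org.rocksdb.CappedWriteBatch;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;

class WriteBatchCallerSketch {
    // Ownership of key and value moves into the batch; the caller does not close them.
    static void putEntry(CappedWriteBatch batch, ColumnFamilyHandle cfh, BufferAllocator alloc)
            throws RocksDBException {
        Buffer key = alloc.allocate(Integer.BYTES).writeInt(1);
        Buffer value = alloc.allocate(Integer.BYTES).writeInt(2);
        batch.put(cfh, key.send(), value.send());
    }
}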