Use SimpleResources everywhere
parent: 8d8442d55d
commit: a976a4baa4
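Note: the SimpleResource base class that every file below migrates to is not itself part of this commit. A minimal sketch of the contract the call sites appear to rely on (ensureOpen(), onClose(), isClosed(), close()) is shown here; the names and details are assumptions inferred from usage, not the actual it.cavallium.dbengine.utils.SimpleResource source.

import java.util.concurrent.atomic.AtomicBoolean;

// Sketch only (assumed): a single-close resource with an open check and a close hook.
public abstract class SimpleResource implements AutoCloseable {

	private final AtomicBoolean closed = new AtomicBoolean();

	// Subclasses call this (and may override it to add assertions) before touching owned state.
	protected void ensureOpen() {
		if (closed.get()) {
			throw new IllegalStateException("Closed");
		}
	}

	public boolean isClosed() {
		return closed.get();
	}

	@Override
	public final void close() {
		// compareAndSet guarantees onClose() runs exactly once even under concurrent close() calls
		if (closed.compareAndSet(false, true)) {
			onClose();
		}
	}

	// Subclasses release their owned buffers/suppliers here.
	protected abstract void onClose();
}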
@@ -5,97 +5,64 @@ import io.netty5.buffer.api.Drop;
import io.netty5.buffer.api.Owned;
import io.netty5.util.Send;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.utils.SimpleResource;
import java.util.StringJoiner;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;

public class LLDelta extends ResourceSupport<LLDelta, LLDelta> {

private static final Logger logger = LogManager.getLogger(LLDelta.class);

private static final Drop<LLDelta> DROP = new Drop<>() {
@Override
public void drop(LLDelta obj) {
try {
if (obj.previous != null && obj.previous.isAccessible()) {
obj.previous.close();
}
} catch (Throwable ex) {
logger.error("Failed to close previous", ex);
}
try {
if (obj.current != null && obj.current.isAccessible()) {
obj.current.close();
}
} catch (Throwable ex) {
logger.error("Failed to close current", ex);
}
try {
if (obj.onClose != null) {
obj.onClose.run();
}
} catch (Throwable ex) {
logger.error("Failed to close onDrop", ex);
}
}

@Override
public Drop<LLDelta> fork() {
return this;
}

@Override
public void attach(LLDelta obj) {

}
};
public class LLDelta extends SimpleResource {

@Nullable
private Buffer previous;
private final Buffer previous;
@Nullable
private Buffer current;
@Nullable
private Runnable onClose;
private final Buffer current;

private LLDelta(@Nullable Buffer previous, @Nullable Buffer current, @Nullable Runnable onClose) {
super(DROP);
assert isAllAccessible();
private LLDelta(@Nullable Buffer previous, @Nullable Buffer current) {
super();
this.previous = previous != null ? previous.makeReadOnly() : null;
this.current = current != null ? current.makeReadOnly() : null;
this.onClose = onClose;
}

private boolean isAllAccessible() {
@Override
protected void ensureOpen() {
super.ensureOpen();
assert previous == null || previous.isAccessible();
assert current == null || current.isAccessible();
assert this.isAccessible();
assert this.isOwned();
return true;
}

@Override
protected void onClose() {
if (previous != null && previous.isAccessible()) {
previous.close();
}
if (current != null && current.isAccessible()) {
current.close();
}
}

public static LLDelta of(Buffer previous, Buffer current) {
assert (previous == null && current == null) || (previous != current);
return new LLDelta(previous, current, null);
return new LLDelta(previous, current);
}

public Send<Buffer> previous() {
ensureOwned();
ensureOpen();
return previous != null ? previous.copy().send() : null;
}

public Send<Buffer> current() {
ensureOwned();
ensureOpen();
return current != null ? current.copy().send() : null;
}

public Buffer currentUnsafe() {
ensureOwned();
ensureOpen();
return current;
}

public Buffer previousUnsafe() {
ensureOwned();
ensureOpen();
return previous;
}

@@ -103,17 +70,6 @@ public class LLDelta extends ResourceSupport<LLDelta, LLDelta> {
return !LLUtils.equals(previous, current);
}

private void ensureOwned() {
assert isAllAccessible();
if (!isOwned()) {
if (!isAccessible()) {
throw this.createResourceClosedException();
} else {
throw new IllegalStateException("Resource not owned");
}
}
}

@Override
public boolean equals(Object o) {
if (this == o) {
@@ -141,32 +97,4 @@ public class LLDelta extends ResourceSupport<LLDelta, LLDelta> {
.toString();
}

@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected void makeInaccessible() {
this.current = null;
this.previous = null;
this.onClose = null;
}

@Override
protected Owned<LLDelta> prepareSend() {
Send<Buffer> minSend = this.previous != null ? this.previous.send() : null;
Send<Buffer> maxSend = this.current != null ? this.current.send() : null;
Runnable onClose = this.onClose;
return drop -> {
var instance = new LLDelta(
minSend != null ? minSend.receive() : null,
maxSend != null ? maxSend.receive() : null,
onClose
);
drop.attach(instance);
return instance;
};
}

}

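With the migration above, LLDelta is no longer a ResourceSupport that is transferred with send()/receive(); it is simply closed when its owner is done with it. A hypothetical call site (the Buffer variables are assumed):

// Hypothetical usage; previousBuf and currentBuf are assumed to be owned Buffers.
try (LLDelta delta = LLDelta.of(previousBuf, currentBuf)) {
	Send<Buffer> currentCopy = delta.current(); // current() copies, so the copy outlives delta.close()
	// ... hand currentCopy to another owner ...
}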
@@ -5,6 +5,7 @@ import io.netty5.buffer.api.Drop;
import io.netty5.buffer.api.Owned;
import io.netty5.util.Send;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.utils.SimpleResource;
import java.util.StringJoiner;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -13,46 +14,7 @@ import org.jetbrains.annotations.Nullable;
/**
* Range of data, from min (inclusive), to max (exclusive)
*/
public class LLRange extends ResourceSupport<LLRange, LLRange> {

private static final Logger logger = LogManager.getLogger(LLRange.class);

private static final Drop<LLRange> DROP = new Drop<>() {
@Override
public void drop(LLRange obj) {
try {
if (obj.min != null) {
obj.min.close();
}
} catch (Throwable ex) {
logger.error("Failed to close min", ex);
}
try {
if (obj.max != null) {
obj.max.close();
}
} catch (Throwable ex) {
logger.error("Failed to close max", ex);
}
try {
if (obj.single != null) {
obj.single.close();
}
} catch (Throwable ex) {
logger.error("Failed to close single", ex);
}
}

@Override
public Drop<LLRange> fork() {
return this;
}

@Override
public void attach(LLRange obj) {

}
};
public class LLRange extends SimpleResource {

private static final LLRange RANGE_ALL = new LLRange((Buffer) null, (Buffer) null, (Buffer) null);
@Nullable
@@ -63,8 +25,7 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
private Buffer single;

private LLRange(Send<Buffer> min, Send<Buffer> max, Send<Buffer> single) {
super(DROP);
assert isAllAccessible();
super();
assert single == null || (min == null && max == null);
this.min = min != null ? min.receive().makeReadOnly() : null;
this.max = max != null ? max.receive().makeReadOnly() : null;
@@ -72,23 +33,13 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
}

private LLRange(Buffer min, Buffer max, Buffer single) {
super(DROP);
assert isAllAccessible();
super();
assert single == null || (min == null && max == null);
this.min = min != null ? min.makeReadOnly() : null;
this.max = max != null ? max.makeReadOnly() : null;
this.single = single != null ? single.makeReadOnly() : null;
}

private boolean isAllAccessible() {
assert min == null || min.isAccessible() : "Range min not owned";
assert max == null || max.isAccessible() : "Range max not owned";
assert single == null || single.isAccessible() : "Range single not owned";
assert this.isAccessible() : "Range not accessible";
assert this.isOwned() : "Range not owned";
return true;
}

public static LLRange all() {
return RANGE_ALL.copy();
}
@@ -118,22 +69,22 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
}

public boolean isAll() {
ensureOwned();
ensureOpen();
return min == null && max == null && single == null;
}

public boolean isSingle() {
ensureOwned();
ensureOpen();
return single != null;
}

public boolean hasMin() {
ensureOwned();
ensureOpen();
return min != null || single != null;
}

public Send<Buffer> getMin() {
ensureOwned();
ensureOpen();
if (min != null) {
// todo: use a read-only copy
return min.copy().send();
@@ -146,7 +97,7 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
}

public Buffer getMinUnsafe() {
ensureOwned();
ensureOpen();
if (min != null) {
return min;
} else if (single != null) {
@@ -157,7 +108,7 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
}

public Buffer getMinCopy() {
ensureOwned();
ensureOpen();
if (min != null) {
return min.copy();
} else if (single != null) {
@@ -168,12 +119,12 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
}

public boolean hasMax() {
ensureOwned();
ensureOpen();
return max != null || single != null;
}

public Send<Buffer> getMax() {
ensureOwned();
ensureOpen();
if (max != null) {
// todo: use a read-only copy
return max.copy().send();
@@ -186,7 +137,7 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
}

public Buffer getMaxUnsafe() {
ensureOwned();
ensureOpen();
if (max != null) {
return max;
} else if (single != null) {
@@ -197,7 +148,7 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
}

public Buffer getMaxCopy() {
ensureOwned();
ensureOpen();
if (max != null) {
return max.copy();
} else if (single != null) {
@@ -208,26 +159,36 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
}

public Send<Buffer> getSingle() {
ensureOwned();
ensureOpen();
assert isSingle();
// todo: use a read-only copy
return single != null ? single.copy().send() : null;
}

public Buffer getSingleUnsafe() {
ensureOwned();
ensureOpen();
assert isSingle();
return single;
}

private void ensureOwned() {
assert isAllAccessible();
if (!isOwned()) {
if (!isAccessible()) {
throw this.createResourceClosedException();
} else {
throw new IllegalStateException("Resource not owned");
}
@Override
protected void ensureOpen() {
super.ensureOpen();
assert min == null || min.isAccessible() : "Range min not owned";
assert max == null || max.isAccessible() : "Range max not owned";
assert single == null || single.isAccessible() : "Range single not owned";
}

@Override
protected void onClose() {
if (min != null && min.isAccessible()) {
min.close();
}
if (max != null && max.isAccessible()) {
max.close();
}
if (single != null && single.isAccessible()) {
single.close();
}
}

@@ -259,37 +220,11 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
}

public LLRange copy() {
ensureOwned();
ensureOpen();
// todo: use a read-only copy
return new LLRange(min != null ? min.copy().send() : null,
max != null ? max.copy().send() : null,
single != null ? single.copy().send(): null
);
}

@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected Owned<LLRange> prepareSend() {
Send<Buffer> minSend;
Send<Buffer> maxSend;
Send<Buffer> singleSend;
minSend = this.min != null ? this.min.send() : null;
maxSend = this.max != null ? this.max.send() : null;
singleSend = this.single != null ? this.single.send() : null;
return drop -> {
var instance = new LLRange(minSend, maxSend, singleSend);
drop.attach(instance);
return instance;
};
}

protected void makeInaccessible() {
this.min = null;
this.max = null;
this.single = null;
}
}

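After this change an LLRange is duplicated with copy() instead of prepareSend()/send(); each copy is an independent SimpleResource that its consumer closes. A hypothetical call site:

// Hypothetical usage of the migrated LLRange.
try (LLRange range = LLRange.all()) {
	assert range.isAll();
	LLRange forwarded = range.copy(); // independent copy for another consumer
	try {
		// ... use forwarded, e.g. pass it to a dictionary range query ...
	} finally {
		forwarded.close();
	}
}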
@@ -310,7 +310,7 @@ public class LLUtils {

public static String toStringSafe(@Nullable LLRange range) {
try {
if (range == null || range.isAccessible()) {
if (range == null || !range.isClosed()) {
return toString(range);
} else {
return "(released)";
@@ -949,18 +949,6 @@ public class LLUtils {
if (rocksObj.isOwningHandle()) {
rocksObj.close();
}
} else if (next instanceof LLIndexSearcher searcher) {
try {
searcher.close();
} catch (UncheckedIOException e) {
logger.error("Failed to close searcher {}", searcher, e);
}
} else if (next instanceof LLIndexSearchers searchers) {
try {
searchers.close();
} catch (UncheckedIOException e) {
logger.error("Failed to close searchers {}", searchers, e);
}
} else if (next instanceof Optional<?> optional) {
optional.ifPresent(LLUtils::onNextDropped);
} else if (next instanceof Map.Entry<?, ?> entry) {

@@ -9,10 +9,6 @@ public abstract class RangeSupplier implements DiscardingCloseable, Supplier<LLR
return new SimpleSupplier(supplier);
}

public static RangeSupplier of(Send<LLRange> supplier) {
return new CopySupplier(supplier.receive());
}

public static RangeSupplier ofOwned(LLRange supplier) {
return new CopySupplier(supplier);
}

@@ -16,7 +16,7 @@ public final class SubStageEntry<T, U extends DatabaseStage<?>> implements Disca

@Override
public void close() {
if (value != null && value.isAccessible()) {
if (value != null) {
value.close();
}
}

@@ -36,8 +36,8 @@ public class DatabaseEmpty {
private DatabaseEmpty() {
}

public static DatabaseStageEntry<Nothing> create(LLDictionary dictionary, BufSupplier key, Runnable onClose) {
return new DatabaseMapSingle<>(dictionary, key, nothingSerializer(dictionary.getAllocator()), onClose);
public static DatabaseStageEntry<Nothing> create(LLDictionary dictionary, BufSupplier key) {
return new DatabaseMapSingle<>(dictionary, key, nothingSerializer(dictionary.getAllocator()));
}

public static final class Nothing {

@@ -51,26 +51,23 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
protected DatabaseMapDictionary(LLDictionary dictionary,
@Nullable BufSupplier prefixKeySupplier,
SerializerFixedBinaryLength<T> keySuffixSerializer,
Serializer<U> valueSerializer,
Runnable onClose) {
Serializer<U> valueSerializer) {
// Do not retain or release or use the prefixKey here
super(dictionary, prefixKeySupplier, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0, onClose);
super(dictionary, prefixKeySupplier, keySuffixSerializer, new SubStageGetterSingle<>(valueSerializer), 0);
this.valueSerializer = valueSerializer;
}

public static <T, U> DatabaseMapDictionary<T, U> simple(LLDictionary dictionary,
SerializerFixedBinaryLength<T> keySerializer,
Serializer<U> valueSerializer,
Runnable onClose) {
return new DatabaseMapDictionary<>(dictionary, null, keySerializer, valueSerializer, onClose);
Serializer<U> valueSerializer) {
return new DatabaseMapDictionary<>(dictionary, null, keySerializer, valueSerializer);
}

public static <T, U> DatabaseMapDictionary<T, U> tail(LLDictionary dictionary,
@Nullable BufSupplier prefixKeySupplier,
SerializerFixedBinaryLength<T> keySuffixSerializer,
Serializer<U> valueSerializer,
Runnable onClose) {
return new DatabaseMapDictionary<>(dictionary, prefixKeySupplier, keySuffixSerializer, valueSerializer, onClose);
Serializer<U> valueSerializer) {
return new DatabaseMapDictionary<>(dictionary, prefixKeySupplier, keySuffixSerializer, valueSerializer);
}

public static <K, V> Flux<Entry<K, V>> getLeavesFrom(DatabaseMapDictionary<K, V> databaseMapDictionary,
@@ -256,7 +253,7 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
@Override
public Mono<DatabaseStageEntry<U>> at(@Nullable CompositeSnapshot snapshot, T keySuffix) {
return Mono.fromCallable(() ->
new DatabaseMapSingle<>(dictionary, BufSupplier.ofOwned(serializeKeySuffixToKey(keySuffix)), valueSerializer, null));
new DatabaseMapSingle<>(dictionary, BufSupplier.ofOwned(serializeKeySuffixToKey(keySuffix)), valueSerializer));
}

@Override
@@ -512,7 +509,7 @@ public class DatabaseMapDictionary<T, U> extends DatabaseMapDictionaryDeep<T, U,
var bufSupplier = BufSupplier.ofOwned(toKey(keyBuf.copy()));
try {
T keySuffix = deserializeSuffix(keyBuf);
var subStage = new DatabaseMapSingle<>(dictionary, bufSupplier, valueSerializer, null);
var subStage = new DatabaseMapSingle<>(dictionary, bufSupplier, valueSerializer);
return new SubStageEntry<>(keySuffix, subStage);
} catch (Throwable ex) {
bufSupplier.close();

@@ -22,6 +22,7 @@ import it.cavallium.dbengine.database.collections.DatabaseEmpty.Nothing;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.cavallium.dbengine.utils.SimpleResource;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import java.util.List;
import java.util.Map.Entry;
@@ -38,49 +39,11 @@ import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

// todo: implement optimized methods (which?)
public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extends
ResourceSupport<DatabaseStage<Object2ObjectSortedMap<T, U>>, DatabaseMapDictionaryDeep<T, U, US>> implements
public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extends SimpleResource implements
DatabaseStageMap<T, U, US> {

private static final Logger LOG = LogManager.getLogger(DatabaseMapDictionaryDeep.class);

private static final Drop<DatabaseMapDictionaryDeep<?, ?, ?>> DROP = new Drop<>() {
@Override
public void drop(DatabaseMapDictionaryDeep<?, ?, ?> obj) {
try {
if (obj.rangeSupplier != null) {
obj.rangeSupplier.close();
}
} catch (Throwable ex) {
LOG.error("Failed to close range", ex);
}
try {
if (obj.keyPrefixSupplier != null) {
obj.keyPrefixSupplier.close();
}
} catch (Throwable ex) {
LOG.error("Failed to close keyPrefix", ex);
}
try {
if (obj.onClose != null) {
obj.onClose.run();
}
} catch (Throwable ex) {
LOG.error("Failed to close onClose", ex);
}
}

@Override
public Drop<DatabaseMapDictionaryDeep<?, ?, ?>> fork() {
return this;
}

@Override
public void attach(DatabaseMapDictionaryDeep<?, ?, ?> obj) {

}
};

protected final LLDictionary dictionary;
protected final BufferAllocator alloc;
private final AtomicLong totalZeroBytesErrors = new AtomicLong();
@@ -93,7 +56,6 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend

protected RangeSupplier rangeSupplier;
protected BufSupplier keyPrefixSupplier;
protected Runnable onClose;

private static void incrementPrefix(Buffer prefix, int prefixLength) {
assert prefix.readableBytes() >= prefixLength;
@@ -176,31 +138,25 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
*/
@Deprecated
public static <T, U> DatabaseMapDictionaryDeep<T, U, DatabaseStageEntry<U>> simple(LLDictionary dictionary,
SerializerFixedBinaryLength<T> keySerializer, SubStageGetterSingle<U> subStageGetter,
Runnable onClose) {
return new DatabaseMapDictionaryDeep<>(dictionary, null, keySerializer,
subStageGetter, 0, onClose);
SerializerFixedBinaryLength<T> keySerializer, SubStageGetterSingle<U> subStageGetter) {
return new DatabaseMapDictionaryDeep<>(dictionary, null, keySerializer, subStageGetter, 0);
}

public static <T, U, US extends DatabaseStage<U>> DatabaseMapDictionaryDeep<T, U, US> deepTail(
LLDictionary dictionary, SerializerFixedBinaryLength<T> keySerializer, int keyExtLength,
SubStageGetter<U, US> subStageGetter, Runnable onClose) {
return new DatabaseMapDictionaryDeep<>(dictionary, null, keySerializer,
subStageGetter, keyExtLength, onClose);
SubStageGetter<U, US> subStageGetter) {
return new DatabaseMapDictionaryDeep<>(dictionary, null, keySerializer, subStageGetter, keyExtLength);
}

public static <T, U, US extends DatabaseStage<U>> DatabaseMapDictionaryDeep<T, U, US> deepIntermediate(
LLDictionary dictionary, BufSupplier prefixKey, SerializerFixedBinaryLength<T> keySuffixSerializer,
SubStageGetter<U, US> subStageGetter, int keyExtLength, Runnable onClose) {
return new DatabaseMapDictionaryDeep<>(dictionary, prefixKey, keySuffixSerializer, subStageGetter,
keyExtLength, onClose);
SubStageGetter<U, US> subStageGetter, int keyExtLength) {
return new DatabaseMapDictionaryDeep<>(dictionary, prefixKey, keySuffixSerializer, subStageGetter, keyExtLength);
}

@SuppressWarnings({"unchecked", "rawtypes"})
protected DatabaseMapDictionaryDeep(LLDictionary dictionary, @Nullable BufSupplier prefixKeySupplier,
SerializerFixedBinaryLength<T> keySuffixSerializer, SubStageGetter<U, US> subStageGetter, int keyExtLength,
Runnable onClose) {
super((Drop<DatabaseMapDictionaryDeep<T, U, US>>) (Drop) DROP);
SerializerFixedBinaryLength<T> keySuffixSerializer, SubStageGetter<U, US> subStageGetter, int keyExtLength) {
try (var prefixKey = prefixKeySupplier != null ? prefixKeySupplier.get() : null) {
this.dictionary = dictionary;
this.alloc = dictionary.getAllocator();
@@ -249,7 +205,6 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
}

this.keyPrefixSupplier = prefixKeySupplier;
this.onClose = onClose;
}
} catch (Throwable t) {
if (prefixKeySupplier != null) {
@@ -271,7 +226,6 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
RangeSupplier rangeSupplier,
BufSupplier keyPrefixSupplier,
Runnable onClose) {
super((Drop<DatabaseMapDictionaryDeep<T,U,US>>) (Drop) DROP);
this.dictionary = dictionary;
this.alloc = alloc;
this.subStageGetter = subStageGetter;
@@ -283,7 +237,6 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend

this.rangeSupplier = rangeSupplier;
this.keyPrefixSupplier = keyPrefixSupplier;
this.onClose = onClose;
}

@SuppressWarnings("unused")
@@ -444,41 +397,6 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
: "Invalid key suffix length: " + (afterWriterOffset - beforeWriterOffset) + ". Expected: " + keySuffixLength;
}

@Override
protected RuntimeException createResourceClosedException() {
throw new IllegalStateException("Closed");
}

@Override
protected Owned<DatabaseMapDictionaryDeep<T, U, US>> prepareSend() {
var keyPrefixSupplier = this.keyPrefixSupplier;
var rangeSupplier = this.rangeSupplier;
var onClose = this.onClose;
return drop -> {
var instance = new DatabaseMapDictionaryDeep<>(dictionary,
alloc,
subStageGetter,
keySuffixSerializer,
keyPrefixLength,
keySuffixLength,
keyExtLength,
rangeMono,
rangeSupplier,
keyPrefixSupplier,
onClose
);
drop.attach(instance);
return instance;
};
}

@Override
protected void makeInaccessible() {
this.keyPrefixSupplier = null;
this.rangeSupplier = null;
this.onClose = null;
}

public static <K1, K2, V, R> Flux<R> getAllLeaves2(DatabaseMapDictionaryDeep<K1, Object2ObjectSortedMap<K2, V>, ? extends DatabaseStageMap<K2, V, DatabaseStageEntry<V>>> deepMap,
CompositeSnapshot snapshot,
TriFunction<K1, K2, V, R> merger,
@@ -521,7 +439,8 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
.dictionary
.getRange(deepMap.resolveSnapshot(snapshot), Mono.zip(savedProgressKey1Opt, deepMap.rangeMono).handle((tuple, sink) -> {
var firstKey = tuple.getT1();
try (var fullRange = tuple.getT2()) {
var fullRange = tuple.getT2();
try {
if (firstKey.isPresent()) {
try (var key1Buf = deepMap.alloc.allocate(keySuffix1Serializer.getSerializedBinaryLength())) {
keySuffix1Serializer.serialize(firstKey.get(), key1Buf);
@@ -532,6 +451,13 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
} else {
sink.next(fullRange);
}
} catch (Throwable ex) {
try {
fullRange.close();
} catch (Throwable ex2) {
LOG.error(ex2);
}
throw ex;
}
}), false, false)
.concatMapIterable(entry -> {
@@ -585,4 +511,22 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extend
}
});
}

@Override
protected void onClose() {
try {
if (rangeSupplier != null) {
rangeSupplier.close();
}
} catch (Throwable ex) {
LOG.error("Failed to close range", ex);
}
try {
if (keyPrefixSupplier != null) {
keyPrefixSupplier.close();
}
} catch (Throwable ex) {
LOG.error("Failed to close keyPrefix", ex);
}
}
}

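In getAllLeaves2 above, the try-with-resources around fullRange was replaced with a manual close-on-failure block, presumably because the range is emitted downstream with sink.next and therefore must stay open after the handle lambda returns. The general shape of that pattern, with an assumed acquire() helper:

// Sketch of the emit-or-close pattern; acquire() is a hypothetical helper that returns a closeable resource.
var resource = acquire();
try {
	sink.next(resource); // the downstream subscriber becomes the owner and closes it later
} catch (Throwable ex) {
	try {
		resource.close();
	} catch (Throwable ex2) {
		ex.addSuppressed(ex2); // the diff logs ex2 instead; either way the original failure stays primary
	}
	throw ex;
}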
@@ -14,6 +14,7 @@ import it.cavallium.dbengine.database.SubStageEntry;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.cavallium.dbengine.utils.SimpleResource;
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import it.unimi.dsi.fastutil.objects.ObjectArraySet;
@@ -31,49 +32,22 @@ import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

@SuppressWarnings("unused")
public class DatabaseMapDictionaryHashed<T, U, TH> extends
ResourceSupport<DatabaseStage<Object2ObjectSortedMap<T, U>>, DatabaseMapDictionaryHashed<T, U, TH>> implements
public class DatabaseMapDictionaryHashed<T, U, TH> extends SimpleResource implements
DatabaseStageMap<T, U, DatabaseStageEntry<U>> {

private static final Logger logger = LogManager.getLogger(DatabaseMapDictionaryHashed.class);

private static final Drop<DatabaseMapDictionaryHashed<?, ?, ?>> DROP = new Drop<>() {
@Override
public void drop(DatabaseMapDictionaryHashed<?, ?, ?> obj) {
try {
if (obj.subDictionary != null) {
obj.subDictionary.close();
}
} catch (Throwable ex) {
logger.error("Failed to close subDictionary", ex);
}
}

@Override
public Drop<DatabaseMapDictionaryHashed<?, ?, ?>> fork() {
return this;
}

@Override
public void attach(DatabaseMapDictionaryHashed<?, ?, ?> obj) {

}
};

private final BufferAllocator alloc;
private final Function<T, TH> keySuffixHashFunction;

private DatabaseMapDictionary<TH, ObjectArraySet<Entry<T, U>>> subDictionary;
private final DatabaseMapDictionary<TH, ObjectArraySet<Entry<T, U>>> subDictionary;

@SuppressWarnings({"unchecked", "rawtypes"})
protected DatabaseMapDictionaryHashed(LLDictionary dictionary,
@Nullable BufSupplier prefixKeySupplier,
Serializer<T> keySuffixSerializer,
Serializer<U> valueSerializer,
Function<T, TH> keySuffixHashFunction,
SerializerFixedBinaryLength<TH> keySuffixHashSerializer,
Runnable onClose) {
super((Drop<DatabaseMapDictionaryHashed<T, U, TH>>) (Drop) DROP);
SerializerFixedBinaryLength<TH> keySuffixHashSerializer) {
if (dictionary.getUpdateMode().transform(LLUtils::handleDiscard).block() != UpdateMode.ALLOW) {
throw new IllegalArgumentException("Hashed maps only works when UpdateMode is ALLOW");
}
@@ -83,36 +57,33 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends
ValuesSetSerializer<Entry<T, U>> valuesSetSerializer
= new ValuesSetSerializer<>(valueWithHashSerializer);
this.subDictionary = DatabaseMapDictionary.tail(dictionary, prefixKeySupplier, keySuffixHashSerializer,
valuesSetSerializer, onClose);
valuesSetSerializer);
this.keySuffixHashFunction = keySuffixHashFunction;
}

@SuppressWarnings({"unchecked", "rawtypes"})
private DatabaseMapDictionaryHashed(BufferAllocator alloc,
Function<T, TH> keySuffixHashFunction,
Send<DatabaseStage<Object2ObjectSortedMap<TH, ObjectArraySet<Entry<T, U>>>>> subDictionary,
DatabaseStage<Object2ObjectSortedMap<TH, ObjectArraySet<Entry<T, U>>>> subDictionary,
Drop<DatabaseMapDictionaryHashed<T, U, TH>> drop) {
super((Drop<DatabaseMapDictionaryHashed<T, U, TH>>) (Drop) DROP);
this.alloc = alloc;
this.keySuffixHashFunction = keySuffixHashFunction;

this.subDictionary = (DatabaseMapDictionary<TH, ObjectArraySet<Entry<T, U>>>) subDictionary.receive();
this.subDictionary = (DatabaseMapDictionary<TH, ObjectArraySet<Entry<T, U>>>) subDictionary;
}

public static <T, U, UH> DatabaseMapDictionaryHashed<T, U, UH> simple(LLDictionary dictionary,
Serializer<T> keySerializer,
Serializer<U> valueSerializer,
Function<T, UH> keyHashFunction,
SerializerFixedBinaryLength<UH> keyHashSerializer,
Runnable onClose) {
SerializerFixedBinaryLength<UH> keyHashSerializer) {
return new DatabaseMapDictionaryHashed<>(
dictionary,
null,
keySerializer,
valueSerializer,
keyHashFunction,
keyHashSerializer,
onClose
keyHashSerializer
);
}

@@ -121,15 +92,13 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends
Serializer<T> keySuffixSerializer,
Serializer<U> valueSerializer,
Function<T, UH> keySuffixHashFunction,
SerializerFixedBinaryLength<UH> keySuffixHashSerializer,
Runnable onClose) {
SerializerFixedBinaryLength<UH> keySuffixHashSerializer) {
return new DatabaseMapDictionaryHashed<>(dictionary,
prefixKeySupplier,
keySuffixSerializer,
valueSerializer,
keySuffixHashFunction,
keySuffixHashSerializer,
onClose
keySuffixHashSerializer
);
}

@@ -203,9 +172,8 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends
}

private Mono<DatabaseSingleBucket<T, U, TH>> atPrivate(@Nullable CompositeSnapshot snapshot, T key, TH hash) {
return subDictionary
.at(snapshot, hash)
.map(entry -> new DatabaseSingleBucket<T, U, TH>(entry, key, null));
return subDictionary.at(snapshot, hash)
.map(entry -> new DatabaseSingleBucket<T, U, TH>(entry, key));
}

@Override
@@ -333,18 +301,13 @@ public class DatabaseMapDictionaryHashed<T, U, TH> extends
}

@Override
protected RuntimeException createResourceClosedException() {
throw new IllegalStateException("Closed");
}

@Override
protected Owned<DatabaseMapDictionaryHashed<T, U, TH>> prepareSend() {
var subDictionary = this.subDictionary.send();
return drop -> new DatabaseMapDictionaryHashed<>(alloc, keySuffixHashFunction, subDictionary, drop);
}

@Override
protected void makeInaccessible() {
this.subDictionary = null;
protected void onClose() {
try {
if (subDictionary != null) {
subDictionary.close();
}
} catch (Throwable ex) {
logger.error("Failed to close subDictionary", ex);
}
}
}

@@ -17,6 +17,7 @@ import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.utils.SimpleResource;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -24,49 +25,22 @@ import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public class DatabaseMapSingle<U> extends ResourceSupport<DatabaseStage<U>, DatabaseMapSingle<U>> implements
DatabaseStageEntry<U> {
public class DatabaseMapSingle<U> extends SimpleResource implements DatabaseStageEntry<U> {

private static final Logger LOG = LogManager.getLogger(DatabaseMapSingle.class);

private final AtomicLong totalZeroBytesErrors = new AtomicLong();
private static final Drop<DatabaseMapSingle<?>> DROP = new Drop<>() {
@Override
public void drop(DatabaseMapSingle<?> obj) {
if (obj.keySupplier != null) {
obj.keySupplier.close();
}
if (obj.onClose != null) {
obj.onClose.run();
}
}

@Override
public Drop<DatabaseMapSingle<?>> fork() {
return this;
}

@Override
public void attach(DatabaseMapSingle<?> obj) {

}
};

private final LLDictionary dictionary;
private final Mono<Buffer> keyMono;
private final Serializer<U> serializer;
private BufSupplier keySupplier;
private Runnable onClose;
private final BufSupplier keySupplier;

@SuppressWarnings({"unchecked", "rawtypes"})
public DatabaseMapSingle(LLDictionary dictionary, BufSupplier keySupplier, Serializer<U> serializer,
Runnable onClose) {
super((Drop<DatabaseMapSingle<U>>) (Drop) DROP);
public DatabaseMapSingle(LLDictionary dictionary, BufSupplier keySupplier, Serializer<U> serializer) {
this.dictionary = dictionary;
this.keySupplier = keySupplier;
this.keyMono = Mono.fromSupplier(() -> keySupplier.get());
this.serializer = serializer;
this.onClose = onClose;
}

private LLSnapshot resolveSnapshot(@Nullable CompositeSnapshot snapshot) {
@@ -197,24 +171,7 @@ public class DatabaseMapSingle<U> extends ResourceSupport<DatabaseStage<U>, Data
}

@Override
protected RuntimeException createResourceClosedException() {
throw new IllegalStateException("Closed");
}

@Override
protected Owned<DatabaseMapSingle<U>> prepareSend() {
var keySupplier = this.keySupplier;
var onClose = this.onClose;
return drop -> {
var instance = new DatabaseMapSingle<>(dictionary, keySupplier, serializer, onClose);
drop.attach(instance);
return instance;
};
}

@Override
protected void makeInaccessible() {
this.keySupplier = null;
this.onClose = null;
protected void onClose() {
keySupplier.close();
}
}
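The DatabaseMapSingle constructor above no longer takes a Drop or a Runnable onClose; the only owned resource, the key BufSupplier, is released in onClose(). A hypothetical construction site (dictionary, keyBuffer and serializer are assumed to exist):

// Hypothetical usage of the migrated DatabaseMapSingle.
var single = new DatabaseMapSingle<>(dictionary, BufSupplier.ofOwned(keyBuffer), serializer);
try {
	single.get(null).block(); // read the value once, outside any snapshot
} finally {
	single.close(); // closes the BufSupplier via onClose()
}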
@@ -21,27 +21,23 @@ public class DatabaseSetDictionary<T> extends DatabaseMapDictionary<T, Nothing>

protected DatabaseSetDictionary(LLDictionary dictionary,
BufSupplier prefixKeySupplier,
SerializerFixedBinaryLength<T> keySuffixSerializer,
Runnable onClose) {
SerializerFixedBinaryLength<T> keySuffixSerializer) {
super(dictionary,
prefixKeySupplier,
keySuffixSerializer,
DatabaseEmpty.nothingSerializer(dictionary.getAllocator()),
onClose
DatabaseEmpty.nothingSerializer(dictionary.getAllocator())
);
}

public static <T> DatabaseSetDictionary<T> simple(LLDictionary dictionary,
SerializerFixedBinaryLength<T> keySerializer,
Runnable onClose) {
return new DatabaseSetDictionary<>(dictionary, null, keySerializer, onClose);
SerializerFixedBinaryLength<T> keySerializer) {
return new DatabaseSetDictionary<>(dictionary, null, keySerializer);
}

public static <T> DatabaseSetDictionary<T> tail(LLDictionary dictionary,
BufSupplier prefixKeySupplier,
SerializerFixedBinaryLength<T> keySuffixSerializer,
Runnable onClose) {
return new DatabaseSetDictionary<>(dictionary, prefixKeySupplier, keySuffixSerializer, onClose);
SerializerFixedBinaryLength<T> keySuffixSerializer) {
return new DatabaseSetDictionary<>(dictionary, prefixKeySupplier, keySuffixSerializer);
}

public Mono<Set<T>> getKeySet(@Nullable CompositeSnapshot snapshot) {

@@ -26,29 +26,25 @@ public class DatabaseSetDictionaryHashed<T, TH> extends DatabaseMapDictionaryHas
@Nullable BufSupplier prefixKeySupplier,
Serializer<T> keySuffixSerializer,
Function<T, TH> keySuffixHashFunction,
SerializerFixedBinaryLength<TH> keySuffixHashSerializer,
Runnable onClose) {
SerializerFixedBinaryLength<TH> keySuffixHashSerializer) {
super(dictionary,
prefixKeySupplier,
keySuffixSerializer,
DatabaseEmpty.nothingSerializer(dictionary.getAllocator()),
keySuffixHashFunction,
keySuffixHashSerializer,
onClose
keySuffixHashSerializer
);
}

public static <T, TH> DatabaseSetDictionaryHashed<T, TH> simple(LLDictionary dictionary,
Serializer<T> keySerializer,
Function<T, TH> keyHashFunction,
SerializerFixedBinaryLength<TH> keyHashSerializer,
Runnable onClose) {
SerializerFixedBinaryLength<TH> keyHashSerializer) {
return new DatabaseSetDictionaryHashed<>(dictionary,
null,
keySerializer,
keyHashFunction,
keyHashSerializer,
onClose
keyHashSerializer
);
}

@@ -56,14 +52,12 @@ public class DatabaseSetDictionaryHashed<T, TH> extends DatabaseMapDictionaryHas
@Nullable BufSupplier prefixKeySupplier,
Serializer<T> keySuffixSerializer,
Function<T, TH> keyHashFunction,
SerializerFixedBinaryLength<TH> keyHashSerializer,
Runnable onClose) {
SerializerFixedBinaryLength<TH> keyHashSerializer) {
return new DatabaseSetDictionaryHashed<>(dictionary,
prefixKeySupplier,
keySuffixSerializer,
keyHashFunction,
keyHashSerializer,
onClose
keyHashSerializer
);
}

@@ -10,6 +10,7 @@ import it.cavallium.dbengine.database.LLUtils;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.utils.SimpleResource;
import it.unimi.dsi.fastutil.objects.ObjectArraySet;
import java.util.Map;
import java.util.Map.Entry;
@@ -23,62 +24,22 @@ import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

@SuppressWarnings("unused")
public class DatabaseSingleBucket<K, V, TH>
extends ResourceSupport<DatabaseStage<V>, DatabaseSingleBucket<K, V, TH>>
implements DatabaseStageEntry<V> {
public class DatabaseSingleBucket<K, V, TH> extends SimpleResource implements DatabaseStageEntry<V> {

private static final Logger logger = LogManager.getLogger(DatabaseSingleBucket.class);

private static final Drop<DatabaseSingleBucket<?, ?, ?>> DROP = new Drop<>() {
@Override
public void drop(DatabaseSingleBucket<?, ?, ?> obj) {
try {
if (obj.bucketStage != null) {
obj.bucketStage.close();
}
} catch (Throwable ex) {
logger.error("Failed to close bucketStage", ex);
}
try {
if (obj.onClose != null) {
obj.onClose.run();
}
} catch (Throwable ex) {
logger.error("Failed to close onClose", ex);
}
}

@Override
public Drop<DatabaseSingleBucket<?, ?, ?>> fork() {
return this;
}

@Override
public void attach(DatabaseSingleBucket<?, ?, ?> obj) {

}
};

private final K key;

private DatabaseStageEntry<ObjectArraySet<Entry<K, V>>> bucketStage;
private final DatabaseStageEntry<ObjectArraySet<Entry<K, V>>> bucketStage;

private Runnable onClose;

@SuppressWarnings({"unchecked", "rawtypes"})
public DatabaseSingleBucket(DatabaseStageEntry<ObjectArraySet<Entry<K, V>>> bucketStage, K key, Runnable onClose) {
super((Drop<DatabaseSingleBucket<K,V,TH>>) (Drop) DROP);
public DatabaseSingleBucket(DatabaseStageEntry<ObjectArraySet<Entry<K, V>>> bucketStage, K key) {
this.key = key;
this.bucketStage = bucketStage;
this.onClose = onClose;
}

@SuppressWarnings({"unchecked", "rawtypes"})
private DatabaseSingleBucket(Send<DatabaseStage<ObjectArraySet<Entry<K, V>>>> bucketStage, K key, Runnable onClose) {
super((Drop<DatabaseSingleBucket<K,V,TH>>) (Drop) DROP);
private DatabaseSingleBucket(DatabaseStage<ObjectArraySet<Entry<K, V>>> bucketStage, K key) {
this.key = key;
this.bucketStage = (DatabaseStageEntry<ObjectArraySet<Entry<K, V>>>) bucketStage.receive();
this.onClose = onClose;
this.bucketStage = (DatabaseStageEntry<ObjectArraySet<Entry<K, V>>>) bucketStage;
}

@Override
@@ -229,24 +190,13 @@ public class DatabaseSingleBucket<K, V, TH>
}

@Override
protected RuntimeException createResourceClosedException() {
throw new IllegalStateException("Closed");
}

@Override
protected Owned<DatabaseSingleBucket<K, V, TH>> prepareSend() {
var bucketStage = this.bucketStage.send();
var onClose = this.onClose;
return drop -> {
var instance = new DatabaseSingleBucket<K, V, TH>(bucketStage, key, onClose);
drop.attach(instance);
return instance;
};
}

@Override
protected void makeInaccessible() {
this.bucketStage = null;
this.onClose = null;
protected void onClose() {
try {
if (bucketStage != null) {
bucketStage.close();
}
} catch (Throwable ex) {
logger.error("Failed to close bucketStage", ex);
}
}
}

@@ -12,6 +12,7 @@ import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.utils.SimpleResource;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
@@ -20,53 +21,27 @@ import reactor.core.publisher.Mono;
import reactor.core.publisher.SynchronousSink;

@SuppressWarnings("unused")
public class DatabaseSingleMapped<A, B> extends ResourceSupport<DatabaseStage<A>, DatabaseSingleMapped<A, B>>
implements DatabaseStageEntry<A> {
public class DatabaseSingleMapped<A, B> extends SimpleResource implements DatabaseStageEntry<A> {

private static final Logger logger = LogManager.getLogger(DatabaseSingleMapped.class);

private static final Drop<DatabaseSingleMapped<?, ?>> DROP = new Drop<>() {
@Override
public void drop(DatabaseSingleMapped<?, ?> obj) {
try {
if (obj.serializedSingle != null) {
obj.serializedSingle.close();
}
} catch (Throwable ex) {
logger.error("Failed to close serializedSingle", ex);
}
}

@Override
public Drop<DatabaseSingleMapped<?, ?>> fork() {
return this;
}

@Override
public void attach(DatabaseSingleMapped<?, ?> obj) {

}
};

private final Mapper<A, B> mapper;

private DatabaseStageEntry<B> serializedSingle;
private final DatabaseStageEntry<B> serializedSingle;

@SuppressWarnings({"unchecked", "rawtypes"})
public DatabaseSingleMapped(DatabaseStageEntry<B> serializedSingle, Mapper<A, B> mapper,
Drop<DatabaseSingleMapped<A, B>> drop) {
super((Drop<DatabaseSingleMapped<A,B>>) (Drop) DROP);
this.serializedSingle = serializedSingle;
this.mapper = mapper;
}

@SuppressWarnings({"unchecked", "rawtypes"})
private DatabaseSingleMapped(Send<DatabaseStage<B>> serializedSingle, Mapper<A, B> mapper,
private DatabaseSingleMapped(DatabaseStage<B> serializedSingle, Mapper<A, B> mapper,
Drop<DatabaseSingleMapped<A, B>> drop) {
super((Drop<DatabaseSingleMapped<A,B>>) (Drop) DROP);
this.mapper = mapper;

this.serializedSingle = (DatabaseStageEntry<B>) serializedSingle.receive();
this.serializedSingle = (DatabaseStageEntry<B>) serializedSingle;
}

private void deserializeSink(B value, SynchronousSink<A> sink) {
@@ -179,19 +154,7 @@ public class DatabaseSingleMapped<A, B> extends ResourceSupport<DatabaseStage<A>
}

@Override
protected RuntimeException createResourceClosedException() {
throw new IllegalStateException("Closed");
protected void onClose() {
serializedSingle.close();
}

@Override
protected Owned<DatabaseSingleMapped<A, B>> prepareSend() {
var serializedSingle = this.serializedSingle.send();
return drop -> new DatabaseSingleMapped<>(serializedSingle, mapper, drop);
}

@Override
protected void makeInaccessible() {
this.serializedSingle = null;
}

}

@@ -14,6 +14,7 @@ import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.utils.SimpleResource;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
@@ -21,42 +22,17 @@ import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.SynchronousSink;

public class DatabaseSingleton<U> extends ResourceSupport<DatabaseStage<U>, DatabaseSingleton<U>> implements
DatabaseStageEntry<U> {
public class DatabaseSingleton<U> extends SimpleResource implements DatabaseStageEntry<U> {

private static final Logger LOG = LogManager.getLogger(DatabaseSingleton.class);

private static final Drop<DatabaseSingleton<?>> DROP = new Drop<>() {
@Override
public void drop(DatabaseSingleton<?> obj) {
if (obj.onClose != null) {
obj.onClose.run();
}
}

@Override
public Drop<DatabaseSingleton<?>> fork() {
return this;
}

@Override
public void attach(DatabaseSingleton<?> obj) {

}
};

private final LLSingleton singleton;
private final Serializer<U> serializer;

private Runnable onClose;

@SuppressWarnings({"unchecked", "rawtypes"})
public DatabaseSingleton(LLSingleton singleton, Serializer<U> serializer,
Runnable onClose) {
super((Drop<DatabaseSingleton<U>>) (Drop) DROP);
public DatabaseSingleton(LLSingleton singleton, Serializer<U> serializer) {
this.singleton = singleton;
this.serializer = serializer;
this.onClose = onClose;
}

private LLSnapshot resolveSnapshot(@Nullable CompositeSnapshot snapshot) {
@@ -201,22 +177,7 @@ public class DatabaseSingleton<U> extends ResourceSupport<DatabaseStage<U>, Data
}

@Override
protected RuntimeException createResourceClosedException() {
throw new IllegalStateException("Closed");
}
protected void onClose() {

@Override
protected Owned<DatabaseSingleton<U>> prepareSend() {
var onClose = this.onClose;
return drop -> {
var instance = new DatabaseSingleton<>(singleton, serializer, onClose);
drop.attach(instance);
return instance;
};
}

@Override
protected void makeInaccessible() {
this.onClose = null;
}
}
@@ -5,6 +5,7 @@ import it.cavallium.dbengine.client.BadBlock;
import it.cavallium.dbengine.client.CompositeSnapshot;
import it.cavallium.dbengine.database.Delta;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.SafeCloseable;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import java.util.Objects;
@@ -12,7 +13,7 @@ import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public interface DatabaseStage<T> extends DatabaseStageWithEntry<T>, Resource<DatabaseStage<T>> {
public interface DatabaseStage<T> extends DatabaseStageWithEntry<T>, SafeCloseable {

Mono<T> get(@Nullable CompositeSnapshot snapshot);

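DatabaseStage now extends SafeCloseable instead of Netty's Resource, so a stage is released with a plain close() rather than with send()/receive(). A hypothetical call site (singleton and serializer are assumed to exist, and SafeCloseable is assumed to expose a no-throw close()):

// Hypothetical usage of a migrated stage.
DatabaseStageEntry<String> stage = new DatabaseSingleton<>(singleton, serializer);
try {
	String value = stage.get(null).block();
	// ... use value ...
} finally {
	stage.close();
}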
@@ -43,8 +43,7 @@ public class SubStageGetterHashMap<T, U, TH> implements
keySerializer,
valueSerializer,
keyHashFunction,
keyHashSerializer,
null
keyHashSerializer
));
}

@@ -40,8 +40,7 @@ public class SubStageGetterHashSet<T, TH> implements
BufSupplier.ofOwned(prefixKey),
keySerializer,
keyHashFunction,
keyHashSerializer,
null
keyHashSerializer
));
}

@@ -29,8 +29,7 @@ public class SubStageGetterMap<T, U> implements
return prefixKeyMono.map(prefixKey -> DatabaseMapDictionary.tail(dictionary,
BufSupplier.ofOwned(prefixKey),
keySerializer,
valueSerializer,
null
valueSerializer
));
}

@@ -48,8 +48,7 @@ public class SubStageGetterMapDeep<T, U, US extends DatabaseStage<U>> implements
BufSupplier.ofOwned(prefixKey),
keySerializer,
subStageGetter,
keyExtLength,
null
keyExtLength
));
}

@@ -28,8 +28,7 @@ public class SubStageGetterSet<T> implements
Mono<Buffer> prefixKeyMono) {
return prefixKeyMono.map(prefixKey -> DatabaseSetDictionary.tail(dictionary,
BufSupplier.ofOwned(prefixKey),
keySerializer,
null
keySerializer
));
}

@@ -23,8 +23,7 @@ public class SubStageGetterSingle<T> implements SubStageGetter<T, DatabaseStageE
Mono<Buffer> keyPrefixMono) {
return keyPrefixMono.map(keyPrefix -> new DatabaseMapSingle<>(dictionary,
BufSupplier.ofOwned(keyPrefix),
serializer,
null
serializer
));
}

@@ -488,7 +488,7 @@ public class LLLocalDictionary implements LLDictionary {
assert result != null;
return result.delta();
} catch (Throwable ex) {
if (result != null && result.delta().isAccessible()) {
if (result != null && !result.delta().isClosed()) {
result.close();
}
throw ex;
@@ -718,16 +718,14 @@ public class LLLocalDictionary implements LLDictionary {
Mono<LLRange> rangeMono,
boolean reverse,
boolean smallRange) {
return rangeMono.flatMapMany(range -> {
try (range) {
if (range.isSingle()) {
var rangeSingleMono = rangeMono.map(llRange -> llRange.getSingleUnsafe());
return getRangeSingle(snapshot, rangeSingleMono);
} else {
return getRangeMulti(snapshot, rangeMono, reverse, smallRange);
}
return Flux.usingWhen(rangeMono, range -> {
if (range.isSingle()) {
var rangeSingleMono = rangeMono.map(llRange -> llRange.getSingleUnsafe());
return getRangeSingle(snapshot, rangeSingleMono);
} else {
return getRangeMulti(snapshot, rangeMono, reverse, smallRange);
}
});
}, LLUtils::finalizeResource);
}

@Override
@@ -735,17 +733,15 @@ public class LLLocalDictionary implements LLDictionary {
Mono<LLRange> rangeMono,
int prefixLength,
boolean smallRange) {
return rangeMono.flatMapMany(range -> {
try (range) {
if (range.isSingle()) {
var rangeSingleMono = rangeMono.map(llRange -> llRange.getSingleUnsafe());

return getRangeSingle(snapshot, rangeSingleMono).map(List::of);
} else {
return getRangeMultiGrouped(snapshot, rangeMono, prefixLength, smallRange);
}
return Flux.usingWhen(rangeMono, range -> {
if (range.isSingle()) {
var rangeSingleMono = rangeMono.map(llRange -> llRange.getSingleUnsafe());

return getRangeSingle(snapshot, rangeSingleMono).map(List::of);
} else {
return getRangeMultiGrouped(snapshot, rangeMono, prefixLength, smallRange);
}
});
}, LLUtils::finalizeResource);
}

private Flux<LLEntry> getRangeSingle(LLSnapshot snapshot, Mono<Buffer> keyMono) {
@@ -756,33 +752,24 @@ public class LLLocalDictionary implements LLDictionary {
Mono<LLRange> rangeMono,
boolean reverse,
boolean smallRange) {
Mono<LLLocalEntryReactiveRocksIterator> iteratorMono = rangeMono.map(range -> {
var readOptions = generateReadOptionsOrNull(snapshot);
return new LLLocalEntryReactiveRocksIterator(db, range, nettyDirect, readOptions, reverse, smallRange);
});
return Flux.usingWhen(iteratorMono,
iterator -> iterator.flux().subscribeOn(dbRScheduler, false),
LLUtils::finalizeResource
);
return new LLLocalEntryReactiveRocksIterator(db,
rangeMono,
nettyDirect,
() -> generateReadOptionsOrNull(snapshot),
reverse,
smallRange
).flux().subscribeOn(dbRScheduler, false);
}

private Flux<List<LLEntry>> getRangeMultiGrouped(LLSnapshot snapshot, Mono<LLRange> rangeMono,
int prefixLength, boolean smallRange) {
Mono<LLLocalGroupedEntryReactiveRocksIterator> iteratorMono = rangeMono.map(range -> {
var readOptions = generateReadOptionsOrNull(snapshot);
return new LLLocalGroupedEntryReactiveRocksIterator(db,
prefixLength,
range,
nettyDirect,
readOptions,
smallRange
);
});
return Flux.usingWhen(
iteratorMono,
iterator -> iterator.flux().subscribeOn(dbRScheduler, false),
LLUtils::finalizeResource
);
return new LLLocalGroupedEntryReactiveRocksIterator(db,
prefixLength,
rangeMono,
nettyDirect,
() -> generateReadOptionsOrNull(snapshot),
smallRange
).flux().subscribeOn(dbRScheduler, false);
}

@Override
@@ -798,7 +785,7 @@ public class LLLocalDictionary implements LLDictionary {
return this.getRangeKeysMulti(snapshot, rangeMono, reverse, smallRange);
}
} finally {
if (range != null && range.isAccessible()) {
if (range != null && !range.isClosed()) {
range.close();
}
}
@@ -810,20 +797,13 @@ public class LLLocalDictionary implements LLDictionary {
Mono<LLRange> rangeMono,
int prefixLength,
boolean smallRange) {
Mono<LLLocalGroupedKeyReactiveRocksIterator> iteratorMono = rangeMono.map(range -> {
var readOptions = generateReadOptionsOrNull(snapshot);
return new LLLocalGroupedKeyReactiveRocksIterator(db,
prefixLength,
range,
nettyDirect,
readOptions,
smallRange
);
});
return Flux.usingWhen(iteratorMono,
iterator -> iterator.flux().subscribeOn(dbRScheduler, false),
LLUtils::finalizeResource
);
return new LLLocalGroupedKeyReactiveRocksIterator(db,
prefixLength,
rangeMono,
nettyDirect,
() -> generateReadOptionsOrNull(snapshot),
smallRange
).flux().subscribeOn(dbRScheduler, false);
}

@Override
@@ -868,21 +848,14 @@ public class LLLocalDictionary implements LLDictionary {
@Override
public Flux<Buffer> getRangeKeyPrefixes(@Nullable LLSnapshot snapshot, Mono<LLRange> rangeMono,
int prefixLength, boolean smallRange) {
Mono<LLLocalKeyPrefixReactiveRocksIterator> iteratorMono = rangeMono.map(range -> {
var readOptions = generateReadOptionsOrNull(snapshot);
return new LLLocalKeyPrefixReactiveRocksIterator(db,
prefixLength,
range,
nettyDirect,
readOptions,
true,
smallRange
);
});
return Flux.usingWhen(iteratorMono,
iterator -> iterator.flux().subscribeOn(dbRScheduler),
LLUtils::finalizeResource
);
return new LLLocalKeyPrefixReactiveRocksIterator(db,
prefixLength,
rangeMono,
nettyDirect,
() -> generateReadOptionsOrNull(snapshot),
true,
smallRange
).flux().subscribeOn(dbRScheduler);
}

private Flux<Buffer> getRangeKeysSingle(LLSnapshot snapshot, Mono<Buffer> keyMono) {
|
||||
@ -909,15 +882,13 @@ public class LLLocalDictionary implements LLDictionary {
|
||||
Mono<LLRange> rangeMono,
|
||||
boolean reverse,
|
||||
boolean smallRange) {
|
||||
Mono<RocksObjTuple<ReadOptions, LLLocalKeyReactiveRocksIterator>> iteratorMono = rangeMono.map(range -> {
|
||||
var readOptions = generateReadOptionsOrNull(snapshot);
|
||||
var it = new LLLocalKeyReactiveRocksIterator(db, range, nettyDirect, readOptions, reverse, smallRange);
|
||||
return new RocksObjTuple<>(readOptions, it);
|
||||
});
|
||||
return Flux.usingWhen(iteratorMono,
|
||||
t -> t.t2().flux().subscribeOn(dbRScheduler, false),
|
||||
t -> Mono.fromRunnable(t::close)
|
||||
);
|
||||
return new LLLocalKeyReactiveRocksIterator(db,
|
||||
rangeMono,
|
||||
nettyDirect,
|
||||
() -> generateReadOptionsOrNull(snapshot),
|
||||
reverse,
|
||||
smallRange
|
||||
).flux().subscribeOn(dbRScheduler, false);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
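Note (not part of the commit, illustrative names only): the hunks above replace manual try/finally handling of the range with Flux.usingWhen, which acquires the resource per subscription and releases it through LLUtils::finalizeResource. A minimal, self-contained sketch of the same Reactor pattern:

// Hedged sketch: the resource type and data are made up, only the operator usage mirrors the diff above.
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

class UsingWhenSketch {

	interface SafeResource extends AutoCloseable {
		@Override
		void close(); // no checked exception, similar in spirit to SafeCloseable in this codebase
	}

	static Flux<String> readAll(Mono<SafeResource> resourceMono) {
		return Flux.usingWhen(
				resourceMono,                                     // acquire lazily, once per subscriber
				resource -> Flux.just("a", "b", "c"),             // stream while the resource is open
				resource -> Mono.fromRunnable(resource::close));  // release on complete, error, or cancel
	}
}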
@ -4,17 +4,19 @@ import io.netty5.buffer.api.Buffer;
import io.netty5.util.Send;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLRange;
import java.util.function.Supplier;
import org.rocksdb.ReadOptions;
import reactor.core.publisher.Mono;

public class LLLocalEntryReactiveRocksIterator extends LLLocalReactiveRocksIterator<LLEntry> {

public LLLocalEntryReactiveRocksIterator(RocksDBColumn db,
LLRange range,
Mono<LLRange> rangeMono,
boolean allowNettyDirect,
ReadOptions readOptions,
Supplier<ReadOptions> readOptions,
boolean reverse,
boolean smallRange) {
super(db, range, allowNettyDirect, readOptions, true, reverse, smallRange);
super(db, rangeMono, allowNettyDirect, readOptions, true, reverse, smallRange);
}

@Override
@ -4,18 +4,20 @@ import io.netty5.buffer.api.Buffer;
import io.netty5.util.Send;
import it.cavallium.dbengine.database.LLEntry;
import it.cavallium.dbengine.database.LLRange;
import java.util.function.Supplier;
import org.rocksdb.ReadOptions;
import reactor.core.publisher.Mono;

public class LLLocalGroupedEntryReactiveRocksIterator extends
LLLocalGroupedReactiveRocksIterator<LLEntry> {

public LLLocalGroupedEntryReactiveRocksIterator(RocksDBColumn db,
int prefixLength,
LLRange range,
Mono<LLRange> rangeMono,
boolean allowNettyDirect,
ReadOptions readOptions,
Supplier<ReadOptions> readOptions,
boolean smallRange) {
super(db, prefixLength, range, allowNettyDirect, readOptions, false, true, smallRange);
super(db, prefixLength, rangeMono, allowNettyDirect, readOptions, false, true, smallRange);
}

@Override
@ -3,17 +3,19 @@ package it.cavallium.dbengine.database.disk;
import io.netty5.buffer.api.Buffer;
import io.netty5.util.Send;
import it.cavallium.dbengine.database.LLRange;
import java.util.function.Supplier;
import org.rocksdb.ReadOptions;
import reactor.core.publisher.Mono;

public class LLLocalGroupedKeyReactiveRocksIterator extends LLLocalGroupedReactiveRocksIterator<Buffer> {

public LLLocalGroupedKeyReactiveRocksIterator(RocksDBColumn db,
int prefixLength,
LLRange range,
Mono<LLRange> rangeMono,
boolean allowNettyDirect,
ReadOptions readOptions,
Supplier<ReadOptions> readOptions,
boolean smallRange) {
super(db, prefixLength, range, allowNettyDirect, readOptions, true, false, smallRange);
super(db, prefixLength, rangeMono, allowNettyDirect, readOptions, true, false, smallRange);
}

@Override
@ -5,87 +5,53 @@ import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions;
|
||||
import static it.cavallium.dbengine.database.LLUtils.isBoundedRange;
|
||||
|
||||
import io.netty5.buffer.api.Buffer;
|
||||
import io.netty5.buffer.api.Drop;
|
||||
import io.netty5.buffer.api.Owned;
|
||||
import io.netty5.util.Send;
|
||||
import io.netty5.buffer.api.internal.ResourceSupport;
|
||||
import it.cavallium.dbengine.database.LLRange;
|
||||
import it.cavallium.dbengine.database.LLUtils;
|
||||
import it.unimi.dsi.fastutil.objects.ObjectArrayList;
|
||||
import java.util.List;
|
||||
import java.util.function.Supplier;
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.jetbrains.annotations.Nullable;
|
||||
import org.rocksdb.ReadOptions;
|
||||
import org.rocksdb.RocksDBException;
|
||||
import reactor.core.publisher.Flux;
|
||||
import reactor.core.publisher.Mono;
|
||||
|
||||
public abstract class LLLocalGroupedReactiveRocksIterator<T> extends
|
||||
ResourceSupport<LLLocalGroupedReactiveRocksIterator<T>, LLLocalGroupedReactiveRocksIterator<T>> {
|
||||
public abstract class LLLocalGroupedReactiveRocksIterator<T> {
|
||||
|
||||
protected static final Logger logger = LogManager.getLogger(LLLocalGroupedReactiveRocksIterator.class);
|
||||
private static final Drop<LLLocalGroupedReactiveRocksIterator<?>> DROP = new Drop<>() {
|
||||
@Override
|
||||
public void drop(LLLocalGroupedReactiveRocksIterator<?> obj) {
|
||||
try {
|
||||
if (obj.range != null) {
|
||||
obj.range.close();
|
||||
}
|
||||
} catch (Throwable ex) {
|
||||
logger.error("Failed to close range", ex);
|
||||
}
|
||||
try {
|
||||
if (obj.readOptions != null) {
|
||||
obj.readOptions.close();
|
||||
}
|
||||
} catch (Throwable ex) {
|
||||
logger.error("Failed to close readOptions", ex);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public Drop<LLLocalGroupedReactiveRocksIterator<?>> fork() {
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void attach(LLLocalGroupedReactiveRocksIterator<?> obj) {
|
||||
|
||||
}
|
||||
};
|
||||
|
||||
private final RocksDBColumn db;
|
||||
private final int prefixLength;
|
||||
private LLRange range;
|
||||
private final Mono<LLRange> rangeMono;
|
||||
private final boolean allowNettyDirect;
|
||||
private ReadOptions readOptions;
|
||||
private final Supplier<ReadOptions> readOptions;
|
||||
private final boolean canFillCache;
|
||||
private final boolean readValues;
|
||||
private final boolean smallRange;
|
||||
|
||||
@SuppressWarnings({"unchecked", "rawtypes"})
|
||||
public LLLocalGroupedReactiveRocksIterator(RocksDBColumn db,
|
||||
int prefixLength,
|
||||
LLRange range,
|
||||
Mono<LLRange> rangeMono,
|
||||
boolean allowNettyDirect,
|
||||
ReadOptions readOptions,
|
||||
Supplier<ReadOptions> readOptions,
|
||||
boolean canFillCache,
|
||||
boolean readValues,
|
||||
boolean smallRange) {
|
||||
super((Drop<LLLocalGroupedReactiveRocksIterator<T>>) (Drop) DROP);
|
||||
this.db = db;
|
||||
this.prefixLength = prefixLength;
|
||||
this.range = range;
|
||||
this.rangeMono = rangeMono;
|
||||
this.allowNettyDirect = allowNettyDirect;
|
||||
this.readOptions = readOptions != null ? readOptions : new ReadOptions();
|
||||
this.readOptions = readOptions != null ? readOptions : ReadOptions::new;
|
||||
this.canFillCache = canFillCache;
|
||||
this.readValues = readValues;
|
||||
this.smallRange = smallRange;
|
||||
}
|
||||
|
||||
public final Flux<List<T>> flux() {
|
||||
return Flux.generate(() -> {
|
||||
var readOptions = generateCustomReadOptions(this.readOptions, true, isBoundedRange(range), smallRange);
|
||||
return Flux.usingWhen(rangeMono, range -> Flux.generate(() -> {
|
||||
var readOptions = generateCustomReadOptions(this.readOptions.get(), true, isBoundedRange(range), smallRange);
|
||||
if (logger.isTraceEnabled()) {
|
||||
logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range));
|
||||
}
|
||||
@ -155,38 +121,9 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> extends
|
||||
sink.error(ex);
|
||||
}
|
||||
return tuple;
|
||||
}, RocksIterWithReadOpts::close);
|
||||
}, RocksIterWithReadOpts::close), LLUtils::finalizeResource);
|
||||
}
|
||||
|
||||
public abstract T getEntry(@Nullable Buffer key, @Nullable Buffer value);
|
||||
|
||||
@Override
|
||||
protected final RuntimeException createResourceClosedException() {
|
||||
return new IllegalStateException("Closed");
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Owned<LLLocalGroupedReactiveRocksIterator<T>> prepareSend() {
|
||||
var range = this.range.send();
|
||||
var readOptions = this.readOptions;
|
||||
return drop -> new LLLocalGroupedReactiveRocksIterator<>(db,
|
||||
prefixLength,
|
||||
range.receive(),
|
||||
allowNettyDirect,
|
||||
readOptions,
|
||||
canFillCache,
|
||||
readValues,
|
||||
smallRange
|
||||
) {
|
||||
@Override
|
||||
public T getEntry(@Nullable Buffer key, @Nullable Buffer value) {
|
||||
return LLLocalGroupedReactiveRocksIterator.this.getEntry(key, value);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
protected void makeInaccessible() {
|
||||
this.range = null;
|
||||
this.readOptions = null;
|
||||
}
|
||||
}
|
||||
|
@ -11,84 +11,52 @@ import io.netty5.util.Send;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import java.util.function.Supplier;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDBException;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;

public class LLLocalKeyPrefixReactiveRocksIterator extends
ResourceSupport<LLLocalKeyPrefixReactiveRocksIterator, LLLocalKeyPrefixReactiveRocksIterator> {
public class LLLocalKeyPrefixReactiveRocksIterator {

protected static final Logger logger = LogManager.getLogger(LLLocalKeyPrefixReactiveRocksIterator.class);
private static final Drop<LLLocalKeyPrefixReactiveRocksIterator> DROP = new Drop<>() {
@Override
public void drop(LLLocalKeyPrefixReactiveRocksIterator obj) {
try {
if (obj.rangeShared != null && obj.rangeShared.isAccessible()) {
obj.rangeShared.close();
}
} catch (Throwable ex) {
logger.error("Failed to close range", ex);
}
try {
if (obj.readOptions != null && obj.readOptions.isAccessible()) {
obj.readOptions.close();
}
} catch (Throwable ex) {
logger.error("Failed to close readOptions", ex);
}
}

@Override
public Drop<LLLocalKeyPrefixReactiveRocksIterator> fork() {
return this;
}

@Override
public void attach(LLLocalKeyPrefixReactiveRocksIterator obj) {

}
};

private final RocksDBColumn db;
private final int prefixLength;
private LLRange rangeShared;
private final Mono<LLRange> rangeMono;
private final boolean allowNettyDirect;
private ReadOptions readOptions;
private final Supplier<ReadOptions> readOptions;
private final boolean canFillCache;
private final boolean smallRange;

public LLLocalKeyPrefixReactiveRocksIterator(RocksDBColumn db,
int prefixLength,
LLRange range,
Mono<LLRange> rangeMono,
boolean allowNettyDirect,
ReadOptions readOptions,
Supplier<ReadOptions> readOptions,
boolean canFillCache,
boolean smallRange) {
super(DROP);
this.db = db;
this.prefixLength = prefixLength;
this.rangeShared = range;
this.rangeMono = rangeMono;
this.allowNettyDirect = allowNettyDirect;
this.readOptions = readOptions != null ? readOptions : new ReadOptions();
this.readOptions = readOptions != null ? readOptions : ReadOptions::new;
this.canFillCache = canFillCache;
this.smallRange = smallRange;
}


public Flux<Buffer> flux() {
return Flux.generate(() -> {
var readOptions = generateCustomReadOptions(this.readOptions,
canFillCache,
isBoundedRange(rangeShared),
smallRange
);
return Flux.usingWhen(rangeMono, range -> Flux.generate(() -> {
var readOptions
= generateCustomReadOptions(this.readOptions.get(), canFillCache, isBoundedRange(range), smallRange);
if (logger.isTraceEnabled()) {
logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(rangeShared));
logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range));
}
return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(allowNettyDirect, readOptions, rangeShared, false));
return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(allowNettyDirect, readOptions, range, false));
}, (tuple, sink) -> {
try {
var rocksIterator = tuple.iter();
@ -131,7 +99,7 @@ public class LLLocalKeyPrefixReactiveRocksIterator extends
if (logger.isTraceEnabled()) {
logger.trace(MARKER_ROCKSDB,
"Range {} is reading prefix {}",
LLUtils.toStringSafe(rangeShared),
LLUtils.toStringSafe(range),
LLUtils.toStringSafe(groupKeyPrefix)
);
}
@ -139,7 +107,7 @@ public class LLLocalKeyPrefixReactiveRocksIterator extends
sink.next(groupKeyPrefix);
} else {
if (logger.isTraceEnabled()) {
logger.trace(MARKER_ROCKSDB, "Range {} ended", LLUtils.toStringSafe(rangeShared));
logger.trace(MARKER_ROCKSDB, "Range {} ended", LLUtils.toStringSafe(range));
}
sink.complete();
}
@ -150,35 +118,12 @@ public class LLLocalKeyPrefixReactiveRocksIterator extends
}
} catch (RocksDBException ex) {
if (logger.isTraceEnabled()) {
logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(rangeShared));
logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range));
}
sink.error(ex);
}
return tuple;
}, RocksIterWithReadOpts::close);
}, RocksIterWithReadOpts::close), LLUtils::finalizeResource);
}

@Override
protected final RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected Owned<LLLocalKeyPrefixReactiveRocksIterator> prepareSend() {
var range = this.rangeShared.send();
var readOptions = this.readOptions;
return drop -> new LLLocalKeyPrefixReactiveRocksIterator(db,
prefixLength,
range.receive(),
allowNettyDirect,
readOptions,
canFillCache,
smallRange
);
}

protected void makeInaccessible() {
this.rangeShared = null;
this.readOptions = null;
}
}
@ -3,17 +3,19 @@ package it.cavallium.dbengine.database.disk;
import io.netty5.buffer.api.Buffer;
import io.netty5.util.Send;
import it.cavallium.dbengine.database.LLRange;
import java.util.function.Supplier;
import org.rocksdb.ReadOptions;
import reactor.core.publisher.Mono;

public class LLLocalKeyReactiveRocksIterator extends LLLocalReactiveRocksIterator<Buffer> {

public LLLocalKeyReactiveRocksIterator(RocksDBColumn db,
LLRange range,
Mono<LLRange> rangeMono,
boolean allowNettyDirect,
ReadOptions readOptions,
Supplier<ReadOptions> readOptions,
boolean reverse,
boolean smallRange) {
super(db, range, allowNettyDirect, readOptions, false, reverse, smallRange);
super(db, rangeMono, allowNettyDirect, readOptions, false, reverse, smallRange);
}

@Override
@ -7,63 +7,37 @@ import io.netty5.buffer.api.Owned;
import io.netty5.util.Send;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.utils.SimpleResource;
import java.util.function.Supplier;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDBException;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.util.function.Tuples;

public final class LLLocalMigrationReactiveRocksIterator extends
ResourceSupport<LLLocalMigrationReactiveRocksIterator, LLLocalMigrationReactiveRocksIterator> {

private static final Logger logger = LogManager.getLogger(LLLocalMigrationReactiveRocksIterator.class);
private static final Drop<LLLocalMigrationReactiveRocksIterator> DROP = new Drop<>() {
@Override
public void drop(LLLocalMigrationReactiveRocksIterator obj) {
try {
if (obj.rangeShared != null) {
obj.rangeShared.close();
}
} catch (Throwable ex) {
logger.error("Failed to close range", ex);
}
}

@Override
public Drop<LLLocalMigrationReactiveRocksIterator> fork() {
return this;
}

@Override
public void attach(LLLocalMigrationReactiveRocksIterator obj) {

}
};
public final class LLLocalMigrationReactiveRocksIterator {

private final RocksDBColumn db;
private LLRange rangeShared;
private Mono<LLRange> rangeMono;
private Supplier<ReadOptions> readOptions;

@SuppressWarnings({"unchecked", "rawtypes"})
public LLLocalMigrationReactiveRocksIterator(RocksDBColumn db,
Send<LLRange> range,
Mono<LLRange> rangeMono,
Supplier<ReadOptions> readOptions) {
super((Drop<LLLocalMigrationReactiveRocksIterator>) (Drop) DROP);
try (range) {
this.db = db;
this.rangeShared = range.receive();
this.readOptions = readOptions;
}
this.db = db;
this.rangeMono = rangeMono;
this.readOptions = readOptions;
}

public record ByteEntry(byte[] key, byte[] value) {}

public Flux<ByteEntry> flux() {
return Flux.generate(() -> {
return Flux.usingWhen(rangeMono, range -> Flux.generate(() -> {
var readOptions = generateCustomReadOptions(this.readOptions.get(), false, false, false);
return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(false, readOptions, rangeShared, false));
return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(false, readOptions, range, false));
}, (tuple, sink) -> {
try {
var rocksIterator = tuple.iter();
@ -79,26 +53,6 @@ public final class LLLocalMigrationReactiveRocksIterator extends
sink.error(ex);
}
return tuple;
}, RocksIterWithReadOpts::close);
}

@Override
protected final RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected Owned<LLLocalMigrationReactiveRocksIterator> prepareSend() {
var range = this.rangeShared.send();
var readOptions = this.readOptions;
return drop -> new LLLocalMigrationReactiveRocksIterator(db,
range,
readOptions
);
}

protected void makeInaccessible() {
this.rangeShared = null;
this.readOptions = null;
}, RocksIterWithReadOpts::close), LLUtils::finalizeResource);
}
}
@ -5,87 +5,52 @@ import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions;
import static it.cavallium.dbengine.database.LLUtils.isBoundedRange;

import io.netty5.buffer.api.Buffer;
import io.netty5.buffer.api.Drop;
import io.netty5.buffer.api.Owned;
import io.netty5.util.Send;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import java.util.function.Supplier;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDBException;
import reactor.core.publisher.Flux;
import reactor.util.function.Tuples;
import reactor.core.publisher.Mono;

public abstract class LLLocalReactiveRocksIterator<T> extends
ResourceSupport<LLLocalReactiveRocksIterator<T>, LLLocalReactiveRocksIterator<T>> {
public abstract class LLLocalReactiveRocksIterator<T> {

protected static final Logger logger = LogManager.getLogger(LLLocalReactiveRocksIterator.class);
private static final Drop<LLLocalReactiveRocksIterator<?>> DROP = new Drop<>() {
@Override
public void drop(LLLocalReactiveRocksIterator<?> obj) {
try {
if (obj.rangeShared != null) {
obj.rangeShared.close();
}
} catch (Throwable ex) {
logger.error("Failed to close range", ex);
}
try {
if (obj.readOptions != null) {
obj.readOptions.close();
}
} catch (Throwable ex) {
logger.error("Failed to close readOptions", ex);
}
}

@Override
public Drop<LLLocalReactiveRocksIterator<?>> fork() {
return this;
}

@Override
public void attach(LLLocalReactiveRocksIterator<?> obj) {

}
};

private final RocksDBColumn db;
private LLRange rangeShared;
private final Mono<LLRange> rangeMono;
private final boolean allowNettyDirect;
private ReadOptions readOptions;
private final Supplier<ReadOptions> readOptions;
private final boolean readValues;
private final boolean reverse;
private final boolean smallRange;

@SuppressWarnings({"unchecked", "rawtypes"})
public LLLocalReactiveRocksIterator(RocksDBColumn db,
LLRange range,
Mono<LLRange> rangeMono,
boolean allowNettyDirect,
ReadOptions readOptions,
Supplier<ReadOptions> readOptions,
boolean readValues,
boolean reverse,
boolean smallRange) {
super((Drop<LLLocalReactiveRocksIterator<T>>) (Drop) DROP);
this.db = db;
this.rangeShared = range;
this.rangeMono = rangeMono;
this.allowNettyDirect = allowNettyDirect;
this.readOptions = readOptions;
this.readOptions = readOptions != null ? readOptions : ReadOptions::new;
this.readValues = readValues;
this.reverse = reverse;
this.smallRange = smallRange;
}

public final Flux<T> flux() {
return Flux.generate(() -> {
var readOptions = generateCustomReadOptions(this.readOptions, true, isBoundedRange(rangeShared), smallRange);
return Flux.usingWhen(rangeMono, range -> Flux.generate(() -> {
var readOptions = generateCustomReadOptions(this.readOptions.get(), true, isBoundedRange(range), smallRange);
if (logger.isTraceEnabled()) {
logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(rangeShared));
logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range));
}
return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(allowNettyDirect, readOptions, rangeShared, reverse));
return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(allowNettyDirect, readOptions, range, reverse));
}, (tuple, sink) -> {
try {
var rocksIterator = tuple.iter();
@ -111,7 +76,7 @@ public abstract class LLLocalReactiveRocksIterator<T> extends
if (logger.isTraceEnabled()) {
logger.trace(MARKER_ROCKSDB,
"Range {} is reading {}: {}",
LLUtils.toStringSafe(rangeShared),
LLUtils.toStringSafe(range),
LLUtils.toStringSafe(key),
LLUtils.toStringSafe(value)
);
@ -137,48 +102,20 @@ public abstract class LLLocalReactiveRocksIterator<T> extends
}
} else {
if (logger.isTraceEnabled()) {
logger.trace(MARKER_ROCKSDB, "Range {} ended", LLUtils.toStringSafe(rangeShared));
logger.trace(MARKER_ROCKSDB, "Range {} ended", LLUtils.toStringSafe(range));
}
sink.complete();
}
} catch (RocksDBException ex) {
if (logger.isTraceEnabled()) {
logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(rangeShared));
logger.trace(MARKER_ROCKSDB, "Range {} failed", LLUtils.toStringSafe(range));
}
sink.error(ex);
}
return tuple;
}, RocksIterWithReadOpts::close);
}, RocksIterWithReadOpts::close), LLUtils::finalizeResource);
}

public abstract T getEntry(@Nullable Buffer key, @Nullable Buffer value);

@Override
protected final RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected Owned<LLLocalReactiveRocksIterator<T>> prepareSend() {
var range = this.rangeShared.send();
var readOptions = this.readOptions;
return drop -> new LLLocalReactiveRocksIterator<T>(db,
range.receive(),
allowNettyDirect,
readOptions,
readValues,
reverse,
smallRange
) {
@Override
public T getEntry(@Nullable Buffer key, @Nullable Buffer value) {
return LLLocalReactiveRocksIterator.this.getEntry(key, value);
}
};
}

protected void makeInaccessible() {
this.rangeShared = null;
this.readOptions = null;
}
}
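Aside (illustrative, not part of the diff): switching the iterator constructors from ReadOptions to Supplier<ReadOptions> means each subscription builds and owns its own options, so no long-lived native object is left for a Drop to clean up. A minimal sketch of that deferral pattern, with hypothetical names:

// Hedged sketch: only the Supplier fallback mirrors the constructors above; the method body is invented.
import java.util.function.Supplier;
import org.rocksdb.ReadOptions;

class DeferredReadOptionsSketch {

	private final Supplier<ReadOptions> readOptions;

	DeferredReadOptionsSketch(Supplier<ReadOptions> readOptions) {
		// Same fallback as in the diff: build a fresh default per call when none is supplied.
		this.readOptions = readOptions != null ? readOptions : ReadOptions::new;
	}

	void iterateOnce() {
		try (ReadOptions options = readOptions.get()) { // created per use, closed per use
			// ... open and drain a RocksIterator with these options ...
		}
	}
}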
@ -7,7 +7,7 @@ public record UpdateAtomicResultDelta(LLDelta delta) implements UpdateAtomicResu

@Override
public void close() {
if (delta != null && delta.isAccessible()) {
if (delta != null && !delta.isClosed()) {
delta.close();
}
}
@ -1,58 +0,0 @@
package it.cavallium.dbengine.database.disk.rocksdb;

import io.netty5.buffer.api.Drop;
import io.netty5.buffer.api.Owned;
import io.netty5.buffer.api.internal.ResourceSupport;
import org.rocksdb.AbstractSlice;
import org.rocksdb.DirectSlice;

public abstract class LLAbstractSlice<T extends AbstractSlice<U>, U> extends ResourceSupport<LLAbstractSlice<T, U>, LLAbstractSlice<T, U>> {

protected static final Drop<LLAbstractSlice<?, ?>> DROP = new Drop<>() {
@Override
public void drop(LLAbstractSlice obj) {
if (obj.val != null) {
obj.val.close();
}
}

@Override
public Drop<LLAbstractSlice<?, ?>> fork() {
return this;
}

@Override
public void attach(LLAbstractSlice obj) {

}
};

private T val;

public LLAbstractSlice(T val) {
//noinspection unchecked
super((Drop<LLAbstractSlice<T, U>>) (Drop<?>) DROP);
this.val = val;
}

public T getNative() {
return val;
}

@Override
protected final void makeInaccessible() {
this.val = null;
}

@Override
protected final Owned<LLAbstractSlice<T, U>> prepareSend() {
var val = this.val;
return drop -> {
var instance = createInstance(val);
drop.attach(instance);
return instance;
};
}

protected abstract LLAbstractSlice<T, U> createInstance(T val);
}
@ -3,58 +3,25 @@ package it.cavallium.dbengine.database.disk.rocksdb;
import io.netty5.buffer.api.Drop;
import io.netty5.buffer.api.Owned;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.utils.SimpleResource;
import org.rocksdb.AbstractSlice;
import org.rocksdb.ColumnFamilyHandle;

public final class LLColumnFamilyHandle extends ResourceSupport<LLColumnFamilyHandle, LLColumnFamilyHandle> {
public final class LLColumnFamilyHandle extends SimpleResource {

private static final Drop<LLColumnFamilyHandle> DROP = new Drop<>() {
@Override
public void drop(LLColumnFamilyHandle obj) {
if (obj.val != null) {
obj.val.close();
}
}

@Override
public Drop<LLColumnFamilyHandle> fork() {
return this;
}

@Override
public void attach(LLColumnFamilyHandle obj) {

}
};

private ColumnFamilyHandle val;
private final ColumnFamilyHandle val;

public LLColumnFamilyHandle(ColumnFamilyHandle val) {
super(DROP);
this.val = val;
}

public ColumnFamilyHandle getNative() {
ensureOpen();
return val;
}

@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected void makeInaccessible() {
this.val = null;
}

@Override
protected Owned<LLColumnFamilyHandle> prepareSend() {
var val = this.val;
return drop -> {
var instance = new LLColumnFamilyHandle(val);
drop.attach(instance);
return instance;
};
protected void onClose() {
val.close();
}
}
@ -3,57 +3,24 @@ package it.cavallium.dbengine.database.disk.rocksdb;
import io.netty5.buffer.api.Drop;
import io.netty5.buffer.api.Owned;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.utils.SimpleResource;
import org.rocksdb.CompactionOptions;

public final class LLCompactionOptions extends ResourceSupport<LLCompactionOptions, LLCompactionOptions> {
public final class LLCompactionOptions extends SimpleResource {

private static final Drop<LLCompactionOptions> DROP = new Drop<>() {
@Override
public void drop(LLCompactionOptions obj) {
if (obj.val != null) {
obj.val.close();
}
}

@Override
public Drop<LLCompactionOptions> fork() {
return this;
}

@Override
public void attach(LLCompactionOptions obj) {

}
};

private CompactionOptions val;
private final CompactionOptions val;

public LLCompactionOptions(CompactionOptions val) {
super(DROP);
this.val = val;
}

public CompactionOptions getNative() {
ensureOpen();
return val;
}

@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected void makeInaccessible() {
this.val = null;
}

@Override
protected Owned<LLCompactionOptions> prepareSend() {
var val = this.val;
return drop -> {
var instance = new LLCompactionOptions(val);
drop.attach(instance);
return instance;
};
protected void onClose() {
val.close();
}
}
@ -1,29 +0,0 @@
package it.cavallium.dbengine.database.disk.rocksdb;

import io.netty5.buffer.api.Drop;
import io.netty5.buffer.api.Owned;
import io.netty5.buffer.api.internal.ResourceSupport;
import java.nio.ByteBuffer;
import org.rocksdb.AbstractSlice;
import org.rocksdb.DirectSlice;

public final class LLDirectSlice extends LLAbstractSlice<DirectSlice, ByteBuffer> {

public LLDirectSlice(DirectSlice val) {
super(val);
}

public DirectSlice getNative() {
return super.getNative();
}

@Override
protected LLAbstractSlice<DirectSlice, ByteBuffer> createInstance(DirectSlice val) {
return new LLDirectSlice(val);
}

@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}
}
@ -8,53 +8,19 @@ import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.database.LLDelta;
import it.cavallium.dbengine.database.SafeCloseable;
import it.cavallium.dbengine.database.disk.LLLocalGroupedReactiveRocksIterator;
import it.cavallium.dbengine.utils.SimpleResource;
import org.rocksdb.ReadOptions;

public final class LLReadOptions extends ResourceSupport<LLReadOptions, LLReadOptions> {
public final class LLReadOptions extends SimpleResource {

private static final Drop<LLReadOptions> DROP = new Drop<>() {
@Override
public void drop(LLReadOptions obj) {
if (obj.val != null) {
obj.val.close();
}
}

@Override
public Drop<LLReadOptions> fork() {
return this;
}

@Override
public void attach(LLReadOptions obj) {

}
};

private ReadOptions val;
private final ReadOptions val;

public LLReadOptions(ReadOptions val) {
super(DROP);
this.val = val;
}

@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected void makeInaccessible() {
this.val = null;
}

@Override
protected Owned<LLReadOptions> prepareSend() {
var val = this.val;
return drop -> {
var instance = new LLReadOptions(val);
drop.attach(instance);
return instance;
};
protected void onClose() {
val.close();
}
}
@ -3,53 +3,19 @@ package it.cavallium.dbengine.database.disk.rocksdb;
import io.netty5.buffer.api.Drop;
import io.netty5.buffer.api.Owned;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.utils.SimpleResource;
import org.rocksdb.WriteOptions;

public final class LLWriteOptions extends ResourceSupport<LLWriteOptions, LLWriteOptions> {
public final class LLWriteOptions extends SimpleResource {

private static final Drop<LLWriteOptions> DROP = new Drop<>() {
@Override
public void drop(LLWriteOptions obj) {
if (obj.val != null) {
obj.val.close();
}
}

@Override
public Drop<LLWriteOptions> fork() {
return this;
}

@Override
public void attach(LLWriteOptions obj) {

}
};

private WriteOptions val;
private final WriteOptions val;

public LLWriteOptions(WriteOptions val) {
super(DROP);
this.val = val;
}

@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected void makeInaccessible() {
this.val = null;
}

@Override
protected Owned<LLWriteOptions> prepareSend() {
var val = this.val;
return drop -> {
var instance = new LLWriteOptions(val);
drop.attach(instance);
return instance;
};
protected void onClose() {
val.close();
}
}
@ -10,37 +10,13 @@ import io.netty5.buffer.api.Owned;
import io.netty5.buffer.api.ReadableComponent;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.utils.SimpleResource;
import java.nio.ByteBuffer;
import org.rocksdb.AbstractSlice;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;

public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIteratorObj> {

protected static final Drop<RocksIteratorObj> DROP = new Drop<>() {
@Override
public void drop(RocksIteratorObj obj) {
if (obj.rocksIterator != null) {
obj.rocksIterator.close();
}
if (obj.sliceMin != null) {
obj.sliceMin.close();
}
if (obj.sliceMax != null) {
obj.sliceMax.close();
}
}

@Override
public Drop<RocksIteratorObj> fork() {
return this;
}

@Override
public void attach(RocksIteratorObj obj) {

}
};
public class RocksIteratorObj extends SimpleResource {

private RocksIterator rocksIterator;
private AbstractSlice<?> sliceMin;
@ -100,7 +76,6 @@ public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIte
Timer iterNextTime,
Object seekingFrom,
Object seekingTo) {
super(DROP);
this.sliceMin = sliceMin;
this.sliceMax = sliceMax;
this.min = min;
@ -118,6 +93,7 @@ public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIte
}

public void seek(ByteBuffer seekBuf) throws RocksDBException {
ensureOpen();
startedIterSeek.increment();
try {
iterSeekTime.record(() -> rocksIterator.seek(seekBuf));
@ -128,6 +104,7 @@ public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIte
}

public void seek(byte[] seekArray) throws RocksDBException {
ensureOpen();
startedIterSeek.increment();
try {
iterSeekTime.record(() -> rocksIterator.seek(seekArray));
@ -138,6 +115,7 @@ public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIte
}

public void seekToFirst() throws RocksDBException {
ensureOpen();
startedIterSeek.increment();
try {
iterSeekTime.record(rocksIterator::seekToFirst);
@ -148,6 +126,7 @@ public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIte
}

public void seekToLast() throws RocksDBException {
ensureOpen();
startedIterSeek.increment();
try {
iterSeekTime.record(rocksIterator::seekToLast);
@ -161,6 +140,7 @@ public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIte
* Useful for reverse iterations
*/
public void seekFrom(Buffer key) {
ensureOpen();
if (allowNettyDirect && isReadOnlyDirect(key)) {
ByteBuffer keyInternalByteBuffer = ((ReadableComponent) key).readableBuffer();
assert keyInternalByteBuffer.position() == 0;
@ -179,6 +159,7 @@ public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIte
* Useful for forward iterations
*/
public void seekTo(Buffer key) {
ensureOpen();
if (allowNettyDirect && isReadOnlyDirect(key)) {
ByteBuffer keyInternalByteBuffer = ((ReadableComponent) key).readableBuffer();
assert keyInternalByteBuffer.position() == 0;
@ -198,30 +179,37 @@ public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIte
}

public boolean isValid() {
ensureOpen();
return rocksIterator.isValid();
}

public int key(ByteBuffer buffer) {
ensureOpen();
return rocksIterator.key(buffer);
}

public int value(ByteBuffer buffer) {
ensureOpen();
return rocksIterator.value(buffer);
}

public byte[] key() {
ensureOpen();
return rocksIterator.key();
}

public byte[] value() {
ensureOpen();
return rocksIterator.value();
}

public void next() throws RocksDBException {
ensureOpen();
next(true);
}

public void next(boolean traceStats) throws RocksDBException {
ensureOpen();
if (traceStats) {
startedIterNext.increment();
iterNextTime.record(rocksIterator::next);
@ -232,10 +220,12 @@ public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIte
}

public void prev() throws RocksDBException {
ensureOpen();
prev(true);
}

public void prev(boolean traceStats) throws RocksDBException {
ensureOpen();
if (traceStats) {
startedIterNext.increment();
iterNextTime.record(rocksIterator::prev);
@ -246,48 +236,15 @@ public class RocksIteratorObj extends ResourceSupport<RocksIteratorObj, RocksIte
}

@Override
protected void makeInaccessible() {
this.rocksIterator = null;
this.sliceMin = null;
this.sliceMax = null;
this.min = null;
this.max = null;
this.seekingFrom = null;
this.seekingTo = null;
}

@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected Owned<RocksIteratorObj> prepareSend() {
var rocksIterator = this.rocksIterator;
var sliceMin = this.sliceMin;
var sliceMax = this.sliceMax;
var minSend = this.min != null ? this.min.send() : null;
var maxSend = this.max != null ? this.max.send() : null;
var seekingFrom = this.seekingFrom;
var seekingTo = this.seekingTo;
return drop -> {
var instance = new RocksIteratorObj(rocksIterator,
sliceMin,
sliceMax,
minSend != null ? minSend.receive() : null,
maxSend != null ? maxSend.receive() : null,
allowNettyDirect,
startedIterSeek,
endedIterSeek,
iterSeekTime,
startedIterNext,
endedIterNext,
iterNextTime,
seekingFrom,
seekingTo
);
drop.attach(instance);
return instance;
};
protected void onClose() {
if (rocksIterator != null) {
rocksIterator.close();
}
if (sliceMin != null) {
sliceMin.close();
}
if (sliceMax != null) {
sliceMax.close();
}
}
}
@ -29,6 +29,7 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@ -128,8 +129,8 @@ public class LLMemoryDictionary implements LLDictionary {
}
}

private BLRange r(Send<LLRange> send) {
try(var range = send.receive()) {
private BLRange r(Supplier<LLRange> send) {
try(var range = send.get()) {
if (range.isAll()) {
return new BLRange(null, null, null);
} else if (range.isSingle()) {
@ -490,7 +491,7 @@ public class LLMemoryDictionary implements LLDictionary {
clearMono = Mono.fromRunnable(() -> mapSlice(null, range).clear());
}

var r = r(range.copy().send());
var r = r(range::copy);

return clearMono
.thenMany(entries)
@ -5,7 +5,6 @@ import io.netty5.buffer.api.BufferAllocator;
import io.netty5.util.Send;
import io.netty5.util.internal.StringUtil;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.netty.NullableBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Objects;
import org.jetbrains.annotations.NotNull;
@ -1,76 +0,0 @@
package it.cavallium.dbengine.netty;

import io.netty5.buffer.api.Buffer;
import io.netty5.buffer.api.Drop;
import io.netty5.buffer.api.Owned;
import io.netty5.util.Send;
import io.netty5.buffer.api.internal.ResourceSupport;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;

public class NullableBuffer extends ResourceSupport<NullableBuffer, NullableBuffer> {

private static final Logger logger = LogManager.getLogger(NullableBuffer.class);

private static final Drop<NullableBuffer> DROP = new Drop<>() {
@Override
public void drop(NullableBuffer obj) {
try {
if (obj.buffer != null) {
obj.buffer.close();
}
} catch (Throwable ex) {
logger.error("Failed to close buffer", ex);
}
}

@Override
public Drop<NullableBuffer> fork() {
return this;
}

@Override
public void attach(NullableBuffer obj) {

}
};

@Nullable
private Buffer buffer;

public NullableBuffer(@Nullable Buffer buffer) {
super(DROP);
this.buffer = buffer == null ? null : buffer.send().receive();
}

public NullableBuffer(@Nullable Send<Buffer> buffer) {
super(DROP);
this.buffer = buffer == null ? null : buffer.receive();
}

@Nullable
public Buffer buf() {
return buffer;
}

@Nullable
public Send<Buffer> sendBuf() {
return buffer == null ? null : buffer.send();
}

@Override
protected RuntimeException createResourceClosedException() {
return new IllegalStateException("Closed");
}

@Override
protected Owned<NullableBuffer> prepareSend() {
var buffer = this.buffer == null ? null : this.buffer.send();
return drop -> new NullableBuffer(buffer);
}

protected void makeInaccessible() {
this.buffer = null;
}
}
@ -65,7 +65,7 @@ public abstract class SimpleResource implements SafeCloseable {
}
}

protected boolean isClosed() {
public boolean isClosed() {
return canClose && closed.get();
}

@ -74,7 +74,7 @@ public abstract class SimpleResource implements SafeCloseable {
}

protected void ensureOpen() {
if (canClose && closed.get()) {
if (isClosed()) {
throw new IllegalStateException("Resource is closed");
}
}
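Aside (illustrative, not part of the diff): based only on the SimpleResource methods visible in this commit (ensureOpen, isClosed, onClose), a typical wrapper now follows the shape below. The wrapped org.rocksdb.Options object is a hypothetical stand-in; the real wrappers in the diff use ReadOptions, WriteOptions, and similar handles.

// Hedged sketch of a SimpleResource subclass, mirroring LLReadOptions/LLWriteOptions above.
import it.cavallium.dbengine.utils.SimpleResource;
import org.rocksdb.Options;

public final class LLExampleOptions extends SimpleResource {

	private final Options val; // hypothetical wrapped native handle

	public LLExampleOptions(Options val) {
		this.val = val;
	}

	public Options getNative() {
		ensureOpen(); // throws IllegalStateException("Resource is closed") if already closed
		return val;
	}

	@Override
	protected void onClose() {
		val.close(); // release the native handle when close() runs
	}
}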
@ -143,8 +143,7 @@ public class DbTestUtils {
if (mapType == MapType.MAP) {
return DatabaseMapDictionary.simple(dictionary,
SerializerFixedBinaryLength.utf8(keyBytes),
Serializer.UTF8_SERIALIZER,
null
Serializer.UTF8_SERIALIZER
);
} else {
return DatabaseMapDictionaryHashed.simple(dictionary,
@ -167,8 +166,7 @@ public class DbTestUtils {
public void serialize(@NotNull Short deserialized, Buffer output) {
output.writeShort(deserialized);
}
},
null
}
);
}
}
@ -183,8 +181,7 @@ public class DbTestUtils {
key2Bytes,
new SubStageGetterMap<>(SerializerFixedBinaryLength.utf8(key2Bytes),
Serializer.UTF8_SERIALIZER
),
null
)
);
}

@ -199,8 +196,7 @@ public class DbTestUtils {
Serializer.UTF8_SERIALIZER,
String::hashCode,
SerializerFixedBinaryLength.intSerializer(dictionary.getAllocator())
),
null
)
);
}

@ -210,8 +206,7 @@ public class DbTestUtils {
Serializer.UTF8_SERIALIZER,
Serializer.UTF8_SERIALIZER,
String::hashCode,
SerializerFixedBinaryLength.intSerializer(dictionary.getAllocator()),
null
SerializerFixedBinaryLength.intSerializer(dictionary.getAllocator())
);
}
}
@ -14,6 +14,7 @@ import static org.assertj.core.api.Assertions.*;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.DbTestUtils.TestAllocator;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.utils.SimpleResource;
import it.unimi.dsi.fastutil.objects.Object2ObjectLinkedOpenHashMap;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
import java.util.Arrays;
@ -269,14 +270,14 @@ public abstract class TestDictionaryMapDeep {
.flatMap(v_ -> Mono.using(
() -> v_,
v -> v.set(value),
ResourceSupport::close
SimpleResource::close
))
.then(map
.at(null, "capra")
.flatMap(v_ -> Mono.using(
() -> v_,
v -> v.set(new Object2ObjectLinkedOpenHashMap<>(Map.of("normal", "123", "ormaln", "456"))),
ResourceSupport::close
SimpleResource::close
))
)
.thenMany(map
@ -287,7 +288,7 @@ public abstract class TestDictionaryMapDeep {
.doFinally(s -> v.getValue().close())
)
),
ResourceSupport::close
SimpleResource::close
))
));
if (shouldFail) {
@ -178,6 +178,6 @@ public abstract class TestSingletons {
public static Mono<DatabaseSingleton<String>> tempSingleton(LLKeyValueDatabase database, String name) {
return database
.getSingleton("longs", name)
.map(singleton -> new DatabaseSingleton<>(singleton, Serializer.UTF8_SERIALIZER, null));
.map(singleton -> new DatabaseSingleton<>(singleton, Serializer.UTF8_SERIALIZER));
}
}