Fix more leaks
commit 6a5a9a3e94 (parent bfc78f1465)
.gitignore (vendored): 2 lines changed

@@ -186,3 +186,5 @@ dbengine.iml
 /.classpath
 /.project
 /.settings/
+
+.flattened-pom.xml

pom.xml: 6 lines changed

@@ -188,7 +188,7 @@
   <dependency>
     <groupId>org.rocksdb</groupId>
     <artifactId>rocksdbjni</artifactId>
-    <version>7.1.2</version>
+    <version>7.2.2</version>
   </dependency>
   <dependency>
     <groupId>org.apache.lucene</groupId>
@@ -374,7 +374,7 @@
   <dependency>
     <groupId>it.cavallium</groupId>
     <artifactId>data-generator-runtime</artifactId>
-    <version>1.0.66</version>
+    <version>1.0.68</version>
     <exclusions>
       <exclusion>
         <groupId>org.jetbrains</groupId>
@@ -470,7 +470,7 @@
   <plugin>
     <groupId>it.cavallium</groupId>
     <artifactId>data-generator</artifactId>
-    <version>0.9.131</version>
+    <version>0.9.133</version>
     <executions>
       <execution>
         <id>generate-lucene-query-sources</id>

AbstractRocksDBColumn.java

@@ -277,14 +277,14 @@ public sealed abstract class AbstractRocksDBColumn<T extends RocksDB> implements
   static ReleasableSlice emptyReleasableSlice() {
     var arr = new byte[0];
 
-    return new ReleasableSliceImplWithoutRelease(new Slice(arr));
+    return new ReleasableSliceImplWithRelease(new Slice(arr));
   }
 
   /**
    * This method should not modify or move the writerIndex/readerIndex of the buffers inside the range
    */
   @NotNull
-  public RocksIteratorTuple getRocksIterator(boolean allowNettyDirect,
+  public RocksIteratorTuple newRocksIterator(boolean allowNettyDirect,
       ReadOptions readOptions,
       LLRange range,
       boolean reverse) throws RocksDBException {
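The change above swaps ReleasableSliceImplWithoutRelease for ReleasableSliceImplWithRelease, so the org.rocksdb.Slice wrapped around the empty array is now released when the wrapper is closed. As a standalone illustration of why that matters (the class name SliceReleaseSketch and the main method are invented for this sketch, not project code), a Slice is a native RocksDB handle that implements AutoCloseable and can be scoped with try-with-resources:

import org.rocksdb.RocksDB;
import org.rocksdb.Slice;

public final class SliceReleaseSketch {
    public static void main(String[] args) {
        RocksDB.loadLibrary(); // the native library must be loaded before creating any RocksObject
        // Even a Slice built over an empty byte[] owns a native handle;
        // closing it releases that handle deterministically.
        try (Slice empty = new Slice(new byte[0])) {
            System.out.println("empty slice length: " + empty.data().length);
        }
    }
}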
@@ -301,9 +301,9 @@ public sealed abstract class AbstractRocksDBColumn<T extends RocksDB> implements
     } else {
       sliceMax = emptyReleasableSlice();
     }
+    SafeCloseable seekFromOrTo = null;
     var rocksIterator = this.newIterator(readOptions);
-    SafeCloseable seekFromOrTo;
+    try {
       if (reverse) {
         if (!LLLocalDictionary.PREFER_AUTO_SEEK_BOUND && range.hasMax()) {
           seekFromOrTo = Objects.requireNonNullElseGet(rocksIterator.seekFrom(range.getMaxUnsafe()),
@@ -324,6 +324,11 @@ public sealed abstract class AbstractRocksDBColumn<T extends RocksDB> implements
       return new RocksIteratorTuple(rocksIterator, sliceMin, sliceMax, seekFromOrTo);
+    } catch (Throwable ex) {
+      rocksIterator.close();
+      sliceMin.close();
+      sliceMax.close();
+      if (seekFromOrTo != null) {
+        seekFromOrTo.close();
+      }
+      throw ex;
+    }
   }
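The new catch (Throwable) block follows a standard acquire-or-clean-up pattern: newRocksIterator acquires several closeable resources in sequence, and if anything fails after a partial acquisition, everything acquired so far is closed before the exception is rethrown. A minimal self-contained sketch of the same pattern (the Resource class and the openAll helper are hypothetical, invented for illustration):

import java.util.ArrayDeque;
import java.util.Deque;

public final class CleanupOnFailureSketch {

    static final class Resource implements AutoCloseable {
        private final String name;
        Resource(String name) {
            this.name = name;
            if (name.startsWith("bad")) {
                throw new IllegalStateException("failed to open " + name);
            }
        }
        @Override
        public void close() {
            System.out.println("closed " + name);
        }
    }

    // Acquire resources one by one; on any failure, close the ones already acquired and rethrow.
    static Deque<Resource> openAll(String... names) {
        Deque<Resource> acquired = new ArrayDeque<>();
        try {
            for (String name : names) {
                acquired.push(new Resource(name));
            }
            return acquired;
        } catch (Throwable ex) {
            while (!acquired.isEmpty()) {
                acquired.pop().close(); // close in reverse acquisition order
            }
            throw ex;
        }
    }

    public static void main(String[] args) {
        try {
            openAll("iterator", "sliceMin", "bad-seek");
        } catch (IllegalStateException ex) {
            System.out.println("caught: " + ex.getMessage());
        }
    }
}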

LLLocalDictionary.java

@@ -8,7 +8,6 @@ import static it.cavallium.dbengine.database.LLUtils.isReadOnlyDirect;
import static it.cavallium.dbengine.database.LLUtils.toStringSafe;
import static it.cavallium.dbengine.database.disk.UpdateAtomicResultMode.DELTA;
import static java.util.Objects.requireNonNull;
import static java.util.Objects.requireNonNullElse;

import io.micrometer.core.instrument.Counter;
import io.micrometer.core.instrument.Timer;
@@ -35,7 +34,6 @@ import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.ForkJoinPool;
@@ -926,16 +924,17 @@ public class LLLocalDictionary implements LLDictionary {
       }
     }
     ro.setVerifyChecksums(true);
-    try (var rocksIteratorTuple = db.getRocksIterator(nettyDirect, ro, range, false)) {
-      var rocksIterator = rocksIteratorTuple.iterator();
-      rocksIterator.seekToFirst();
-      while (rocksIterator.isValid() && !sink.isCancelled()) {
-        try {
-          rocksIterator.key(DUMMY_WRITE_ONLY_BYTE_BUFFER);
-          rocksIterator.value(DUMMY_WRITE_ONLY_BYTE_BUFFER);
-          rocksIterator.next();
-        } catch (RocksDBException ex) {
-          sink.next(new BadBlock(databaseName, ColumnUtils.special(columnName), null, ex));
+    try (var rocksIteratorTuple = db.newRocksIterator(nettyDirect, ro, range, false)) {
+      try (var rocksIterator = rocksIteratorTuple.iterator()) {
+        rocksIterator.seekToFirst();
+        while (rocksIterator.isValid() && !sink.isCancelled()) {
+          try {
+            rocksIterator.key(DUMMY_WRITE_ONLY_BYTE_BUFFER);
+            rocksIterator.value(DUMMY_WRITE_ONLY_BYTE_BUFFER);
+            rocksIterator.next();
+          } catch (RocksDBException ex) {
+            sink.next(new BadBlock(databaseName, ColumnUtils.special(columnName), null, ex));
+          }
         }
       }
     }
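Here the iterator gets its own try-with-resources block nested inside the one that owns the tuple, so the iterator is always closed before the tuple, even if the verification loop throws. A tiny sketch of that close ordering (the Res record and the resource names are invented for illustration):

public final class CloseOrderSketch {

    record Res(String name) implements AutoCloseable {
        @Override
        public void close() {
            System.out.println("closing " + name);
        }
    }

    public static void main(String[] args) {
        // Nested blocks close the innermost resource first:
        try (Res tuple = new Res("iterator tuple")) {
            try (Res iterator = new Res("iterator")) {
                System.out.println("using " + iterator.name() + " owned by " + tuple.name());
            }
        }
        // Output order: "closing iterator", then "closing iterator tuple".
    }
}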
@@ -1603,7 +1602,7 @@ public class LLLocalDictionary implements LLDictionary {
     if (sliceBegin != null) {
       rangeReadOpts.setIterateLowerBound(sliceBegin);
     }
-    if (sliceBegin != null) {
+    if (sliceEnd != null) {
       rangeReadOpts.setIterateUpperBound(sliceEnd);
     }
     try (var rocksIterator = db.newIterator(rangeReadOpts)) {
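The one-line fix above makes the upper bound depend on sliceEnd rather than re-checking sliceBegin, so a range with only an upper bound is now bounded correctly. For context, a hedged standalone sketch of bounded iteration with rocksdbjni (the database path, key contents, and class name are placeholders, not project code); the bound Slices must stay open for as long as the ReadOptions and the iterator are in use:

import org.rocksdb.Options;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;
import org.rocksdb.Slice;

public final class BoundedIterationSketch {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options opts = new Options().setCreateIfMissing(true);
                RocksDB db = RocksDB.open(opts, "/tmp/bounded-iteration-sketch"); // placeholder path
                Slice lower = new Slice("a".getBytes());
                Slice upper = new Slice("m".getBytes());
                ReadOptions ro = new ReadOptions()
                        .setIterateLowerBound(lower)   // inclusive lower bound
                        .setIterateUpperBound(upper);  // exclusive upper bound
                RocksIterator it = db.newIterator(ro)) {
            for (it.seekToFirst(); it.isValid(); it.next()) {
                System.out.println(new String(it.key())); // only keys in [a, m)
            }
        }
    }
}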

LLLocalGroupedReactiveRocksIterator.java

@@ -19,7 +19,6 @@ import org.jetbrains.annotations.Nullable;
 import org.rocksdb.ReadOptions;
 import org.rocksdb.RocksDBException;
 import reactor.core.publisher.Flux;
-import reactor.util.function.Tuples;
 
 public abstract class LLLocalGroupedReactiveRocksIterator<T> extends
     ResourceSupport<LLLocalGroupedReactiveRocksIterator<T>, LLLocalGroupedReactiveRocksIterator<T>> {
@@ -86,17 +85,16 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> extends
     }
   }
 
-
   public final Flux<List<T>> flux() {
     return Flux.generate(() -> {
       var readOptions = generateCustomReadOptions(this.readOptions, true, isBoundedRange(range), smallRange);
       if (logger.isTraceEnabled()) {
         logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(range));
       }
-      return Tuples.of(readOptions, db.getRocksIterator(allowNettyDirect, readOptions, range, false));
+      return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(allowNettyDirect, readOptions, range, false));
     }, (tuple, sink) -> {
       try {
-        var rocksIterator = tuple.getT2().iterator();
+        var rocksIterator = tuple.iter().iterator();
         ObjectArrayList<T> values = new ObjectArrayList<>();
         Buffer firstGroupKey = null;
         try {
@@ -159,10 +157,7 @@ public abstract class LLLocalGroupedReactiveRocksIterator<T> extends
         sink.error(ex);
       }
       return tuple;
-    }, t -> {
-      t.getT2().close();
-      t.getT1().close();
-    });
+    }, RocksIterWithReadOpts::close);
   }
 
   public abstract T getEntry(@Nullable Send<Buffer> key, @Nullable Send<Buffer> value);
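Replacing the Tuples pair and the hand-written cleanup lambda with the RocksIterWithReadOpts record lets the third argument of Flux.generate be a plain method reference that releases everything in one place, on every termination path (complete, error, or cancel). A self-contained sketch of that three-argument Flux.generate shape, assuming reactor-core is on the classpath (the CountingState record and the counter logic are invented for the example):

import java.util.concurrent.atomic.AtomicInteger;
import reactor.core.publisher.Flux;

public final class GenerateWithCleanupSketch {

    record CountingState(AtomicInteger counter) implements AutoCloseable {
        @Override
        public void close() {
            System.out.println("state closed after " + counter.get() + " emissions");
        }
    }

    public static void main(String[] args) {
        Flux<Integer> numbers = Flux.<Integer, CountingState>generate(
                () -> new CountingState(new AtomicInteger()),   // state supplier: acquire the resource
                (state, sink) -> {                              // generator: emit one item per call
                    int next = state.counter().incrementAndGet();
                    if (next > 3) {
                        sink.complete();
                    } else {
                        sink.next(next);
                    }
                    return state;                               // keep reusing the same state
                },
                CountingState::close);                          // cleanup runs on complete, error, or cancel

        numbers.subscribe(System.out::println);
    }
}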

LLLocalKeyPrefixReactiveRocksIterator.java

@@ -90,10 +90,10 @@ public class LLLocalKeyPrefixReactiveRocksIterator extends
       if (logger.isTraceEnabled()) {
         logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(rangeShared));
       }
-      return Tuples.of(readOptions, db.getRocksIterator(allowNettyDirect, readOptions, rangeShared, false));
+      return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(allowNettyDirect, readOptions, rangeShared, false));
     }, (tuple, sink) -> {
       try {
-        var rocksIterator = tuple.getT2().iterator();
+        var rocksIterator = tuple.iter().iterator();
         Buffer firstGroupKey = null;
         try {
           while (rocksIterator.isValid()) {
@@ -150,10 +150,7 @@ public class LLLocalKeyPrefixReactiveRocksIterator extends
         sink.error(ex);
       }
       return tuple;
-    }, t -> {
-      t.getT2().close();
-      t.getT1().close();
-    });
+    }, RocksIterWithReadOpts::close);
   }
 
   @Override

LLLocalMigrationReactiveRocksIterator.java

@@ -1,21 +1,14 @@
package it.cavallium.dbengine.database.disk;

import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
import static it.cavallium.dbengine.database.LLUtils.generateCustomReadOptions;
import static it.cavallium.dbengine.database.LLUtils.isBoundedRange;

import io.netty5.buffer.api.Buffer;
import io.netty5.buffer.api.Drop;
import io.netty5.buffer.api.Owned;
import io.netty5.buffer.api.Send;
import io.netty5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.disk.LLLocalMigrationReactiveRocksIterator.ByteEntry;
import java.util.Map;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDBException;
import reactor.core.publisher.Flux;
@@ -76,11 +69,11 @@ public final class LLLocalMigrationReactiveRocksIterator extends
   public Flux<ByteEntry> flux() {
     return Flux.generate(() -> {
       var readOptions = generateCustomReadOptions(this.readOptions, false, false, false);
-      return Tuples.of(readOptions, db.getRocksIterator(false, readOptions, rangeShared, false));
+      return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(false, readOptions, rangeShared, false));
     }, (tuple, sink) -> {
       try {
         //noinspection resource
-        var rocksIterator = tuple.getT2().iterator();
+        var rocksIterator = tuple.iter().iterator();
         if (rocksIterator.isValid()) {
           byte[] key = rocksIterator.key();
           byte[] value = rocksIterator.value();
@@ -93,11 +86,7 @@ public final class LLLocalMigrationReactiveRocksIterator extends
         sink.error(ex);
       }
       return tuple;
-    }, t -> {
-      t.getT2().close();
-      t.getT1().close();
-      this.close();
-    });
+    }, RocksIterWithReadOpts::close);
   }
 
   @Override

LLLocalReactiveRocksIterator.java

@@ -87,10 +87,10 @@ public abstract class LLLocalReactiveRocksIterator<T> extends
       if (logger.isTraceEnabled()) {
         logger.trace(MARKER_ROCKSDB, "Range {} started", LLUtils.toStringSafe(rangeShared));
       }
-      return Tuples.of(readOptions, db.getRocksIterator(allowNettyDirect, readOptions, rangeShared, reverse));
+      return new RocksIterWithReadOpts(readOptions, db.newRocksIterator(allowNettyDirect, readOptions, rangeShared, reverse));
     }, (tuple, sink) -> {
       try {
-        var rocksIterator = tuple.getT2().iterator();
+        var rocksIterator = tuple.iter().iterator();
         if (rocksIterator.isValid()) {
           Buffer key;
           if (allowNettyDirect) {
@@ -145,10 +145,7 @@ public abstract class LLLocalReactiveRocksIterator<T> extends
         sink.error(ex);
       }
       return tuple;
-    }, t -> {
-      t.getT2().close();
-      t.getT1().close();
-    });
+    }, RocksIterWithReadOpts::close);
   }
 
   public abstract T getEntry(@Nullable Send<Buffer> key, @Nullable Send<Buffer> value);

RocksDBColumn.java

@@ -3,10 +3,8 @@ package it.cavallium.dbengine.database.disk;
import io.micrometer.core.instrument.MeterRegistry;
import io.netty5.buffer.api.Buffer;
import io.netty5.buffer.api.BufferAllocator;
import io.netty5.buffer.api.Send;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import java.io.IOException;
import java.util.List;
import org.jetbrains.annotations.NotNull;
@@ -16,7 +14,6 @@ import org.rocksdb.CompactRangeOptions;
import org.rocksdb.FlushOptions;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;
@@ -26,7 +23,7 @@ public sealed interface RocksDBColumn permits AbstractRocksDBColumn {
    * This method should not modify or move the writerIndex/readerIndex of the buffers inside the range
    */
   @NotNull
-  RocksIteratorTuple getRocksIterator(boolean allowNettyDirect,
+  RocksIteratorTuple newRocksIterator(boolean allowNettyDirect,
       ReadOptions readOptions,
       LLRange range,
       boolean reverse) throws RocksDBException;

RocksIterWithReadOpts.java (new file)

@@ -0,0 +1,15 @@
+package it.cavallium.dbengine.database.disk;
+
+import it.cavallium.dbengine.database.SafeCloseable;
+import org.rocksdb.ReadOptions;
+
+record RocksIterWithReadOpts(ReadOptions readOptions, RocksIteratorTuple iter) implements SafeCloseable {
+
+  @Override
+  public void close() {
+    if (readOptions != null) {
+      readOptions.close();
+    }
+    iter.close();
+  }
+}

HugePqPriorityQueue.java

@@ -101,16 +101,17 @@ public class HugePqPriorityQueue<T> implements PriorityQueue<T>, Reversable<Reve
   }
 
   private T databaseTop() {
-    try (var it = rocksDB.getRocksIterator(true, READ_OPTIONS, LLRange.all(), false)) {
-      var rocksIterator = it.iterator();
-      rocksIterator.seekToFirst();
-      if (rocksIterator.isValid()) {
-        var key = rocksIterator.key();
-        try (var keyBuf = rocksDB.getAllocator().copyOf(key)) {
-          return deserializeKey(keyBuf);
-        }
-      } else {
-        return null;
-      }
+    try (var it = rocksDB.newRocksIterator(true, READ_OPTIONS, LLRange.all(), false)) {
+      try (var rocksIterator = it.iterator()) {
+        rocksIterator.seekToFirst();
+        if (rocksIterator.isValid()) {
+          var key = rocksIterator.key();
+          try (var keyBuf = rocksDB.getAllocator().copyOf(key)) {
+            return deserializeKey(keyBuf);
+          }
+        } else {
+          return null;
+        }
+      }
     } catch (RocksDBException e) {
       throw new IllegalStateException(e);
@@ -120,18 +121,19 @@ public class HugePqPriorityQueue<T> implements PriorityQueue<T>, Reversable<Reve
   @Override
   public T pop() {
     ensureThread();
-    try (var it = rocksDB.getRocksIterator(true, READ_OPTIONS, LLRange.all(), false)) {
-      var rocksIterator = it.iterator();
-      rocksIterator.seekToFirst();
-      if (rocksIterator.isValid()) {
-        var key = rocksIterator.key();
-        try (var keyBuf = rocksDB.getAllocator().copyOf(key)) {
-          rocksDB.updateAtomic(READ_OPTIONS, WRITE_OPTIONS, keyBuf, this::reduceOrRemove, UpdateAtomicResultMode.NOTHING);
-          --size;
-          return deserializeKey(keyBuf);
-        }
-      } else {
-        return null;
-      }
+    try (var it = rocksDB.newRocksIterator(true, READ_OPTIONS, LLRange.all(), false)) {
+      try (var rocksIterator = it.iterator()) {
+        rocksIterator.seekToFirst();
+        if (rocksIterator.isValid()) {
+          var key = rocksIterator.key();
+          try (var keyBuf = rocksDB.getAllocator().copyOf(key)) {
+            rocksDB.updateAtomic(READ_OPTIONS, WRITE_OPTIONS, keyBuf, this::reduceOrRemove, UpdateAtomicResultMode.NOTHING);
+            --size;
+            return deserializeKey(keyBuf);
+          }
+        } else {
+          return null;
+        }
+      }
     } catch (RocksDBException | IOException e) {
       throw new IllegalStateException(e);
@@ -231,7 +233,7 @@ public class HugePqPriorityQueue<T> implements PriorityQueue<T>, Reversable<Reve
 
   private Flux<T> iterate(long skips, boolean reverse) {
     return Flux.<List<T>, RocksIteratorTuple>generate(() -> {
-      var it = rocksDB.getRocksIterator(true, READ_OPTIONS, LLRange.all(), reverse);
+      var it = rocksDB.newRocksIterator(true, READ_OPTIONS, LLRange.all(), reverse);
       var rocksIterator = it.iterator();
       if (reverse) {
         rocksIterator.seekToLast();
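In the reverse case the iterator starts from seekToLast() and walks backwards with prev(). A minimal standalone rocksdbjni sketch of reverse iteration (the database path and class name are placeholders, not project code):

import org.rocksdb.Options;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.RocksIterator;

public final class ReverseIterationSketch {
    public static void main(String[] args) throws RocksDBException {
        RocksDB.loadLibrary();
        try (Options opts = new Options().setCreateIfMissing(true);
                RocksDB db = RocksDB.open(opts, "/tmp/reverse-iteration-sketch"); // placeholder path
                ReadOptions ro = new ReadOptions();
                RocksIterator it = db.newIterator(ro)) {
            // Walk the whole keyspace from the last key to the first.
            for (it.seekToLast(); it.isValid(); it.prev()) {
                System.out.println(new String(it.key()));
            }
        }
    }
}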