Use log4j for logging, rewrite some local dictionary parts

Andrea Cavalli 2021-12-17 01:48:49 +01:00
parent 7a712722d7
commit 1dffb55572
30 changed files with 244 additions and 243 deletions
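The bulk of this diff is a mechanical logging migration: every logger obtained through the org.warp.commonutils.log wrappers (and every slf4j Marker) is replaced with the equivalent Log4j 2 API. A minimal before/after sketch of the pattern; the Example class is hypothetical, the imports are the real ones used below:

// Before: slf4j-style wrappers
//   import org.warp.commonutils.log.Logger;
//   import org.warp.commonutils.log.LoggerFactory;
//   import org.slf4j.Marker;
//   import org.slf4j.MarkerFactory;
//   private static final Logger logger = LoggerFactory.getLogger(Example.class);
//   private static final Marker MARKER_ROCKSDB = MarkerFactory.getMarker("ROCKSDB");

// After: the Log4j 2 API, as used throughout this commit
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;

public class Example {

	private static final Logger logger = LogManager.getLogger(Example.class);
	private static final Marker MARKER_ROCKSDB = MarkerManager.getMarker("ROCKSDB");

	void demo(Object key) {
		// Log4j 2 accepts Supplier lambdas as message parameters, so the
		// toString work below runs only if TRACE is actually enabled.
		logger.trace(MARKER_ROCKSDB, "Read {}", () -> String.valueOf(key));
	}
}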

pom.xml
View File

@@ -81,21 +81,6 @@
<tag>HEAD</tag>
</scm>
<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
</dependency>
<dependency>
<groupId>uk.org.lidalia</groupId>
<artifactId>lidalia-slf4j-ext</artifactId>
<version>1.0.0</version>
<exclusions>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@@ -176,12 +161,10 @@
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-slf4j-impl</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.lmax</groupId>
@@ -278,11 +261,6 @@
</dependencies>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.30</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@@ -368,12 +346,12 @@
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-core</artifactId>
<version>2.15.0</version>
<version>2.16.0</version>
</dependency>
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-slf4j-impl</artifactId>
<version>2.15.0</version>
<version>2.16.0</version>
</dependency>
<dependency>
<groupId>com.lmax</groupId>
@@ -443,17 +421,17 @@
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-core</artifactId>
<version>3.4.11</version>
<version>3.4.13</version>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-tools</artifactId>
<version>3.4.11</version>
<version>3.4.13</version>
</dependency>
<dependency>
<groupId>io.projectreactor</groupId>
<artifactId>reactor-test</artifactId>
<version>3.4.11</version>
<version>3.4.13</version>
<scope>test</scope>
</dependency>
<dependency>

LLDelta.java
View File

@@ -6,13 +6,13 @@ import io.net5.buffer.api.Owned;
import io.net5.buffer.api.Send;
import io.net5.buffer.api.internal.ResourceSupport;
import java.util.StringJoiner;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
public class LLDelta extends ResourceSupport<LLDelta, LLDelta> {
private static final Logger logger = LoggerFactory.getLogger(LLDelta.class);
private static final Logger logger = LogManager.getLogger(LLDelta.class);
private static final Drop<LLDelta> DROP = new Drop<>() {
@Override

LLEntry.java
View File

@@ -8,14 +8,14 @@ import io.net5.buffer.api.Send;
import io.net5.buffer.api.internal.ResourceSupport;
import java.util.Objects;
import java.util.StringJoiner;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
public class LLEntry extends ResourceSupport<LLEntry, LLEntry> {
private static final Logger logger = LoggerFactory.getLogger(LLEntry.class);
private static final Logger logger = LogManager.getLogger(LLEntry.class);
private static final Drop<LLEntry> DROP = new Drop<>() {
@Override

LLRange.java
View File

@@ -8,8 +8,9 @@ import io.net5.buffer.api.Owned;
import io.net5.buffer.api.Send;
import io.net5.buffer.api.internal.ResourceSupport;
import java.util.StringJoiner;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
/**
@@ -17,7 +18,7 @@ import org.warp.commonutils.log.LoggerFactory;
*/
public class LLRange extends ResourceSupport<LLRange, LLRange> {
private static final Logger logger = LoggerFactory.getLogger(LLRange.class);
private static final Logger logger = LogManager.getLogger(LLRange.class);
private static final Drop<LLRange> DROP = new Drop<>() {
@Override
@@ -83,11 +84,11 @@ public class LLRange extends ResourceSupport<LLRange, LLRange> {
}
private boolean isAllAccessible() {
assert min == null || min.isAccessible();
assert max == null || max.isAccessible();
assert single == null || single.isAccessible();
assert this.isAccessible();
assert this.isOwned();
assert min == null || min.isAccessible() : "Range min not owned";
assert max == null || max.isAccessible() : "Range max not owned";
assert single == null || single.isAccessible() : "Range single not owned";
assert this.isAccessible() : "Range not accessible";
assert this.isOwned() : "Range not owned";
return true;
}

LLSearchResultShard.java
View File

@@ -5,13 +5,13 @@ import io.net5.buffer.api.Owned;
import io.net5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import java.util.Objects;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import reactor.core.publisher.Flux;
public final class LLSearchResultShard extends ResourceSupport<LLSearchResultShard, LLSearchResultShard> {
private static final Logger logger = LoggerFactory.getLogger(LLSearchResultShard.class);
private static final Logger logger = LogManager.getLogger(LLSearchResultShard.class);
private static final Drop<LLSearchResultShard> DROP = new Drop<>() {
@Override

LLUtils.java
View File

@@ -38,6 +38,10 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Function;
import java.util.function.ToIntFunction;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.FloatPoint;
@@ -57,10 +61,6 @@ import org.apache.lucene.search.SortedNumericSortField;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.RocksDB;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import org.slf4j.Marker;
import org.slf4j.MarkerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
@@ -70,9 +70,9 @@ import reactor.util.function.Tuple3;
@SuppressWarnings("unused")
public class LLUtils {
private static final Logger logger = LoggerFactory.getLogger(LLUtils.class);
public static final Marker MARKER_ROCKSDB = MarkerFactory.getMarker("ROCKSDB");
public static final Marker MARKER_LUCENE = MarkerFactory.getMarker("LUCENE");
private static final Logger logger = LogManager.getLogger(LLUtils.class);
public static final Marker MARKER_ROCKSDB = MarkerManager.getMarker("ROCKSDB");
public static final Marker MARKER_LUCENE = MarkerManager.getMarker("LUCENE");
public static final int INITIAL_DIRECT_READ_BYTE_BUF_SIZE_BYTES = 4096;
public static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.allocateDirect(0).asReadOnlyBuffer();
@@ -855,11 +855,23 @@ public class LLUtils {
}
public static Mono<Send<Buffer>> lazyRetain(Buffer buf) {
return Mono.just(buf).map(b -> b.copy().send());
return Mono.fromSupplier(() -> {
if (buf != null && buf.isAccessible()) {
return buf.copy().send();
} else {
return null;
}
});
}
public static Mono<Send<LLRange>> lazyRetainRange(LLRange range) {
return Mono.just(range).map(r -> r.copy().send());
return Mono.fromSupplier(() -> {
if (range != null && range.isAccessible()) {
return range.copy().send();
} else {
return null;
}
});
}
public static Mono<Send<Buffer>> lazyRetain(Callable<Send<Buffer>> bufCallable) {
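The lazyRetain rewrite above trades Mono.just(...).map(...) for Mono.fromSupplier(...): nothing is captured or copied until subscription, and a buffer that is null or already released now yields an empty Mono instead of a NullPointerException or a copy of a closed buffer. A self-contained sketch of the idea, assuming only a stub Resource type in place of io.net5's Buffer:

import reactor.core.publisher.Mono;

public class LazyRetainSketch {

	interface Resource {
		boolean isAccessible();
		Resource copy();
	}

	// Old shape: Mono.just(null) throws at assembly time, and the copy in
	// map() can run against a resource that was closed before subscription.
	static Mono<Resource> eagerRetain(Resource resource) {
		return Mono.just(resource).map(Resource::copy);
	}

	// New shape: the supplier runs once per subscription; returning null
	// makes Mono.fromSupplier complete empty instead of erroring.
	static Mono<Resource> lazyRetain(Resource resource) {
		return Mono.fromSupplier(() -> {
			if (resource != null && resource.isAccessible()) {
				return resource.copy();
			}
			return null;
		});
	}
}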

DatabaseMapDictionaryDeep.java
View File

@@ -19,10 +19,10 @@ import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import java.util.Map;
import java.util.Map.Entry;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@@ -30,7 +30,7 @@ import reactor.core.publisher.Mono;
public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> extends ResourceSupport<DatabaseStage<Map<T, U>>, DatabaseMapDictionaryDeep<T, U, US>>
implements DatabaseStageMap<T, U, US> {
private static final Logger logger = LoggerFactory.getLogger(DatabaseMapDictionaryDeep.class);
private static final Logger logger = LogManager.getLogger(DatabaseMapDictionaryDeep.class);
private static final Drop<DatabaseMapDictionaryDeep<?, ?, ?>> DROP = new Drop<>() {
@Override

DatabaseMapDictionaryHashed.java
View File

@@ -22,10 +22,10 @@ import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@@ -33,7 +33,7 @@ import reactor.core.publisher.Mono;
public class DatabaseMapDictionaryHashed<T, U, TH> extends ResourceSupport<DatabaseStage<Map<T, U>>, DatabaseMapDictionaryHashed<T, U, TH>>
implements DatabaseStageMap<T, U, DatabaseStageEntry<U>> {
private static final Logger logger = LoggerFactory.getLogger(DatabaseMapDictionaryHashed.class);
private static final Logger logger = LogManager.getLogger(DatabaseMapDictionaryHashed.class);
private static final Drop<DatabaseMapDictionaryHashed<?, ?, ?>> DROP = new Drop<>() {
@Override

DatabaseSingle.java
View File

@@ -18,9 +18,9 @@ import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.SynchronousSink;
@@ -28,7 +28,7 @@ import reactor.core.publisher.SynchronousSink;
public class DatabaseSingle<U> extends ResourceSupport<DatabaseStage<U>, DatabaseSingle<U>> implements
DatabaseStageEntry<U> {
private static final Logger logger = LoggerFactory.getLogger(DatabaseSingle.class);
private static final Logger logger = LogManager.getLogger(DatabaseSingle.class);
private static final Drop<DatabaseSingle<?>> DROP = new Drop<>() {
@Override

DatabaseSingleBucket.java
View File

@@ -15,10 +15,10 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@@ -27,7 +27,7 @@ public class DatabaseSingleBucket<K, V, TH>
extends ResourceSupport<DatabaseStage<V>, DatabaseSingleBucket<K, V, TH>>
implements DatabaseStageEntry<V> {
private static final Logger logger = LoggerFactory.getLogger(DatabaseSingleBucket.class);
private static final Logger logger = LogManager.getLogger(DatabaseSingleBucket.class);
private static final Drop<DatabaseSingleBucket<?, ?, ?>> DROP = new Drop<>() {
@Override

DatabaseSingleMapped.java
View File

@@ -13,9 +13,9 @@ import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.publisher.SynchronousSink;
@@ -24,7 +24,7 @@ import reactor.core.publisher.SynchronousSink;
public class DatabaseSingleMapped<A, B> extends ResourceSupport<DatabaseStage<A>, DatabaseSingleMapped<A, B>>
implements DatabaseStageEntry<A> {
private static final Logger logger = LoggerFactory.getLogger(DatabaseSingleMapped.class);
private static final Logger logger = LogManager.getLogger(DatabaseSingleMapped.class);
private static final Drop<DatabaseSingleMapped<?, ?>> DROP = new Drop<>() {
@Override

AbstractRocksDBColumn.java
View File

@@ -25,6 +25,8 @@ import it.cavallium.dbengine.database.RepeatedElementList;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.ColumnFamilyHandle;
@@ -40,8 +42,6 @@ import org.rocksdb.RocksIterator;
import org.rocksdb.Transaction;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.scheduler.Schedulers;
import sun.misc.Unsafe;
@@ -51,7 +51,7 @@ public sealed abstract class AbstractRocksDBColumn<T extends RocksDB> implements
private static final byte[] NO_DATA = new byte[0];
protected static final UpdateAtomicResult RESULT_NOTHING = new UpdateAtomicResultNothing();
protected final Logger logger = LoggerFactory.getLogger(this.getClass());
protected final Logger logger = LogManager.getLogger(this.getClass());
private final T db;
private final DatabaseOptions opts;

CachedIndexSearcherManager.java
View File

@@ -17,6 +17,8 @@ import java.util.concurrent.Phaser;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.SearcherFactory;
@@ -25,8 +27,6 @@ import org.apache.lucene.search.similarities.Similarity;
import org.apache.lucene.store.AlreadyClosedException;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import org.warp.commonutils.type.ShortNamedThreadFactory;
import reactor.core.publisher.Mono;
import reactor.core.publisher.Sinks;
@@ -35,7 +35,7 @@ import reactor.core.scheduler.Schedulers;
public class CachedIndexSearcherManager implements IndexSearcherManager {
private static final Logger logger = LoggerFactory.getLogger(CachedIndexSearcherManager.class);
private static final Logger logger = LogManager.getLogger(CachedIndexSearcherManager.class);
private final Executor SEARCH_EXECUTOR = Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors(),
new ShortNamedThreadFactory("lucene-search").withGroup(new ThreadGroup("lucene-search")));
private final SearcherFactory SEARCHER_FACTORY = new ExecutorSearcherFactory(SEARCH_EXECUTOR);

LLIndexSearcher.java
View File

@@ -3,14 +3,14 @@ package it.cavallium.dbengine.database.disk;
import io.net5.buffer.api.Drop;
import io.net5.buffer.api.Owned;
import io.net5.buffer.api.internal.ResourceSupport;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.search.IndexSearcher;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
public class LLIndexSearcher extends ResourceSupport<LLIndexSearcher, LLIndexSearcher> {
private static final Logger logger = LoggerFactory.getLogger(LLIndexSearcher.class);
private static final Logger logger = LogManager.getLogger(LLIndexSearcher.class);
private static final Drop<LLIndexSearcher> DROP = new Drop<>() {
@Override

LLIndexSearchers.java
View File

@@ -12,11 +12,11 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.MultiReader;
import org.apache.lucene.search.IndexSearcher;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
public interface LLIndexSearchers extends Resource<LLIndexSearchers> {
@@ -37,7 +37,7 @@ public interface LLIndexSearchers extends Resource<LLIndexSearchers> {
class UnshardedIndexSearchers extends ResourceSupport<LLIndexSearchers, UnshardedIndexSearchers>
implements LLIndexSearchers {
private static final Logger logger = LoggerFactory.getLogger(UnshardedIndexSearchers.class);
private static final Logger logger = LogManager.getLogger(UnshardedIndexSearchers.class);
private static final Drop<UnshardedIndexSearchers> DROP = new Drop<>() {
@Override
@@ -128,7 +128,7 @@ public interface LLIndexSearchers extends Resource<LLIndexSearchers> {
class ShardedIndexSearchers extends ResourceSupport<LLIndexSearchers, ShardedIndexSearchers>
implements LLIndexSearchers {
private static final Logger logger = LoggerFactory.getLogger(ShardedIndexSearchers.class);
private static final Logger logger = LogManager.getLogger(ShardedIndexSearchers.class);
private static final Drop<ShardedIndexSearchers> DROP = new Drop<>() {
@Override

LLLocalDictionary.java
View File

@@ -5,6 +5,7 @@ import static io.net5.buffer.api.StandardAllocationTypes.OFF_HEAP;
import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
import static it.cavallium.dbengine.database.LLUtils.asReadOnlyDirect;
import static it.cavallium.dbengine.database.LLUtils.fromByteArray;
import static it.cavallium.dbengine.database.LLUtils.toStringSafe;
import static java.util.Objects.requireNonNull;
import static java.util.Objects.requireNonNullElse;
@@ -33,15 +34,20 @@ import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.AbstractSlice;
@@ -57,8 +63,6 @@ import org.rocksdb.Slice;
import org.rocksdb.Snapshot;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
@@ -70,7 +74,7 @@ import reactor.util.function.Tuples;
public class LLLocalDictionary implements LLDictionary {
protected static final Logger logger = LoggerFactory.getLogger(LLLocalDictionary.class);
protected static final Logger logger = LogManager.getLogger(LLLocalDictionary.class);
private static final boolean USE_CURRENT_FASTSIZE_FOR_OLD_SNAPSHOTS = false;
static final int RESERVED_WRITE_BATCH_SIZE = 2 * 1024 * 1024; // 2MiB
static final long MAX_WRITE_BATCH_SIZE = 1024L * 1024L * 1024L; // 1GiB
@@ -197,145 +201,151 @@ public class LLLocalDictionary implements LLDictionary {
public Mono<Send<Buffer>> get(@Nullable LLSnapshot snapshot,
Mono<Send<Buffer>> keyMono,
boolean existsAlmostCertainly) {
return Mono.usingWhen(keyMono,
keySend -> runOnDb(() -> {
return keyMono
.publishOn(Schedulers.boundedElastic())
.<Send<Buffer>>handle((keySend, sink) -> {
try (var key = keySend.receive()) {
try {
Buffer logKey;
if (logger.isTraceEnabled(MARKER_ROCKSDB)) {
logKey = key.copy();
var readOptions = requireNonNullElse(resolveSnapshot(snapshot), EMPTY_READ_OPTIONS);
var result = db.get(readOptions, key, existsAlmostCertainly);
logger.trace(MARKER_ROCKSDB, "Read {}: {}", () -> toStringSafe(key), () -> toStringSafe(result));
if (result != null) {
sink.next(result.send());
} else {
logKey = null;
}
try (logKey) {
var readOptions = requireNonNullElse(resolveSnapshot(snapshot), EMPTY_READ_OPTIONS);
var result = db.get(readOptions, key, existsAlmostCertainly);
if (logger.isTraceEnabled(MARKER_ROCKSDB)) {
logger.trace(MARKER_ROCKSDB, "Reading {}: {}", LLUtils.toStringSafe(logKey),
LLUtils.toString(result));
return result == null ? null : result.send();
} else {
return result == null ? null : result.send();
}
sink.complete();
}
} catch (Exception ex) {
throw new IOException("Failed to read " + LLUtils.toStringSafe(key), ex);
sink.error(new IOException("Failed to read " + toStringSafe(key), ex));
}
}
}).onErrorMap(cause -> new IOException("Failed to read", cause)),
keySend -> Mono.fromRunnable(keySend::close)
);
})
.onErrorMap(cause -> new IOException("Failed to read", cause));
}
@Override
public Mono<Boolean> isRangeEmpty(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> rangeMono) {
return Mono.usingWhen(rangeMono,
rangeSend -> {
return rangeMono
.publishOn(Schedulers.boundedElastic())
.<Boolean>handle((rangeSend, sink) -> {
try (var range = rangeSend.receive()) {
if (range.isSingle()) {
return this.containsKey(snapshot, Mono.fromCallable(range::getSingle));
} else {
return this.containsRange(snapshot, rangeMono);
}
sink.next(containsRange(snapshot, range));
} catch (Throwable ex) {
sink.error(ex);
}
},
rangeSend -> Mono.fromRunnable(rangeSend::close)
).map(isContained -> !isContained);
})
.map(isContained -> !isContained);
}
public Mono<Boolean> containsRange(@Nullable LLSnapshot snapshot, Mono<Send<LLRange>> rangeMono) {
return Mono.usingWhen(rangeMono,
rangeSend -> runOnDb(() -> {
// Temporary resources to release after finished
AbstractSlice<?> slice1 = null;
AbstractSlice<?> slice2 = null;
try (var range = rangeSend.receive()) {
if (Schedulers.isInNonBlockingThread()) {
throw new UnsupportedOperationException("Called containsRange in a nonblocking thread");
}
try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) {
readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED);
readOpts.setFillCache(false);
if (range.hasMin()) {
var rangeMinInternalByteBuffer = asReadOnlyDirect(range.getMinUnsafe());
if (nettyDirect && rangeMinInternalByteBuffer != null) {
readOpts.setIterateLowerBound(slice1 = new DirectSlice(rangeMinInternalByteBuffer,
range.getMinUnsafe().readableBytes()));
} else {
readOpts.setIterateLowerBound(slice1 = new Slice(LLUtils.toArray(range.getMinUnsafe())));
}
}
if (range.hasMax()) {
var rangeMaxInternalByteBuffer = asReadOnlyDirect(range.getMaxUnsafe());
if (nettyDirect && rangeMaxInternalByteBuffer != null) {
readOpts.setIterateUpperBound(slice2 = new DirectSlice(rangeMaxInternalByteBuffer,
range.getMaxUnsafe().readableBytes()));
} else {
readOpts.setIterateUpperBound(slice2 = new Slice(LLUtils.toArray(range.getMaxUnsafe())));
}
}
try (RocksIterator rocksIterator = db.newIterator(readOpts)) {
if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) {
var rangeMinInternalByteBuffer = asReadOnlyDirect(range.getMinUnsafe());
if (nettyDirect && rangeMinInternalByteBuffer != null) {
rocksIterator.seek(rangeMinInternalByteBuffer);
} else {
rocksIterator.seek(LLUtils.toArray(range.getMinUnsafe()));
}
} else {
rocksIterator.seekToFirst();
}
rocksIterator.status();
return rocksIterator.isValid();
}
}
} finally {
if (slice1 != null) slice1.close();
if (slice2 != null) slice2.close();
public boolean containsRange(@Nullable LLSnapshot snapshot, LLRange range) throws RocksDBException {
assert !Schedulers.isInNonBlockingThread() : "Called containsRange in a nonblocking thread";
if (range.isSingle()) {
var unmodifiableReadOpts = resolveSnapshot(snapshot);
return db.exists(unmodifiableReadOpts, range.getSingleUnsafe());
} else {
// Temporary resources to release after finished
AbstractSlice<?> slice1 = null;
AbstractSlice<?> slice2 = null;
try (var readOpts = new ReadOptions(resolveSnapshot(snapshot))) {
readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED);
readOpts.setFillCache(false);
if (range.hasMin()) {
var rangeMinInternalByteBuffer = asReadOnlyDirect(range.getMinUnsafe());
if (nettyDirect && rangeMinInternalByteBuffer != null) {
readOpts.setIterateLowerBound(slice1 = new DirectSlice(rangeMinInternalByteBuffer,
range.getMinUnsafe().readableBytes()));
} else {
readOpts.setIterateLowerBound(slice1 = new Slice(LLUtils.toArray(range.getMinUnsafe())));
}
}).onErrorMap(cause -> new IOException("Failed to read range", cause)),
rangeSend -> Mono.fromRunnable(rangeSend::close));
}
if (range.hasMax()) {
var rangeMaxInternalByteBuffer = asReadOnlyDirect(range.getMaxUnsafe());
if (nettyDirect && rangeMaxInternalByteBuffer != null) {
readOpts.setIterateUpperBound(slice2 = new DirectSlice(rangeMaxInternalByteBuffer,
range.getMaxUnsafe().readableBytes()));
} else {
readOpts.setIterateUpperBound(slice2 = new Slice(LLUtils.toArray(range.getMaxUnsafe())));
}
}
try (RocksIterator rocksIterator = db.newIterator(readOpts)) {
if (!LLLocalDictionary.PREFER_SEEK_TO_FIRST && range.hasMin()) {
var rangeMinInternalByteBuffer = asReadOnlyDirect(range.getMinUnsafe());
if (nettyDirect && rangeMinInternalByteBuffer != null) {
rocksIterator.seek(rangeMinInternalByteBuffer);
} else {
rocksIterator.seek(LLUtils.toArray(range.getMinUnsafe()));
}
} else {
rocksIterator.seekToFirst();
}
rocksIterator.status();
return rocksIterator.isValid();
}
} finally {
if (slice1 != null) slice1.close();
if (slice2 != null) slice2.close();
}
}
}
private Mono<Boolean> containsKey(@Nullable LLSnapshot snapshot, Mono<Send<Buffer>> keyMono) {
return Mono.usingWhen(keyMono,
keySend -> runOnDb(() -> {
var unmodifiableReadOpts = resolveSnapshot(snapshot);
return keyMono
.publishOn(Schedulers.boundedElastic())
.handle((keySend, sink) -> {
try (var key = keySend.receive()) {
return db.exists(unmodifiableReadOpts, key);
sink.next(containsKey(snapshot, key));
} catch (Throwable ex) {
sink.error(ex);
}
}).onErrorMap(cause -> new IOException("Failed to read", cause)),
keySend -> Mono.fromRunnable(keySend::close)
);
});
}
private boolean containsKey(@Nullable LLSnapshot snapshot, Buffer key) throws RocksDBException {
var unmodifiableReadOpts = resolveSnapshot(snapshot);
return db.exists(unmodifiableReadOpts, key);
}
@Override
public Mono<Send<Buffer>> put(Mono<Send<Buffer>> keyMono,
Mono<Send<Buffer>> valueMono,
public Mono<Send<Buffer>> put(Mono<Send<Buffer>> keyMono, Mono<Send<Buffer>> valueMono,
LLDictionaryResultType resultType) {
return Mono.usingWhen(keyMono,
keySend -> this
.getPreviousData(keyMono, resultType, false)
.concatWith(Mono.usingWhen(valueMono,
valueSend -> this.<Send<Buffer>>runOnDb(() -> {
try (var key = keySend.receive()) {
try (var value = valueSend.receive()) {
assert key.isAccessible();
assert value.isAccessible();
if (logger.isTraceEnabled()) {
logger.trace(MARKER_ROCKSDB, "Writing {}: {}",
LLUtils.toStringSafe(key), LLUtils.toStringSafe(value));
}
db.put(EMPTY_WRITE_OPTIONS, key, value);
return null;
}
}
}),
value -> Mono.fromRunnable(value::close)
).onErrorMap(cause -> new IOException("Failed to write", cause)))
.singleOrEmpty(),
keySend -> Mono.fromRunnable(keySend::close)
);
// Zip the entry to write to the database
var entryMono = Mono.zip(keyMono, valueMono, Map::entry);
// Obtain the previous value from the database
var previousDataMono = this.getPreviousData(keyMono, resultType, false);
// Write the new entry to the database
var putMono = entryMono
.publishOn(Schedulers.boundedElastic())
.<Void>handle((entry, sink) -> {
try (var key = entry.getKey().receive()) {
try (var value = entry.getValue().receive()) {
assert key.isAccessible();
assert value.isAccessible();
logger.trace(MARKER_ROCKSDB,
"Writing {}: {}",
(Supplier<String>) () -> toStringSafe(key),
(Supplier<String>) () -> toStringSafe(value)
);
db.put(EMPTY_WRITE_OPTIONS, key, value);
sink.complete();
}
} catch (Throwable ex) {
sink.error(ex);
}
});
// Read the previous data, then write the new data, then return the previous data
return Flux
.concat(previousDataMono, putMono.then(Mono.empty()))
.singleOrEmpty()
// Clean discarded elements
.doOnDiscard(Send.class, Send::close)
.doOnDiscard(Entry.class, entry -> {
if (entry.getKey() instanceof SafeCloseable safeCloseable) {
safeCloseable.close();
}
if (entry.getValue() instanceof SafeCloseable safeCloseable) {
safeCloseable.close();
}
})
.onErrorMap(cause -> new IOException("Failed to write", cause));
}
@Override
@@ -414,7 +424,7 @@ public class LLLocalDictionary implements LLDictionary {
.<Send<Buffer>>runOnDb(() -> {
try (var key = keySend.receive()) {
if (logger.isTraceEnabled()) {
logger.trace(MARKER_ROCKSDB, "Deleting {}", LLUtils.toStringSafe(key));
logger.trace(MARKER_ROCKSDB, "Deleting {}", toStringSafe(key));
db.delete(EMPTY_WRITE_OPTIONS, key);
} else {
db.delete(EMPTY_WRITE_OPTIONS, key);
@@ -446,9 +456,9 @@ public class LLLocalDictionary implements LLDictionary {
}
if (logger.isTraceEnabled()) {
var keyString = LLUtils.toStringSafe(key);
var keyString = toStringSafe(key);
var result = db.get(EMPTY_READ_OPTIONS, key, existsAlmostCertainly);
logger.trace(MARKER_ROCKSDB, "Reading {}: {}", keyString, LLUtils.toStringSafe(result));
logger.trace(MARKER_ROCKSDB, "Reading {}: {}", keyString, toStringSafe(result));
return result == null ? null : result.send();
} else {
var result = db.get(EMPTY_READ_OPTIONS, key, existsAlmostCertainly);
@@ -1237,10 +1247,10 @@ public class LLLocalDictionary implements LLDictionary {
return new SimpleSliceWithoutRelease(new Slice(arr), null, arr);
}
public static record SimpleSliceWithoutRelease(AbstractSlice<?> slice, @Nullable Buffer byteBuf,
public record SimpleSliceWithoutRelease(AbstractSlice<?> slice, @Nullable Buffer byteBuf,
@Nullable Object additionalData) implements ReleasableSlice {}
public static record ReleasableSliceImpl(AbstractSlice<?> slice, @Nullable Buffer byteBuf,
public record ReleasableSliceImpl(AbstractSlice<?> slice, @Nullable Buffer byteBuf,
@Nullable Object additionalData) implements ReleasableSlice {
@Override
@@ -1600,7 +1610,7 @@ public class LLLocalDictionary implements LLDictionary {
}
})
.map(commonPool::submit)
.collect(Collectors.toList());
.toList();
long count = 0;
for (ForkJoinTask<Long> future : futures) {
count += future.join();
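A recurring shape in the LLLocalDictionary rewrite above: Mono.usingWhen plus a runOnDb callable becomes publishOn(Schedulers.boundedElastic()) followed by handle, with the actual RocksDB work extracted into a plain blocking method (containsRange, containsKey). handle is used instead of map because the callback may emit zero values, so a missing key completes the Mono empty rather than failing on null. A sketch under stated assumptions: the HashMap lookup and all names here are illustrative, not the project's API.

import java.io.IOException;
import java.util.Map;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;

public class HandleSketch {

	private final Map<String, String> db = Map.of("k1", "v1");

	public Mono<String> get(Mono<String> keyMono) {
		return keyMono
				.publishOn(Schedulers.boundedElastic()) // move blocking work off the event loop
				.<String>handle((key, sink) -> {
					try {
						String value = db.get(key); // blocking read in the real code
						if (value != null) {
							sink.next(value);
						}
						// emitting nothing completes the Mono empty
					} catch (Exception ex) {
						sink.error(new IOException("Failed to read " + key, ex));
					}
				})
				.onErrorMap(cause -> new IOException("Failed to read", cause));
	}
}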

LLLocalGroupedReactiveRocksIterator.java
View File

@@ -11,17 +11,17 @@ import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import it.unimi.dsi.fastutil.objects.ObjectArrayList;
import java.util.List;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDBException;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
public abstract class LLLocalGroupedReactiveRocksIterator<T> extends
ResourceSupport<LLLocalGroupedReactiveRocksIterator<T>, LLLocalGroupedReactiveRocksIterator<T>> {
protected static final Logger logger = LoggerFactory.getLogger(LLLocalGroupedReactiveRocksIterator.class);
protected static final Logger logger = LogManager.getLogger(LLLocalGroupedReactiveRocksIterator.class);
private static final Drop<LLLocalGroupedReactiveRocksIterator<?>> DROP = new Drop<>() {
@Override
public void drop(LLLocalGroupedReactiveRocksIterator<?> obj) {

LLLocalKeyPrefixReactiveRocksIterator.java
View File

@@ -10,19 +10,19 @@ import io.net5.buffer.api.Send;
import io.net5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
public class LLLocalKeyPrefixReactiveRocksIterator extends
ResourceSupport<LLLocalKeyPrefixReactiveRocksIterator, LLLocalKeyPrefixReactiveRocksIterator> {
protected static final Logger logger = LoggerFactory.getLogger(LLLocalKeyPrefixReactiveRocksIterator.class);
protected static final Logger logger = LogManager.getLogger(LLLocalKeyPrefixReactiveRocksIterator.class);
private static final Drop<LLLocalKeyPrefixReactiveRocksIterator> DROP = new Drop<>() {
@Override
public void drop(LLLocalKeyPrefixReactiveRocksIterator obj) {

LLLocalKeyValueDatabase.java
View File

@@ -32,6 +32,8 @@ import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.lang3.time.StopWatch;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.BloomFilter;
@@ -58,8 +60,6 @@ import org.rocksdb.TransactionDB;
import org.rocksdb.TransactionDBOptions;
import org.rocksdb.WALRecoveryMode;
import org.rocksdb.WriteBufferManager;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
@@ -70,7 +70,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
RocksDB.loadLibrary();
}
protected static final Logger logger = LoggerFactory.getLogger(LLLocalKeyValueDatabase.class);
protected static final Logger logger = LogManager.getLogger(LLLocalKeyValueDatabase.class);
private static final ColumnFamilyDescriptor DEFAULT_COLUMN_FAMILY = new ColumnFamilyDescriptor(
RocksDB.DEFAULT_COLUMN_FAMILY);

LLLocalLuceneIndex.java
View File

@@ -50,6 +50,8 @@ import java.util.concurrent.Executors;
import java.util.concurrent.Phaser;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.miscellaneous.PerFieldAnalyzerWrapper;
import org.apache.lucene.index.ConcurrentMergeScheduler;
import org.apache.lucene.index.IndexWriter;
@@ -72,8 +74,6 @@ import org.apache.lucene.util.Constants;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.functional.IORunnable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import org.warp.commonutils.type.ShortNamedThreadFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
@@ -83,7 +83,7 @@ import reactor.util.function.Tuple2;
public class LLLocalLuceneIndex implements LLLuceneIndex {
protected static final Logger logger = LoggerFactory.getLogger(LLLocalLuceneIndex.class);
protected static final Logger logger = LogManager.getLogger(LLLocalLuceneIndex.class);
private final LocalSearcher localSearcher;
private final DecimalBucketMultiSearcher decimalBucketMultiSearcher = new DecimalBucketMultiSearcher();
/**

LLLocalReactiveRocksIterator.java
View File

@@ -10,17 +10,17 @@ import io.net5.buffer.api.Send;
import io.net5.buffer.api.internal.ResourceSupport;
import it.cavallium.dbengine.database.LLRange;
import it.cavallium.dbengine.database.LLUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDBException;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
public abstract class LLLocalReactiveRocksIterator<T> extends
ResourceSupport<LLLocalReactiveRocksIterator<T>, LLLocalReactiveRocksIterator<T>> {
protected static final Logger logger = LoggerFactory.getLogger(LLLocalReactiveRocksIterator.class);
protected static final Logger logger = LogManager.getLogger(LLLocalReactiveRocksIterator.class);
private static final Drop<LLLocalReactiveRocksIterator<?>> DROP = new Drop<>() {
@Override
public void drop(LLLocalReactiveRocksIterator<?> obj) {

LuceneUtils.java
View File

@@ -37,6 +37,8 @@ import java.util.Map.Entry;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.LowerCaseFilter;
import org.apache.lucene.analysis.TokenStream;
@@ -75,8 +77,6 @@ import org.novasearch.lucene.search.similarities.RobertsonSimilarity;
import org.novasearch.lucene.search.similarities.LdpSimilarity;
import org.novasearch.lucene.search.similarities.LtcSimilarity;
import org.novasearch.lucene.search.similarities.RobertsonSimilarity;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
@@ -85,7 +85,7 @@ import reactor.util.function.Tuple2;
public class LuceneUtils {
private static final Logger logger = LoggerFactory.getLogger(LuceneUtils.class);
private static final Logger logger = LogManager.getLogger(LuceneUtils.class);
private static final Analyzer lucene4GramWordsAnalyzerEdgeInstance = new NCharGramEdgeAnalyzer(true, 4, 4);
private static final Analyzer lucene4GramStringAnalyzerEdgeInstance = new NCharGramEdgeAnalyzer(false, 4, 4);

DecimalBucketMultiSearcher.java
View File

@@ -6,18 +6,18 @@ import it.cavallium.dbengine.database.disk.LLIndexSearchers;
import it.cavallium.dbengine.lucene.collector.Buckets;
import it.cavallium.dbengine.lucene.collector.DecimalBucketMultiCollectorManager;
import java.util.List;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public class DecimalBucketMultiSearcher {
protected static final Logger logger = LoggerFactory.getLogger(DecimalBucketMultiSearcher.class);
protected static final Logger logger = LogManager.getLogger(DecimalBucketMultiSearcher.class);
public Mono<Buckets> collectMulti(Mono<Send<LLIndexSearchers>> indexSearchersMono,
BucketParams bucketParams,

LuceneSearchResult.java
View File

@@ -6,13 +6,13 @@ import it.cavallium.dbengine.client.query.current.data.TotalHitsCount;
import it.cavallium.dbengine.database.LLKeyScore;
import io.net5.buffer.api.internal.ResourceSupport;
import java.util.Objects;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import reactor.core.publisher.Flux;
public final class LuceneSearchResult extends ResourceSupport<LuceneSearchResult, LuceneSearchResult> {
private static final Logger logger = LoggerFactory.getLogger(LuceneSearchResult.class);
private static final Logger logger = LogManager.getLogger(LuceneSearchResult.class);
private static final Drop<LuceneSearchResult> DROP = new Drop<>() {
@Override

OfficialSearcher.java
View File

@@ -8,19 +8,19 @@ import it.cavallium.dbengine.database.disk.LLTempLMDBEnv;
import it.cavallium.dbengine.lucene.LuceneUtils;
import it.cavallium.dbengine.lucene.searcher.LLSearchTransformer.TransformerInput;
import java.util.List;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.TimeLimitingCollector;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TopFieldCollector;
import org.apache.lucene.search.TopScoreDocCollector;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public class OfficialSearcher implements MultiSearcher {
protected static final Logger logger = LoggerFactory.getLogger(OfficialSearcher.class);
protected static final Logger logger = LogManager.getLogger(OfficialSearcher.class);
public OfficialSearcher() {
}

ScoredPagedMultiSearcher.java
View File

@@ -14,20 +14,20 @@ import it.cavallium.dbengine.lucene.searcher.LLSearchTransformer.TransformerInput;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.search.FieldDoc;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Sort;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Schedulers;
public class ScoredPagedMultiSearcher implements MultiSearcher {
protected static final Logger logger = LoggerFactory.getLogger(ScoredPagedMultiSearcher.class);
protected static final Logger logger = LogManager.getLogger(ScoredPagedMultiSearcher.class);
public ScoredPagedMultiSearcher() {
}

SortedScoredFullMultiSearcher.java
View File

@@ -10,15 +10,15 @@ import it.cavallium.dbengine.lucene.LLFieldDoc;
import it.cavallium.dbengine.lucene.LuceneUtils;
import it.cavallium.dbengine.lucene.collector.LMDBFullFieldDocCollector;
import it.cavallium.dbengine.lucene.searcher.LLSearchTransformer.TransformerInput;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.search.IndexSearcher;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public class SortedScoredFullMultiSearcher implements MultiSearcher {
protected static final Logger logger = LoggerFactory.getLogger(SortedScoredFullMultiSearcher.class);
protected static final Logger logger = LogManager.getLogger(SortedScoredFullMultiSearcher.class);
private final LLTempLMDBEnv env;

UnsortedScoredFullMultiSearcher.java
View File

@@ -10,15 +10,15 @@ import it.cavallium.dbengine.lucene.FullDocs;
import it.cavallium.dbengine.lucene.LLScoreDoc;
import it.cavallium.dbengine.lucene.collector.LMDBFullScoreDocCollector;
import it.cavallium.dbengine.lucene.searcher.LLSearchTransformer.TransformerInput;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.lucene.search.IndexSearcher;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
public class UnsortedScoredFullMultiSearcher implements MultiSearcher {
protected static final Logger logger = LoggerFactory.getLogger(UnsortedScoredFullMultiSearcher.class);
protected static final Logger logger = LogManager.getLogger(UnsortedScoredFullMultiSearcher.class);
private final LLTempLMDBEnv env;

NullableBuffer.java
View File

@@ -5,13 +5,13 @@ import io.net5.buffer.api.Drop;
import io.net5.buffer.api.Owned;
import io.net5.buffer.api.Send;
import io.net5.buffer.api.internal.ResourceSupport;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.jetbrains.annotations.Nullable;
import org.warp.commonutils.log.Logger;
import org.warp.commonutils.log.LoggerFactory;
public class NullableBuffer extends ResourceSupport<NullableBuffer, NullableBuffer> {
private static final Logger logger = LoggerFactory.getLogger(NullableBuffer.class);
private static final Logger logger = LogManager.getLogger(NullableBuffer.class);
private static final Drop<NullableBuffer> DROP = new Drop<>() {
@Override

View File

@@ -3,7 +3,7 @@
xmlns="http://logging.apache.org/log4j/2.0/config"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://logging.apache.org/log4j/2.0/config
https://raw.githubusercontent.com/apache/logging-log4j2/log4j-2.15.0/log4j-core/src/main/resources/Log4j-config.xsd"
https://raw.githubusercontent.com/apache/logging-log4j2/log4j-2.16.0/log4j-core/src/main/resources/Log4j-config.xsd"
status="ALL">
<Appenders>
<Console name="Console" target="SYSTEM_OUT">