Java modules

parent 95d436860f
commit 6315175dc4
@@ -245,6 +245,7 @@ versions:
      optimistic: boolean
      maxOpenFiles: -int
      blockCache: -long
      compressedBlockCache: -long
      spinning: boolean
      defaultColumnOptions: DefaultColumnOptions
      columnOptions: NamedColumnOptions[]
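Note: the hunk above adds the `spinning` flag to the options schema; the `-int`/`-long` fields generate nullable accessors. A minimal sketch of reading these options, assuming the generated accessor names used in later hunks of this commit (`spinning()`, `maxOpenFiles()`, `blockCache()`); `getDatabaseOptions()` is hypothetical:

    // Hypothetical options instance; accessor names are taken from later hunks.
    DatabaseOptions opts = getDatabaseOptions();
    boolean spinning = opts.spinning();                // new flag added above
    int maxOpenFiles = opts.maxOpenFiles().orElse(-1); // "-int" maps to a nullable int
    long blockCacheSize = opts.blockCache().orElse(8L * SizeUnit.MB);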
@@ -1,60 +0,0 @@
package io.netty5.buffer.api.pool;

import java.util.List;

public class PooledBufferAllocatorMetricUtils implements BufferAllocatorMetric {

    private final PooledBufferAllocator allocator;

    @SuppressWarnings("RedundantThrows")
    public PooledBufferAllocatorMetricUtils(PooledBufferAllocator allocator) throws Throwable {
        this.allocator = allocator;
    }

    /**
     * Return the number of arenas.
     */
    public int numArenas() {
        return allocator.numArenas();
    }

    /**
     * Return a {@link List} of all {@link PoolArenaMetric}s that are provided by this pool.
     */
    public List<PoolArenaMetric> arenaMetrics() {
        return allocator.arenaMetrics();
    }

    /**
     * Return the number of thread local caches used by this {@link PooledBufferAllocator}.
     */
    public int numThreadLocalCaches() {
        return allocator.numThreadLocalCaches();
    }

    /**
     * Return the size of the small cache.
     */
    public int smallCacheSize() {
        return allocator.smallCacheSize();
    }

    /**
     * Return the size of the normal cache.
     */
    public int normalCacheSize() {
        return allocator.normalCacheSize();
    }

    /**
     * Return the chunk size for an arena.
     */
    public int chunkSize() {
        return allocator.chunkSize();
    }

    @Override
    public long usedMemory() {
        return allocator.usedMemory();
    }
}
@@ -1,5 +1,7 @@
package io.netty5.buffer.api.pool;
package it.cavallium.dbengine;

import io.netty5.buffer.api.pool.PoolArenaMetric;
import io.netty5.buffer.api.pool.PooledBufferAllocator;
import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;
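Note: the deleted PooledBufferAllocatorMetricUtils above could only read Netty's pool metrics because it was declared inside io.netty5.buffer.api.pool; with the new module-info that trick no longer works. The java.lang.invoke imports suggest the renamed it.cavallium.dbengine.MetricUtils reaches the same methods through method handles instead. A minimal sketch of that approach, assuming `arenaMetrics()` is resolvable by the lookup (the method and field names here are illustrative, not the actual implementation; `java.util.List` must also be imported):

    public class MetricUtils {

        private static final MethodHandle GET_ARENA_METRICS = resolveHandle();

        private static MethodHandle resolveHandle() {
            try {
                // Resolve the metrics accessor once at class-load time instead of
                // declaring this class inside io.netty5.buffer.api.pool.
                return MethodHandles.lookup().findVirtual(PooledBufferAllocator.class,
                        "arenaMetrics", MethodType.methodType(List.class));
            } catch (NoSuchMethodException | IllegalAccessException e) {
                return null; // metrics simply become unavailable
            }
        }

        @SuppressWarnings("unchecked")
        public static List<PoolArenaMetric> getPoolArenaMetrics(PooledBufferAllocator allocator) {
            if (GET_ARENA_METRICS == null) {
                return List.of();
            }
            try {
                return (List<PoolArenaMetric>) GET_ARENA_METRICS.invoke(allocator);
            } catch (Throwable e) {
                return List.of();
            }
        }
    }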
@@ -43,6 +43,7 @@ public class DefaultDatabaseOptions {
        true,
        Nullableint.empty(),
        Nullablelong.empty(),
        Nullablelong.empty(),
        false,
        DEFAULT_DEFAULT_COLUMN_OPTIONS,
        List.of()
@@ -7,9 +7,11 @@ import io.netty5.buffer.api.BufferAllocator;
import it.cavallium.dbengine.client.MemoryStats;
import it.cavallium.dbengine.database.collections.DatabaseInt;
import it.cavallium.dbengine.database.collections.DatabaseLong;
import it.cavallium.dbengine.database.collections.DatabaseSingleton;
import java.nio.charset.StandardCharsets;
import java.util.Map.Entry;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.TableProperties;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public interface LLKeyValueDatabase extends LLSnapshottable, LLKeyValueDatabaseStructure {
@@ -56,6 +58,10 @@ public interface LLKeyValueDatabase extends LLSnapshottable, LLKeyValueDatabaseS

    Mono<MemoryStats> getMemoryStats();

    Mono<String> getRocksDBStats();

    Flux<TableWithProperties> getTableProperties();

    Mono<Void> verifyChecksum();

    BufferAllocator getAllocator();
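Note: a short sketch of consuming the new reactive accessors declared above (the printing is illustrative):

    static void logDatabaseInfo(LLKeyValueDatabase db) {
        // Mono<String>: completes empty if stats are unavailable
        db.getRocksDBStats().doOnNext(System.out::println).block();
        // Flux<TableWithProperties>: one element per SST table
        db.getTableProperties()
                .doOnNext(table -> System.out.println(table.name() + " -> " + table.properties()))
                .blockLast();
    }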
@@ -0,0 +1,5 @@
package it.cavallium.dbengine.database;

import org.rocksdb.TableProperties;

public record TableWithProperties(String name, TableProperties properties) {}
@@ -1,4 +1,4 @@
package org.rocksdb;
package it.cavallium.dbengine.database.disk;

import static it.cavallium.dbengine.database.LLUtils.isDirect;
import static it.cavallium.dbengine.database.LLUtils.isReadOnlyDirect;
@@ -13,6 +13,10 @@ import it.cavallium.dbengine.database.disk.RocksDBColumn;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;

public class CappedWriteBatch extends WriteBatch {
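Note: CappedWriteBatch previously sat in org.rocksdb to reach package-private internals; after the move it can rely only on the public WriteBatch API. A minimal sketch of that public surface (illustrative, not the actual CappedWriteBatch code; `db` and `handle` are assumed to exist):

    try (WriteBatch batch = new WriteBatch(); WriteOptions writeOptions = new WriteOptions()) {
        batch.put(handle, "key".getBytes(StandardCharsets.US_ASCII),
                "value".getBytes(StandardCharsets.US_ASCII));
        db.write(writeOptions, batch); // a capped batch would flush here once full
    } catch (RocksDBException e) {
        throw new IllegalStateException(e);
    }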
@@ -48,7 +48,6 @@ import org.apache.logging.log4j.util.Supplier;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.AbstractSlice;
import org.rocksdb.CappedWriteBatch;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.CompactRangeOptions;
import org.rocksdb.DirectSlice;
@@ -14,6 +14,7 @@ import it.cavallium.dbengine.database.ColumnUtils;
import it.cavallium.dbengine.database.LLKeyValueDatabase;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.TableWithProperties;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.rpc.current.data.Column;
import it.cavallium.dbengine.rpc.current.data.ColumnOptions;
@@ -36,6 +37,7 @@ import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ThreadLocalRandom;
@@ -67,6 +69,7 @@ import org.rocksdb.OptimisticTransactionDB;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.Snapshot;
import org.rocksdb.TableProperties;
import org.rocksdb.TransactionDB;
import org.rocksdb.TransactionDBOptions;
import org.rocksdb.TxnDBWritePolicy;
@@ -74,9 +77,11 @@ import org.rocksdb.WALRecoveryMode;
import org.rocksdb.WriteBufferManager;
import org.rocksdb.util.SizeUnit;
import org.warp.commonutils.type.ShortNamedThreadFactory;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.core.scheduler.Scheduler;
import reactor.core.scheduler.Schedulers;
import reactor.util.function.Tuple2;

public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
@@ -180,7 +185,8 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
        }

        // https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
        columnFamilyOptions.setMaxBytesForLevelBase((databaseOptions.spinning() ? 512 : 256) * SizeUnit.MB);
        // https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
        columnFamilyOptions.setMaxBytesForLevelBase((databaseOptions.spinning() ? 1024 : 256) * SizeUnit.MB);
        // https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
        columnFamilyOptions.setMaxBytesForLevelMultiplier(10);
        // https://www.arangodb.com/docs/stable/programs-arangod-rocksdb.html
@@ -247,6 +253,10 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
        if (databaseOptions.spinning()) {
            // https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
            cacheIndexAndFilterBlocks = true;
            // https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
            columnFamilyOptions.setMinWriteBufferNumberToMerge(3);
            // https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
            columnFamilyOptions.setMaxWriteBufferNumber(4);
        }
        tableOptions
                // https://github.com/facebook/rocksdb/wiki/Partitioned-Index-Filters
@@ -264,7 +274,8 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
                .setBlockCache(optionsWithCache.standardCache())
                // Spinning disks: 64KiB to 256KiB (also 512KiB). SSDs: 16KiB
                // https://github.com/facebook/rocksdb/wiki/Tuning-RocksDB-on-Spinning-Disks
                .setBlockSize((databaseOptions.spinning() ? 256 : 16) * SizeUnit.KB);
                // https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
                .setBlockSize((databaseOptions.spinning() ? 128 : 16) * SizeUnit.KB);

        columnFamilyOptions.setTableFormatConfig(tableOptions);
        columnFamilyOptions.setCompactionPriority(CompactionPriority.MinOverlappingRatio);
@@ -283,10 +294,11 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
        // // Increasing this value can reduce the frequency of compaction and reduce write amplification,
        // // but it will also cause old data to be unable to be cleaned up in time, thus increasing read amplification.
        // // This parameter is not easy to adjust. It is generally not recommended to set it above 256MB.
        // columnOptions.setTargetFileSizeBase((databaseOptions.spinning() ? 128 : 64) * SizeUnit.MB);
        // https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
        columnFamilyOptions.setTargetFileSizeBase((databaseOptions.spinning() ? 256 : 64) * SizeUnit.MB);
        // // For each level up, the threshold is multiplied by the factor target_file_size_multiplier
        // // (but the default value is 1, which means that the maximum sstable of each level is the same).
        // columnOptions.setTargetFileSizeMultiplier(1);
        columnFamilyOptions.setTargetFileSizeMultiplier(1);

        descriptors.add(new ColumnFamilyDescriptor(column.name().getBytes(StandardCharsets.US_ASCII), columnFamilyOptions));
    }
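Note: taken together, the hunks above retune the column families for spinning disks. A consolidated sketch of the resulting settings (values copied from this diff; `columnFamilyOptions` and `tableOptions` as in the surrounding code):

    boolean spinning = databaseOptions.spinning();
    columnFamilyOptions
            .setMaxBytesForLevelBase((spinning ? 1024 : 256) * SizeUnit.MB)
            .setMaxBytesForLevelMultiplier(10)
            .setTargetFileSizeBase((spinning ? 256 : 64) * SizeUnit.MB)
            .setTargetFileSizeMultiplier(1);
    if (spinning) {
        // merge more memtables before flushing, and allow more of them to queue up
        columnFamilyOptions.setMinWriteBufferNumberToMerge(3);
        columnFamilyOptions.setMaxWriteBufferNumber(4);
    }
    // larger blocks amortize seeks on spinning disks; 16 KiB suits SSDs
    tableOptions.setBlockSize((spinning ? 128 : 16) * SizeUnit.KB);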
@@ -577,6 +589,10 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
        List<DbPath> paths = convertPaths(databasesDirPath, path.getFileName(), databaseOptions.volumes());
        options.setDbPaths(paths);
        options.setMaxOpenFiles(databaseOptions.maxOpenFiles().orElse(-1));
        if (databaseOptions.spinning()) {
            // https://nightlies.apache.org/flink/flink-docs-release-1.3/api/java/org/apache/flink/contrib/streaming/state/PredefinedOptions.html
            options.setUseFsync(false);
        }

        Cache blockCache;
        Cache compressedCache;
@@ -592,7 +608,7 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
                    .setMaxTotalWalSize(0) // automatic
            ;
            blockCache = new ClockCache(databaseOptions.blockCache().orElse(8L * SizeUnit.MB), 6, false);
            compressedCache = null;
            compressedCache = new ClockCache(databaseOptions.compressedBlockCache().orElse(8L * SizeUnit.MB), 6, false);

            if (databaseOptions.spinning()) {
                options
@@ -621,8 +637,8 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
                    .setWalSizeLimitMB(0)
                    .setMaxTotalWalSize(80 * SizeUnit.MB) // 80MiB max wal directory size
            ;
            blockCache = new ClockCache(databaseOptions.blockCache().orElse(512 * SizeUnit.MB) / 2);
            compressedCache = null;
            blockCache = new ClockCache(databaseOptions.blockCache().orElse(512 * SizeUnit.MB), 6, false);
            compressedCache = new ClockCache(databaseOptions.compressedBlockCache().orElse(512 * SizeUnit.MB), 6, false);

            if (databaseOptions.useDirectIO()) {
                options
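Note: both branches now size the caches from the new blockCache/compressedBlockCache options. In the RocksDB Java API the three-argument ClockCache constructor is (capacity, numShardBits, strictCapacityLimit), so a call like the ones above reads as:

    Cache blockCache = new ClockCache(
            databaseOptions.blockCache().orElse(512 * SizeUnit.MB), // capacity in bytes
            6,      // 2^6 = 64 shards, reducing lock contention
            false); // do not fail inserts when the cache is full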
@@ -792,17 +808,53 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
    @Override
    public Mono<MemoryStats> getMemoryStats() {
        return Mono
                .fromCallable(() -> new MemoryStats(db.getAggregatedLongProperty("rocksdb.estimate-table-readers-mem"),
                        db.getAggregatedLongProperty("rocksdb.size-all-mem-tables"),
                        db.getAggregatedLongProperty("rocksdb.cur-size-all-mem-tables"),
                        db.getAggregatedLongProperty("rocksdb.estimate-num-keys"),
                        db.getAggregatedLongProperty("rocksdb.block-cache-usage"),
                        db.getAggregatedLongProperty("rocksdb.block-cache-pinned-usage")
                ))
                .fromCallable(() -> {
                    if (!closed) {
                        return new MemoryStats(db.getAggregatedLongProperty("rocksdb.estimate-table-readers-mem"),
                                db.getAggregatedLongProperty("rocksdb.size-all-mem-tables"),
                                db.getAggregatedLongProperty("rocksdb.cur-size-all-mem-tables"),
                                db.getAggregatedLongProperty("rocksdb.estimate-num-keys"),
                                db.getAggregatedLongProperty("rocksdb.block-cache-usage"),
                                db.getAggregatedLongProperty("rocksdb.block-cache-pinned-usage")
                        );
                    } else {
                        return null;
                    }
                })
                .onErrorMap(cause -> new IOException("Failed to read memory stats", cause))
                .subscribeOn(dbRScheduler);
    }

    @Override
    public Mono<String> getRocksDBStats() {
        return Mono
                .fromCallable(() -> {
                    if (!closed) {
                        return db.getProperty("rocksdb.stats");
                    } else {
                        return null;
                    }
                })
                .onErrorMap(cause -> new IOException("Failed to read stats", cause))
                .subscribeOn(dbRScheduler);
    }

    @Override
    public Flux<TableWithProperties> getTableProperties() {
        return Mono
                .fromCallable(() -> {
                    if (!closed) {
                        return db.getPropertiesOfAllTables();
                    } else {
                        return null;
                    }
                })
                .flatMapIterable(Map::entrySet)
                .map(entry -> new TableWithProperties(entry.getKey(), entry.getValue()))
                .onErrorMap(cause -> new IOException("Failed to read stats", cause))
                .subscribeOn(dbRScheduler);
    }

    @Override
    public Mono<Void> verifyChecksum() {
        return Mono
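Note: Mono.fromCallable treats a null return as an empty Mono, so the new closed checks above make these accessors complete without a value once the database is closed, instead of calling into a destroyed RocksDB handle. A small illustrative consumer:

    db.getMemoryStats()
            .defaultIfEmpty(new MemoryStats(0, 0, 0, 0, 0, 0)) // database already closed
            .subscribe(stats -> System.out.println("memory stats: " + stats));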
@@ -8,6 +8,7 @@ import it.cavallium.dbengine.database.LLKeyValueDatabase;
import it.cavallium.dbengine.database.LLSingleton;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.TableWithProperties;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.rpc.current.data.Column;
import it.unimi.dsi.fastutil.bytes.ByteList;
@@ -17,6 +18,7 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicLong;
import org.jetbrains.annotations.Nullable;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public class LLMemoryKeyValueDatabase implements LLKeyValueDatabase {
@@ -98,6 +100,16 @@ public class LLMemoryKeyValueDatabase implements LLKeyValueDatabase {
        return Mono.just(new MemoryStats(0, 0, 0, 0, 0, 0));
    }

    @Override
    public Mono<String> getRocksDBStats() {
        return Mono.empty();
    }

    @Override
    public Flux<TableWithProperties> getTableProperties() {
        return Flux.empty();
    }

    @Override
    public Mono<Void> verifyChecksum() {
        return Mono.empty();
@@ -21,6 +21,7 @@ import it.cavallium.dbengine.database.LLSingleton;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.LLTerm;
import it.cavallium.dbengine.database.LLUpdateDocument;
import it.cavallium.dbengine.database.TableWithProperties;
import it.cavallium.dbengine.database.UpdateMode;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.disk.BinarySerializationFunction;
@@ -65,6 +66,8 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.DBOptions;
import org.rocksdb.TableProperties;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.netty.incubator.quic.QuicClient;
@@ -328,6 +331,16 @@ public class LLQuicConnection implements LLDatabaseConnection {
        return null;
    }

    @Override
    public Mono<String> getRocksDBStats() {
        return null;
    }

    @Override
    public Flux<TableWithProperties> getTableProperties() {
        return null;
    }

    @Override
    public Mono<Void> verifyChecksum() {
        return null;
@@ -18,7 +18,7 @@ import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.SortedNumericSelector;
import org.apache.lucene.search.comparators.HugePqDocComparator;
import it.cavallium.dbengine.lucene.hugepq.search.comparators.HugePqDocComparator;

public class HugePqComparator {
@@ -0,0 +1,119 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package it.cavallium.dbengine.lucene.hugepq.mirrored;

import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.search.ScoreMode;

/** Used for defining custom algorithms to allow searches to early terminate */
public abstract class HitsThresholdChecker {
    /** Implementation of HitsThresholdChecker which allows global hit counting */
    private static class GlobalHitsThresholdChecker extends HitsThresholdChecker {
        private final int totalHitsThreshold;
        private final AtomicLong globalHitCount;

        public GlobalHitsThresholdChecker(int totalHitsThreshold) {
            if (totalHitsThreshold < 0) {
                throw new IllegalArgumentException(
                        "totalHitsThreshold must be >= 0, got " + totalHitsThreshold);
            }
            this.totalHitsThreshold = totalHitsThreshold;
            this.globalHitCount = new AtomicLong();
        }

        @Override
        public void incrementHitCount() {
            globalHitCount.incrementAndGet();
        }

        @Override
        public boolean isThresholdReached() {
            return globalHitCount.getAcquire() > totalHitsThreshold;
        }

        @Override
        public ScoreMode scoreMode() {
            return totalHitsThreshold == Integer.MAX_VALUE ? ScoreMode.COMPLETE : ScoreMode.TOP_SCORES;
        }

        @Override
        public int getHitsThreshold() {
            return totalHitsThreshold;
        }
    }

    /** Default implementation of HitsThresholdChecker to be used for single threaded execution */
    private static class LocalHitsThresholdChecker extends HitsThresholdChecker {
        private final int totalHitsThreshold;
        private int hitCount;

        public LocalHitsThresholdChecker(int totalHitsThreshold) {
            if (totalHitsThreshold < 0) {
                throw new IllegalArgumentException(
                        "totalHitsThreshold must be >= 0, got " + totalHitsThreshold);
            }
            this.totalHitsThreshold = totalHitsThreshold;
        }

        @Override
        public void incrementHitCount() {
            ++hitCount;
        }

        @Override
        public boolean isThresholdReached() {
            return hitCount > totalHitsThreshold;
        }

        @Override
        public ScoreMode scoreMode() {
            return totalHitsThreshold == Integer.MAX_VALUE ? ScoreMode.COMPLETE : ScoreMode.TOP_SCORES;
        }

        @Override
        public int getHitsThreshold() {
            return totalHitsThreshold;
        }
    }

    /*
     * Returns a threshold checker that is useful for single threaded searches
     */
    public static HitsThresholdChecker create(final int totalHitsThreshold) {
        return new LocalHitsThresholdChecker(totalHitsThreshold);
    }

    /*
     * Returns a threshold checker that is based on a shared counter
     */
    public static HitsThresholdChecker createShared(final int totalHitsThreshold) {
        return new GlobalHitsThresholdChecker(totalHitsThreshold);
    }

    public abstract void incrementHitCount();

    public abstract ScoreMode scoreMode();

    public abstract int getHitsThreshold();

    public abstract boolean isThresholdReached();
}
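Note: roughly how a collector drives this checker (illustrative; the HugePq collectors below wire it into Lucene's LeafCollector machinery):

    HitsThresholdChecker checker = HitsThresholdChecker.createShared(1_000);

    static void collectOne(HitsThresholdChecker checker) {
        checker.incrementHitCount();
        if (checker.isThresholdReached()) {
            // from here on the total hit count is only a lower bound and
            // scoring may skip non-competitive documents; CollectionTerminatedException
            // is Lucene's signal to stop collecting the current leaf early
            throw new CollectionTerminatedException();
        }
    }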
@@ -0,0 +1,63 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package it.cavallium.dbengine.lucene.hugepq.mirrored;

import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;

/** Docs iterator that starts iterating from a configurable minimum document */
public class MinDocIterator extends DocIdSetIterator {
    final int segmentMinDoc;
    final int maxDoc;
    int doc = -1;

    public MinDocIterator(int segmentMinDoc, int maxDoc) {
        this.segmentMinDoc = segmentMinDoc;
        this.maxDoc = maxDoc;
    }

    @Override
    public int docID() {
        return doc;
    }

    @Override
    public int nextDoc() throws IOException {
        return advance(doc + 1);
    }

    @Override
    public int advance(int target) throws IOException {
        assert target > doc;
        if (doc == -1) {
            // skip directly to minDoc
            doc = Math.max(target, segmentMinDoc);
        } else {
            doc = target;
        }
        if (doc >= maxDoc) {
            doc = NO_MORE_DOCS;
        }
        return doc;
    }

    @Override
    public long cost() {
        return maxDoc - segmentMinDoc;
    }
}
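Note: example of the iteration contract above (the first call jumps straight to segmentMinDoc, and NO_MORE_DOCS marks exhaustion):

    MinDocIterator it = new MinDocIterator(100, 1_000); // docs [100, 1000)
    int doc = it.nextDoc(); // 100
    while (doc != DocIdSetIterator.NO_MORE_DOCS) {
        // visits docs 100 .. 999
        doc = it.nextDoc();
    }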
@@ -0,0 +1,109 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package it.cavallium.dbengine.lucene.hugepq.mirrored;

import java.io.IOException;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.Scorable;

public final class MultiLeafFieldComparator implements LeafFieldComparator {

    private final LeafFieldComparator[] comparators;
    private final int[] reverseMul;
    // we extract the first comparator to avoid array access in the common case
    // that the first comparator compares worse than the bottom entry in the queue
    private final LeafFieldComparator firstComparator;
    private final int firstReverseMul;

    public MultiLeafFieldComparator(LeafFieldComparator[] comparators, int[] reverseMul) {
        if (comparators.length != reverseMul.length) {
            throw new IllegalArgumentException(
                    "Must have the same number of comparators and reverseMul, got "
                            + comparators.length
                            + " and "
                            + reverseMul.length);
        }
        this.comparators = comparators;
        this.reverseMul = reverseMul;
        this.firstComparator = comparators[0];
        this.firstReverseMul = reverseMul[0];
    }

    @Override
    public void setBottom(int slot) throws IOException {
        for (LeafFieldComparator comparator : comparators) {
            comparator.setBottom(slot);
        }
    }

    @Override
    public int compareBottom(int doc) throws IOException {
        int cmp = firstReverseMul * firstComparator.compareBottom(doc);
        if (cmp != 0) {
            return cmp;
        }
        for (int i = 1; i < comparators.length; ++i) {
            cmp = reverseMul[i] * comparators[i].compareBottom(doc);
            if (cmp != 0) {
                return cmp;
            }
        }
        return 0;
    }

    @Override
    public int compareTop(int doc) throws IOException {
        int cmp = firstReverseMul * firstComparator.compareTop(doc);
        if (cmp != 0) {
            return cmp;
        }
        for (int i = 1; i < comparators.length; ++i) {
            cmp = reverseMul[i] * comparators[i].compareTop(doc);
            if (cmp != 0) {
                return cmp;
            }
        }
        return 0;
    }

    @Override
    public void copy(int slot, int doc) throws IOException {
        for (LeafFieldComparator comparator : comparators) {
            comparator.copy(slot, doc);
        }
    }

    @Override
    public void setScorer(Scorable scorer) throws IOException {
        for (LeafFieldComparator comparator : comparators) {
            comparator.setScorer(scorer);
        }
    }

    @Override
    public void setHitsThresholdReached() throws IOException {
        // this is needed for skipping functionality that is only relevant for the 1st comparator
        firstComparator.setHitsThresholdReached();
    }

    @Override
    public DocIdSetIterator competitiveIterator() throws IOException {
        // this is needed for skipping functionality that is only relevant for the 1st comparator
        return firstComparator.competitiveIterator();
    }
}
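Note: construction sketch for the wrapper above; it chains per-field LeafFieldComparators, with reverseMul flipping sort order per field (the comparator variables here are assumed, not from this commit):

    LeafFieldComparator[] perField = {relevanceLeafComparator, docIdLeafComparator};
    int[] reverseMul = {-1, 1}; // descending on the first field, ascending on the second
    LeafFieldComparator combined = new MultiLeafFieldComparator(perField, reverseMul);
    combined.setBottom(0); // fans out to every wrapped comparator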
@@ -15,9 +15,10 @@
 * limitations under the License.
 */

package org.apache.lucene.search;
package it.cavallium.dbengine.lucene.hugepq.search;

import java.util.concurrent.atomic.AtomicLong;
import org.apache.lucene.search.ScoreMode;

/** Used for defining custom algorithms to allow searches to early terminate */
public abstract class CustomHitsThresholdChecker {
@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.search;
package it.cavallium.dbengine.lucene.hugepq.search;

import it.cavallium.dbengine.database.SafeCloseable;
import it.cavallium.dbengine.database.disk.LLTempHugePqEnv;
@@ -29,10 +29,23 @@ import it.cavallium.dbengine.lucene.PriorityQueue;
import it.cavallium.dbengine.lucene.ResourceIterable;
import it.cavallium.dbengine.lucene.collector.FullDocsCollector;
import it.cavallium.dbengine.lucene.collector.FullFieldDocs;
import it.cavallium.dbengine.lucene.hugepq.mirrored.HitsThresholdChecker;
import it.cavallium.dbengine.lucene.hugepq.mirrored.MultiLeafFieldComparator;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.DocIdSetIterator;
import org.apache.lucene.search.FieldComparator;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.LeafFieldComparator;
import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.Sort;
import org.apache.lucene.search.SortField;
import org.apache.lucene.search.TotalHits;
import org.apache.lucene.search.TotalHits.Relation;
import reactor.core.publisher.Flux;
@@ -14,7 +14,7 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.lucene.search;
package it.cavallium.dbengine.lucene.hugepq.search;

import it.cavallium.dbengine.database.disk.LLTempHugePqEnv;
import it.cavallium.dbengine.lucene.FullDocs;
@@ -28,6 +28,13 @@ import java.io.IOException;
import java.util.Collection;
import org.apache.lucene.index.LeafReaderContext;
import it.cavallium.dbengine.lucene.MaxScoreAccumulator.DocAndScore;
import org.apache.lucene.search.Collector;
import org.apache.lucene.search.CollectorManager;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.LeafCollector;
import org.apache.lucene.search.Scorable;
import org.apache.lucene.search.ScoreMode;
import org.apache.lucene.search.TotalHits;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
@@ -15,13 +15,14 @@
 * limitations under the License.
 */

package org.apache.lucene.search.comparators;
package it.cavallium.dbengine.lucene.hugepq.search.comparators;

import it.cavallium.dbengine.database.SafeCloseable;
import it.cavallium.dbengine.database.disk.LLTempHugePqEnv;
import it.cavallium.dbengine.lucene.IArray;
import it.cavallium.dbengine.lucene.IntCodec;
import it.cavallium.dbengine.lucene.HugePqArray;
import it.cavallium.dbengine.lucene.hugepq.mirrored.MinDocIterator;
import java.io.IOException;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.search.DocIdSetIterator;
@@ -10,7 +10,7 @@ import it.cavallium.dbengine.database.disk.LLTempHugePqEnv;
import it.cavallium.dbengine.lucene.LuceneUtils;
import it.cavallium.dbengine.lucene.FullDocs;
import it.cavallium.dbengine.lucene.LLScoreDoc;
import org.apache.lucene.search.HugePqFullScoreDocCollector;
import it.cavallium.dbengine.lucene.hugepq.search.HugePqFullScoreDocCollector;
import java.io.IOException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -10,7 +10,7 @@ import it.cavallium.dbengine.database.disk.LLTempHugePqEnv;
import it.cavallium.dbengine.lucene.FullDocs;
import it.cavallium.dbengine.lucene.LLFieldDoc;
import it.cavallium.dbengine.lucene.LuceneUtils;
import org.apache.lucene.search.HugePqFullFieldDocCollector;
import it.cavallium.dbengine.lucene.hugepq.search.HugePqFullFieldDocCollector;
import java.io.IOException;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -10,7 +10,7 @@ import it.cavallium.dbengine.database.disk.LLIndexSearchers;
import it.cavallium.dbengine.lucene.LuceneUtils;
import it.cavallium.dbengine.lucene.MaxScoreAccumulator;
import java.util.List;
import org.apache.lucene.search.CustomHitsThresholdChecker;
import it.cavallium.dbengine.lucene.hugepq.search.CustomHitsThresholdChecker;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.ScoreDoc;
import org.jetbrains.annotations.Nullable;
@@ -2,14 +2,10 @@ package it.cavallium.dbengine.netty;

import io.netty5.buffer.api.BufferAllocator;
import io.netty5.buffer.api.DefaultBufferAllocators;
import io.netty5.buffer.api.pool.MetricUtils;
import io.netty5.buffer.api.pool.PoolArenaMetric;
import it.cavallium.dbengine.MetricUtils;
import io.netty5.buffer.api.pool.PooledBufferAllocator;
import java.lang.management.ManagementFactory;
import java.util.Map;
import java.util.Map.Entry;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Stream;
import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
@@ -2,24 +2,17 @@ package it.cavallium.dbengine.netty;

import io.netty5.buffer.api.pool.BufferAllocatorMetric;
import io.netty5.buffer.api.pool.PooledBufferAllocator;
import io.netty5.buffer.api.pool.PooledBufferAllocatorMetricUtils;
import java.lang.reflect.Field;

public class JMXPooledNettyMonitoring extends JMXNettyMonitoring implements JMXNettyMonitoringMBean {

    private final PooledBufferAllocator alloc;
    private final BufferAllocatorMetric metric;
    private PooledBufferAllocatorMetricUtils metricUtils;

    public JMXPooledNettyMonitoring(String name, PooledBufferAllocator alloc) {
        super(name, alloc.isDirectBufferPooled(), alloc.metric());
        this.alloc = alloc;
        this.metric = alloc.metric();
        try {
            this.metricUtils = new PooledBufferAllocatorMetricUtils(alloc);
        } catch (Throwable e) {
            this.metricUtils = null;
        }
    }

    @Override
@@ -34,7 +27,7 @@ public class JMXPooledNettyMonitoring extends JMXNettyMonitoring implements JMXN

    @Override
    public Integer getNumThreadLocalCachesArenas() {
        return metricUtils != null ? metricUtils.numThreadLocalCaches() : 0;
        return 0;
    }

    @Deprecated
@@ -45,16 +38,16 @@ public class JMXPooledNettyMonitoring extends JMXNettyMonitoring implements JMXN

    @Override
    public Integer getSmallCacheSize() {
        return metricUtils != null ? metricUtils.smallCacheSize() : 0;
        return 0;
    }

    @Override
    public Integer getNormalCacheSize() {
        return metricUtils != null ? metricUtils.normalCacheSize() : 0;
        return 0;
    }

    @Override
    public Integer getChunkSize() {
        return metricUtils != null ? metricUtils.chunkSize() : 0;
        return 0;
    }
}
src/main/java/module-info.java (new file, 53 lines)
@@ -0,0 +1,53 @@
module dbengine {
    exports it.cavallium.dbengine.lucene;
    exports it.cavallium.dbengine.database;
    exports it.cavallium.dbengine.rpc.current.data;
    exports it.cavallium.dbengine.database.remote;
    exports it.cavallium.dbengine.database.disk;
    exports it.cavallium.dbengine.rpc.current.data.nullables;
    exports it.cavallium.dbengine.database.serialization;
    exports it.cavallium.dbengine.client;
    exports it.cavallium.dbengine.client.query.current.data;
    exports it.cavallium.dbengine.lucene.collector;
    exports it.cavallium.dbengine.lucene.searcher;
    exports it.cavallium.dbengine.database.collections;
    exports it.cavallium.dbengine.lucene.analyzer;
    exports it.cavallium.dbengine.client.query;
    exports it.cavallium.dbengine.database.memory;
    exports it.cavallium.dbengine.netty;
    requires org.jetbrains.annotations;
    requires reactor.core;
    requires com.google.common;
    requires micrometer.core;
    requires io.netty5.buffer;
    requires rocksdbjni;
    requires org.reactivestreams;
    requires org.apache.logging.log4j;
    requires io.soabase.recordbuilder.core;
    requires moshi;
    requires io.netty5.common;
    requires it.unimi.dsi.fastutil;
    requires common.utils;
    requires data.generator.runtime;
    requires java.logging;
    requires org.apache.lucene.core;
    requires org.apache.commons.lang3;
    requires java.compiler;
    requires org.apache.lucene.analysis.common;
    requires org.apache.lucene.misc;
    requires lucene.relevance;
    requires io.netty.buffer;
    requires io.netty.transport;
    requires io.netty.codec;
    requires org.apache.lucene.facet;
    requires java.management;
    requires reactor.netty.core;
    requires jdk.unsupported;
    requires com.ibm.icu;
    requires org.apache.lucene.analysis.icu;
    requires io.netty.handler;
    requires io.netty.incubator.codec.classes.quic;
    requires org.apache.lucene.queryparser;
    requires reactor.netty.incubator.quic;

}
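Note: for reference, a hypothetical downstream module descriptor consuming the new module:

    module myapp {
        requires dbengine;     // grants access to the exported it.cavallium.dbengine.* packages
        requires reactor.core; // Mono/Flux appear in the exported signatures
    }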
@@ -4,7 +4,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals;

import io.netty5.buffer.api.Buffer;
import io.netty5.buffer.api.MemoryManager;
import io.netty5.buffer.api.pool.MetricUtils;
import io.netty5.buffer.api.pool.PoolArenaMetric;
import io.netty5.buffer.api.pool.PooledBufferAllocator;
import io.netty5.util.internal.PlatformDependent;
@@ -22,7 +21,6 @@ import it.cavallium.dbengine.database.collections.DatabaseStageEntry;
import it.cavallium.dbengine.database.collections.DatabaseStageMap;
import it.cavallium.dbengine.database.collections.SubStageGetterHashMap;
import it.cavallium.dbengine.database.collections.SubStageGetterMap;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.Serializer;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import it.unimi.dsi.fastutil.objects.Object2ObjectSortedMap;
@@ -21,7 +21,7 @@ import org.apache.lucene.index.IndexableField;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryparser.classic.ParseException;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.CustomHitsThresholdChecker;
import it.cavallium.dbengine.lucene.hugepq.search.CustomHitsThresholdChecker;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;