Performance optimization and code cleanup

- Refactor column options (merge DefaultColumnOptions/NamedColumnOptions into a single ColumnOptions)
- Update dependencies (Guava, Lucene, RocksDB, log4j)
- Separate per-database read/write pools (see the sketch below)
Andrea Cavalli 2024-04-18 14:48:16 +02:00
parent ff3cbc11b6
commit 5888bc96b4
29 changed files with 285 additions and 97 deletions
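
The central change is that each database now owns a dedicated read pool and a dedicated write pool instead of sharing the static ROCKSDB_POOL from StreamUtils. A minimal sketch of the idea in plain Java (the real code uses StreamUtils.newNamedForkJoinPool and the collectOn/executing helpers shown in the diffs below; PerDatabasePools and its methods are illustrative only):

import java.util.List;
import java.util.concurrent.ForkJoinPool;

// Illustrative only: one read pool and one write pool per database, so
// parallel reads and parallel writes no longer contend for a single shared pool.
final class PerDatabasePools implements AutoCloseable {

	private final ForkJoinPool dbReadPool = new ForkJoinPool();
	private final ForkJoinPool dbWritePool = new ForkJoinPool();

	void putAll(List<String> entries) {
		// Bulk writes are fanned out on the write pool.
		dbWritePool.submit(() -> entries.parallelStream().forEach(e -> {
			// write `e` here
		})).join();
	}

	long countAll(List<String> entries) {
		// Parallel read work (counts, scans) runs on the read pool.
		return dbReadPool.submit(() -> entries.parallelStream().count()).join();
	}

	@Override
	public void close() {
		dbReadPool.shutdown();
		dbWritePool.shutdown();
	}
}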

pom.xml (22 lines changed)
View File

@ -5,7 +5,7 @@
<groupId>it.cavallium</groupId>
<artifactId>dbengine</artifactId>
<version>4.0.${revision}</version>
<version>4.2.${revision}</version>
<packaging>jar</packaging>
<properties>
@ -13,8 +13,8 @@
<revision>0-SNAPSHOT</revision>
<dbengine.ci>false</dbengine.ci>
<micrometer.version>1.10.4</micrometer.version>
<lucene.version>9.9.1</lucene.version>
<rocksdb.version>8.10.0</rocksdb.version>
<lucene.version>9.10.0</lucene.version>
<rocksdb.version>9.0.0</rocksdb.version>
<junit.jupiter.version>5.9.0</junit.jupiter.version>
<data.generator.version>1.0.26</data.generator.version>
</properties>
@ -66,11 +66,23 @@
<id>mchv-release-distribution</id>
<name>MCHV Release Apache Maven Packages Distribution</name>
<url>https://mvn.mchv.eu/repository/mchv</url>
<releases>
<enabled>true</enabled>
</releases>
<snapshots>
<enabled>false</enabled>
</snapshots>
</repository>
<snapshotRepository>
<id>mchv-snapshot-distribution</id>
<name>MCHV Snapshot Apache Maven Packages Distribution</name>
<url>https://mvn.mchv.eu/repository/mchv-snapshot</url>
<releases>
<enabled>false</enabled>
</releases>
<snapshots>
<enabled>true</enabled>
</snapshots>
</snapshotRepository>
</distributionManagement>
<scm>
@ -92,7 +104,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>31.1-jre</version>
<version>33.0.0-jre</version>
</dependency>
<dependency>
<groupId>org.yaml</groupId>
@ -159,7 +171,7 @@
<dependency>
<groupId>org.apache.logging.log4j</groupId>
<artifactId>log4j-slf4j2-impl</artifactId>
<version>2.20.0</version>
<version>2.22.1</version>
<scope>test</scope>
<exclusions>
<exclusion>

View File

@ -13,20 +13,6 @@ interfacesData:
extendInterfaces: [RPCEvent]
ServerBoundResponse:
extendInterfaces: [RPCEvent]
ColumnOptions:
commonGetters:
levels: DatabaseLevel[]
memtableMemoryBudgetBytes: -long
cacheIndexAndFilterBlocks: -boolean
partitionFilters: -boolean
filter: -Filter
blockSize: -int
persistentCacheId: -String
writeBufferSize: -long
blobFiles: boolean
minBlobSize: -long
blobFileSize: -long
blobCompressionType: -Compression
superTypesData:
RPCEvent: [
Empty,
@ -94,10 +80,6 @@ superTypesData:
NoFilter,
BloomFilter
]
ColumnOptions: [
DefaultColumnOptions,
NamedColumnOptions
]
customTypesData:
Path:
javaClass: java.nio.file.Path
@ -251,14 +233,13 @@ baseTypesData:
persistentCaches: PersistentCache[]
writeBufferManager: -long
spinning: boolean
defaultColumnOptions: DefaultColumnOptions
defaultColumnOptions: ColumnOptions
columnOptions: NamedColumnOptions[]
logPath: -String
walPath: -String
openAsSecondary: boolean
secondaryDirectoryName: -String
# Remember to update ColumnOptions common getters
DefaultColumnOptions:
ColumnOptions:
data:
levels: DatabaseLevel[]
memtableMemoryBudgetBytes: -long
@ -272,22 +253,10 @@ baseTypesData:
minBlobSize: -long
blobFileSize: -long
blobCompressionType: -Compression
# Remember to update ColumnOptions common getters
NamedColumnOptions:
data:
columnName: String
levels: DatabaseLevel[]
memtableMemoryBudgetBytes: -long
cacheIndexAndFilterBlocks: -boolean
partitionFilters: -boolean
filter: -Filter
blockSize: -int
persistentCacheId: -String
writeBufferSize: -long
blobFiles: boolean
minBlobSize: -long
blobFileSize: -long
blobCompressionType: -Compression
name: String
options: ColumnOptions
NoFilter:
data: {}
BloomFilter:
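
The net effect of this model change: the old DefaultColumnOptions/NamedColumnOptions pair, which duplicated every per-column field, collapses into a single ColumnOptions type, and NamedColumnOptions becomes a plain (name, options) pair. Roughly the shape the generated classes now take, sketched as records (the real classes are produced by the data generator and use nullable wrapper types such as Nullablelong and NullableFilter):

// Sketch of the refactored shape, not the generated code itself.
record ColumnOptions(
		boolean blobFiles,
		long memtableMemoryBudgetBytes,   // nullable wrappers in the generated model
		long writeBufferSize,
		int blockSize /* ...and the remaining per-column settings... */) {}

// A named column no longer repeats every field: it just binds a column name
// to a ColumnOptions value.
record NamedColumnOptions(String name, ColumnOptions options) {}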

View File

@ -4,10 +4,10 @@ import it.cavallium.datagen.nativedata.NullableString;
import it.cavallium.datagen.nativedata.Nullableboolean;
import it.cavallium.datagen.nativedata.Nullableint;
import it.cavallium.datagen.nativedata.Nullablelong;
import it.cavallium.dbengine.rpc.current.data.ColumnOptions;
import it.cavallium.dbengine.rpc.current.data.ColumnOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;
import it.cavallium.dbengine.rpc.current.data.DatabaseOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.DefaultColumnOptions;
import it.cavallium.dbengine.rpc.current.data.DefaultColumnOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.NamedColumnOptions;
import it.cavallium.dbengine.rpc.current.data.NamedColumnOptionsBuilder;
import it.cavallium.dbengine.rpc.current.data.nullables.NullableCompression;
@ -20,7 +20,7 @@ import org.rocksdb.RocksDB;
public class DefaultDatabaseOptions {
public static DefaultColumnOptions DEFAULT_DEFAULT_COLUMN_OPTIONS = new DefaultColumnOptions(
public static ColumnOptions DEFAULT_DEFAULT_COLUMN_OPTIONS = new ColumnOptions(
Collections.emptyList(),
Nullablelong.empty(),
Nullableboolean.empty(),
@ -37,18 +37,7 @@ public class DefaultDatabaseOptions {
public static NamedColumnOptions DEFAULT_NAMED_COLUMN_OPTIONS = new NamedColumnOptions(
new String(RocksDB.DEFAULT_COLUMN_FAMILY, StandardCharsets.UTF_8),
Collections.emptyList(),
Nullablelong.empty(),
Nullableboolean.empty(),
Nullableboolean.empty(),
NullableFilter.empty(),
Nullableint.empty(),
NullableString.empty(),
Nullablelong.empty(),
false,
Nullablelong.empty(),
Nullablelong.empty(),
NullableCompression.empty()
DEFAULT_DEFAULT_COLUMN_OPTIONS
);
public static DatabaseOptions DEFAULT_DATABASE_OPTIONS = new DatabaseOptions(List.of(),
@ -75,8 +64,8 @@ public class DefaultDatabaseOptions {
return DatabaseOptionsBuilder.builder(DEFAULT_DATABASE_OPTIONS);
}
public static DefaultColumnOptionsBuilder defaultColumnOptionsBuilder() {
return DefaultColumnOptionsBuilder.builder(DEFAULT_DEFAULT_COLUMN_OPTIONS);
public static ColumnOptionsBuilder defaultColumnOptionsBuilder() {
return ColumnOptionsBuilder.builder(DEFAULT_DEFAULT_COLUMN_OPTIONS);
}
public static NamedColumnOptionsBuilder namedColumnOptionsBuilder() {
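
For callers this means one builder instead of two near-identical ones. A usage sketch: defaultColumnOptionsBuilder() and the NamedColumnOptions(name, options) constructor are taken from this diff, while the setter name and build() call on ColumnOptionsBuilder are assumptions about the generated builder, and the import for DefaultDatabaseOptions is omitted because its package is not visible in this hunk.

import it.cavallium.dbengine.rpc.current.data.ColumnOptions;
import it.cavallium.dbengine.rpc.current.data.NamedColumnOptions;

class ColumnOptionsUsageSketch {
	static NamedColumnOptions blobColumn() {
		ColumnOptions opts = DefaultDatabaseOptions.defaultColumnOptionsBuilder()
				.blobFiles(true)   // setter name assumed from the model's field names
				.build();          // build() assumed
		return new NamedColumnOptions("blob-data", opts);
	}
}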

View File

@ -6,6 +6,7 @@ import it.cavallium.dbengine.client.SSTVerificationProgress;
import it.cavallium.dbengine.database.serialization.KVSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import java.util.List;
import java.util.concurrent.ForkJoinPool;
import java.util.function.Function;
import java.util.stream.Stream;
import org.jetbrains.annotations.NotNull;

View File

@ -1,6 +1,12 @@
package it.cavallium.dbengine.database;
import java.util.concurrent.ForkJoinPool;
public interface LLKeyValueDatabaseStructure {
String getDatabaseName();
ForkJoinPool getDbReadPool();
ForkJoinPool getDbWritePool();
}

View File

@ -1,6 +1,6 @@
package it.cavallium.dbengine.database;
import static it.cavallium.dbengine.utils.StreamUtils.ROCKSDB_POOL;
import static it.cavallium.dbengine.utils.StreamUtils.collect;
import static it.cavallium.dbengine.utils.StreamUtils.collectOn;
import static it.cavallium.dbengine.utils.StreamUtils.executing;
@ -88,7 +88,7 @@ public class LLMultiDatabaseConnection implements LLDatabaseConnection {
@Override
public LLDatabaseConnection connect() {
collectOn(ROCKSDB_POOL, allConnections.stream(), executing(connection -> {
collect(allConnections.stream(), executing(connection -> {
try {
connection.connect();
} catch (Exception ex) {
@ -166,7 +166,7 @@ public class LLMultiDatabaseConnection implements LLDatabaseConnection {
@Override
public void disconnect() {
collectOn(ROCKSDB_POOL, allConnections.stream(), executing(connection -> {
collect(allConnections.stream(), executing(connection -> {
try {
connection.disconnect();
} catch (Exception ex) {
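
With the shared ROCKSDB_POOL gone, connection setup and teardown no longer pin themselves to a specific pool: collectOn(pool, stream, collector) becomes the pool-less collect(stream, collector). A minimal sketch of the difference, assuming the two StreamUtils overloads behave as their names suggest and that executing(...) accepts any one-argument action, as in the usages above:

import static it.cavallium.dbengine.utils.StreamUtils.collect;
import static it.cavallium.dbengine.utils.StreamUtils.collectOn;
import static it.cavallium.dbengine.utils.StreamUtils.executing;

import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;

class ConnectionFanOutSketch {
	// New style (this commit): no explicit pool, collect() decides where the work runs.
	static void connectAll(Stream<Runnable> connections) {
		collect(connections, executing(Runnable::run));
	}

	// Old style (removed): the same fan-out pinned to a shared pool such as ROCKSDB_POOL.
	static void connectAllOn(ForkJoinPool pool, Stream<Runnable> connections) {
		collectOn(pool, connections, executing(Runnable::run));
	}
}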

View File

@ -6,6 +6,7 @@ import it.cavallium.dbengine.database.LLKeyValueDatabaseStructure;
import it.cavallium.dbengine.database.LLSingleton;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import java.util.concurrent.ForkJoinPool;
import org.jetbrains.annotations.Nullable;
public class DatabaseInt implements LLKeyValueDatabaseStructure {
@ -33,4 +34,14 @@ public class DatabaseInt implements LLKeyValueDatabaseStructure {
public String getDatabaseName() {
return singleton.getDatabaseName();
}
@Override
public ForkJoinPool getDbReadPool() {
return singleton.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return singleton.getDbWritePool();
}
}

View File

@ -8,6 +8,7 @@ import it.cavallium.dbengine.database.LLSingleton;
import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializerFixedBinaryLength;
import java.util.concurrent.ForkJoinPool;
import org.jetbrains.annotations.Nullable;
public class DatabaseLong implements LLKeyValueDatabaseStructure {
@ -81,4 +82,14 @@ public class DatabaseLong implements LLKeyValueDatabaseStructure {
public String getDatabaseName() {
return singleton.getDatabaseName();
}
@Override
public ForkJoinPool getDbReadPool() {
return singleton.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return singleton.getDbWritePool();
}
}

View File

@ -24,6 +24,7 @@ import java.util.Map.Entry;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Stream;
import org.apache.commons.lang3.function.TriFunction;
@ -310,6 +311,16 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
return resourceStream(() -> this.getAllEntries(null, false), () -> setAllEntries(entries));
}
@Override
public ForkJoinPool getDbReadPool() {
return dictionary.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return dictionary.getDbWritePool();
}
@Override
public void clear() {
if (range.isAll()) {

View File

@ -19,6 +19,7 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ForkJoinPool;
import java.util.function.Function;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
@ -109,6 +110,16 @@ public class DatabaseMapDictionaryHashed<T, U, TH> implements DatabaseStageMap<T
return newMap;
}
@Override
public ForkJoinPool getDbReadPool() {
return subDictionary.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return subDictionary.getDbWritePool();
}
@Override
public Object2ObjectSortedMap<T, U> get(@Nullable CompositeSnapshot snapshot) {
var v = subDictionary.get(snapshot);

View File

@ -17,6 +17,7 @@ import it.cavallium.dbengine.database.disk.CachedSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@ -67,6 +68,16 @@ public final class DatabaseMapSingle<U> implements DatabaseStageEntry<U> {
return valBuf.asList();
}
@Override
public ForkJoinPool getDbReadPool() {
return dictionary.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return dictionary.getDbWritePool();
}
@Override
public U get(@Nullable CompositeSnapshot snapshot) {
var result = dictionary.get(resolveSnapshot(snapshot), key);

View File

@ -12,6 +12,7 @@ import java.util.Map;
import java.util.Map.Entry;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@ -37,6 +38,16 @@ public class DatabaseSingleBucket<K, V, TH> implements DatabaseStageEntry<V> {
this.bucketStage = (DatabaseStageEntry<ObjectArraySet<Entry<K, V>>>) bucketStage;
}
@Override
public ForkJoinPool getDbReadPool() {
return bucketStage.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return bucketStage.getDbWritePool();
}
@Override
public V get(@Nullable CompositeSnapshot snapshot) {
var entries = bucketStage.get(snapshot);

View File

@ -9,6 +9,7 @@ import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.disk.CachedSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.jetbrains.annotations.Nullable;
@ -29,6 +30,16 @@ public class DatabaseSingleMapped<A, B> implements DatabaseStageEntry<A> {
this.serializedSingle = (DatabaseStageEntry<B>) serializedSingle;
}
@Override
public ForkJoinPool getDbReadPool() {
return serializedSingle.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return serializedSingle.getDbWritePool();
}
@Override
public A get(@Nullable CompositeSnapshot snapshot) {
var data = serializedSingle.get(snapshot);

View File

@ -14,6 +14,7 @@ import it.cavallium.dbengine.database.disk.CachedSerializationFunction;
import it.cavallium.dbengine.database.serialization.SerializationException;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.database.serialization.Serializer;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@ -65,6 +66,16 @@ public class DatabaseSingleton<U> implements DatabaseStageEntry<U> {
return valBuf.asList();
}
@Override
public ForkJoinPool getDbReadPool() {
return this.singleton.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return this.singleton.getDbWritePool();
}
@Override
public U get(@Nullable CompositeSnapshot snapshot) {
Buf result = singleton.get(resolveSnapshot(snapshot));

View File

@ -8,11 +8,15 @@ import it.cavallium.dbengine.database.LLUtils;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import java.util.Objects;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.jetbrains.annotations.Nullable;
public interface DatabaseStage<T> extends DatabaseStageWithEntry<T> {
ForkJoinPool getDbReadPool();
ForkJoinPool getDbWritePool();
@Nullable T get(@Nullable CompositeSnapshot snapshot);
default T getOrDefault(@Nullable CompositeSnapshot snapshot, T defaultValue, boolean existsAlmostCertainly) {

View File

@ -1,6 +1,5 @@
package it.cavallium.dbengine.database.collections;
import static it.cavallium.dbengine.utils.StreamUtils.ROCKSDB_POOL;
import static it.cavallium.dbengine.utils.StreamUtils.collectOn;
import static it.cavallium.dbengine.utils.StreamUtils.count;
import static it.cavallium.dbengine.utils.StreamUtils.executing;
@ -104,7 +103,7 @@ public interface DatabaseStageMap<T, U, US extends DatabaseStage<U>> extends Dat
}
default void putMulti(Stream<Entry<T, U>> entries) {
collectOn(ROCKSDB_POOL, entries, executing(entry -> this.putValue(entry.getKey(), entry.getValue())));
collectOn(getDbWritePool(), entries, executing(entry -> this.putValue(entry.getKey(), entry.getValue())));
}
Stream<SubStageEntry<T, US>> getAllStages(@Nullable CompositeSnapshot snapshot, boolean smallRange);
@ -149,7 +148,7 @@ public interface DatabaseStageMap<T, U, US extends DatabaseStage<U>> extends Dat
this.setAllEntries(entries.map(entriesReplacer));
}
} else {
collectOn(ROCKSDB_POOL,
collectOn(getDbWritePool(),
this.getAllEntries(null, smallRange).map(entriesReplacer),
executing(replacedEntry -> this.at(null, replacedEntry.getKey()).set(replacedEntry.getValue()))
);
@ -157,7 +156,7 @@ public interface DatabaseStageMap<T, U, US extends DatabaseStage<U>> extends Dat
}
default void replaceAll(Consumer<Entry<T, US>> entriesReplacer) {
collectOn(ROCKSDB_POOL, this.getAllStages(null, false), executing(entriesReplacer));
collectOn(getDbWritePool(), this.getAllStages(null, false), executing(entriesReplacer));
}
@Override
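
For DatabaseStageMap users nothing changes in the API: putMulti, replaceAll and the unbatched setAllEntries path simply fan out on the map's own write pool now instead of the removed shared ROCKSDB_POOL. A caller-side sketch (type parameters simplified to String keys and values):

import java.util.Map;
import java.util.stream.Stream;

import it.cavallium.dbengine.database.collections.DatabaseStageMap;

class BulkPutSketch {
	// The per-entry putValue calls issued by putMulti run on map.getDbWritePool().
	static void load(DatabaseStageMap<String, String, ?> map) {
		map.putMulti(Stream.of(
				Map.entry("k1", "v1"),
				Map.entry("k2", "v2")));
	}
}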

View File

@ -21,6 +21,7 @@ import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.StampedLock;
import java.util.stream.Stream;
@ -55,6 +56,8 @@ public sealed abstract class AbstractRocksDBColumn<T extends RocksDB> implements
private final ColumnFamilyHandle cfh;
protected final MeterRegistry meterRegistry;
private final ForkJoinPool dbReadPool;
private final ForkJoinPool dbWritePool;
protected final StampedLock closeLock;
protected final String columnName;
@ -89,7 +92,9 @@ public sealed abstract class AbstractRocksDBColumn<T extends RocksDB> implements
String databaseName,
ColumnFamilyHandle cfh,
MeterRegistry meterRegistry,
StampedLock closeLock) {
StampedLock closeLock,
ForkJoinPool dbReadPool,
ForkJoinPool dbWritePool) {
this.db = db;
this.cfh = cfh;
String columnName;
@ -100,6 +105,8 @@ public sealed abstract class AbstractRocksDBColumn<T extends RocksDB> implements
}
this.columnName = columnName;
this.meterRegistry = meterRegistry;
this.dbReadPool = dbReadPool;
this.dbWritePool = dbWritePool;
this.closeLock = closeLock;
this.keyBufferSize = DistributionSummary
@ -273,6 +280,16 @@ public sealed abstract class AbstractRocksDBColumn<T extends RocksDB> implements
return cfh;
}
@Override
public ForkJoinPool getDbReadPool() {
return dbReadPool;
}
@Override
public ForkJoinPool getDbWritePool() {
return dbWritePool;
}
protected void ensureOpen() {
RocksDBUtils.ensureOpen(db, cfh);
}

View File

@ -5,11 +5,9 @@ import static it.cavallium.dbengine.database.LLUtils.isBoundedRange;
import static it.cavallium.dbengine.database.LLUtils.mapList;
import static it.cavallium.dbengine.database.LLUtils.toStringSafe;
import static it.cavallium.dbengine.database.disk.UpdateAtomicResultMode.DELTA;
import static it.cavallium.dbengine.utils.StreamUtils.ROCKSDB_POOL;
import static it.cavallium.dbengine.utils.StreamUtils.collectOn;
import static it.cavallium.dbengine.utils.StreamUtils.executing;
import static it.cavallium.dbengine.utils.StreamUtils.fastSummingLong;
import static it.cavallium.dbengine.utils.StreamUtils.resourceStream;
import static java.util.Objects.requireNonNull;
import static it.cavallium.dbengine.utils.StreamUtils.batches;
@ -54,6 +52,7 @@ import java.util.Objects;
import java.util.Set;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.atomic.LongAdder;
@ -200,6 +199,16 @@ public class LLLocalDictionary implements LLDictionary {
return columnName;
}
@Override
public ForkJoinPool getDbReadPool() {
return db.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return db.getDbWritePool();
}
@NotNull
private LLReadOptions generateReadOptionsOrNew(LLSnapshot snapshot) {
return generateReadOptions(snapshot != null ? snapshotResolver.apply(snapshot) : null);
@ -355,9 +364,7 @@ public class LLLocalDictionary implements LLDictionary {
}
assert result != null;
return switch (updateReturnMode) {
case NOTHING -> {
yield null;
}
case NOTHING -> null;
case GET_NEW_VALUE -> ((UpdateAtomicResultCurrent) result).current();
case GET_OLD_VALUE -> ((UpdateAtomicResultPrevious) result).previous();
};
@ -451,7 +458,7 @@ public class LLLocalDictionary implements LLDictionary {
@Override
public void putMulti(Stream<LLEntry> entries) {
collectOn(ROCKSDB_POOL,
collectOn(getDbWritePool(),
batches(entries, Math.min(MULTI_GET_WINDOW, CAPPED_WRITE_BATCH_CAP)),
executing(entriesWindow -> {
try (var writeOptions = new LLWriteOptions()) {
@ -778,7 +785,7 @@ public class LLLocalDictionary implements LLDictionary {
throw new DBException("Failed to set a range: " + ex.getMessage());
}
collectOn(ROCKSDB_POOL, batches(entries, MULTI_GET_WINDOW), executing(entriesList -> {
collectOn(getDbWritePool(), batches(entries, MULTI_GET_WINDOW), executing(entriesList -> {
try (var writeOptions = new LLWriteOptions()) {
if (!USE_WRITE_BATCHES_IN_SET_RANGE) {
for (LLEntry entry : entriesList) {
@ -814,7 +821,7 @@ public class LLLocalDictionary implements LLDictionary {
if (USE_WRITE_BATCHES_IN_SET_RANGE) {
throw new UnsupportedOperationException("Can't use write batches in setRange without window. Please fix the parameters");
}
collectOn(ROCKSDB_POOL, this.getRange(null, range, false, smallRange), executing(oldValue -> {
collectOn(getDbWritePool(), this.getRange(null, range, false, smallRange), executing(oldValue -> {
try (var writeOptions = new LLWriteOptions()) {
db.delete(writeOptions, oldValue.getKey());
} catch (RocksDBException ex) {
@ -822,7 +829,7 @@ public class LLLocalDictionary implements LLDictionary {
}
}));
collectOn(ROCKSDB_POOL, entries, executing(entry -> {
collectOn(getDbWritePool(), entries, executing(entry -> {
if (entry.getKey() != null && entry.getValue() != null) {
this.putInternal(entry.getKey(), entry.getValue());
}
@ -1142,7 +1149,7 @@ public class LLLocalDictionary implements LLDictionary {
readOpts.setVerifyChecksums(VERIFY_CHECKSUMS_WHEN_NOT_NEEDED);
if (PARALLEL_EXACT_SIZE) {
return collectOn(ROCKSDB_POOL, parallelizeRange(LLRange.all()).map(range -> {
return collectOn(getDbReadPool(), parallelizeRange(LLRange.all()).map(range -> {
long partialCount = 0;
try (var rangeReadOpts = readOpts.copy()) {
try {
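
Inside LLLocalDictionary the split is applied consistently: putMulti and setRange batch their work onto getDbWritePool(), while the PARALLEL_EXACT_SIZE count fans per-range scans out on getDbReadPool(). A reduced, stand-alone sketch of the read-side pattern (parallelizeRange and fastSummingLong in the real code are dbengine helpers; countRange below is a hypothetical stand-in for iterating one RocksDB range):

import java.util.List;
import java.util.concurrent.ForkJoinPool;

class ExactSizeSketch {
	// Fan a per-range count out over the database's read pool and sum the partial counts.
	static long exactSize(ForkJoinPool dbReadPool, List<long[]> ranges) {
		return dbReadPool.submit(() ->
				ranges.parallelStream()
						.mapToLong(ExactSizeSketch::countRange)
						.sum())
				.join();
	}

	// Hypothetical stand-in for opening an iterator over one key range and counting keys.
	private static long countRange(long[] range) {
		return range[1] - range[0];
	}
}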

View File

@ -31,6 +31,7 @@ import it.cavallium.dbengine.rpc.current.data.DatabaseOptions;
import it.cavallium.dbengine.rpc.current.data.DatabaseVolume;
import it.cavallium.dbengine.rpc.current.data.NamedColumnOptions;
import it.cavallium.dbengine.rpc.current.data.NoFilter;
import it.cavallium.dbengine.utils.StreamUtils;
import java.io.File;
import java.io.IOException;
import it.cavallium.dbengine.utils.DBException;
@ -48,6 +49,7 @@ import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletionException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
@ -126,6 +128,8 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
protected static final Logger logger = LogManager.getLogger(LLLocalKeyValueDatabase.class);
private final MeterRegistry meterRegistry;
private ForkJoinPool dbReadPool;
private ForkJoinPool dbWritePool;
private final Timer snapshotTime;
@ -159,6 +163,8 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
DatabaseOptions databaseOptions) {
this.name = name;
this.meterRegistry = meterRegistry;
this.dbReadPool = StreamUtils.newNamedForkJoinPool("db-" + name, false);
this.dbWritePool = StreamUtils.newNamedForkJoinPool("db-" + name, false);
this.snapshotTime = Timer
.builder("db.snapshot.timer")
@ -186,9 +192,9 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
// Check column names validity
for (NamedColumnOptions columnOption : databaseOptions.columnOptions()) {
if (columns.stream().map(Column::name).noneMatch(columnName -> columnName.equals(columnOption.columnName()))) {
if (columns.stream().map(Column::name).noneMatch(columnName -> columnName.equals(columnOption.name()))) {
throw new IllegalArgumentException(
"Column " + columnOption.columnName() + " does not exist. Available columns: " + columns
"Column " + columnOption.name() + " does not exist. Available columns: " + columns
.stream()
.map(Column::name)
.collect(Collectors.joining(", ", "[", "]")));
@ -209,10 +215,10 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
var columnOptions = databaseOptions
.columnOptions()
.stream()
.filter(opts -> opts.columnName().equals(column.name()))
.filter(opts -> opts.name().equals(column.name()))
.findFirst()
.map(opts -> (ColumnOptions) opts)
.orElse(databaseOptions.defaultColumnOptions());
.map(NamedColumnOptions::options)
.orElseGet(databaseOptions::defaultColumnOptions);
//noinspection ConstantConditions
if (columnOptions.memtableMemoryBudgetBytes() != null) {
@ -794,6 +800,16 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
return name;
}
@Override
public ForkJoinPool getDbReadPool() {
return dbReadPool;
}
@Override
public ForkJoinPool getDbWritePool() {
return dbWritePool;
}
public StampedLock getCloseLock() {
return closeLock;
}
@ -1236,17 +1252,21 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
name,
cfh,
meterRegistry,
closeLock
closeLock,
dbReadPool,
dbWritePool
);
} else if (db instanceof TransactionDB transactionDB) {
return new PessimisticRocksDBColumn(transactionDB,
name,
cfh,
meterRegistry,
closeLock
closeLock,
dbReadPool,
dbWritePool
);
} else {
return new StandardRocksDBColumn(db, name, cfh, meterRegistry, closeLock);
return new StandardRocksDBColumn(db, name, cfh, meterRegistry, closeLock, dbReadPool, dbWritePool);
}
}
@ -1558,8 +1578,12 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
public void close() {
closeRequested = true;
if (statistics != null) {
statistics.close();
statistics = null;
try {
statistics.close();
statistics = null;
} catch (Exception ex) {
logger.error("Failed to close db statistics", ex);
}
}
try {
flushAndCloseDb(db,
@ -1575,6 +1599,23 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
deleteUnusedOldLogFiles();
} catch (Exception e) {
throw new DBException("Failed to close", e);
} finally {
if (dbReadPool != null) {
try {
dbReadPool.close();
dbReadPool = null;
} catch (Exception ex) {
logger.error("Failed to close db pool", ex);
}
}
if (dbWritePool != null) {
try {
dbWritePool.close();
dbWritePool = null;
} catch (Exception ex) {
logger.error("Failed to close db pool", ex);
}
}
}
}
@ -1590,7 +1631,7 @@ public class LLLocalKeyValueDatabase extends Backuppable implements LLKeyValueDa
private void resumeWrites() {
try {
db.continueBackgroundWork();
db.enableFileDeletions(false);
db.enableFileDeletions();
} catch (RocksDBException e) {
throw new DBException(e);
}
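
LLLocalKeyValueDatabase now builds both pools up front with StreamUtils.newNamedForkJoinPool("db-" + name, false) and tears them down at the end of close(), each in its own try/catch so one failed cleanup cannot skip the other. A reduced sketch of that lifecycle (ForkJoinPool.close() needs Java 19+; on older JVMs shutdown() is the closest equivalent):

import java.util.concurrent.ForkJoinPool;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

class DbPoolLifecycleSketch {

	private static final Logger logger = LogManager.getLogger(DbPoolLifecycleSketch.class);

	private ForkJoinPool dbReadPool;
	private ForkJoinPool dbWritePool;

	DbPoolLifecycleSketch(String name) {
		// The real constructor uses StreamUtils.newNamedForkJoinPool("db-" + name, false)
		// so worker threads carry the database name; plain pools keep this sketch standalone.
		this.dbReadPool = new ForkJoinPool();
		this.dbWritePool = new ForkJoinPool();
	}

	void close() {
		// Same ordering as the new close(): close each pool independently and log
		// failures instead of letting one exception abort the rest of the shutdown.
		if (dbReadPool != null) {
			try {
				dbReadPool.close();
				dbReadPool = null;
			} catch (Exception ex) {
				logger.error("Failed to close db read pool", ex);
			}
		}
		if (dbWritePool != null) {
			try {
				dbWritePool.close();
				dbWritePool = null;
			} catch (Exception ex) {
				logger.error("Failed to close db write pool", ex);
			}
		}
	}
}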

View File

@ -16,6 +16,7 @@ import it.cavallium.dbengine.utils.DBException;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.Callable;
import java.util.concurrent.ForkJoinPool;
import java.util.function.Function;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
@ -135,6 +136,16 @@ public class LLLocalSingleton implements LLSingleton {
return databaseName;
}
@Override
public ForkJoinPool getDbReadPool() {
return db.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return db.getDbWritePool();
}
@Override
public String getColumnName() {
return columnName;

View File

@ -13,6 +13,7 @@ import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.lucene.ExponentialPageLimits;
import it.cavallium.dbengine.utils.DBException;
import java.io.IOException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.StampedLock;
@ -38,8 +39,10 @@ public final class OptimisticRocksDBColumn extends AbstractRocksDBColumn<Optimis
String databaseName,
ColumnFamilyHandle cfh,
MeterRegistry meterRegistry,
StampedLock closeLock) {
super(db, databaseName, cfh, meterRegistry, closeLock);
StampedLock closeLock,
ForkJoinPool dbReadPool,
ForkJoinPool dbWritePool) {
super(db, databaseName, cfh, meterRegistry, closeLock, dbReadPool, dbWritePool);
this.optimisticAttempts = DistributionSummary
.builder("db.optimistic.attempts.distribution")
.publishPercentiles(0.2, 0.5, 0.95)

View File

@ -11,6 +11,7 @@ import it.cavallium.dbengine.database.disk.rocksdb.LLWriteOptions;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.utils.DBException;
import java.io.IOException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.locks.StampedLock;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
@ -30,8 +31,10 @@ public final class PessimisticRocksDBColumn extends AbstractRocksDBColumn<Transa
String dbName,
ColumnFamilyHandle cfh,
MeterRegistry meterRegistry,
StampedLock closeLock) {
super(db, dbName, cfh, meterRegistry, closeLock);
StampedLock closeLock,
ForkJoinPool dbReadPool,
ForkJoinPool dbWritePool) {
super(db, dbName, cfh, meterRegistry, closeLock, dbReadPool, dbWritePool);
}
@Override

View File

@ -10,6 +10,7 @@ import it.cavallium.dbengine.database.disk.rocksdb.RocksIteratorObj;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
@ -89,4 +90,8 @@ public sealed interface RocksDBColumn permits AbstractRocksDBColumn {
boolean supportsTransactions();
void forceCompaction(int volumeId);
ForkJoinPool getDbReadPool();
ForkJoinPool getDbWritePool();
}

View File

@ -11,6 +11,7 @@ import it.cavallium.dbengine.database.disk.rocksdb.LLWriteOptions;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import it.cavallium.dbengine.utils.DBException;
import java.io.IOException;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.locks.StampedLock;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
@ -27,8 +28,10 @@ public final class StandardRocksDBColumn extends AbstractRocksDBColumn<RocksDB>
String dbName,
ColumnFamilyHandle cfh,
MeterRegistry meterRegistry,
StampedLock closeLock) {
super(db, dbName, cfh, meterRegistry, closeLock);
StampedLock closeLock,
ForkJoinPool dbReadPool,
ForkJoinPool dbWritePool) {
super(db, dbName, cfh, meterRegistry, closeLock, dbReadPool, dbWritePool);
}
@Override

View File

@ -28,6 +28,7 @@ import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentNavigableMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Supplier;
import java.util.stream.Collectors;
@ -449,4 +450,14 @@ public class LLMemoryDictionary implements LLDictionary {
public String getDatabaseName() {
return databaseName;
}
@Override
public ForkJoinPool getDbReadPool() {
return ForkJoinPool.commonPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return ForkJoinPool.commonPool();
}
}

View File

@ -21,6 +21,7 @@ import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Stream;
import org.jetbrains.annotations.Nullable;
@ -162,6 +163,16 @@ public class LLMemoryKeyValueDatabase implements LLKeyValueDatabase {
return name;
}
@Override
public ForkJoinPool getDbReadPool() {
return ForkJoinPool.commonPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return ForkJoinPool.commonPool();
}
@Override
public LLSnapshot takeSnapshot() {
var snapshotNumber = nextSnapshotNumber.getAndIncrement();

View File

@ -8,6 +8,7 @@ import it.cavallium.dbengine.database.LLSnapshot;
import it.cavallium.dbengine.database.UpdateReturnMode;
import it.cavallium.dbengine.database.serialization.SerializationFunction;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.ForkJoinPool;
import org.jetbrains.annotations.Nullable;
public class LLMemorySingleton implements LLSingleton {
@ -27,6 +28,16 @@ public class LLMemorySingleton implements LLSingleton {
return dict.getDatabaseName();
}
@Override
public ForkJoinPool getDbReadPool() {
return dict.getDbReadPool();
}
@Override
public ForkJoinPool getDbWritePool() {
return dict.getDbWritePool();
}
@Override
public Buf get(@Nullable LLSnapshot snapshot) {
return dict.get(snapshot, singletonName);

View File

@ -44,10 +44,6 @@ public class StreamUtils {
public static final ForkJoinPool LUCENE_POOL = newNamedForkJoinPool("Lucene", false);
public static final ForkJoinPool GRAPH_POOL = newNamedForkJoinPool("Graph", false);
public static final ForkJoinPool ROCKSDB_POOL = newNamedForkJoinPool("RocksDB", false);
private static final Collector<?, ?, ?> TO_LIST_FAKE_COLLECTOR = new FakeCollector();
private static final Collector<?, ?, ?> COUNT_FAKE_COLLECTOR = new FakeCollector();
private static final Collector<?, ?, ?> FIRST_FAKE_COLLECTOR = new FakeCollector();

View File

@ -24,6 +24,7 @@ import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.concurrent.ForkJoinPool;
import java.util.stream.Stream;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.ColumnFamilyHandle;
@ -70,7 +71,7 @@ public class TestVersionsLeak {
var keyF = key;
toByteArray(key, keyBytes);
StreamUtils.collectOn(StreamUtils.ROCKSDB_POOL,
StreamUtils.collectOn(ForkJoinPool.commonPool(),
Stream.of(1, 2, 3, 4).parallel(),
StreamUtils.executing(x -> {
dict.put(Buf.wrap(keyBytes), val, LLDictionaryResultType.PREVIOUS_VALUE);