Fix logging, configurable compression

Andrea Cavalli 2021-12-27 17:34:44 +01:00
parent 582813b6c7
commit aa04a64c34
5 changed files with 31 additions and 14 deletions

pom.xml

@@ -361,7 +361,7 @@
<dependency>
<groupId>org.rocksdb</groupId>
<artifactId>rocksdbjni</artifactId>
<version>6.26.1</version>
<version>6.27.3</version>
</dependency>
<dependency>
<groupId>org.apache.lucene</groupId>

Compression.java

@@ -0,0 +1,23 @@
package it.cavallium.dbengine.client;

import org.rocksdb.CompressionType;

public enum Compression {
	PLAIN(CompressionType.NO_COMPRESSION),
	SNAPPY(CompressionType.SNAPPY_COMPRESSION),
	LZ4(CompressionType.LZ4_COMPRESSION),
	LZ4_HC(CompressionType.LZ4HC_COMPRESSION),
	ZSTD(CompressionType.ZSTD_COMPRESSION),
	ZLIB(CompressionType.ZLIB_COMPRESSION),
	BZLIB2(CompressionType.BZLIB2_COMPRESSION);

	private final CompressionType type;

	Compression(CompressionType compressionType) {
		this.type = compressionType;
	}

	public CompressionType getType() {
		return type;
	}
}
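
For reference, a minimal sketch of how the new enum is meant to be consumed (the demo class itself is hypothetical; only Compression and org.rocksdb.CompressionType come from this commit):

import it.cavallium.dbengine.client.Compression;
import org.rocksdb.CompressionType;

// Hypothetical demo class: prints the RocksDB CompressionType wrapped by each constant.
public final class CompressionDemo {
	public static void main(String[] args) {
		for (Compression compression : Compression.values()) {
			// getType() exposes the wrapped org.rocksdb.CompressionType
			CompressionType rocksDbType = compression.getType();
			System.out.println(compression + " -> " + rocksDbType);
		}
	}
}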

DatabaseVolume.java

@@ -11,4 +11,4 @@ import java.nio.file.Path;
* Target size can be exceeded if all the volumes are full
*/
@RecordBuilder
public record DatabaseVolume(Path volumePath, long targetSizeBytes) {}
public record DatabaseVolume(Path volumePath, long targetSizeBytes, Compression compression) {}
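
A hedged sketch of what the extra record component means for callers, via the canonical record constructor (the helper class, paths and sizes are made up; only DatabaseVolume and Compression are from this commit, and DatabaseVolume's import is assumed):

import it.cavallium.dbengine.client.Compression;
import java.nio.file.Path;
import java.util.List;

// Hypothetical helper: two volumes, each carrying its own compression setting.
public final class VolumeConfigExample {
	static List<DatabaseVolume> exampleVolumes() {
		return List.of(
				new DatabaseVolume(Path.of("/srv/db/fast"), 8L * 1024 * 1024 * 1024, Compression.LZ4),
				new DatabaseVolume(Path.of("/srv/db/cold"), 64L * 1024 * 1024 * 1024, Compression.ZSTD));
	}
}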

LLLocalDictionary.java

@@ -1,6 +1,5 @@
package it.cavallium.dbengine.database.disk;
import static io.net5.buffer.Unpooled.wrappedBuffer;
import static io.net5.buffer.api.StandardAllocationTypes.OFF_HEAP;
import static it.cavallium.dbengine.database.LLUtils.MARKER_ROCKSDB;
import static it.cavallium.dbengine.database.LLUtils.asReadOnlyDirect;
@@ -42,12 +41,11 @@ import java.util.concurrent.Callable;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinTask;
import java.util.function.Function;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.util.Supplier;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.rocksdb.AbstractSlice;
@@ -319,11 +317,8 @@ public class LLLocalDictionary implements LLDictionary {
try (var value = entry.getValue().receive()) {
assert key.isAccessible();
assert value.isAccessible();
logger.trace(MARKER_ROCKSDB,
"Writing {}: {}",
(Supplier<String>) () -> toStringSafe(key),
(Supplier<String>) () -> toStringSafe(value)
);
var varargs = new Supplier<?>[]{() -> toStringSafe(key), () -> toStringSafe(value)};
logger.trace(MARKER_ROCKSDB, "Writing {}: {}", varargs);
db.put(EMPTY_WRITE_OPTIONS, key, value);
sink.complete();
}
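
The change swaps java.util.function.Supplier for org.apache.logging.log4j.util.Supplier and packs the lazily evaluated trace parameters into a Supplier<?>[] passed to logger.trace. An alternative way to keep the conversion cost off the hot path, shown here only as a hedged stand-alone sketch (the class, marker name and the toStringSafe stand-in are illustrative, not the commit's code), is the classic isTraceEnabled guard:

import java.util.Arrays;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;

// Illustrative stand-alone sketch; mirrors the intent of the logging fix, not its exact code.
public final class GuardedTraceExample {
	private static final Logger logger = LogManager.getLogger(GuardedTraceExample.class);
	private static final Marker MARKER_ROCKSDB = MarkerManager.getMarker("ROCKSDB");

	public static void main(String[] args) {
		byte[] key = {1, 2, 3};
		byte[] value = {4, 5, 6};
		// The guard skips the (potentially expensive) string conversion entirely
		// whenever TRACE logging is disabled for this marker.
		if (logger.isTraceEnabled(MARKER_ROCKSDB)) {
			logger.trace(MARKER_ROCKSDB, "Writing {}: {}", toStringSafe(key), toStringSafe(value));
		}
	}

	// Stand-in for the real toStringSafe helper used by LLLocalDictionary.
	private static String toStringSafe(byte[] bytes) {
		return Arrays.toString(bytes);
	}
}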

LLLocalKeyValueDatabase.java

@@ -297,10 +297,9 @@ public class LLLocalKeyValueDatabase implements LLKeyValueDatabase {
options.setCompactionStyle(CompactionStyle.LEVEL);
options.setTargetFileSizeBase(64 * 1024 * 1024); // 64MiB sst file
options.setTargetFileSizeMultiplier(2); // Each level is 2 times the previous level
options.setCompressionPerLevel(List.of(CompressionType.NO_COMPRESSION,
CompressionType.SNAPPY_COMPRESSION,
CompressionType.SNAPPY_COMPRESSION
));
if (!databaseOptions.volumes().isEmpty()) {
options.setCompressionPerLevel(databaseOptions.volumes().stream().map(v -> v.compression().getType()).toList());
}
options.setManualWalFlush(false);
options.setMinWriteBufferNumberToMerge(3);
options.setMaxWriteBufferNumber(4);
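
In effect, each configured volume now also decides the compression used for the corresponding LSM level, replacing the previous hard-coded NO_COMPRESSION/SNAPPY list. A hedged sketch of that mapping in isolation (the helper class and method are hypothetical; DatabaseVolume and Compression are the types from this commit, its import is assumed, and org.rocksdb.Options is used here as the options object since it offers the same setCompressionPerLevel setter):

import java.util.List;
import org.rocksdb.CompressionType;
import org.rocksdb.Options;

// Hypothetical helper: derives RocksDB's per-level compression list from the configured volumes.
public final class CompressionPerLevelExample {
	static void applyCompressionPerLevel(Options options, List<DatabaseVolume> volumes) {
		if (!volumes.isEmpty()) {
			// One CompressionType per level, taken from the volume at the same index.
			List<CompressionType> perLevel = volumes.stream()
					.map(v -> v.compression().getType())
					.toList();
			options.setCompressionPerLevel(perLevel);
		}
	}
}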