Exclusive range

Andrea Cavalli 2021-03-13 19:01:36 +01:00
parent 79ba4d8dd2
commit 32d1d76f69
5 changed files with 285 additions and 157 deletions

File: LLRange.java

@@ -3,6 +3,9 @@ package it.cavallium.dbengine.database;
 import java.util.Arrays;
 import java.util.StringJoiner;
 
+/**
+ * Range of data, from min (inclusive) to max (exclusive)
+ */
 public class LLRange {
 
 	private static final LLRange RANGE_ALL = new LLRange(null, null);
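The added javadoc pins down the convention the rest of this commit relies on: a range runs from min (inclusive) to max (exclusive), so membership is just a pair of unsigned lexicographic comparisons and no "last possible key" sentinel is needed. A minimal illustration of the semantics (a hypothetical helper, not part of the commit; Arrays.compareUnsigned needs Java 9+):

import java.util.Arrays;

public class RangeSemanticsExample {

	// Hypothetical membership test matching the documented convention:
	// null min/max mean "unbounded" on that side, min is inclusive, max is exclusive.
	static boolean contains(byte[] min, byte[] max, byte[] key) {
		boolean aboveMin = min == null || Arrays.compareUnsigned(key, min) >= 0;
		boolean belowMax = max == null || Arrays.compareUnsigned(key, max) < 0;
		return aboveMin && belowMax;
	}

	public static void main(String[] args) {
		byte[] min = {0x00, 0x01};
		byte[] max = {0x00, 0x02};
		System.out.println(contains(min, max, new byte[] {0x00, 0x01}));       // true (min is inclusive)
		System.out.println(contains(min, max, new byte[] {0x00, 0x01, 0x7F})); // true (still below max)
		System.out.println(contains(min, max, new byte[] {0x00, 0x02}));       // false (max is exclusive)
	}
}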

File: DatabaseMapDictionaryDeep.java

@@ -25,12 +25,35 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
 	protected final int keyExtLength;
 	protected final LLRange range;
 
-	protected static byte[] firstKey(byte[] prefixKey, int prefixLength, int suffixLength, int extLength) {
+	private static byte[] incrementPrefix(byte[] key, int prefixLength) {
+		boolean remainder = true;
+		final byte ff = (byte) 0xFF;
+		for (int i = prefixLength - 1; i >= 0; i--) {
+			if (key[i] != ff) {
+				key[i]++;
+				remainder = false;
+				break;
+			} else {
+				key[i] = 0x00;
+				remainder = true;
+			}
+		}
+		if (remainder) {
+			Arrays.fill(key, 0, prefixLength, (byte) 0xFF);
+			return Arrays.copyOf(key, key.length + 1);
+		} else {
+			return key;
+		}
+	}
+
+	static byte[] firstRangeKey(byte[] prefixKey, int prefixLength, int suffixLength, int extLength) {
 		return fillKeySuffixAndExt(prefixKey, prefixLength, suffixLength, extLength, (byte) 0x00);
 	}
 
-	protected static byte[] lastKey(byte[] prefixKey, int prefixLength, int suffixLength, int extLength) {
-		return fillKeySuffixAndExt(prefixKey, prefixLength, suffixLength, extLength, (byte) 0xFF);
+	static byte[] nextRangeKey(byte[] prefixKey, int prefixLength, int suffixLength, int extLength) {
+		byte[] nonIncremented = fillKeySuffixAndExt(prefixKey, prefixLength, suffixLength, extLength, (byte) 0x00);
+		return incrementPrefix(nonIncremented, prefixLength);
 	}
@@ -46,7 +69,7 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
 		return result;
 	}
 
-	protected static byte[] firstKey(byte[] prefixKey,
+	static byte[] firstRangeKey(byte[] prefixKey,
 			byte[] suffixKey,
 			int prefixLength,
 			int suffixLength,
@@ -54,12 +77,13 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
 		return fillKeyExt(prefixKey, suffixKey, prefixLength, suffixLength, extLength, (byte) 0x00);
 	}
 
-	protected static byte[] lastKey(byte[] prefixKey,
+	static byte[] nextRangeKey(byte[] prefixKey,
 			byte[] suffixKey,
 			int prefixLength,
 			int suffixLength,
 			int extLength) {
-		return fillKeyExt(prefixKey, suffixKey, prefixLength, suffixLength, extLength, (byte) 0xFF);
+		byte[] nonIncremented = fillKeyExt(prefixKey, suffixKey, prefixLength, suffixLength, extLength, (byte) 0x00);
+		return incrementPrefix(nonIncremented, prefixLength + suffixLength);
 	}
 
 	protected static byte[] fillKeyExt(byte[] prefixKey,
@@ -114,9 +138,9 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
 		this.keyPrefix = prefixKey;
 		this.keySuffixLength = keySuffixSerializer.getSerializedBinaryLength();
 		this.keyExtLength = keyExtLength;
-		byte[] firstKey = firstKey(keyPrefix, keyPrefix.length, keySuffixLength, keyExtLength);
-		byte[] lastKey = lastKey(keyPrefix, keyPrefix.length, keySuffixLength, keyExtLength);
-		this.range = keyPrefix.length == 0 ? LLRange.all() : LLRange.of(firstKey, lastKey);
+		byte[] firstKey = firstRangeKey(keyPrefix, keyPrefix.length, keySuffixLength, keyExtLength);
+		byte[] nextRangeKey = nextRangeKey(keyPrefix, keyPrefix.length, keySuffixLength, keyExtLength);
+		this.range = keyPrefix.length == 0 ? LLRange.all() : LLRange.of(firstKey, nextRangeKey);
 		assert subStageKeysConsistency(keyPrefix.length + keySuffixLength + keyExtLength);
 	}
@@ -169,8 +193,8 @@ public class DatabaseMapDictionaryDeep<T, U, US extends DatabaseStage<U>> implem
 	}
 
 	protected LLRange toExtRange(byte[] keySuffix) {
-		byte[] first = firstKey(keyPrefix, keySuffix, keyPrefix.length, keySuffixLength, keyExtLength);
-		byte[] end = lastKey(keyPrefix, keySuffix, keyPrefix.length, keySuffixLength, keyExtLength);
+		byte[] first = firstRangeKey(keyPrefix, keySuffix, keyPrefix.length, keySuffixLength, keyExtLength);
+		byte[] end = nextRangeKey(keyPrefix, keySuffix, keyPrefix.length, keySuffixLength, keyExtLength);
 		return LLRange.of(first, end);
 	}
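The renamed helpers make the new convention explicit: firstRangeKey pads the prefix with 0x00 bytes (the inclusive lower bound), and nextRangeKey produces the exclusive upper bound by zero-padding and then incrementing the prefix via incrementPrefix, which adds one with carry propagation from the rightmost prefix byte. When every prefix byte is 0xFF the carry cannot be absorbed, so the helper restores the all-0xFF prefix and appends one extra byte, which still sorts after every key of the original length. The old lastKey variants padded with 0xFF instead, which only approximates an inclusive maximum; the incremented prefix is exact. A standalone sketch of the same carry logic (an illustrative simplification that uses an early return instead of the remainder flag):

import java.util.Arrays;

public class PrefixIncrementExample {

	// Add 1 to the first prefixLength bytes, carrying 0xFF -> 0x00 leftward.
	static byte[] incrementPrefix(byte[] key, int prefixLength) {
		for (int i = prefixLength - 1; i >= 0; i--) {
			if (key[i] != (byte) 0xFF) {
				key[i]++;          // carry absorbed here
				return key;
			}
			key[i] = 0x00;         // 0xFF overflows, keep carrying
		}
		// The whole prefix was 0xFF: no larger prefix of the same length exists,
		// so fall back to the all-0xFF prefix plus one extra byte, which sorts
		// after every key of the original length.
		Arrays.fill(key, 0, prefixLength, (byte) 0xFF);
		return Arrays.copyOf(key, key.length + 1);
	}

	public static void main(String[] args) {
		System.out.println(Arrays.toString(incrementPrefix(new byte[] {0x00, 0x01, 0x02}, 3)));
		// [0, 1, 3]
		System.out.println(Arrays.toString(incrementPrefix(new byte[] {0x00, (byte) 0xFF, (byte) 0xFF}, 3)));
		// [1, 0, 0]
	}
}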

File: CappedWriteBatch.java

@@ -33,7 +33,7 @@ public class CappedWriteBatch implements WriteBatchInterface, AutoCloseable {
 		this.writeBatch.setMaxBytes(maxWriteBatchSize);
 	}
 
-	private void flushIfNeeded(boolean force) throws RocksDBException {
+	private synchronized void flushIfNeeded(boolean force) throws RocksDBException {
 		if (this.writeBatch.count() >= (force ? 1 : cap)) {
 			db.write(writeOptions, this.writeBatch);
 			this.writeBatch.clear();
@@ -41,151 +41,151 @@ public class CappedWriteBatch implements WriteBatchInterface, AutoCloseable {
 	}
 
 	@Override
-	public int count() {
+	public synchronized int count() {
 		return writeBatch.count();
 	}
 
 	@Override
-	public void put(byte[] key, byte[] value) throws RocksDBException {
+	public synchronized void put(byte[] key, byte[] value) throws RocksDBException {
 		writeBatch.put(key, value);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) throws RocksDBException {
+	public synchronized void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) throws RocksDBException {
 		writeBatch.put(columnFamilyHandle, key, value);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void put(ByteBuffer key, ByteBuffer value) throws RocksDBException {
+	public synchronized void put(ByteBuffer key, ByteBuffer value) throws RocksDBException {
 		writeBatch.put(key, value);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void put(ColumnFamilyHandle columnFamilyHandle, ByteBuffer key, ByteBuffer value) throws RocksDBException {
+	public synchronized void put(ColumnFamilyHandle columnFamilyHandle, ByteBuffer key, ByteBuffer value) throws RocksDBException {
 		writeBatch.put(columnFamilyHandle, key, value);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void merge(byte[] key, byte[] value) throws RocksDBException {
+	public synchronized void merge(byte[] key, byte[] value) throws RocksDBException {
 		writeBatch.merge(key, value);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) throws RocksDBException {
+	public synchronized void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, byte[] value) throws RocksDBException {
 		writeBatch.merge(columnFamilyHandle, key, value);
 		flushIfNeeded(false);
 	}
 
 	@Deprecated
 	@Override
-	public void remove(byte[] key) throws RocksDBException {
+	public synchronized void remove(byte[] key) throws RocksDBException {
 		writeBatch.remove(key);
 		flushIfNeeded(false);
 	}
 
 	@Deprecated
 	@Override
-	public void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key) throws RocksDBException {
+	public synchronized void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key) throws RocksDBException {
 		writeBatch.remove(columnFamilyHandle, key);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void delete(byte[] key) throws RocksDBException {
+	public synchronized void delete(byte[] key) throws RocksDBException {
 		writeBatch.delete(key);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void delete(ColumnFamilyHandle columnFamilyHandle, byte[] key) throws RocksDBException {
+	public synchronized void delete(ColumnFamilyHandle columnFamilyHandle, byte[] key) throws RocksDBException {
 		writeBatch.delete(columnFamilyHandle, key);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void singleDelete(byte[] key) throws RocksDBException {
+	public synchronized void singleDelete(byte[] key) throws RocksDBException {
 		writeBatch.singleDelete(key);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void singleDelete(ColumnFamilyHandle columnFamilyHandle, byte[] key) throws RocksDBException {
+	public synchronized void singleDelete(ColumnFamilyHandle columnFamilyHandle, byte[] key) throws RocksDBException {
 		writeBatch.singleDelete(columnFamilyHandle, key);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void remove(ByteBuffer key) throws RocksDBException {
+	public synchronized void remove(ByteBuffer key) throws RocksDBException {
 		writeBatch.remove(key);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void remove(ColumnFamilyHandle columnFamilyHandle, ByteBuffer key) throws RocksDBException {
+	public synchronized void remove(ColumnFamilyHandle columnFamilyHandle, ByteBuffer key) throws RocksDBException {
 		writeBatch.remove(columnFamilyHandle, key);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void deleteRange(byte[] beginKey, byte[] endKey) throws RocksDBException {
+	public synchronized void deleteRange(byte[] beginKey, byte[] endKey) throws RocksDBException {
 		writeBatch.deleteRange(beginKey, endKey);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void deleteRange(ColumnFamilyHandle columnFamilyHandle, byte[] beginKey, byte[] endKey)
+	public synchronized void deleteRange(ColumnFamilyHandle columnFamilyHandle, byte[] beginKey, byte[] endKey)
 			throws RocksDBException {
 		writeBatch.deleteRange(columnFamilyHandle, beginKey, endKey);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void putLogData(byte[] blob) throws RocksDBException {
+	public synchronized void putLogData(byte[] blob) throws RocksDBException {
 		writeBatch.putLogData(blob);
 		flushIfNeeded(false);
 	}
 
 	@Override
-	public void clear() {
+	public synchronized void clear() {
 		writeBatch.clear();
 	}
 
 	@Override
-	public void setSavePoint() {
+	public synchronized void setSavePoint() {
 		writeBatch.setSavePoint();
 	}
 
 	@Override
-	public void rollbackToSavePoint() throws RocksDBException {
+	public synchronized void rollbackToSavePoint() throws RocksDBException {
 		writeBatch.rollbackToSavePoint();
 	}
 
 	@Override
-	public void popSavePoint() throws RocksDBException {
+	public synchronized void popSavePoint() throws RocksDBException {
 		writeBatch.popSavePoint();
 	}
 
 	@Override
-	public void setMaxBytes(long maxBytes) {
+	public synchronized void setMaxBytes(long maxBytes) {
 		writeBatch.setMaxBytes(maxBytes);
 	}
 
 	@Override
-	public WriteBatch getWriteBatch() {
+	public synchronized WriteBatch getWriteBatch() {
 		return writeBatch;
 	}
 
-	public void writeToDbAndClose() throws RocksDBException {
+	public synchronized void writeToDbAndClose() throws RocksDBException {
 		flushIfNeeded(true);
 	}
 
 	@Override
-	public void close() {
+	public synchronized void close() {
 		writeBatch.close();
 	}
 }
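With every method now synchronized, the batch itself guarantees that a write and the flush it may trigger happen atomically, so the external synchronized (writeBatch) blocks removed elsewhere in this commit are no longer needed. A minimal sketch of the capped-batch pattern with the RocksDB specifics stubbed out (hypothetical names, not the committed class):

import java.util.ArrayList;
import java.util.List;

// Stand-in for the capped-batch pattern: buffer writes and flush once the
// count reaches a cap, all under the instance monitor.
public class CappedBuffer {

	private final int cap;
	private final List<String> batch = new ArrayList<>();

	public CappedBuffer(int cap) {
		this.cap = cap;
	}

	public synchronized void put(String entry) {
		batch.add(entry);
		flushIfNeeded(false);
	}

	public synchronized void writeAndClose() {
		flushIfNeeded(true);
	}

	// The caller already holds the monitor, so the size check and the flush
	// cannot interleave with another writer's put.
	private synchronized void flushIfNeeded(boolean force) {
		if (batch.size() >= (force ? 1 : cap)) {
			System.out.println("flushing " + batch.size() + " entries");
			batch.clear();
		}
	}
}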

File: LLLocalDictionary.java

@@ -29,6 +29,7 @@ import org.rocksdb.ReadOptions;
 import org.rocksdb.RocksDB;
 import org.rocksdb.RocksDBException;
 import org.rocksdb.RocksIterator;
+import org.rocksdb.Slice;
 import org.rocksdb.Snapshot;
 import org.rocksdb.WriteOptions;
 import org.warp.commonutils.concurrency.atomicity.NotAtomic;
@@ -167,22 +168,17 @@ public class LLLocalDictionary implements LLDictionary {
 	public Mono<Boolean> containsRange(@Nullable LLSnapshot snapshot, LLRange range) {
 		return Mono
 				.fromCallable(() -> {
-					try (RocksIterator iter = db.newIterator(cfh, resolveSnapshot(snapshot))) {
-						if (range.hasMin()) {
-							iter.seek(range.getMin());
-						} else {
-							iter.seekToFirst();
-						}
-						if (!iter.isValid()) {
-							return false;
-						}
-						if (range.hasMax()) {
-							byte[] key1 = iter.key();
-							return Arrays.compareUnsigned(key1, range.getMax()) <= 0;
-						} else {
-							return true;
-						}
+					var readOpts = resolveSnapshot(snapshot);
+					readOpts.setVerifyChecksums(false);
+					if (range.hasMin()) {
+						readOpts.setIterateLowerBound(new Slice(range.getMin()));
+					}
+					if (range.hasMax()) {
+						readOpts.setIterateUpperBound(new Slice(range.getMax()));
+					}
+					try (RocksIterator iter = db.newIterator(cfh, readOpts)) {
+						iter.seekToFirst();
+						return iter.isValid();
 					}
 				})
 				.onErrorMap(cause -> new IOException("Failed to read range " + range.toString(), cause))
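Rather than seeking to min and hand-checking each key against max with Arrays.compareUnsigned, the rewritten read paths hand both bounds to RocksDB through ReadOptions: setIterateLowerBound is inclusive and setIterateUpperBound is exclusive, matching the new LLRange convention, so a bounded iterator is simply invalid once the range is exhausted. A usage sketch (assumes a RocksJava version that exposes the iterate-bound setters; keeping the Slice objects open until iteration finishes is the commonly advised precaution):

import org.rocksdb.ReadOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksIterator;
import org.rocksdb.Slice;

public class IterateBoundsExample {

	// Count the keys in [min, max) by letting RocksDB enforce both bounds.
	static long countInRange(RocksDB db, byte[] min, byte[] max) {
		try (ReadOptions readOpts = new ReadOptions();
				Slice lower = new Slice(min);
				Slice upper = new Slice(max)) {
			readOpts.setIterateLowerBound(lower);   // inclusive
			readOpts.setIterateUpperBound(upper);   // exclusive
			try (RocksIterator iter = db.newIterator(readOpts)) {
				long count = 0;
				for (iter.seekToFirst(); iter.isValid(); iter.next()) {
					count++;                        // never sees keys >= max
				}
				return count;
			}
		}
	}
}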
@@ -508,22 +504,22 @@ public class LLLocalDictionary implements LLDictionary {
 	@NotNull
 	private Mono<Entry<byte[], byte[]>> putEntryToWriteBatch(Entry<byte[], byte[]> newEntry, boolean getOldValues,
 			CappedWriteBatch writeBatch) {
-		return Mono.from(Mono
-				.defer(() -> {
-					if (getOldValues) {
-						return get(null, newEntry.getKey());
-					} else {
-						return Mono.empty();
-					}
-				})
-				.concatWith(Mono.<byte[]>fromCallable(() -> {
-					synchronized (writeBatch) {
-						writeBatch.put(cfh, newEntry.getKey(), newEntry.getValue());
-					}
-					return null;
-				})
-				.subscribeOn(dbScheduler))
-				.map(oldValue -> Map.entry(newEntry.getKey(), oldValue)));
+		Mono<byte[]> getOldValueMono;
+		if (getOldValues) {
+			getOldValueMono = get(null, newEntry.getKey());
+		} else {
+			getOldValueMono = Mono.empty();
+		}
+		return getOldValueMono
+				.concatWith(Mono
+						.<byte[]>fromCallable(() -> {
+							writeBatch.put(cfh, newEntry.getKey(), newEntry.getValue());
+							return null;
+						})
+						.subscribeOn(dbScheduler)
+				)
+				.singleOrEmpty()
+				.map(oldValue -> Map.entry(newEntry.getKey(), oldValue));
 	}
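The refactor trades Mono.defer plus an outer Mono.from for a plain branch: the optional old-value read is concatenated with a fromCallable that performs the write and completes empty (returning null from the callable yields an empty Mono), and singleOrEmpty() collapses the resulting Flux back to at most one element. A self-contained illustration of that operator combination (assumes reactor-core on the classpath; values are made up):

import reactor.core.publisher.Mono;

public class ConcatWithSingleExample {

	public static void main(String[] args) {
		// First publisher may or may not emit, like the optional old-value read...
		Mono<String> maybeOldValue = Mono.just("old");
		// ...the second performs a side effect and completes empty, because
		// returning null from fromCallable produces an empty Mono.
		Mono<String> sideEffect = Mono.fromCallable(() -> {
			System.out.println("write executed");
			return null;
		});

		String result = maybeOldValue
				.concatWith(sideEffect)   // Flux emitting at most one value here
				.singleOrEmpty()          // back to a Mono; errors if 2+ elements arrive
				.block();
		System.out.println("result = " + result); // "old"
	}
}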
@@ -637,7 +633,6 @@ public class LLLocalDictionary implements LLDictionary {
 		}.generateNonblocking(dbScheduler, 128);
 	}
 
-	//todo: replace implementation with a simple Flux.push
 	@Override
 	public Flux<Entry<byte[], byte[]>> setRange(LLRange range,
 			Flux<Entry<byte[], byte[]>> entries,
@@ -646,51 +641,49 @@ public class LLLocalDictionary implements LLDictionary {
 			if (range.isAll()) {
 				return clear().thenMany(Flux.empty());
 			} else {
-				return Mono
-						.fromCallable(() -> new CappedWriteBatch(db,
-								CAPPED_WRITE_BATCH_CAP,
-								RESERVED_WRITE_BATCH_SIZE,
-								MAX_WRITE_BATCH_SIZE,
-								BATCH_WRITE_OPTIONS
-						))
-						.subscribeOn(dbScheduler)
-						.flatMapMany(writeBatch -> Mono
-								.fromCallable(() -> {
-									synchronized (writeBatch) {
-										if (range.hasMin() && range.hasMax()) {
-											writeBatch.deleteRange(cfh, range.getMin(), range.getMax());
-											writeBatch.delete(cfh, range.getMax());
-										} else if (range.hasMax()) {
-											writeBatch.deleteRange(cfh, FIRST_KEY, range.getMax());
-											writeBatch.delete(cfh, range.getMax());
-										} else {
-											try (var it = db.newIterator(cfh, getReadOptions(null))) {
-												it.seekToLast();
-												if (it.isValid()) {
-													writeBatch.deleteRange(cfh, range.getMin(), it.key());
-													writeBatch.delete(cfh, it.key());
-												}
-											}
-										}
-									}
-									return null;
-								})
-								.subscribeOn(dbScheduler)
-								.thenMany(entries)
-								.flatMap(newEntry -> putEntryToWriteBatch(newEntry, getOldValues, writeBatch))
-								.concatWith(Mono.<Entry<byte[], byte[]>>fromCallable(() -> {
-									synchronized (writeBatch) {
-										writeBatch.writeToDbAndClose();
-										writeBatch.close();
-									}
-									return null;
-								}).subscribeOn(dbScheduler))
-								.doFinally(signalType -> {
-									synchronized (writeBatch) {
-										writeBatch.close();
-									}
-								})
-						)
+				return Flux
+						.usingWhen(
+								Mono
+										.fromCallable(() -> new CappedWriteBatch(db,
+												CAPPED_WRITE_BATCH_CAP,
+												RESERVED_WRITE_BATCH_SIZE,
+												MAX_WRITE_BATCH_SIZE,
+												BATCH_WRITE_OPTIONS)
+										)
+										.subscribeOn(dbScheduler),
+								writeBatch -> Mono
+										.fromCallable(() -> {
+											if (range.hasMin() && range.hasMax()) {
+												writeBatch.deleteRange(cfh, range.getMin(), range.getMax());
+											} else if (range.hasMax()) {
+												writeBatch.deleteRange(cfh, FIRST_KEY, range.getMax());
+											} else {
+												// Delete from x to end of column
+												var readOpts = getReadOptions(null);
+												try (var it = db.newIterator(cfh, readOpts)) {
+													it.seekToLast();
+													if (it.isValid()) {
+														writeBatch.deleteRange(cfh, range.getMin(), it.key());
+														// Delete the last key because we are deleting everything from "min" onward
+														writeBatch.delete(it.key());
+													}
+												}
+											}
+											return null;
+										})
+										.subscribeOn(dbScheduler)
+										.thenMany(entries)
+										.flatMap(newEntry -> putEntryToWriteBatch(newEntry, getOldValues, writeBatch)),
+								writeBatch -> Mono
+										.fromCallable(() -> {
+											try (writeBatch) {
+												writeBatch.writeToDbAndClose();
+											}
+											return null;
+										})
+										.subscribeOn(dbScheduler)
+						)
+						.subscribeOn(dbScheduler)
 						.onErrorMap(cause -> new IOException("Failed to write range", cause));
 			}
 		});
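Flux.usingWhen replaces the fromCallable + flatMapMany + concatWith + doFinally chain: the first argument acquires the resource asynchronously, the second uses it, and the third releases it on completion, which eliminates both the double close and the manual synchronization of the old version. Note also that the new delete logic no longer issues the extra delete(cfh, range.getMax()) calls, since max is now outside the range. A schematic example of the operator (the Batch resource is a stand-in, not CappedWriteBatch):

import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;

public class UsingWhenExample {

	// Stand-in resource with an explicit lifecycle.
	static class Batch implements AutoCloseable {
		void add(int i) { System.out.println("buffered " + i); }
		void flush()    { System.out.println("flushed"); }
		@Override public void close() { System.out.println("closed"); }
	}

	public static void main(String[] args) {
		Flux.usingWhen(
				Mono.fromCallable(Batch::new),                  // acquire
				batch -> Flux.range(1, 3).doOnNext(batch::add), // use
				batch -> Mono.fromRunnable(() -> {              // release; the 3-arg overload
					batch.flush();                              // also runs this on error/cancel
					batch.close();
				})
		).blockLast();
	}
}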
@@ -741,21 +734,23 @@ public class LLLocalDictionary implements LLDictionary {
 			} else {
 				return Mono
 						.fromCallable(() -> {
-							try (var iter = db.newIterator(cfh, resolveSnapshot(snapshot))) {
-								if (range.hasMin()) {
-									iter.seek(range.getMin());
-								} else {
-									iter.seekToFirst();
-								}
+							var readOpts = resolveSnapshot(snapshot);
+							readOpts.setFillCache(false);
+							readOpts.setVerifyChecksums(false);
+							if (range.hasMin()) {
+								readOpts.setIterateLowerBound(new Slice(range.getMin()));
+							}
+							if (range.hasMax()) {
+								readOpts.setIterateUpperBound(new Slice(range.getMax()));
+							}
+							if (fast) {
+								readOpts.setIgnoreRangeDeletions(true);
+								readOpts.setPinData(false);
+							}
+							try (var iter = db.newIterator(cfh, readOpts)) {
+								iter.seekToFirst();
 								long i = 0;
 								while (iter.isValid()) {
-									if (range.hasMax()) {
-										byte[] key1 = iter.key();
-										if (Arrays.compareUnsigned(key1, range.getMax()) > 0) {
-											break;
-										}
-									}
 									iter.next();
 									i++;
 								}
@@ -773,18 +768,18 @@ public class LLLocalDictionary implements LLDictionary {
 	public Mono<Entry<byte[], byte[]>> getOne(@Nullable LLSnapshot snapshot, LLRange range) {
 		return Mono
 				.fromCallable(() -> {
-					try (var rocksIterator = db.newIterator(cfh, resolveSnapshot(snapshot))) {
-						if (range.hasMin()) {
-							rocksIterator.seek(range.getMin());
-						} else {
-							rocksIterator.seekToFirst();
-						}
+					var readOpts = resolveSnapshot(snapshot);
+					if (range.hasMin()) {
+						readOpts.setIterateLowerBound(new Slice(range.getMin()));
+					}
+					if (range.hasMax()) {
+						readOpts.setIterateUpperBound(new Slice(range.getMax()));
+					}
+					try (var rocksIterator = db.newIterator(cfh, readOpts)) {
+						rocksIterator.seekToFirst();
 						byte[] key;
 						if (rocksIterator.isValid()) {
 							key = rocksIterator.key();
-							if (range.hasMax() && Arrays.compareUnsigned(key, range.getMax()) > 0) {
-								return null;
-							}
 							return Map.entry(key, rocksIterator.value());
 						} else {
 							return null;
@@ -798,18 +793,18 @@ public class LLLocalDictionary implements LLDictionary {
 	public Mono<byte[]> getOneKey(@Nullable LLSnapshot snapshot, LLRange range) {
 		return Mono
 				.fromCallable(() -> {
-					try (var rocksIterator = db.newIterator(cfh, resolveSnapshot(snapshot))) {
-						if (range.hasMin()) {
-							rocksIterator.seek(range.getMin());
-						} else {
-							rocksIterator.seekToFirst();
-						}
+					var readOpts = resolveSnapshot(snapshot);
+					if (range.hasMin()) {
+						readOpts.setIterateLowerBound(new Slice(range.getMin()));
+					}
+					if (range.hasMax()) {
+						readOpts.setIterateUpperBound(new Slice(range.getMax()));
+					}
+					try (var rocksIterator = db.newIterator(cfh, readOpts)) {
+						rocksIterator.seekToFirst();
 						byte[] key;
 						if (rocksIterator.isValid()) {
 							key = rocksIterator.key();
-							if (range.hasMax() && Arrays.compareUnsigned(key, range.getMax()) > 0) {
-								return null;
-							}
 							return key;
 						} else {
 							return null;
@@ -820,7 +815,7 @@ public class LLLocalDictionary implements LLDictionary {
 	}
 
 	private long fastSizeAll(@Nullable LLSnapshot snapshot) {
-		var rocksdbSnapshot = resolveSnapshot(snapshot);
+		var rocksdbSnapshot = resolveSnapshot(snapshot).setFillCache(false).setVerifyChecksums(false);
 		if (USE_CURRENT_FASTSIZE_FOR_OLD_SNAPSHOTS || rocksdbSnapshot.snapshot() == null) {
 			try {
 				return db.getLongProperty(cfh, "rocksdb.estimate-num-keys");
@@ -829,11 +824,15 @@ public class LLLocalDictionary implements LLDictionary {
 				return 0;
 			}
 		} else {
+			rocksdbSnapshot.setFillCache(false);
+			rocksdbSnapshot.setVerifyChecksums(false);
+			rocksdbSnapshot.setIgnoreRangeDeletions(true);
+			rocksdbSnapshot.setPinData(false);
 			long count = 0;
 			try (RocksIterator iter = db.newIterator(cfh, rocksdbSnapshot)) {
 				iter.seekToFirst();
-				// If it's a fast size of a snapshot, count only up to 1000 elements
-				while (iter.isValid() && count < 1000) {
+				// If it's a fast size of a snapshot, count only up to 1'000'000 elements
+				while (iter.isValid() && count < 1_000_000) {
 					count++;
 					iter.next();
 				}
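fastSizeAll now has two tiers: without a snapshot it returns the rocksdb.estimate-num-keys property, an O(1) figure RocksDB maintains from SST metadata and memtables, while snapshot reads fall back to a bounded scan whose ceiling this commit raises from 1000 to 1000000 elements, with cache fill, checksum verification, range-deletion handling, and data pinning disabled to keep the scan cheap. The property call in isolation (standard RocksJava API):

import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class EstimateKeysExample {

	// Approximate key count maintained by RocksDB; cheap to read,
	// but it can be off and it ignores snapshots.
	static long estimateNumKeys(RocksDB db, ColumnFamilyHandle cfh) throws RocksDBException {
		return db.getLongProperty(cfh, "rocksdb.estimate-num-keys");
	}
}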
@@ -843,8 +842,12 @@ public class LLLocalDictionary implements LLDictionary {
 	}
 
 	private long exactSizeAll(@Nullable LLSnapshot snapshot) {
+		var readOpts = resolveSnapshot(snapshot);
+		readOpts.setFillCache(false);
+		readOpts.setVerifyChecksums(false);
+		readOpts.setPinData(false);
 		long count = 0;
-		try (RocksIterator iter = db.newIterator(cfh, resolveSnapshot(snapshot))) {
+		try (RocksIterator iter = db.newIterator(cfh, readOpts)) {
 			iter.seekToFirst();
 			while (iter.isValid()) {
 				count++;
@@ -858,18 +861,18 @@ public class LLLocalDictionary implements LLDictionary {
 	public Mono<Entry<byte[], byte[]>> removeOne(LLRange range) {
 		return Mono
 				.fromCallable(() -> {
-					try (RocksIterator iter = db.newIterator(cfh)) {
-						if (range.hasMin()) {
-							iter.seek(range.getMin());
-						} else {
-							iter.seekToFirst();
-						}
+					var readOpts = getReadOptions(null);
+					if (range.hasMin()) {
+						readOpts.setIterateLowerBound(new Slice(range.getMin()));
+					}
+					if (range.hasMax()) {
+						readOpts.setIterateUpperBound(new Slice(range.getMax()));
+					}
+					try (RocksIterator iter = db.newIterator(cfh, readOpts)) {
+						iter.seekToFirst();
 						if (!iter.isValid()) {
 							return null;
 						}
-						if (range.hasMax() && Arrays.compareUnsigned(iter.key(), range.getMax()) > 0) {
-							return null;
-						}
 						byte[] key = iter.key();
 						byte[] value = iter.value();
 						db.delete(cfh, key);

File: TestRanges.java (new file)

@@ -0,0 +1,98 @@
+package it.cavallium.dbengine.database.collections;
+
+import java.util.Arrays;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
+
+public class TestRanges {
+
+	@Test
+	public void testNextRangeKey() {
+		testNextRangeKey(new byte[] {0x00, 0x00, 0x00});
+		testNextRangeKey(new byte[] {0x00, 0x00, 0x01});
+		testNextRangeKey(new byte[] {0x00, 0x00, 0x02});
+		testNextRangeKey(new byte[] {0x00, 0x01, 0x02});
+		testNextRangeKey(new byte[] {0x00, 0x00, (byte) 0xFF});
+		testNextRangeKey(new byte[] {0x00, 0x01, (byte) 0xFF});
+		testNextRangeKey(new byte[] {0x00, (byte) 0xFF, (byte) 0xFF});
+		testNextRangeKey(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF});
+		testNextRangeKey(new byte[] {(byte) 0xFF, (byte) 0, (byte) 0xFF});
+		testNextRangeKey(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0});
+	}
+
+	public void testNextRangeKey(byte[] prefixKey) {
+		byte[] firstRangeKey = DatabaseMapDictionaryDeep.firstRangeKey(prefixKey, prefixKey.length, 7, 3);
+		byte[] nextRangeKey = DatabaseMapDictionaryDeep.nextRangeKey(prefixKey, prefixKey.length, 7, 3);
+
+		if (Arrays.equals(prefixKey, new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF})) {
+			Assertions.assertArrayEquals(
+					new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+					nextRangeKey
+			);
+		} else {
+			long biPrefix = 0;
+			var s = 0;
+			for (int i = prefixKey.length - 1; i >= 0; i--) {
+				biPrefix += ((long) (prefixKey[i] & 0xFF)) << s;
+				s += Byte.SIZE;
+			}
+			var nrPrefix = Arrays.copyOf(nextRangeKey, prefixKey.length);
+			long biNextPrefix = 0;
+			s = 0;
+			for (int i = prefixKey.length - 1; i >= 0; i--) {
+				biNextPrefix += ((long) (nrPrefix[i] & 0xFF)) << s;
+				s += Byte.SIZE;
+			}
+			Assertions.assertEquals(biPrefix + 1, biNextPrefix);
+			Assertions.assertArrayEquals(
+					new byte[7 + 3],
+					Arrays.copyOfRange(nextRangeKey, prefixKey.length, prefixKey.length + 7 + 3)
+			);
+		}
+	}
+
+	@Test
+	public void testNextRangeKeyWithSuffix() {
+		testNextRangeKeyWithSuffix(new byte[] {0x00, 0x01, (byte) 0xFF}, new byte[] {0x00, 0x00, 0x00});
+		testNextRangeKeyWithSuffix(new byte[] {0x00, 0x00, 0x01}, new byte[] {0x00, 0x00, 0x01});
+		testNextRangeKeyWithSuffix(new byte[] {0x00, 0x00, 0x02}, new byte[] {0x00, 0x00, 0x02});
+		testNextRangeKeyWithSuffix(new byte[] {0x00, 0x01, 0x02}, new byte[] {0x00, 0x01, 0x02});
+		testNextRangeKeyWithSuffix(new byte[] {0x00, 0x00, (byte) 0xFF}, new byte[] {0x00, 0x00, (byte) 0xFF});
+		testNextRangeKeyWithSuffix(new byte[] {0x00, 0x01, (byte) 0xFF}, new byte[] {0x00, 0x01, (byte) 0xFF});
+		testNextRangeKeyWithSuffix(new byte[] {0x00, (byte) 0xFF, (byte) 0xFF}, new byte[] {0x00, (byte) 0xFF, (byte) 0xFF});
+		testNextRangeKeyWithSuffix(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF}, new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF});
+		testNextRangeKeyWithSuffix(new byte[] {(byte) 0xFF, (byte) 0, (byte) 0xFF}, new byte[] {(byte) 0xFF, (byte) 0, (byte) 0xFF});
+		testNextRangeKeyWithSuffix(new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0}, new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0});
+	}
+
+	public void testNextRangeKeyWithSuffix(byte[] prefixKey, byte[] suffixKey) {
+		byte[] firstRangeKey = DatabaseMapDictionaryDeep.firstRangeKey(prefixKey, suffixKey, prefixKey.length, 3, 7);
+		byte[] nextRangeKey = DatabaseMapDictionaryDeep.nextRangeKey(prefixKey, suffixKey, prefixKey.length, 3, 7);
+
+		if (Arrays.equals(prefixKey, new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF})
+				&& Arrays.equals(suffixKey, new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF})) {
+			Assertions.assertArrayEquals(
+					new byte[] {(byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, (byte) 0xFF, 0, 0, 0, 0, 0, 0, 0, 0},
+					nextRangeKey
+			);
+		} else {
+			long biPrefix = 0;
+			var s = 0;
+			for (int i = suffixKey.length - 1; i >= 0; i--) {
+				biPrefix += ((long) (suffixKey[i] & 0xFF)) << s;
+				s += Byte.SIZE;
+			}
+			for (int i = prefixKey.length - 1; i >= 0; i--) {
+				biPrefix += ((long) (prefixKey[i] & 0xFF)) << s;
+				s += Byte.SIZE;
+			}
+			var nrPrefix = Arrays.copyOf(nextRangeKey, prefixKey.length + suffixKey.length);
+			long biNextPrefix = 0;
+			s = 0;
+			for (int i = (prefixKey.length + suffixKey.length) - 1; i >= 0; i--) {
+				biNextPrefix += ((long) (nrPrefix[i] & 0xFF)) << s;
+				s += Byte.SIZE;
+			}
+			Assertions.assertEquals(biPrefix + 1, biNextPrefix);
+			Assertions.assertArrayEquals(
+					new byte[7],
+					Arrays.copyOfRange(nextRangeKey, prefixKey.length + suffixKey.length, prefixKey.length + suffixKey.length + 7)
+			);
+		}
+	}
+}
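Both helpers verify the increment numerically: they pack the relevant leading bytes into a long, big-endian, and assert that the same bytes of nextRangeKey decode to that value plus one while the remaining suffix and ext bytes stay zero. BigInteger would give the same check without the shift loops and without the eight-byte limit of long; a hypothetical alternative the test could use, not what the commit does:

import java.math.BigInteger;
import java.util.Arrays;

public class PrefixPlusOneCheckExample {

	// True when 'next' starts with prefix+1 (big-endian) and is zero-padded after it.
	static boolean isPrefixPlusOne(byte[] prefix, byte[] next) {
		BigInteger before = new BigInteger(1, prefix); // signum 1: treat bytes as unsigned
		BigInteger after = new BigInteger(1, Arrays.copyOf(next, prefix.length));
		boolean tailIsZero = true;
		for (int i = prefix.length; i < next.length; i++) {
			tailIsZero &= next[i] == 0;
		}
		return before.add(BigInteger.ONE).equals(after) && tailIsZero;
	}

	public static void main(String[] args) {
		System.out.println(isPrefixPlusOne(
				new byte[] {0x00, (byte) 0xFF},
				new byte[] {0x01, 0x00, 0x00, 0x00})); // true: 0x00FF + 1 == 0x0100
	}
}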