[RocksJava] Fix test failure of compactRangeToLevel
Summary: Rewrite Java tests compactRangeToLevel and
compactRangeToLevelColumnFamily to make them more deterministic and robust.

Test Plan:
make rocksdbjava
make jtest

Reviewers: anthony, fyrz, adamretter, igor

Reviewed By: igor

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D40941
parent 05e2831966
commit c00948d5e1
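The determinism fix is easiest to see in the key scheme the rewritten tests use: keys are rendered with String.format("%020d", key), so byte-wise lexicographic order matches numeric order and compactRange(begin, end) covers exactly the keys written in one round. The old tests keyed with String.valueOf(i), whose lexicographic order diverges from numeric order. A minimal standalone sketch of that property (hypothetical class name, not part of this commit):

    public class ZeroPaddedKeyOrderSketch {
      public static void main(String[] args) {
        // Keys written at interval 100, as in the rewritten tests.
        String k1 = String.format("%020d", 100);
        String k2 = String.format("%020d", 2000);
        // Zero padding makes lexicographic order match numeric order,
        // which lets compactRange(begin, end) cover exactly one round's keys.
        System.out.println(k1.compareTo(k2) < 0);      // true
        // The old keying scheme: "100" sorts before "20" even though
        // 100 > 20 numerically.
        System.out.println("100".compareTo("20") < 0); // true
      }
    }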
@@ -601,48 +601,73 @@ public class RocksDBTest {
     RocksDB db = null;
     Options opt = null;
     try {
+      final int NUM_KEYS_PER_L0_FILE = 100;
+      final int KEY_SIZE = 20;
+      final int VALUE_SIZE = 300;
+      final int L0_FILE_SIZE =
+          NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
+      final int NUM_L0_FILES = 10;
+      final int TEST_SCALE = 5;
+      final int KEY_INTERVAL = 100;
       opt = new Options().
           setCreateIfMissing(true).
           setCompactionStyle(CompactionStyle.LEVEL).
-          setNumLevels(4).
-          setWriteBufferSize(100<<10).
-          setLevelZeroFileNumCompactionTrigger(3).
-          setTargetFileSizeBase(200 << 10).
+          setNumLevels(5).
+          // a slightly bigger write buffer than L0 file
+          // so that we can ensure manual flush always
+          // go before background flush happens.
+          setWriteBufferSize(L0_FILE_SIZE * 2).
+          // Disable auto L0 -> L1 compaction
+          setLevelZeroFileNumCompactionTrigger(20).
+          setTargetFileSizeBase(L0_FILE_SIZE * 100).
           setTargetFileSizeMultiplier(1).
-          setMaxBytesForLevelBase(500 << 10).
-          setMaxBytesForLevelMultiplier(1).
-          setDisableAutoCompactions(false);
+          // To disable auto compaction
+          setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100).
+          setMaxBytesForLevelMultiplier(2).
+          setDisableAutoCompactions(true);
       // open database
       db = RocksDB.open(opt,
           dbFolder.getRoot().getAbsolutePath());
       // fill database with key/value pairs
-      byte[] b = new byte[10000];
-      for (int i = 0; i < 200; i++) {
-        rand.nextBytes(b);
-        db.put((String.valueOf(i)).getBytes(), b);
-      }
-      db.flush(new FlushOptions().setWaitForFlush(true));
-      db.close();
-      opt.setTargetFileSizeBase(Long.MAX_VALUE).
-          setTargetFileSizeMultiplier(1).
-          setMaxBytesForLevelBase(Long.MAX_VALUE).
-          setMaxBytesForLevelMultiplier(1).
-          setDisableAutoCompactions(true);
-      db = RocksDB.open(opt,
-          dbFolder.getRoot().getAbsolutePath());
-      db.compactRange(true, 0, 0);
-      for (int i = 0; i < 4; i++) {
-        if (i == 0) {
-          assertThat(
-              db.getProperty("rocksdb.num-files-at-level" + i)).
-              isEqualTo("1");
-        } else {
-          assertThat(
-              db.getProperty("rocksdb.num-files-at-level" + i)).
-              isEqualTo("0");
-        }
-      }
+      byte[] value = new byte[VALUE_SIZE];
+      int int_key = 0;
+      for (int round = 0; round < 5; ++round) {
+        int initial_key = int_key;
+        for (int f = 1; f <= NUM_L0_FILES; ++f) {
+          for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) {
+            int_key += KEY_INTERVAL;
+            rand.nextBytes(value);
+
+            db.put(String.format("%020d", int_key).getBytes(),
+                value);
+          }
+          db.flush(new FlushOptions().setWaitForFlush(true));
+          // Make sure we do create one more L0 files.
+          assertThat(
+              db.getProperty("rocksdb.num-files-at-level0")).
+              isEqualTo("" + f);
+        }
+
+        // Compact all L0 files we just created
+        db.compactRange(
+            String.format("%020d", initial_key).getBytes(),
+            String.format("%020d", int_key - 1).getBytes());
+        // Making sure there isn't any L0 files.
+        assertThat(
+            db.getProperty("rocksdb.num-files-at-level0")).
+            isEqualTo("0");
+        // Making sure there are some L1 files.
+        // Here we only use != 0 instead of a specific number
+        // as we don't want the test make any assumption on
+        // how compaction works.
+        assertThat(
+            db.getProperty("rocksdb.num-files-at-level1")).
+            isNotEqualTo("0");
+        // Because we only compacted those keys we issued
+        // in this round, there shouldn't be any L1 -> L2
+        // compaction. So we expect zero L2 files here.
+        assertThat(
+            db.getProperty("rocksdb.num-files-at-level2")).
+            isEqualTo("0");
+      }
     } finally {
       if (db != null) {
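The option arithmetic above is what makes each flush deterministic: per the diff's own comments, the write buffer is sized at twice an L0 file so every batch of puts stays in the memtable until the explicit flush, and the compaction trigger and level limits are set high enough that nothing compacts automatically mid-round. A small sketch restating that arithmetic with the constants copied from the test (hypothetical class name):

    public class CompactRangeSizingSketch {
      public static void main(String[] args) {
        // Constants copied from the rewritten test.
        final int NUM_KEYS_PER_L0_FILE = 100;
        final int KEY_SIZE = 20;   // "%020d" renders a 20-byte key
        final int VALUE_SIZE = 300;
        final int L0_FILE_SIZE =
            NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
        final int NUM_L0_FILES = 10;

        // One batch of puts is well under the write buffer, so the data
        // stays in the memtable until the explicit flush, yielding exactly
        // one L0 file per flush.
        System.out.println("batch bytes   = " + L0_FILE_SIZE);       // 32000
        System.out.println("write buffer  = " + (L0_FILE_SIZE * 2)); // 64000
        // Ten L0 files per round stay below the trigger of 20, so no
        // automatic L0 -> L1 compaction can fire before compactRange runs.
        System.out.println("files/round   = " + NUM_L0_FILES + " < 20");
      }
    }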
@@ -662,6 +687,14 @@ public class RocksDBTest {
     List<ColumnFamilyHandle> columnFamilyHandles =
         new ArrayList<>();
     try {
+      final int NUM_KEYS_PER_L0_FILE = 100;
+      final int KEY_SIZE = 20;
+      final int VALUE_SIZE = 300;
+      final int L0_FILE_SIZE =
+          NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
+      final int NUM_L0_FILES = 10;
+      final int TEST_SCALE = 5;
+      final int KEY_INTERVAL = 100;
       opt = new DBOptions().
           setCreateIfMissing(true).
           setCreateMissingColumnFamilies(true);
@@ -672,62 +705,73 @@ public class RocksDBTest {
       columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
           "new_cf".getBytes(),
           new ColumnFamilyOptions().
               setDisableAutoCompactions(true).
               setCompactionStyle(CompactionStyle.LEVEL).
-              setNumLevels(4).
-              setWriteBufferSize(100 << 10).
-              setLevelZeroFileNumCompactionTrigger(3).
-              setTargetFileSizeBase(200 << 10).
+              setNumLevels(5).
+              // a slightly bigger write buffer than L0 file
+              // so that we can ensure manual flush always
+              // go before background flush happens.
+              setWriteBufferSize(L0_FILE_SIZE * 2).
+              // Disable auto L0 -> L1 compaction
+              setLevelZeroFileNumCompactionTrigger(20).
+              setTargetFileSizeBase(L0_FILE_SIZE * 100).
               setTargetFileSizeMultiplier(1).
-              setMaxBytesForLevelBase(500 << 10).
-              setMaxBytesForLevelMultiplier(1).
-              setDisableAutoCompactions(false)));
+              // To disable auto compaction
+              setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100).
+              setMaxBytesForLevelMultiplier(2).
+              setDisableAutoCompactions(true)));
       // open database
       db = RocksDB.open(opt,
           dbFolder.getRoot().getAbsolutePath(),
           columnFamilyDescriptors,
           columnFamilyHandles);
       // fill database with key/value pairs
-      byte[] b = new byte[10000];
-      for (int i = 0; i < 200; i++) {
-        rand.nextBytes(b);
-        db.put(columnFamilyHandles.get(1),
-            String.valueOf(i).getBytes(), b);
-      }
-      db.flush(new FlushOptions().setWaitForFlush(true),
-          columnFamilyHandles.get(1));
-      // free column families
-      for (ColumnFamilyHandle handle : columnFamilyHandles) {
-        handle.dispose();
-      }
-      // clear column family handles for reopen
-      columnFamilyHandles.clear();
-      db.close();
-      columnFamilyDescriptors.get(1).
-          columnFamilyOptions().
-          setTargetFileSizeBase(Long.MAX_VALUE).
-          setTargetFileSizeMultiplier(1).
-          setMaxBytesForLevelBase(Long.MAX_VALUE).
-          setMaxBytesForLevelMultiplier(1).
-          setDisableAutoCompactions(true);
-      // reopen database
-      db = RocksDB.open(opt,
-          dbFolder.getRoot().getAbsolutePath(),
-          columnFamilyDescriptors,
-          columnFamilyHandles);
-      // compact new column family
-      db.compactRange(columnFamilyHandles.get(1), true, 0, 0);
-      // check if new column family is compacted to level zero
-      for (int i = 0; i < 4; i++) {
-        if (i == 0) {
-          assertThat(db.getProperty(columnFamilyHandles.get(1),
-              "rocksdb.num-files-at-level" + i)).
-              isEqualTo("1");
-        } else {
-          assertThat(db.getProperty(columnFamilyHandles.get(1),
-              "rocksdb.num-files-at-level" + i)).
-              isEqualTo("0");
-        }
-      }
+      byte[] value = new byte[VALUE_SIZE];
+      int int_key = 0;
+      for (int round = 0; round < 5; ++round) {
+        int initial_key = int_key;
+        for (int f = 1; f <= NUM_L0_FILES; ++f) {
+          for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) {
+            int_key += KEY_INTERVAL;
+            rand.nextBytes(value);
+
+            db.put(columnFamilyHandles.get(1),
+                String.format("%020d", int_key).getBytes(),
+                value);
+          }
+          db.flush(new FlushOptions().setWaitForFlush(true),
+              columnFamilyHandles.get(1));
+          // Make sure we do create one more L0 files.
+          assertThat(
+              db.getProperty(columnFamilyHandles.get(1),
+                  "rocksdb.num-files-at-level0")).
+              isEqualTo("" + f);
+        }
+
+        // Compact all L0 files we just created
+        db.compactRange(
+            columnFamilyHandles.get(1),
+            String.format("%020d", initial_key).getBytes(),
+            String.format("%020d", int_key - 1).getBytes());
+        // Making sure there isn't any L0 files.
+        assertThat(
+            db.getProperty(columnFamilyHandles.get(1),
+                "rocksdb.num-files-at-level0")).
+            isEqualTo("0");
+        // Making sure there are some L1 files.
+        // Here we only use != 0 instead of a specific number
+        // as we don't want the test make any assumption on
+        // how compaction works.
+        assertThat(
+            db.getProperty(columnFamilyHandles.get(1),
+                "rocksdb.num-files-at-level1")).
+            isNotEqualTo("0");
+        // Because we only compacted those keys we issued
+        // in this round, there shouldn't be any L1 -> L2
+        // compaction. So we expect zero L2 files here.
+        assertThat(
+            db.getProperty(columnFamilyHandles.get(1),
+                "rocksdb.num-files-at-level2")).
+            isEqualTo("0");
+      }
     } finally {
       for (ColumnFamilyHandle handle : columnFamilyHandles) {
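For readers adapting these tests: the column-family variant depends on a strict open/dispose order, which the diff enforces in its finally blocks. A condensed sketch of that lifecycle, assuming the era-appropriate dispose() rather than the later close() on handles; class and variable names are illustrative, not part of the commit:

    import java.util.ArrayList;
    import java.util.List;
    import org.rocksdb.*;

    public class ColumnFamilyLifecycleSketch {
      public static void run(String path) throws RocksDBException {
        final List<ColumnFamilyDescriptor> descriptors = new ArrayList<>();
        descriptors.add(new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
        descriptors.add(new ColumnFamilyDescriptor(
            "new_cf".getBytes(), new ColumnFamilyOptions()));
        final List<ColumnFamilyHandle> handles = new ArrayList<>();
        RocksDB db = null;
        try {
          db = RocksDB.open(
              new DBOptions().setCreateIfMissing(true)
                  .setCreateMissingColumnFamilies(true),
              path, descriptors, handles);
          // ... put / flush / compactRange against handles.get(1) ...
        } finally {
          // Dispose native column-family handles before closing the DB,
          // mirroring the finally blocks in both tests above.
          for (ColumnFamilyHandle handle : handles) {
            handle.dispose();
          }
          if (db != null) {
            db.close();
          }
        }
      }
    }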