[RocksJava] Fix test failure of compactRangeToLevel

Summary:
Rewrite Java tests compactRangeToLevel and compactRangeToLevelColumnFamily
to make them more deterministic and robust.

Test Plan:
make rocksdbjava
make jtest

Reviewers: anthony, fyrz, adamretter, igor

Reviewed By: igor

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D40941
Yueh-Hsuan Chiang 2015-07-01 16:03:56 -07:00
parent 05e2831966
commit c00948d5e1
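
The rewritten tests below rely on one pattern: with automatic compactions disabled, every manual flush produces exactly one L0 file, so the test can count the files at each level before and after an explicit compactRange over the keys it just wrote. Here is a minimal, self-contained sketch of that pattern using the same RocksJava calls that appear in the diff; the class name and database path are illustrative and not part of the commit.

import org.rocksdb.CompactionStyle;
import org.rocksdb.FlushOptions;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CompactRangeSketch {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options opt = new Options().
        setCreateIfMissing(true).
        setCompactionStyle(CompactionStyle.LEVEL).
        // No background L0 -> L1 compaction: only an explicit
        // compactRange() moves data between levels.
        setDisableAutoCompactions(true);
    RocksDB db = RocksDB.open(opt, "/tmp/compact-range-sketch");  // illustrative path
    try {
      byte[] value = new byte[300];
      // Zero-padded keys keep byte-wise ordering equal to numeric ordering,
      // so [first key, last key] covers exactly the keys written below.
      for (int key = 1; key <= 100; ++key) {
        db.put(String.format("%020d", key).getBytes(), value);
      }
      // A manual flush turns the memtable into exactly one new L0 file.
      db.flush(new FlushOptions().setWaitForFlush(true));
      System.out.println(
          db.getProperty("rocksdb.num-files-at-level0"));  // expect "1"
      // Compact only the range we wrote; afterwards L0 should be empty.
      db.compactRange(String.format("%020d", 1).getBytes(),
          String.format("%020d", 100).getBytes());
      System.out.println(
          db.getProperty("rocksdb.num-files-at-level0"));  // expect "0"
    } finally {
      db.close();
      opt.dispose();
    }
  }
}

The rewritten tests repeat this per round: NUM_L0_FILES flushes with an L0 count check after each, then one compactRange over that round's keys with L0/L1/L2 count checks afterwards.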

@@ -601,48 +601,73 @@ public class RocksDBTest {
     RocksDB db = null;
     Options opt = null;
     try {
+      final int NUM_KEYS_PER_L0_FILE = 100;
+      final int KEY_SIZE = 20;
+      final int VALUE_SIZE = 300;
+      final int L0_FILE_SIZE =
+          NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
+      final int NUM_L0_FILES = 10;
+      final int TEST_SCALE = 5;
+      final int KEY_INTERVAL = 100;
       opt = new Options().
           setCreateIfMissing(true).
           setCompactionStyle(CompactionStyle.LEVEL).
-          setNumLevels(4).
-          setWriteBufferSize(100 << 10).
-          setLevelZeroFileNumCompactionTrigger(3).
-          setTargetFileSizeBase(200 << 10).
+          setNumLevels(5).
+          // a slightly bigger write buffer than L0 file
+          // so that we can ensure manual flush always
+          // go before background flush happens.
+          setWriteBufferSize(L0_FILE_SIZE * 2).
+          // Disable auto L0 -> L1 compaction
+          setLevelZeroFileNumCompactionTrigger(20).
+          setTargetFileSizeBase(L0_FILE_SIZE * 100).
           setTargetFileSizeMultiplier(1).
-          setMaxBytesForLevelBase(500 << 10).
-          setMaxBytesForLevelMultiplier(1).
-          setDisableAutoCompactions(false);
-      // open database
+          // To disable auto compaction
+          setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100).
+          setMaxBytesForLevelMultiplier(2).
+          setDisableAutoCompactions(true);
       db = RocksDB.open(opt,
           dbFolder.getRoot().getAbsolutePath());
       // fill database with key/value pairs
-      byte[] b = new byte[10000];
-      for (int i = 0; i < 200; i++) {
-        rand.nextBytes(b);
-        db.put((String.valueOf(i)).getBytes(), b);
-      }
-      db.flush(new FlushOptions().setWaitForFlush(true));
-      db.close();
-      opt.setTargetFileSizeBase(Long.MAX_VALUE).
-          setTargetFileSizeMultiplier(1).
-          setMaxBytesForLevelBase(Long.MAX_VALUE).
-          setMaxBytesForLevelMultiplier(1).
-          setDisableAutoCompactions(true);
-      db = RocksDB.open(opt,
-          dbFolder.getRoot().getAbsolutePath());
-      db.compactRange(true, 0, 0);
-      for (int i = 0; i < 4; i++) {
-        if (i == 0) {
-          assertThat(
-              db.getProperty("rocksdb.num-files-at-level" + i)).
-              isEqualTo("1");
-        } else {
-          assertThat(
-              db.getProperty("rocksdb.num-files-at-level" + i)).
-              isEqualTo("0");
-        }
+      byte[] value = new byte[VALUE_SIZE];
+      int int_key = 0;
+      for (int round = 0; round < 5; ++round) {
+        int initial_key = int_key;
+        for (int f = 1; f <= NUM_L0_FILES; ++f) {
+          for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) {
+            int_key += KEY_INTERVAL;
+            rand.nextBytes(value);
+            db.put(String.format("%020d", int_key).getBytes(),
+                value);
+          }
+          db.flush(new FlushOptions().setWaitForFlush(true));
+          // Make sure we do create one more L0 files.
+          assertThat(
+              db.getProperty("rocksdb.num-files-at-level0")).
+              isEqualTo("" + f);
+        }
+        // Compact all L0 files we just created
+        db.compactRange(
+            String.format("%020d", initial_key).getBytes(),
+            String.format("%020d", int_key - 1).getBytes());
+        // Making sure there isn't any L0 files.
+        assertThat(
+            db.getProperty("rocksdb.num-files-at-level0")).
+            isEqualTo("0");
+        // Making sure there are some L1 files.
+        // Here we only use != 0 instead of a specific number
+        // as we don't want the test make any assumption on
+        // how compaction works.
+        assertThat(
+            db.getProperty("rocksdb.num-files-at-level1")).
+            isNotEqualTo("0");
+        // Because we only compacted those keys we issued
+        // in this round, there shouldn't be any L1 -> L2
+        // compaction. So we expect zero L2 files here.
+        assertThat(
+            db.getProperty("rocksdb.num-files-at-level2")).
+            isEqualTo("0");
       }
     } finally {
       if (db != null) {
@@ -662,6 +687,14 @@ public class RocksDBTest {
     List<ColumnFamilyHandle> columnFamilyHandles =
         new ArrayList<>();
     try {
+      final int NUM_KEYS_PER_L0_FILE = 100;
+      final int KEY_SIZE = 20;
+      final int VALUE_SIZE = 300;
+      final int L0_FILE_SIZE =
+          NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE);
+      final int NUM_L0_FILES = 10;
+      final int TEST_SCALE = 5;
+      final int KEY_INTERVAL = 100;
       opt = new DBOptions().
           setCreateIfMissing(true).
           setCreateMissingColumnFamilies(true);
@@ -672,62 +705,73 @@ public class RocksDBTest {
       columnFamilyDescriptors.add(new ColumnFamilyDescriptor(
           "new_cf".getBytes(),
           new ColumnFamilyOptions().
+              setDisableAutoCompactions(true).
               setCompactionStyle(CompactionStyle.LEVEL).
-              setNumLevels(4).
-              setWriteBufferSize(100 << 10).
-              setLevelZeroFileNumCompactionTrigger(3).
-              setTargetFileSizeBase(200 << 10).
+              setNumLevels(5).
+              // a slightly bigger write buffer than L0 file
+              // so that we can ensure manual flush always
+              // go before background flush happens.
+              setWriteBufferSize(L0_FILE_SIZE * 2).
+              // Disable auto L0 -> L1 compaction
+              setLevelZeroFileNumCompactionTrigger(20).
+              setTargetFileSizeBase(L0_FILE_SIZE * 100).
               setTargetFileSizeMultiplier(1).
-              setMaxBytesForLevelBase(500 << 10).
-              setMaxBytesForLevelMultiplier(1).
-              setDisableAutoCompactions(false)));
+              // To disable auto compaction
+              setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100).
+              setMaxBytesForLevelMultiplier(2).
+              setDisableAutoCompactions(true)));
       // open database
       db = RocksDB.open(opt,
           dbFolder.getRoot().getAbsolutePath(),
           columnFamilyDescriptors,
           columnFamilyHandles);
       // fill database with key/value pairs
-      byte[] b = new byte[10000];
-      for (int i = 0; i < 200; i++) {
-        rand.nextBytes(b);
-        db.put(columnFamilyHandles.get(1),
-            String.valueOf(i).getBytes(), b);
-      }
-      db.flush(new FlushOptions().setWaitForFlush(true),
-          columnFamilyHandles.get(1));
-      // free column families
-      for (ColumnFamilyHandle handle : columnFamilyHandles) {
-        handle.dispose();
-      }
-      // clear column family handles for reopen
-      columnFamilyHandles.clear();
-      db.close();
-      columnFamilyDescriptors.get(1).
-          columnFamilyOptions().
-          setTargetFileSizeBase(Long.MAX_VALUE).
-          setTargetFileSizeMultiplier(1).
-          setMaxBytesForLevelBase(Long.MAX_VALUE).
-          setMaxBytesForLevelMultiplier(1).
-          setDisableAutoCompactions(true);
-      // reopen database
-      db = RocksDB.open(opt,
-          dbFolder.getRoot().getAbsolutePath(),
-          columnFamilyDescriptors,
-          columnFamilyHandles);
-      // compact new column family
-      db.compactRange(columnFamilyHandles.get(1), true, 0, 0);
-      // check if new column family is compacted to level zero
-      for (int i = 0; i < 4; i++) {
-        if (i == 0) {
-          assertThat(db.getProperty(columnFamilyHandles.get(1),
-              "rocksdb.num-files-at-level" + i)).
-              isEqualTo("1");
-        } else {
-          assertThat(db.getProperty(columnFamilyHandles.get(1),
-              "rocksdb.num-files-at-level" + i)).
-              isEqualTo("0");
-        }
+      byte[] value = new byte[VALUE_SIZE];
+      int int_key = 0;
+      for (int round = 0; round < 5; ++round) {
+        int initial_key = int_key;
+        for (int f = 1; f <= NUM_L0_FILES; ++f) {
+          for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) {
+            int_key += KEY_INTERVAL;
+            rand.nextBytes(value);
+            db.put(columnFamilyHandles.get(1),
+                String.format("%020d", int_key).getBytes(),
+                value);
+          }
+          db.flush(new FlushOptions().setWaitForFlush(true),
+              columnFamilyHandles.get(1));
+          // Make sure we do create one more L0 files.
+          assertThat(
+              db.getProperty(columnFamilyHandles.get(1),
+                  "rocksdb.num-files-at-level0")).
+              isEqualTo("" + f);
+        }
+        // Compact all L0 files we just created
+        db.compactRange(
+            columnFamilyHandles.get(1),
+            String.format("%020d", initial_key).getBytes(),
+            String.format("%020d", int_key - 1).getBytes());
+        // Making sure there isn't any L0 files.
+        assertThat(
+            db.getProperty(columnFamilyHandles.get(1),
+                "rocksdb.num-files-at-level0")).
+            isEqualTo("0");
+        // Making sure there are some L1 files.
+        // Here we only use != 0 instead of a specific number
+        // as we don't want the test make any assumption on
+        // how compaction works.
+        assertThat(
+            db.getProperty(columnFamilyHandles.get(1),
+                "rocksdb.num-files-at-level1")).
+            isNotEqualTo("0");
+        // Because we only compacted those keys we issued
+        // in this round, there shouldn't be any L1 -> L2
+        // compaction. So we expect zero L2 files here.
+        assertThat(
+            db.getProperty(columnFamilyHandles.get(1),
+                "rocksdb.num-files-at-level2")).
+            isEqualTo("0");
       }
     } finally {
       for (ColumnFamilyHandle handle : columnFamilyHandles) {