Fix unit tests for universal compaction (step 2)

Summary:
Continue fixing existing unit tests for universal compaction. I have
tried to apply universal compaction to all unit tests that don't
call ChangeOptions(). I left out a few that are either clearly not
applicable to universal compaction (because they check files/keys/values
at level 1 or above), or clearly unrelated to compaction
(e.g., they just open a file or a DB).
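
For reference, the conversion wraps each test body in a loop so the body runs
once with the default (level-style) options and once with universal compaction.
A minimal sketch of the pattern, using a hypothetical test name rather than an
exact excerpt from the diff:

    TEST(DBTest, SomeConvertedTest) {   // hypothetical name, for illustration only
      do {
        // original test body, unchanged
        ASSERT_OK(Put("foo", "v1"));
        ASSERT_EQ("v1", Get("foo"));
      } while (ChangeCompactOptions());  // first call switches option_config_ to
                                         // kUniversalCompaction and reopens the DB
                                         // (returns true); second call returns false
    }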

I also added a new unit test for universal compaction.

The good news is that I didn't see any bugs during this round.

Test Plan: Ran "make all check" yesterday. Have rebased and am rerunning.

Reviewers: haobo, dhruba

Differential Revision: https://reviews.facebook.net/D12135
Xing Jin 2013-08-07 15:20:41 -07:00
parent 17b8f786a3
commit 8ae905ed63
2 changed files with 1093 additions and 874 deletions


@@ -1238,13 +1238,17 @@ void DBImpl::TEST_CompactRange(int level, const Slice* begin,const Slice* end) {
   manual.level = level;
   manual.done = false;
   manual.in_progress = false;
-  if (begin == nullptr) {
+  // For universal compaction, we enforce every manual compaction to compact
+  // all files.
+  if (begin == nullptr ||
+      options_.compaction_style == kCompactionStyleUniversal) {
     manual.begin = nullptr;
   } else {
     begin_storage = InternalKey(*begin, kMaxSequenceNumber, kValueTypeForSeek);
     manual.begin = &begin_storage;
   }
-  if (end == nullptr) {
+  if (end == nullptr ||
+      options_.compaction_style == kCompactionStyleUniversal) {
     manual.end = nullptr;
   } else {
     end_storage = InternalKey(*end, 0, static_cast<ValueType>(0));


@@ -281,6 +281,17 @@ class DBTest {
     }
   }

+  // Switch between different compaction styles (we have only 2 now).
+  bool ChangeCompactOptions() {
+    if (option_config_ == kDefault) {
+      option_config_ = kUniversalCompaction;
+      DestroyAndReopen();
+      return true;
+    } else {
+      return false;
+    }
+  }
+
   // Return the current option configuration.
   Options CurrentOptions() {
     Options options;
@@ -838,6 +849,7 @@ TEST(DBTest, KeyMayExist) {
 // A delete is skipped for key if KeyMayExist(key) returns False
 // Tests Writebatch consistency and proper delete behaviour
 TEST(DBTest, FilterDeletes) {
+  do {
   Options options = CurrentOptions();
   options.filter_policy = NewBloomFilterPolicy(20);
   options.filter_deletes = true;
@@ -871,9 +883,11 @@ TEST(DBTest, FilterDeletes) {
   batch.Clear();
   delete options.filter_policy;
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, IterEmpty) {
+  do {
   Iterator* iter = db_->NewIterator(ReadOptions());
   iter->SeekToFirst();
@@ -886,9 +900,11 @@ TEST(DBTest, IterEmpty) {
   ASSERT_EQ(IterStatus(iter), "(invalid)");
   delete iter;
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, IterSingle) {
+  do {
   ASSERT_OK(Put("a", "va"));
   Iterator* iter = db_->NewIterator(ReadOptions());
@@ -924,9 +940,11 @@ TEST(DBTest, IterSingle) {
   ASSERT_EQ(IterStatus(iter), "(invalid)");
   delete iter;
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, IterMulti) {
+  do {
   ASSERT_OK(Put("a", "va"));
   ASSERT_OK(Put("b", "vb"));
   ASSERT_OK(Put("c", "vc"));
@@ -1007,9 +1025,11 @@ TEST(DBTest, IterMulti) {
   ASSERT_EQ(IterStatus(iter), "(invalid)");
   delete iter;
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, IterSmallAndLargeMix) {
+  do {
   ASSERT_OK(Put("a", "va"));
   ASSERT_OK(Put("b", std::string(100000, 'b')));
   ASSERT_OK(Put("c", "vc"));
@@ -1045,6 +1065,7 @@ TEST(DBTest, IterSmallAndLargeMix) {
   ASSERT_EQ(IterStatus(iter), "(invalid)");
   delete iter;
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, IterMultiWithDelete) {
@@ -1106,6 +1127,7 @@ TEST(DBTest, RollLog) {
 }

 TEST(DBTest, WAL) {
+  do {
   Options options = CurrentOptions();
   WriteOptions writeOpt = WriteOptions();
   writeOpt.disableWAL = true;
@@ -1135,16 +1157,22 @@ TEST(DBTest, WAL) {
   // again both values should be present.
   ASSERT_EQ("v3", Get("foo"));
   ASSERT_EQ("v3", Get("bar"));
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, CheckLock) {
+  do {
   DB* localdb;
   Options options = CurrentOptions();
   ASSERT_TRUE(TryReopen(&options).ok());
-  ASSERT_TRUE(!(PureReopen(&options, &localdb).ok())); // second open should fail
+
+  // second open should fail
+  ASSERT_TRUE(!(PureReopen(&options, &localdb).ok()));
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, FLUSH) {
+  do {
   Options options = CurrentOptions();
   WriteOptions writeOpt = WriteOptions();
   writeOpt.disableWAL = true;
@@ -1176,6 +1204,7 @@ TEST(DBTest, FLUSH) {
   // has WAL enabled.
   ASSERT_EQ("v3", Get("foo"));
   ASSERT_EQ("v3", Get("bar"));
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, RecoveryWithEmptyLog) {
@@ -1214,6 +1243,7 @@ TEST(DBTest, RecoverDuringMemtableCompaction) {
 }

 TEST(DBTest, MinorCompactionsHappen) {
+  do {
   Options options = CurrentOptions();
   options.write_buffer_size = 10000;
   Reopen(&options);
@@ -1236,9 +1266,11 @@ TEST(DBTest, MinorCompactionsHappen) {
   for (int i = 0; i < N; i++) {
     ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(Key(i)));
   }
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, ManifestRollOver) {
+  do {
   Options options = CurrentOptions();
   options.max_manifest_file_size = 10 ; // 10 bytes
   Reopen(&options);
@@ -1260,10 +1292,11 @@ TEST(DBTest, ManifestRollOver) {
     ASSERT_EQ(std::string(1000, '2'), Get("manifest_key2"));
     ASSERT_EQ(std::string(1000, '3'), Get("manifest_key3"));
   }
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, RecoverWithLargeLog) {
+  do {
   {
     Options options = CurrentOptions();
     Reopen(&options);
@@ -1285,6 +1318,7 @@ TEST(DBTest, RecoverWithLargeLog) {
   ASSERT_EQ(std::string(10, '3'), Get("small3"));
   ASSERT_EQ(std::string(10, '4'), Get("small4"));
   ASSERT_GT(NumTableFilesAtLevel(0), 1);
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, CompactionsGenerateMultipleFiles) {
@@ -1348,6 +1382,139 @@ TEST(DBTest, CompactionTrigger) {
   ASSERT_EQ(NumTableFilesAtLevel(1), 1);
 }

+TEST(DBTest, UniversalCompactionTrigger) {
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.write_buffer_size = 100<<10; //100KB
+  // trigger compaction if there are > 3 files
+  options.level0_file_num_compaction_trigger = 3;
+  Reopen(&options);
+
+  Random rnd(301);
+  int key_idx = 0;
+
+  // Stage 1:
+  // Generate a set of files at level 0, but don't trigger level-0
+  // compaction.
+  for (int num = 0;
+       num < options.level0_file_num_compaction_trigger;
+       num++) {
+    // Write 120KB (12 values, each 10K)
+    for (int i = 0; i < 12; i++) {
+      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForCompactMemTable();
+    ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
+  }
+
+  // Generate one more file at level-0, which should trigger level-0
+  // compaction.
+  for (int i = 0; i < 12; i++) {
+    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+    key_idx++;
+  }
+  dbfull()->TEST_WaitForCompact();
+  // Suppose each file flushed from mem table has size 1. Now we compact
+  // (level0_file_num_compaction_trigger+1)=4 files and should have a big
+  // file of size 4.
+  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
+  for (int i = 1; i < options.num_levels ; i++) {
+    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
+  }
+
+  // Stage 2:
+  // Now we have one file at level 0, with size 4. We also have some data in
+  // mem table. Let's continue generating new files at level 0, but don't
+  // trigger level-0 compaction.
+  // First, clean up memtable before inserting new data. This will generate
+  // a level-0 file, with size around 0.4 (according to previously written
+  // data amount).
+  dbfull()->Flush(FlushOptions());
+  for (int num = 0;
+       num < options.level0_file_num_compaction_trigger-2;
+       num++) {
+    // Write 120KB (12 values, each 10K)
+    for (int i = 0; i < 12; i++) {
+      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForCompactMemTable();
+    ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
+  }
+
+  // Generate one more file at level-0, which should trigger level-0
+  // compaction.
+  for (int i = 0; i < 12; i++) {
+    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+    key_idx++;
+  }
+  dbfull()->TEST_WaitForCompact();
+  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
+  // After compaction, we should have 2 files, with size 4, 2.4.
+  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
+  for (int i = 1; i < options.num_levels ; i++) {
+    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
+  }
+
+  // Stage 3:
+  // Now we have 2 files at level 0, with size 4 and 2.4. Continue
+  // generating new files at level 0.
+  for (int num = 0;
+       num < options.level0_file_num_compaction_trigger-2;
+       num++) {
+    // Write 120KB (12 values, each 10K)
+    for (int i = 0; i < 12; i++) {
+      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForCompactMemTable();
+    ASSERT_EQ(NumTableFilesAtLevel(0), num + 3);
+  }
+
+  // Generate one more file at level-0, which should trigger level-0
+  // compaction.
+  for (int i = 0; i < 12; i++) {
+    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+    key_idx++;
+  }
+  dbfull()->TEST_WaitForCompact();
+  // Before compaction, we have 4 files at level 0, with size 4, 2.4, 1, 1.
+  // After compaction, we should have 3 files, with size 4, 2.4, 2.
+  ASSERT_EQ(NumTableFilesAtLevel(0), 3);
+  for (int i = 1; i < options.num_levels ; i++) {
+    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
+  }
+
+  // Stage 4:
+  // Now we have 3 files at level 0, with size 4, 2.4, 2. Let's generate a
+  // new file of size 1.
+  for (int i = 0; i < 12; i++) {
+    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+    key_idx++;
+  }
+  dbfull()->TEST_WaitForCompact();
+  // Level-0 compaction is triggered, but no file will be picked up.
+  ASSERT_EQ(NumTableFilesAtLevel(0), 4);
+  for (int i = 1; i < options.num_levels ; i++) {
+    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
+  }
+
+  // Stage 5:
+  // Now we have 4 files at level 0, with size 4, 2.4, 2, 1. Let's generate
+  // a new file of size 1.
+  for (int i = 0; i < 12; i++) {
+    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 10000)));
+    key_idx++;
+  }
+  dbfull()->TEST_WaitForCompact();
+  // All files at level 0 will be compacted into a single one.
+  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
+  for (int i = 1; i < options.num_levels ; i++) {
+    ASSERT_EQ(NumTableFilesAtLevel(i), 0);
+  }
+}
+
 void MinLevelHelper(DBTest* self, Options& options) {
   Random rnd(301);
@@ -1413,6 +1580,7 @@ bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
   }
   return true;
 }
+
 TEST(DBTest, MinLevelToCompress1) {
   Options options = CurrentOptions();
   CompressionType type;
@@ -1454,6 +1622,7 @@ TEST(DBTest, MinLevelToCompress2) {
 }

 TEST(DBTest, RepeatedWritesToSameKey) {
+  do {
   Options options = CurrentOptions();
   options.env = env_;
   options.write_buffer_size = 100000; // Small write buffer
@@ -1470,6 +1639,7 @@ TEST(DBTest, RepeatedWritesToSameKey) {
     Put("key", value);
     ASSERT_LE(TotalTableFiles(), kMaxFiles);
   }
+  } while (ChangeCompactOptions());
 }

 // This is a static filter used for filtering
@@ -1669,6 +1839,7 @@ TEST(DBTest, CompactionFilter) {
 }

 TEST(DBTest, CompactionFilterWithValueChange) {
+  do {
   Options options = CurrentOptions();
   options.num_levels = 3;
   options.max_mem_compaction_level = 0;
@@ -1714,9 +1885,11 @@ TEST(DBTest, CompactionFilterWithValueChange) {
     std::string newvalue = Get(key);
     ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
   }
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, SparseMerge) {
+  do {
   Options options = CurrentOptions();
   options.compression = kNoCompression;
   Reopen(&options);
@@ -1754,6 +1927,7 @@ TEST(DBTest, SparseMerge) {
   ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
   dbfull()->TEST_CompactRange(1, nullptr, nullptr);
   ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(), 20*1048576);
+  } while (ChangeCompactOptions());
 }

 static bool Between(uint64_t val, uint64_t low, uint64_t high) {
@@ -1856,6 +2030,7 @@ TEST(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
 }

 TEST(DBTest, IteratorPinsRef) {
+  do {
   Put("foo", "hello");

   // Get iterator that will yield the current contents of the DB.
@@ -1875,6 +2050,7 @@ TEST(DBTest, IteratorPinsRef) {
   iter->Next();
   ASSERT_TRUE(!iter->Valid());
   delete iter;
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, Snapshot) {
@@ -2092,6 +2268,7 @@ TEST(DBTest, OverlapInLevel0) {
 }

 TEST(DBTest, L0_CompactionBug_Issue44_a) {
+  do {
   Reopen();
   ASSERT_OK(Put("b", "v"));
   Reopen();
@@ -2106,9 +2283,11 @@ TEST(DBTest, L0_CompactionBug_Issue44_a) {
   ASSERT_EQ("(a->v)", Contents());
   env_->SleepForMicroseconds(1000000); // Wait for compaction to finish
   ASSERT_EQ("(a->v)", Contents());
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, L0_CompactionBug_Issue44_b) {
+  do {
   Reopen();
   Put("","");
   Reopen();
@@ -2132,6 +2311,7 @@ TEST(DBTest, L0_CompactionBug_Issue44_b) {
   ASSERT_EQ("(->)(c->cv)", Contents());
   env_->SleepForMicroseconds(1000000); // Wait for compaction to finish
   ASSERT_EQ("(->)(c->cv)", Contents());
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, ComparatorCheck) {
@@ -2148,6 +2328,8 @@ TEST(DBTest, ComparatorCheck) {
       BytewiseComparator()->FindShortSuccessor(key);
     }
   };
+
+  do {
   NewComparator cmp;
   Options new_options = CurrentOptions();
   new_options.comparator = &cmp;
@@ -2155,6 +2337,7 @@ TEST(DBTest, ComparatorCheck) {
   ASSERT_TRUE(!s.ok());
   ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos)
       << s.ToString();
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, CustomComparator) {
@@ -2183,6 +2366,8 @@ TEST(DBTest, CustomComparator) {
       return val;
     }
   };
+
+  do {
   NumberComparator cmp;
   Options new_options = CurrentOptions();
   new_options.create_if_missing = true;
@@ -2210,6 +2395,7 @@ TEST(DBTest, CustomComparator) {
     }
     Compact("[0]", "[1000000]");
   }
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, ManualCompaction) {
@@ -2342,9 +2528,9 @@ TEST(DBTest, DestroyDBMetaDatabase) {
   ASSERT_TRUE(!DB::Open(opts, metametadbname, &db).ok());
 }

 // Check that number of files does not grow when we are out of space
 TEST(DBTest, NoSpace) {
+  do {
   Options options = CurrentOptions();
   options.env = env_;
   Reopen(&options);
@@ -2365,10 +2551,11 @@ TEST(DBTest, NoSpace) {
   // Check that compaction attempts slept after errors
   ASSERT_GE(env_->sleep_counter_.Read(), 5);
+  } while (ChangeCompactOptions());
 }

-TEST(DBTest, NonWritableFileSystem)
-{
+TEST(DBTest, NonWritableFileSystem) {
+  do {
   Options options = CurrentOptions();
   options.write_buffer_size = 1000;
   options.env = env_;
@@ -2385,6 +2572,7 @@ TEST(DBTest, NonWritableFileSystem)
   }
   ASSERT_GT(errors, 0);
   env_->non_writable_.Release_Store(nullptr);
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, ManifestWriteError) {
@@ -2429,6 +2617,7 @@ TEST(DBTest, ManifestWriteError) {
 }

 TEST(DBTest, FilesDeletedAfterCompaction) {
+  do {
   ASSERT_OK(Put("foo", "v2"));
   Compact("a", "z");
   const int num_files = CountLiveFiles();
@@ -2437,9 +2626,11 @@ TEST(DBTest, FilesDeletedAfterCompaction) {
     Compact("a", "z");
   }
   ASSERT_EQ(CountLiveFiles(), num_files);
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, BloomFilter) {
+  do {
   env_->count_random_reads_ = true;
   Options options = CurrentOptions();
   options.env = env_;
@@ -2483,9 +2674,11 @@ TEST(DBTest, BloomFilter) {
   env_->delay_sstable_sync_.Release_Store(nullptr);
   Close();
   delete options.filter_policy;
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, SnapshotFiles) {
+  do {
   Options options = CurrentOptions();
   const EnvOptions soptions;
   options.write_buffer_size = 100000000; // Large write buffer
@@ -2613,9 +2806,11 @@ TEST(DBTest, SnapshotFiles) {

   // release file snapshot
   dbfull()->DisableFileDeletions();
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, CompactOnFlush) {
+  do {
   Options options = CurrentOptions();
   options.purge_redundant_kvs_while_flush = true;
   options.disable_auto_compactions = true;
@@ -2697,6 +2892,7 @@ TEST(DBTest, CompactOnFlush) {
   ASSERT_OK(dbfull()->TEST_CompactMemTable());
   ASSERT_EQ(AllEntriesFor("foo"), "[ v9 ]");
   db_->ReleaseSnapshot(snapshot1);
+  } while (ChangeCompactOptions());
 }

 std::vector<std::uint64_t> ListLogFiles(Env* env, const std::string& path) {
@@ -2716,6 +2912,7 @@ std::vector<std::uint64_t> ListLogFiles(Env* env, const std::string& path) {
 }

 TEST(DBTest, WALArchival) {
+  do {
   std::string value(1024, '1');
   Options options = CurrentOptions();
   options.create_if_missing = true;
@@ -2756,10 +2953,11 @@ TEST(DBTest, WALArchival) {

   logFiles = ListLogFiles(env_, archiveDir);
   ASSERT_TRUE(logFiles.size() == 0);
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, WALClear) {
+  do {
   Options options = CurrentOptions();
   options.create_if_missing = true;
   options.WAL_ttl_seconds = 1;
@@ -2775,6 +2973,7 @@ TEST(DBTest, WALClear) {
   dbfull()->TEST_PurgeObsoleteteWAL();
   log_files = ListLogFiles(env_, archive_dir);
   ASSERT_TRUE(log_files.empty());
+  } while (ChangeCompactOptions());
 }

 void ExpectRecords(
@@ -2794,6 +2993,7 @@ void ExpectRecords(
 }

 TEST(DBTest, TransactionLogIterator) {
+  do {
   Options options = OptionsForLogIterTest();
   DestroyAndReopen(&options);
   Put("key1", DummyString(1024));
@@ -2814,9 +3014,11 @@ TEST(DBTest, TransactionLogIterator) {
     auto iter = OpenTransactionLogIter(0);
     ExpectRecords(6, iter);
   }
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, TransactionLogIteratorMoveOverZeroFiles) {
+  do {
   Options options = OptionsForLogIterTest();
   DestroyAndReopen(&options);
   // Do a plain Reopen.
@@ -2829,9 +3031,11 @@ TEST(DBTest, TransactionLogIteratorMoveOverZeroFiles) {

   auto iter = OpenTransactionLogIter(0);
   ExpectRecords(2, iter);
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, TransactionLogIteratorStallAtLastRecord) {
+  do {
   Options options = OptionsForLogIterTest();
   DestroyAndReopen(&options);
   Put("key1", DummyString(1024));
@@ -2845,17 +3049,21 @@ TEST(DBTest, TransactionLogIteratorStallAtLastRecord) {
   iter->Next();
   ASSERT_OK(iter->status());
   ASSERT_TRUE(iter->Valid());
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, TransactionLogIteratorJustEmptyFile) {
+  do {
   Options options = OptionsForLogIterTest();
   DestroyAndReopen(&options);
   unique_ptr<TransactionLogIterator> iter;
   Status status = dbfull()->GetUpdatesSince(0, &iter);
   ASSERT_TRUE(!status.ok());
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, TransactionLogIteratorCheckAfterRestart) {
+  do {
   Options options = OptionsForLogIterTest();
   DestroyAndReopen(&options);
   Put("key1", DummyString(1024));
@@ -2864,9 +3072,11 @@ TEST(DBTest, TransactionLogIteratorCheckAfterRestart) {
   Reopen(&options);
   auto iter = OpenTransactionLogIter(0);
   ExpectRecords(2, iter);
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, TransactionLogIteratorBatchOperations) {
+  do {
   Options options = OptionsForLogIterTest();
   DestroyAndReopen(&options);
   WriteBatch batch;
@@ -2880,6 +3090,7 @@ TEST(DBTest, TransactionLogIteratorBatchOperations) {
   Put("key4", DummyString(1024));
   auto iter = OpenTransactionLogIter(3);
   ExpectRecords(1, iter);
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, ReadCompaction) {
@@ -3334,6 +3545,7 @@ TEST(DBTest, Randomized) {
 }

 TEST(DBTest, MultiGetSimple) {
+  do {
   ASSERT_OK(db_->Put(WriteOptions(),"k1","v1"));
   ASSERT_OK(db_->Put(WriteOptions(),"k2","v2"));
   ASSERT_OK(db_->Put(WriteOptions(),"k3","v3"));
@@ -3365,9 +3577,11 @@ TEST(DBTest, MultiGetSimple) {
   ASSERT_TRUE(s[3].IsNotFound());
   ASSERT_OK(s[4]);
   ASSERT_TRUE(s[5].IsNotFound());
+  } while (ChangeCompactOptions());
 }

 TEST(DBTest, MultiGetEmpty) {
+  do {
   // Empty Key Set
   std::vector<Slice> keys;
   std::vector<std::string> values;
@@ -3386,6 +3600,7 @@ TEST(DBTest, MultiGetEmpty) {
   s = db_->MultiGet(ReadOptions(),keys,&values);
   ASSERT_EQ((int)s.size(), 2);
   ASSERT_TRUE(s[0].IsNotFound() && s[1].IsNotFound());
+  } while (ChangeCompactOptions());
 }

 std::string MakeKey(unsigned int num) {