Add test showing range tombstones can create excessively large compactions (#5956)

Summary:
For more information on the original problem see this [link](https://github.com/facebook/rocksdb/issues/3977).

This change adds two new tests. They are identical except that one uses range tombstones and the other does not. Each test generates files at L1 that overlap keys at L2. The test that uses range tombstones produces a single file at L1. That single file overlaps a very large key range at L2, which in turn creates an excessively large compaction (layout below, followed by a sketch of the write pattern).

```
1: T001 - T005
2:  000 -  005
```
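
For illustration, here is a minimal sketch (not part of the patch) of the write pattern that produces the single-file layout above. It assumes a plain `rocksdb::DB*` opened separately from the test harness; the function name and the keys `T001`/`T006` are placeholders chosen to match the layout, with `DeleteRange`'s end key being exclusive.

```cpp
#include <cassert>

#include <rocksdb/db.h>

// Issue one range tombstone covering every live key, then flush it. The
// resulting L0 file (and the L1 file it is compacted into) spans the whole
// key range at L2, so a single compaction against L2 has to pull in all of
// the grandparent data.
void WriteSingleRangeTombstone(rocksdb::DB* db) {
  rocksdb::Status s =
      db->DeleteRange(rocksdb::WriteOptions(), db->DefaultColumnFamily(),
                      /*begin_key=*/"T001", /*end_key=*/"T006");
  assert(s.ok());
  s = db->Flush(rocksdb::FlushOptions());
  assert(s.ok());
}
```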

In contrast, the test that uses point keys generates 3 files at L1. Since only a single file is compacted at a time, those 3 files produce less work per compaction iteration (layout below, followed by a sketch of the write pattern).

```
1:  001 - 002
1:  003 - 004
1:  005
2:  000 - 005
```
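
For contrast, a minimal sketch (again not part of the patch, with a placeholder function name and key names) of the point-key pattern: overwriting the keys individually gives later compactions point keys to cut on, so the L1 output can be split into several smaller files.

```cpp
#include <cassert>
#include <string>

#include <rocksdb/db.h>

// Overwrite each existing key with a small value instead of issuing one
// range tombstone. The flushed file holds point keys, so compaction output
// can be split to limit overlap with the grandparent level.
void OverwritePointKeys(rocksdb::DB* db) {
  rocksdb::Status s;
  for (int i = 1; i <= 5; ++i) {
    std::string key = "00" + std::to_string(i);  // placeholder keys 001..005
    s = db->Put(rocksdb::WriteOptions(), key, "0x123");
    assert(s.ok());
  }
  s = db->Flush(rocksdb::FlushOptions());
  assert(s.ok());
}
```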
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5956

Differential Revision: D18071631

Pulled By: dlambrig

fbshipit-source-id: 12abae75fb3e0b022d228c6371698aa5e53385df
Author: Dan Lambright, 2019-10-24 11:07:11 -07:00 (committed by Facebook Github Bot)
Parent: 9f1e5a0b87
Commit: 2509531123

@@ -1521,6 +1521,84 @@ TEST_F(DBRangeDelTest, RangeTombstoneWrittenToMinimalSsts) {
  ASSERT_EQ(1, num_range_deletions);
}

TEST_F(DBRangeDelTest, OverlappedTombstones) {
  const int kNumPerFile = 4, kNumFiles = 2;
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.max_compaction_bytes = 9 * 1024;
  DestroyAndReopen(options);
  Random rnd(301);
  for (int i = 0; i < kNumFiles; ++i) {
    std::vector<std::string> values;
    // Write 12K (4 values, each 3K)
    for (int j = 0; j < kNumPerFile; j++) {
      values.push_back(RandomString(&rnd, 3 << 10));
      ASSERT_OK(Put(Key(i * kNumPerFile + j), values[j]));
    }
  }
  ASSERT_OK(db_->Flush(FlushOptions()));
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
  MoveFilesToLevel(2);
  ASSERT_EQ(2, NumTableFilesAtLevel(2));
  ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), Key(1),
                             Key((kNumFiles)*kNumPerFile + 1)));
  ASSERT_OK(db_->Flush(FlushOptions()));
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
  dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
                              true /* disallow_trivial_move */);
  // The tombstone range is not broken up into multiple SSTs which may incur a
  // large compaction with L2.
  ASSERT_EQ(1, NumTableFilesAtLevel(1));
  std::vector<std::vector<FileMetaData>> files;
  dbfull()->TEST_CompactRange(1, nullptr, nullptr, nullptr,
                              true /* disallow_trivial_move */);
  ASSERT_EQ(1, NumTableFilesAtLevel(2));
  ASSERT_EQ(0, NumTableFilesAtLevel(1));
}

TEST_F(DBRangeDelTest, OverlappedKeys) {
  const int kNumPerFile = 4, kNumFiles = 2;
  Options options = CurrentOptions();
  options.disable_auto_compactions = true;
  options.max_compaction_bytes = 9 * 1024;
  DestroyAndReopen(options);
  Random rnd(301);
  for (int i = 0; i < kNumFiles; ++i) {
    std::vector<std::string> values;
    // Write 12K (4 values, each 3K)
    for (int j = 0; j < kNumPerFile; j++) {
      values.push_back(RandomString(&rnd, 3 << 10));
      ASSERT_OK(Put(Key(i * kNumPerFile + j), values[j]));
    }
  }
  ASSERT_OK(db_->Flush(FlushOptions()));
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
  MoveFilesToLevel(2);
  ASSERT_EQ(2, NumTableFilesAtLevel(2));
  for (int i = 1; i < kNumFiles * kNumPerFile + 1; i++) {
    ASSERT_OK(Put(Key(i), "0x123"));
  }
  ASSERT_OK(db_->Flush(FlushOptions()));
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
  // The key range is broken up into three SSTs to avoid a future big compaction
  // with the grandparent
  dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
                              true /* disallow_trivial_move */);
  ASSERT_EQ(3, NumTableFilesAtLevel(1));
  std::vector<std::vector<FileMetaData>> files;
  dbfull()->TEST_CompactRange(1, nullptr, nullptr, nullptr,
                              true /* disallow_trivial_move */);
  ASSERT_EQ(1, NumTableFilesAtLevel(2));
  ASSERT_EQ(0, NumTableFilesAtLevel(1));
}

#endif  // ROCKSDB_LITE

}  // namespace rocksdb
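
The assertions above use the test harness helper `NumTableFilesAtLevel`. Outside the harness, the same per-level file counts can be read through DB properties; the sketch below (not part of the patch, with an illustrative function name) shows one way to do that.

```cpp
#include <cassert>
#include <iostream>
#include <string>

#include <rocksdb/db.h>

// Print how many SST files sit at L0..L2, the counts the tests assert on,
// using the "rocksdb.num-files-at-level<N>" properties.
void PrintLevelFileCounts(rocksdb::DB* db) {
  for (int level = 0; level <= 2; ++level) {
    std::string count;
    bool ok = db->GetProperty(
        "rocksdb.num-files-at-level" + std::to_string(level), &count);
    assert(ok);
    std::cout << "L" << level << ": " << count << " file(s)\n";
  }
}
```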