Trivial move to cover multiple input levels
Summary: Trivial move is currently only triggered when a file moves from
level n to level n+1. With dynamic level base, a file can move from level 0
to level n while levels 1 through n-1 are empty. Extend trivial move to
cover this case.

Test Plan: Add one more unit test of sequential loading. A non-trivial
compaction happened without the patch and no longer happens with it.

Reviewers: rven, yhchiang, MarkCallaghan, igor

Reviewed By: igor

Subscribers: leveldb, dhruba, IslamAbdelRahman

Differential Revision: https://reviews.facebook.net/D36669
parent e7adfe690b
commit b118238a57
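For background on why intermediate levels can be empty at all: with
level_compaction_dynamic_level_bytes, per-level size targets are derived from
the bottom level's actual size, and levels whose target would fall below
max_bytes_for_level_base are left unused. The sketch below is simplified and
hypothetical (loosely modeled on the logic in RocksDB's CalculateBaseBytes;
the BaseLevel helper is not a real API), showing how a freshly loaded DB ends
up with files only in L0 and the bottom level:

#include <cstdint>
#include <cstdio>

// Hedged sketch of dynamic level sizing: the last level's target is its
// actual size, each level above it is `multiplier` times smaller, and levels
// whose target would drop below `level_base_bytes` are simply not used. So
// after a fresh sequential load, data sits only in L0 and level n, with
// L1..Ln-1 empty -- exactly the case this patch turns into a trivial move.
int BaseLevel(uint64_t last_level_bytes, int num_levels,
              uint64_t level_base_bytes, int multiplier) {
  int level = num_levels - 1;
  uint64_t target = last_level_bytes;
  while (level > 1 && target / multiplier >= level_base_bytes) {
    target /= multiplier;  // the level above holds multiplier-x less data
    --level;
  }
  return level;  // first level below L0 that actually holds files
}

int main() {
  // Made-up numbers echoing the new test's options (base 10240,
  // multiplier 4, 5 levels): prints "base level: 3", so L1 and L2 stay
  // empty and a lone L0 file's natural destination is L3.
  printf("base level: %d\n", BaseLevel(40960, 5, 10240, 4));
  return 0;
}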
@@ -144,14 +144,25 @@ bool Compaction::InputCompressionMatchesOutput() const {
 }
 
 bool Compaction::IsTrivialMove() const {
+  // If start_level_== output_level_, the purpose is to force compaction
+  // filter to be applied to that level, and thus cannot be a trivia move.
+  if (start_level_ == output_level_) {
+    return false;
+  }
+  // If compaction involves more than one file, it is not trivial move.
+  if (num_input_files(0) != 1) {
+    return false;
+  }
+  for (size_t l = 1u; l < num_input_levels(); l++) {
+    if (num_input_files(l) != 0) {
+      return false;
+    }
+  }
+
   // Avoid a move if there is lots of overlapping grandparent data.
   // Otherwise, the move could create a parent file that will require
   // a very expensive merge later on.
-  // If start_level_== output_level_, the purpose is to force compaction
-  // filter to be applied to that level, and thus cannot be a trivia move.
-  return (start_level_ != output_level_ && num_input_levels() == 2 &&
-          num_input_files(0) == 1 && num_input_files(1) == 0 &&
-          input(0, 0)->fd.GetPathId() == GetOutputPathId() &&
+  return (input(0, 0)->fd.GetPathId() == GetOutputPathId() &&
           InputCompressionMatchesOutput() &&
           TotalFileSize(grandparents_) <= max_grandparent_overlap_bytes_);
 }
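For readers who want to observe the effect from client code, here is a hedged
usage sketch (not part of the patch): after a sequential load with dynamic
level base, every level between L0 and the bottom should report zero files.
"rocksdb.num-files-at-level<N>" is an existing RocksDB property; the helper
function itself is illustrative.

#include <cassert>
#include <string>

#include "rocksdb/db.h"

// Illustrative check: a successful trivial move out of L0 leaves all
// intermediate levels empty, which the per-level file-count property exposes.
void AssertIntermediateLevelsEmpty(rocksdb::DB* db, int num_levels) {
  for (int level = 1; level + 1 < num_levels; level++) {
    std::string count;
    db->GetProperty("rocksdb.num-files-at-level" + std::to_string(level),
                    &count);
    assert(count == "0");  // the moved file skipped past this level
  }
}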
@@ -11086,6 +11086,45 @@ TEST_F(DBTest, DynamicLevelMaxBytesBase2) {
   ASSERT_EQ(1U, int_prop);
 }
 
+TEST_F(DBTest, DynamicLevelMaxBytesBaseInc) {
+  Options options = CurrentOptions();
+  options.create_if_missing = true;
+  options.db_write_buffer_size = 2048;
+  options.write_buffer_size = 2048;
+  options.max_write_buffer_number = 2;
+  options.level0_file_num_compaction_trigger = 2;
+  options.level0_slowdown_writes_trigger = 2;
+  options.level0_stop_writes_trigger = 2;
+  options.target_file_size_base = 2048;
+  options.level_compaction_dynamic_level_bytes = true;
+  options.max_bytes_for_level_base = 10240;
+  options.max_bytes_for_level_multiplier = 4;
+  options.hard_rate_limit = 1.1;
+  options.max_background_compactions = 2;
+  options.num_levels = 5;
+
+  DestroyAndReopen(options);
+
+  int non_trivial = 0;
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBImpl::BackgroundCompaction:NonTrivial", [&]() { non_trivial++; });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  Random rnd(301);
+  for (int i = 0; i < 3000; i++) {
+    ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
+  }
+  Flush();
+  dbfull()->TEST_WaitForCompact();
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+
+  ASSERT_EQ(non_trivial, 0);
+
+  env_->SetBackgroundThreads(1, Env::LOW);
+  env_->SetBackgroundThreads(1, Env::HIGH);
+}
+
 
 TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
   if (!Snappy_Supported()) {
     return;
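The new test counts non-trivial compactions through the
DBImpl::BackgroundCompaction:NonTrivial sync point, which is only available
in test builds. Outside the test harness, a listener can approximate the same
signal. The sketch below uses RocksDB's EventListener and CompactionJobInfo
(real APIs, though whether trivial moves fire the callback varies by
version); the LevelJumpListener class is purely illustrative.

#include <atomic>

#include "rocksdb/db.h"
#include "rocksdb/listener.h"

// Illustrative listener: records compactions whose output level is more than
// one level below the input, which is how a move from L0 straight to level n
// would appear under dynamic level base. Install it with
// options.listeners.emplace_back(std::make_shared<LevelJumpListener>()).
class LevelJumpListener : public rocksdb::EventListener {
 public:
  void OnCompactionCompleted(rocksdb::DB* /*db*/,
                             const rocksdb::CompactionJobInfo& info) override {
    if (info.output_level - info.base_input_level > 1) {
      level_jumps_.fetch_add(1);  // file skipped at least one empty level
    }
  }

  std::atomic<int> level_jumps_{0};
};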