Universal compaction with multiple levels won't preallocate up to the output file size
Summary: Universal compactions with multiple levels should use a file preallocation size based on input file sizes when the output level is not level 0.

Test Plan: Run all tests.

Reviewers: igor

Reviewed By: igor

Subscribers: leveldb, dhruba

Differential Revision: https://reviews.facebook.net/D38439
This commit is contained in:
parent
714fcc067d
commit
ec43a8b9fb
@ -325,13 +325,14 @@ void Compaction::Summary(char* output, int len) {
|
||||
uint64_t Compaction::OutputFilePreallocationSize() {
|
||||
uint64_t preallocation_size = 0;
|
||||
|
||||
if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel) {
|
||||
if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel ||
|
||||
output_level() > 0) {
|
||||
preallocation_size = max_output_file_size_;
|
||||
} else {
|
||||
for (size_t level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
|
||||
for (const auto& f : inputs_[level_iter].files) {
|
||||
preallocation_size += f->fd.GetFileSize();
|
||||
}
|
||||
// output_level() == 0
|
||||
assert(num_input_levels() > 0);
|
||||
for (const auto& f : inputs_[0].files) {
|
||||
preallocation_size += f->fd.GetFileSize();
|
||||
}
|
||||
}
|
||||
// Over-estimate slightly so we don't end up just barely crossing
|
||||
|
@ -200,7 +200,14 @@ class SpecialEnv : public EnvWrapper {
|
||||
return base_->Append(data);
|
||||
}
|
||||
}
|
||||
Status Close() override { return base_->Close(); }
|
||||
Status Close() override {
|
||||
// Check preallocation size
|
||||
// preallocation size is never passed to base file.
|
||||
size_t preallocation_size = preallocation_block_size();
|
||||
TEST_SYNC_POINT_CALLBACK("DBTestWritableFile.GetPreallocationStatus",
|
||||
&preallocation_size);
|
||||
return base_->Close();
|
||||
}
|
||||
Status Flush() override { return base_->Flush(); }
|
||||
Status Sync() override {
|
||||
++env_->sync_counter_;
|
||||
@ -4067,6 +4074,16 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
|
||||
DestroyAndReopen(options);
|
||||
CreateAndReopenWithCF({"pikachu"}, options);
|
||||
|
||||
rocksdb::SyncPoint::GetInstance()->SetCallBack(
|
||||
"DBTestWritableFile.GetPreallocationStatus", [&](void* arg) {
|
||||
ASSERT_TRUE(arg != nullptr);
|
||||
size_t preallocation_size = *(static_cast<size_t*>(arg));
|
||||
if (num_levels_ > 3) {
|
||||
ASSERT_LE(preallocation_size, options.target_file_size_base * 1.1);
|
||||
}
|
||||
});
|
||||
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
|
||||
|
||||
Random rnd(301);
|
||||
int key_idx = 0;
|
||||
|
||||
@ -4175,6 +4192,8 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
|
||||
dbfull()->TEST_WaitForCompact();
|
||||
// All files at level 0 will be compacted into a single one.
|
||||
ASSERT_EQ(NumSortedRuns(1), 1);
|
||||
|
||||
rocksdb::SyncPoint::GetInstance()->DisableProcessing();
|
||||
}
|
||||
|
||||
TEST_P(DBTestUniversalCompaction, UniversalCompactionSizeAmplification) {
|
||||
|
@ -532,6 +532,8 @@ class WritableFile {
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
size_t preallocation_block_size() { return preallocation_block_size_; }
|
||||
|
||||
private:
|
||||
size_t last_preallocated_block_;
|
||||
size_t preallocation_block_size_;
|
||||
|
Loading…
x
Reference in New Issue
Block a user