// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "db/compaction_picker.h"
#include <limits>
#include <string>
#include <utility>
#include "db/compaction.h"
#include "db/compaction_picker_universal.h"
#include "util/logging.h"
#include "util/string_util.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace rocksdb {

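// A Logger that counts how many lines were logged, without writing them
// anywhere.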
class CountingLogger : public Logger {
 public:
  using Logger::Logv;
  virtual void Logv(const char* format, va_list ap) override { log_count++; }
  size_t log_count;
};

class CompactionPickerTest : public testing::Test {
 public:
  const Comparator* ucmp_;
  InternalKeyComparator icmp_;
  Options options_;
  ImmutableCFOptions ioptions_;
  MutableCFOptions mutable_cf_options_;
  LevelCompactionPicker level_compaction_picker;
  std::string cf_name_;
  CountingLogger logger_;
  LogBuffer log_buffer_;
  uint32_t file_num_;
  CompactionOptionsFIFO fifo_options_;
  std::unique_ptr<VersionStorageInfo> vstorage_;
  std::vector<std::unique_ptr<FileMetaData>> files_;
  // does not own FileMetaData
  std::unordered_map<uint32_t, std::pair<FileMetaData*, int>> file_map_;
  // input files to compaction process.
  std::vector<CompactionInputFiles> input_files_;
  int compaction_level_start_;

CompactionPickerTest()
      : ucmp_(BytewiseComparator()),
        icmp_(ucmp_),
        ioptions_(options_),
        mutable_cf_options_(options_),
        level_compaction_picker(ioptions_, &icmp_),
        cf_name_("dummy"),
        log_buffer_(InfoLogLevel::INFO_LEVEL, &logger_),
        file_num_(1),
        vstorage_(nullptr) {
    fifo_options_.max_table_files_size = 1;
    mutable_cf_options_.RefreshDerivedOptions(ioptions_);
    ioptions_.db_paths.emplace_back("dummy",
                                    std::numeric_limits<uint64_t>::max());
  }

  ~CompactionPickerTest() {
  }

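  // Replaces vstorage_ with a fresh VersionStorageInfo with num_levels
  // levels and the given compaction style; previously added files are
  // dropped.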
void NewVersionStorage(int num_levels, CompactionStyle style) {
    DeleteVersionStorage();
    options_.num_levels = num_levels;
    vstorage_.reset(new VersionStorageInfo(&icmp_, ucmp_, options_.num_levels,
                                           style, nullptr, false));
    vstorage_->CalculateBaseBytes(ioptions_, mutable_cf_options_);
  }

void DeleteVersionStorage() {
    vstorage_.reset();
    files_.clear();
    file_map_.clear();
    input_files_.clear();
  }

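  // Registers a file with the given number, key range, size and sequence
  // numbers on `level` of the current version storage.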
void Add(int level, uint32_t file_number, const char* smallest,
           const char* largest, uint64_t file_size = 1, uint32_t path_id = 0,
           SequenceNumber smallest_seq = 100,
           SequenceNumber largest_seq = 100) {
    assert(level < vstorage_->num_levels());
    FileMetaData* f = new FileMetaData;
    f->fd = FileDescriptor(file_number, path_id, file_size);
    f->smallest = InternalKey(smallest, smallest_seq, kTypeValue);
    f->largest = InternalKey(largest, largest_seq, kTypeValue);
    f->smallest_seqno = smallest_seq;
    f->largest_seqno = largest_seq;
    f->compensated_file_size = file_size;
    f->refs = 0;
    vstorage_->AddFile(level, f);
    files_.emplace_back(f);
    file_map_.insert({file_number, {f, level}});
  }

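  // Sizes input_files_ to cover level_count consecutive levels starting at
  // start_level.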
void SetCompactionInputFilesLevels(int level_count, int start_level) {
    input_files_.resize(level_count);
    for (int i = 0; i < level_count; ++i) {
      input_files_[i].level = start_level + i;
    }
    compaction_level_start_ = start_level;
  }

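  // Looks up a previously Add()ed file and appends it to the matching input
  // level.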
void AddToCompactionFiles(uint32_t file_number) {
    auto iter = file_map_.find(file_number);
    assert(iter != file_map_.end());
    int level = iter->second.second;
    assert(level < vstorage_->num_levels());
    input_files_[level - compaction_level_start_].files.emplace_back(
        iter->second.first);
  }

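  // Recomputes all derived state (base bytes, compaction scores, file
  // ordering, etc.); must be called after Add() and before picking a
  // compaction.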
void UpdateVersionStorageInfo() {
    vstorage_->CalculateBaseBytes(ioptions_, mutable_cf_options_);
    vstorage_->UpdateFilesByCompactionPri(ioptions_.compaction_pri);
    vstorage_->UpdateNumNonEmptyLevels();
    vstorage_->GenerateFileIndexer();
    vstorage_->GenerateLevelFilesBrief();
    vstorage_->ComputeCompactionScore(ioptions_, mutable_cf_options_);
    vstorage_->GenerateLevel0NonOverlapping();
    vstorage_->ComputeFilesMarkedForCompaction();
    vstorage_->SetFinalized();
  }
};

TEST_F(CompactionPickerTest, Empty) {
  NewVersionStorage(6, kCompactionStyleLevel);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() == nullptr);
}

TEST_F(CompactionPickerTest, Single) {
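  // A single L0 file is below level0_file_num_compaction_trigger (2), so
  // nothing should be picked.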
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  Add(0, 1U, "p", "q");
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() == nullptr);
}

TEST_F(CompactionPickerTest, Level0Trigger) {
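  // Two L0 files reach the trigger of two, so both should be compacted
  // together.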
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  Add(0, 1U, "150", "200");
  Add(0, 2U, "200", "250");

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_files(0));
  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
}

TEST_F(CompactionPickerTest, Level1Trigger) {
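  // A 1GB file in L1 should push the level past its size target, so it is
  // expected to be picked.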
  NewVersionStorage(6, kCompactionStyleLevel);
  Add(1, 66U, "150", "200", 1000000000U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber());
}

TEST_F(CompactionPickerTest, Level1Trigger2) {
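  // target_file_size_base is set far above the 1GB preallocation cap, so
  // OutputFilePreallocationSize() should come back clamped to 1GB.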
  mutable_cf_options_.target_file_size_base = 10000000000;
  mutable_cf_options_.RefreshDerivedOptions(ioptions_);
  NewVersionStorage(6, kCompactionStyleLevel);
  Add(1, 66U, "150", "200", 1000000001U);
  Add(1, 88U, "201", "300", 1000000000U);
  Add(2, 6U, "150", "179", 1000000000U);
  Add(2, 7U, "180", "220", 1000000000U);
  Add(2, 8U, "221", "300", 1000000000U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(2U, compaction->num_input_files(1));
  ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
  ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber());
  ASSERT_EQ(uint64_t{1073741824}, compaction->OutputFilePreallocationSize());
}

TEST_F(CompactionPickerTest, LevelMaxScore) {
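  // The level with the highest compaction score should be chosen; the
  // comments below give each level's approximate score.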
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.target_file_size_base = 10000000;
  mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024;
  mutable_cf_options_.RefreshDerivedOptions(ioptions_);
  Add(0, 1U, "150", "200", 1000000U);
  // Level 1 score 1.2
  Add(1, 66U, "150", "200", 6000000U);
  Add(1, 88U, "201", "300", 6000000U);
  // Level 2 score 1.8. File 7 is the largest. Should be picked
  Add(2, 6U, "150", "179", 60000000U);
  Add(2, 7U, "180", "220", 60000001U);
  Add(2, 8U, "221", "300", 60000000U);
  // Level 3 score slightly larger than 1
  Add(3, 26U, "150", "170", 260000000U);
  Add(3, 27U, "171", "179", 260000000U);
  Add(3, 28U, "191", "220", 260000000U);
  Add(3, 29U, "221", "300", 260000000U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(mutable_cf_options_.target_file_size_base +
                mutable_cf_options_.target_file_size_base / 10,
            compaction->OutputFilePreallocationSize());
}

TEST_F(CompactionPickerTest, NeedsCompactionLevel) {
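  // NeedsCompaction() should agree with the computed compaction score for
  // every level and file count.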
  const int kLevels = 6;
  const int kFileCount = 20;

  for (int level = 0; level < kLevels - 1; ++level) {
    NewVersionStorage(kLevels, kCompactionStyleLevel);
    uint64_t file_size = vstorage_->MaxBytesForLevel(level) * 2 / kFileCount;
    for (int file_count = 1; file_count <= kFileCount; ++file_count) {
      // start a brand new version in each test.
      NewVersionStorage(kLevels, kCompactionStyleLevel);
      for (int i = 0; i < file_count; ++i) {
        Add(level, i, ToString((i + 100) * 1000).c_str(),
            ToString((i + 100) * 1000 + 999).c_str(),
            file_size, 0, i * 100, i * 100 + 99);
      }
      UpdateVersionStorageInfo();
      ASSERT_EQ(vstorage_->CompactionScoreLevel(0), level);
      ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()),
                vstorage_->CompactionScore(0) >= 1);
      // release the version storage
      DeleteVersionStorage();
    }
  }
}

TEST_F(CompactionPickerTest, Level0TriggerDynamic) {
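  // With dynamic level bytes and no other populated levels, the picked L0
  // compaction should output directly to the bottommost level.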
  int num_levels = ioptions_.num_levels;
  ioptions_.level_compaction_dynamic_level_bytes = true;
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  mutable_cf_options_.max_bytes_for_level_base = 200;
  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
  NewVersionStorage(num_levels, kCompactionStyleLevel);
  Add(0, 1U, "150", "200");
  Add(0, 2U, "200", "250");

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_files(0));
  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
  ASSERT_EQ(1, static_cast<int>(compaction->num_input_levels()));
  ASSERT_EQ(num_levels - 1, compaction->output_level());
}

TEST_F(CompactionPickerTest, Level0TriggerDynamic2) {
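  // 300 bytes in the last level exceeds max_bytes_for_level_base (200), so
  // the base level should shift up to num_levels - 2.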
  int num_levels = ioptions_.num_levels;
  ioptions_.level_compaction_dynamic_level_bytes = true;
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  mutable_cf_options_.max_bytes_for_level_base = 200;
  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
  NewVersionStorage(num_levels, kCompactionStyleLevel);
  Add(0, 1U, "150", "200");
  Add(0, 2U, "200", "250");
  Add(num_levels - 1, 3U, "200", "250", 300U);

  UpdateVersionStorageInfo();
  ASSERT_EQ(vstorage_->base_level(), num_levels - 2);

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_files(0));
  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
  ASSERT_EQ(1, static_cast<int>(compaction->num_input_levels()));
  ASSERT_EQ(num_levels - 2, compaction->output_level());
}

TEST_F(CompactionPickerTest, Level0TriggerDynamic3) {
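  // With 3300 bytes in the last level, the base level should shift up two
  // levels, to num_levels - 3.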
  int num_levels = ioptions_.num_levels;
  ioptions_.level_compaction_dynamic_level_bytes = true;
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  mutable_cf_options_.max_bytes_for_level_base = 200;
  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
  NewVersionStorage(num_levels, kCompactionStyleLevel);
  Add(0, 1U, "150", "200");
  Add(0, 2U, "200", "250");
  Add(num_levels - 1, 3U, "200", "250", 300U);
  Add(num_levels - 1, 4U, "300", "350", 3000U);

  UpdateVersionStorageInfo();
  ASSERT_EQ(vstorage_->base_level(), num_levels - 3);

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_files(0));
  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
  ASSERT_EQ(1, static_cast<int>(compaction->num_input_levels()));
  ASSERT_EQ(num_levels - 3, compaction->output_level());
}

TEST_F(CompactionPickerTest, Level0TriggerDynamic4) {
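  // The L0 files overlap files 5 and 6 in the base level, so those should be
  // pulled in as a second input level.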
  int num_levels = ioptions_.num_levels;
  ioptions_.level_compaction_dynamic_level_bytes = true;
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  mutable_cf_options_.max_bytes_for_level_base = 200;
  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
  NewVersionStorage(num_levels, kCompactionStyleLevel);
  Add(0, 1U, "150", "200");
  Add(0, 2U, "200", "250");
  Add(num_levels - 1, 3U, "200", "250", 300U);
  Add(num_levels - 1, 4U, "300", "350", 3000U);
  Add(num_levels - 3, 5U, "150", "180", 3U);
  Add(num_levels - 3, 6U, "181", "300", 3U);
  Add(num_levels - 3, 7U, "400", "450", 3U);

  UpdateVersionStorageInfo();
  ASSERT_EQ(vstorage_->base_level(), num_levels - 3);

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_files(0));
  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
  ASSERT_EQ(2U, compaction->num_input_files(1));
  ASSERT_EQ(num_levels - 3, compaction->level(1));
  ASSERT_EQ(5U, compaction->input(1, 0)->fd.GetNumber());
  ASSERT_EQ(6U, compaction->input(1, 1)->fd.GetNumber());
  ASSERT_EQ(2, static_cast<int>(compaction->num_input_levels()));
  ASSERT_EQ(num_levels - 3, compaction->output_level());
}

TEST_F(CompactionPickerTest, LevelTriggerDynamic4) {
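  // With kMinOverlappingRatio, file 5 should be picked first: it has no
  // overlap with the output level.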
  int num_levels = ioptions_.num_levels;
  ioptions_.level_compaction_dynamic_level_bytes = true;
  ioptions_.compaction_pri = kMinOverlappingRatio;
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  mutable_cf_options_.max_bytes_for_level_base = 200;
  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
  NewVersionStorage(num_levels, kCompactionStyleLevel);
  Add(0, 1U, "150", "200");
  Add(num_levels - 1, 3U, "200", "250", 300U);
  Add(num_levels - 1, 4U, "300", "350", 3000U);
  Add(num_levels - 1, 4U, "400", "450", 3U);
  Add(num_levels - 2, 5U, "150", "180", 300U);
  Add(num_levels - 2, 6U, "181", "350", 500U);
  Add(num_levels - 2, 7U, "400", "450", 200U);

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(5U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(0, compaction->num_input_files(1));
  ASSERT_EQ(1U, compaction->num_input_levels());
  ASSERT_EQ(num_levels - 1, compaction->output_level());
}

// Universal and FIFO Compactions are not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
TEST_F(CompactionPickerTest, NeedsCompactionUniversal) {
|
2014-11-13 22:41:43 +01:00
|
|
|
NewVersionStorage(1, kCompactionStyleUniversal);
|
|
|
|
UniversalCompactionPicker universal_compaction_picker(
|
|
|
|
ioptions_, &icmp_);
|
|
|
|
// must return false when there's no files.
|
2014-11-14 00:21:04 +01:00
|
|
|
ASSERT_EQ(universal_compaction_picker.NeedsCompaction(vstorage_.get()),
|
|
|
|
false);
|
Add experimental API MarkForCompaction()
Summary:
Some Mongo+Rocks datasets in Parse's environment are not doing compactions very frequently. During the quiet period (with no IO), we'd like to schedule compactions so that our reads become faster. Also, aggressively compacting during quiet periods helps when write bursts happen. In addition, we also want to compact files that are containing deleted key ranges (like old oplog keys).
All of this is currently not possible with CompactRange() because it's single-threaded and blocks all other compactions from happening. Running CompactRange() risks an issue of blocking writes because we generate too much Level 0 files before the compaction is over. Stopping writes is very dangerous because they hold transaction locks. We tried running manual compaction once on Mongo+Rocks and everything fell apart.
MarkForCompaction() solves all of those problems. This is very light-weight manual compaction. It is lower priority than automatic compactions, which means it shouldn't interfere with background process keeping the LSM tree clean. However, if no automatic compactions need to be run (or we have extra background threads available), we will start compacting files that are marked for compaction.
Test Plan: added a new unit test
Reviewers: yhchiang, rven, MarkCallaghan, sdong
Reviewed By: sdong
Subscribers: yoshinorim, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D37083
2015-04-18 01:44:45 +02:00
|
|
|
UpdateVersionStorageInfo();
|
2014-11-13 22:41:43 +01:00
|
|
|
|
|
|
|
// verify the trigger given different number of L0 files.
|
|
|
|
  for (int i = 1;
       i <= mutable_cf_options_.level0_file_num_compaction_trigger * 2; ++i) {
    NewVersionStorage(1, kCompactionStyleUniversal);
    Add(0, i, ToString((i + 100) * 1000).c_str(),
        ToString((i + 100) * 1000 + 999).c_str(), 1000000, 0, i * 100,
        i * 100 + 99);
    UpdateVersionStorageInfo();
    ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()),
              vstorage_->CompactionScore(0) >= 1);
  }
}

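// With allow_ingest_behind set, the bottom-most level is reserved for
// ingested files, so universal compaction must not output to it.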
TEST_F(CompactionPickerTest, CompactionUniversalIngestBehindReservedLevel) {
  const uint64_t kFileSize = 100000;

  NewVersionStorage(1, kCompactionStyleUniversal);
  ioptions_.allow_ingest_behind = true;
  ioptions_.num_levels = 3;
  UniversalCompactionPicker universal_compaction_picker(ioptions_, &icmp_);
  // Must return false when there are no files.
  ASSERT_EQ(universal_compaction_picker.NeedsCompaction(vstorage_.get()),
            false);

  NewVersionStorage(3, kCompactionStyleUniversal);

  Add(0, 1U, "150", "200", kFileSize, 0, 500, 550);
  Add(0, 2U, "201", "250", kFileSize, 0, 401, 450);
  Add(0, 4U, "260", "300", kFileSize, 0, 260, 300);
  Add(1, 5U, "100", "151", kFileSize, 0, 200, 251);
  Add(1, 3U, "301", "350", kFileSize, 0, 101, 150);
  Add(2, 6U, "120", "200", kFileSize, 0, 20, 100);

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(
      universal_compaction_picker.PickCompaction(
          cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));

  // Output level should be the one above the bottom-most.
  ASSERT_EQ(1, compaction->output_level());
}

// Tests whether files can be trivially moved in multi-level universal
// compaction when the allow_trivial_move option is set. In this test the
// input files overlap, so they cannot be trivially moved.
TEST_F(CompactionPickerTest, CannotTrivialMoveUniversal) {
  const uint64_t kFileSize = 100000;

  mutable_cf_options_.compaction_options_universal.allow_trivial_move = true;
  NewVersionStorage(1, kCompactionStyleUniversal);
  UniversalCompactionPicker universal_compaction_picker(ioptions_, &icmp_);
  // Must return false when there are no files.
  ASSERT_EQ(universal_compaction_picker.NeedsCompaction(vstorage_.get()),
            false);

  NewVersionStorage(3, kCompactionStyleUniversal);

  Add(0, 1U, "150", "200", kFileSize, 0, 500, 550);
  Add(0, 2U, "201", "250", kFileSize, 0, 401, 450);
  Add(0, 4U, "260", "300", kFileSize, 0, 260, 300);
  Add(1, 5U, "100", "151", kFileSize, 0, 200, 251);
  Add(1, 3U, "301", "350", kFileSize, 0, 101, 150);
  Add(2, 6U, "120", "200", kFileSize, 0, 20, 100);

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(
      universal_compaction_picker.PickCompaction(
          cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));

  ASSERT_TRUE(!compaction->is_trivial_move());
}

// Tests whether files can be trivially moved in multi-level universal
// compaction when the allow_trivial_move option is set. In this test the
// input files don't overlap, so they should be trivially moved.
TEST_F(CompactionPickerTest, AllowsTrivialMoveUniversal) {
  const uint64_t kFileSize = 100000;

  mutable_cf_options_.compaction_options_universal.allow_trivial_move = true;
  UniversalCompactionPicker universal_compaction_picker(ioptions_, &icmp_);

  NewVersionStorage(3, kCompactionStyleUniversal);

  Add(0, 1U, "150", "200", kFileSize, 0, 500, 550);
  Add(0, 2U, "201", "250", kFileSize, 0, 401, 450);
  Add(0, 4U, "260", "300", kFileSize, 0, 260, 300);
  Add(1, 5U, "010", "080", kFileSize, 0, 200, 251);
  Add(2, 3U, "301", "350", kFileSize, 0, 101, 150);

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(
      universal_compaction_picker.PickCompaction(
          cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));

  ASSERT_TRUE(compaction->is_trivial_move());
}

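// FIFO compaction simply drops the oldest files once the total size of the
// level exceeds compaction_options_fifo.max_table_files_size.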
TEST_F(CompactionPickerTest, NeedsCompactionFIFO) {
  NewVersionStorage(1, kCompactionStyleFIFO);
  const int kFileCount =
      mutable_cf_options_.level0_file_num_compaction_trigger * 3;
  const uint64_t kFileSize = 100000;
  const uint64_t kMaxSize = kFileSize * kFileCount / 2;

  fifo_options_.max_table_files_size = kMaxSize;
  mutable_cf_options_.compaction_options_fifo = fifo_options_;
  FIFOCompactionPicker fifo_compaction_picker(ioptions_, &icmp_);
  UpdateVersionStorageInfo();
  // Must return false when there are no files.
  ASSERT_EQ(fifo_compaction_picker.NeedsCompaction(vstorage_.get()), false);

  // Verify whether compaction is needed based on the current
  // size of L0 files.
  uint64_t current_size = 0;
  for (int i = 1; i <= kFileCount; ++i) {
    NewVersionStorage(1, kCompactionStyleFIFO);
    Add(0, i, ToString((i + 100) * 1000).c_str(),
        ToString((i + 100) * 1000 + 999).c_str(),
        kFileSize, 0, i * 100, i * 100 + 99);
    current_size += kFileSize;
    UpdateVersionStorageInfo();
    ASSERT_EQ(fifo_compaction_picker.NeedsCompaction(vstorage_.get()),
              vstorage_->CompactionScore(0) >= 1);
  }
}

#endif  // ROCKSDB_LITE

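// With compaction_pri = kMinOverlappingRatio, the picker prefers the file
// whose ratio of bytes overlapping the next level to its own size is the
// smallest, which minimizes write amplification.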
TEST_F(CompactionPickerTest, CompactionPriMinOverlapping1) {
  NewVersionStorage(6, kCompactionStyleLevel);
  ioptions_.compaction_pri = kMinOverlappingRatio;
  mutable_cf_options_.target_file_size_base = 100000000000;
  mutable_cf_options_.target_file_size_multiplier = 10;
  mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024;
  mutable_cf_options_.RefreshDerivedOptions(ioptions_);

  Add(2, 6U, "150", "179", 50000000U);
  Add(2, 7U, "180", "220", 50000000U);
  Add(2, 8U, "321", "400", 50000000U);  // File not overlapping
  Add(2, 9U, "721", "800", 50000000U);

  Add(3, 26U, "150", "170", 260000000U);
  Add(3, 27U, "171", "179", 260000000U);
  Add(3, 28U, "191", "220", 260000000U);
  Add(3, 29U, "221", "300", 260000000U);
  Add(3, 30U, "750", "900", 260000000U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(1U, compaction->num_input_files(0));
  // Pick file 8 because it overlaps with 0 files on level 3.
  ASSERT_EQ(8U, compaction->input(0, 0)->fd.GetNumber());
  // Compaction input size * 1.1
  ASSERT_GE(uint64_t{55000000}, compaction->OutputFilePreallocationSize());
}

TEST_F(CompactionPickerTest, CompactionPriMinOverlapping2) {
  NewVersionStorage(6, kCompactionStyleLevel);
  ioptions_.compaction_pri = kMinOverlappingRatio;
  mutable_cf_options_.target_file_size_base = 10000000;
  mutable_cf_options_.target_file_size_multiplier = 10;
  mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024;

  Add(2, 6U, "150", "175",
      60000000U);  // Overlaps with file 26, 27, total size 521M
  Add(2, 7U, "176", "200", 60000000U);  // Overlaps with file 27, 28, total
                                        // size 520M, the smallest overlap
  Add(2, 8U, "201", "300",
      60000000U);  // Overlaps with file 28, 29, total size 521M

  Add(3, 26U, "100", "110", 261000000U);
  Add(3, 26U, "150", "170", 261000000U);
  Add(3, 27U, "171", "179", 260000000U);
  Add(3, 28U, "191", "220", 260000000U);
  Add(3, 29U, "221", "300", 261000000U);
  Add(3, 30U, "321", "400", 261000000U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(1U, compaction->num_input_files(0));
  // Pick file 7 because its overlapping ratio is the smallest.
  ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber());
}

TEST_F(CompactionPickerTest, CompactionPriMinOverlapping3) {
  NewVersionStorage(6, kCompactionStyleLevel);
  ioptions_.compaction_pri = kMinOverlappingRatio;
  mutable_cf_options_.max_bytes_for_level_base = 10000000;
  mutable_cf_options_.max_bytes_for_level_multiplier = 10;

  // Files 7 and 8 each overlap with a single level-3 file, but file 8 is
  // larger, so its overlapping ratio is smaller and it will be picked.
  Add(2, 6U, "150", "167", 60000000U);  // Overlaps with file 26, 27
  Add(2, 7U, "168", "169", 60000000U);  // Overlaps with file 27
  Add(2, 8U, "201", "300", 61000000U);  // Overlaps with file 28, but the file
                                        // itself is larger. Should be picked.

  Add(3, 26U, "160", "165", 260000000U);
  Add(3, 27U, "166", "170", 260000000U);
  Add(3, 28U, "180", "400", 260000000U);
  Add(3, 29U, "401", "500", 260000000U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(1U, compaction->num_input_files(0));
  // Pick file 8 because its overlapping ratio is the smallest.
  ASSERT_EQ(8U, compaction->input(0, 0)->fd.GetNumber());
}

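// A stale parent_index can point the picker at the wrong output-level file;
// the test below reproduces the original failure scenario.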
// This test exhibits the bug where we don't properly reset parent_index in
// PickCompaction().
TEST_F(CompactionPickerTest, ParentIndexResetBug) {
  int num_levels = ioptions_.num_levels;
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  mutable_cf_options_.max_bytes_for_level_base = 200;
  NewVersionStorage(num_levels, kCompactionStyleLevel);
  Add(0, 1U, "150", "200");       // <- marked for compaction
  Add(1, 3U, "400", "500", 600);  // <- this one needs compacting
  Add(2, 4U, "150", "200");
  Add(2, 5U, "201", "210");
  Add(2, 6U, "300", "310");
  Add(2, 7U, "400", "500");  // <- being compacted

  vstorage_->LevelFiles(2)[3]->being_compacted = true;
  vstorage_->LevelFiles(0)[0]->marked_for_compaction = true;

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
}

// This test checks ExpandWhileOverlapping() by having overlapping user-key
// ranges (with different sequence numbers) in the input files.
TEST_F(CompactionPickerTest, OverlappingUserKeys) {
  NewVersionStorage(6, kCompactionStyleLevel);
  ioptions_.compaction_pri = kByCompensatedSize;

  Add(1, 1U, "100", "150", 1U);
  // Overlapping user keys
  Add(1, 2U, "200", "400", 1U);
  Add(1, 3U, "400", "500", 1000000000U, 0, 0);
  Add(2, 4U, "600", "700", 1U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(1U, compaction->num_input_levels());
  ASSERT_EQ(2U, compaction->num_input_files(0));
  ASSERT_EQ(2U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(3U, compaction->input(0, 1)->fd.GetNumber());
}

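// Files 2 and 3 above share the user key "400" at their boundary, so picking
// either one forces the other into the same compaction.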
TEST_F(CompactionPickerTest, OverlappingUserKeys2) {
  NewVersionStorage(6, kCompactionStyleLevel);
  // Overlapping user keys on same level and output level
  Add(1, 1U, "200", "400", 1000000000U);
  Add(1, 2U, "400", "500", 1U, 0, 0);
  Add(2, 3U, "000", "100", 1U);
  Add(2, 4U, "100", "600", 1U, 0, 0);
  Add(2, 5U, "600", "700", 1U, 0, 0);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_EQ(2U, compaction->num_input_files(0));
  ASSERT_EQ(3U, compaction->num_input_files(1));
  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
  ASSERT_EQ(3U, compaction->input(1, 0)->fd.GetNumber());
  ASSERT_EQ(4U, compaction->input(1, 1)->fd.GetNumber());
  ASSERT_EQ(5U, compaction->input(1, 2)->fd.GetNumber());
}

TEST_F(CompactionPickerTest, OverlappingUserKeys3) {
  NewVersionStorage(6, kCompactionStyleLevel);
  // Chain of overlapping user key ranges (forces ExpandWhileOverlapping() to
  // expand multiple times)
  Add(1, 1U, "100", "150", 1U);
  Add(1, 2U, "150", "200", 1U, 0, 0);
  Add(1, 3U, "200", "250", 1000000000U, 0, 0);
  Add(1, 4U, "250", "300", 1U, 0, 0);
  Add(1, 5U, "300", "350", 1U, 0, 0);
  // Output level overlaps with the beginning and the end of the chain
  Add(2, 6U, "050", "100", 1U);
  Add(2, 7U, "350", "400", 1U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_EQ(5U, compaction->num_input_files(0));
  ASSERT_EQ(2U, compaction->num_input_files(1));
  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
  ASSERT_EQ(3U, compaction->input(0, 2)->fd.GetNumber());
  ASSERT_EQ(4U, compaction->input(0, 3)->fd.GetNumber());
  ASSERT_EQ(5U, compaction->input(0, 4)->fd.GetNumber());
  ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
  ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber());
}

TEST_F(CompactionPickerTest, OverlappingUserKeys4) {
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.max_bytes_for_level_base = 1000000;

  Add(1, 1U, "100", "150", 1U);
  Add(1, 2U, "150", "199", 1U, 0, 0);
  Add(1, 3U, "200", "250", 1100000U, 0, 0);
  Add(1, 4U, "251", "300", 1U, 0, 0);
  Add(1, 5U, "300", "350", 1U, 0, 0);

  Add(2, 6U, "100", "115", 1U);
  Add(2, 7U, "125", "325", 1U);
  Add(2, 8U, "350", "400", 1U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(1U, compaction->num_input_files(1));
  ASSERT_EQ(3U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(7U, compaction->input(1, 0)->fd.GetNumber());
}

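// Same file layout as OverlappingUserKeys2, but file 5 in the output level is
// already being compacted, so no compaction can be formed.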
TEST_F(CompactionPickerTest, OverlappingUserKeys5) {
  NewVersionStorage(6, kCompactionStyleLevel);
  // Overlapping user keys on same level and output level
  Add(1, 1U, "200", "400", 1000000000U);
  Add(1, 2U, "400", "500", 1U, 0, 0);
  Add(2, 3U, "000", "100", 1U);
  Add(2, 4U, "100", "600", 1U, 0, 0);
  Add(2, 5U, "600", "700", 1U, 0, 0);

  vstorage_->LevelFiles(2)[2]->being_compacted = true;

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() == nullptr);
}

TEST_F(CompactionPickerTest, OverlappingUserKeys6) {
  NewVersionStorage(6, kCompactionStyleLevel);
  // Overlapping user keys on same level and output level
  Add(1, 1U, "200", "400", 1U, 0, 0);
  Add(1, 2U, "401", "500", 1U, 0, 0);
  Add(2, 3U, "000", "100", 1U);
  Add(2, 4U, "100", "300", 1U, 0, 0);
  Add(2, 5U, "305", "450", 1U, 0, 0);
  Add(2, 6U, "460", "600", 1U, 0, 0);
  Add(2, 7U, "600", "700", 1U, 0, 0);

  vstorage_->LevelFiles(1)[0]->marked_for_compaction = true;
  vstorage_->LevelFiles(1)[1]->marked_for_compaction = true;

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(3U, compaction->num_input_files(1));
}

TEST_F(CompactionPickerTest, OverlappingUserKeys7) {
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.max_compaction_bytes = 100000000000u;
  // Overlapping user keys on same level and output level
  Add(1, 1U, "200", "400", 1U, 0, 0);
  Add(1, 2U, "401", "500", 1000000000U, 0, 0);
  Add(2, 3U, "100", "250", 1U);
  Add(2, 4U, "300", "600", 1U, 0, 0);
  Add(2, 5U, "600", "800", 1U, 0, 0);

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_GE(1U, compaction->num_input_files(0));
  ASSERT_GE(2U, compaction->num_input_files(1));
  // File 5 has to be included in the compaction
  ASSERT_EQ(5U, compaction->inputs(1)->back()->fd.GetNumber());
}

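// "Clean cut" below means that no file left out of the compaction shares a
// boundary user key with a chosen file, so the input level can expand safely.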
TEST_F(CompactionPickerTest, OverlappingUserKeys8) {
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.max_compaction_bytes = 100000000000u;
  // Grow the number of inputs in "level" without changing the number of
  // "level+1" files we pick up. Expand the input level as much as possible.
  // No-overlap case.
  Add(1, 1U, "101", "150", 1U);
  Add(1, 2U, "151", "200", 1U);
  Add(1, 3U, "201", "300", 1000000000U);
  Add(1, 4U, "301", "400", 1U);
  Add(1, 5U, "401", "500", 1U);
  Add(2, 6U, "150", "200", 1U);
  Add(2, 7U, "200", "450", 1U, 0, 0);
  Add(2, 8U, "500", "600", 1U);

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_EQ(3U, compaction->num_input_files(0));
  ASSERT_EQ(2U, compaction->num_input_files(1));
  ASSERT_EQ(2U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(3U, compaction->input(0, 1)->fd.GetNumber());
  ASSERT_EQ(4U, compaction->input(0, 2)->fd.GetNumber());
  ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
  ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber());
}

TEST_F(CompactionPickerTest, OverlappingUserKeys9) {
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.max_compaction_bytes = 100000000000u;
  // Grow the number of inputs in "level" without changing the number of
  // "level+1" files we pick up. Expand the input level as much as possible.
  // Overlapping case.
  Add(1, 1U, "121", "150", 1U);
  Add(1, 2U, "151", "200", 1U);
  Add(1, 3U, "201", "300", 1000000000U);
  Add(1, 4U, "301", "400", 1U);
  Add(1, 5U, "401", "500", 1U);
  Add(2, 6U, "100", "120", 1U);
  Add(2, 7U, "150", "200", 1U);
  Add(2, 8U, "200", "450", 1U, 0, 0);
  Add(2, 9U, "501", "600", 1U);

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_EQ(5U, compaction->num_input_files(0));
  ASSERT_EQ(2U, compaction->num_input_files(1));
  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
  ASSERT_EQ(3U, compaction->input(0, 2)->fd.GetNumber());
  ASSERT_EQ(4U, compaction->input(0, 3)->fd.GetNumber());
  ASSERT_EQ(7U, compaction->input(1, 0)->fd.GetNumber());
  ASSERT_EQ(8U, compaction->input(1, 1)->fd.GetNumber());
}

TEST_F(CompactionPickerTest, OverlappingUserKeys10) {
  // Locked file encountered when pulling in extra input-level files with same
  // user keys. Verify we pick the next-best file from the same input level.
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.max_compaction_bytes = 100000000000u;

  // file_number 2U is largest and thus first choice. But it overlaps with
  // file_number 1U which is being compacted. So instead we pick the next-
  // biggest file, 3U, which is eligible for compaction.
  Add(1 /* level */, 1U /* file_number */, "100" /* smallest */,
      "150" /* largest */, 1U /* file_size */);
  file_map_[1U].first->being_compacted = true;
  Add(1 /* level */, 2U /* file_number */, "150" /* smallest */,
      "200" /* largest */, 1000000000U /* file_size */, 0 /* smallest_seq */,
      0 /* largest_seq */);
  Add(1 /* level */, 3U /* file_number */, "201" /* smallest */,
      "250" /* largest */, 900000000U /* file_size */);
  Add(2 /* level */, 4U /* file_number */, "100" /* smallest */,
      "150" /* largest */, 1U /* file_size */);
  Add(2 /* level */, 5U /* file_number */, "151" /* smallest */,
      "200" /* largest */, 1U /* file_size */);
  Add(2 /* level */, 6U /* file_number */, "201" /* smallest */,
      "250" /* largest */, 1U /* file_size */);

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(1U, compaction->num_input_files(1));
  ASSERT_EQ(3U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
}

TEST_F(CompactionPickerTest, OverlappingUserKeys11) {
  // Locked file encountered when pulling in extra output-level files with same
  // user keys. Expected to skip that compaction and pick the next-best choice.
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.max_compaction_bytes = 100000000000u;

  // score(L1) = 3.7
  // score(L2) = 1.85
  // There is no eligible file in L1 to compact since both candidates pull in
  // file_number 5U, which overlaps with a file pending compaction (6U). The
  // first eligible compaction is from L2->L3.
  Add(1 /* level */, 2U /* file_number */, "151" /* smallest */,
      "200" /* largest */, 1000000000U /* file_size */);
  Add(1 /* level */, 3U /* file_number */, "201" /* smallest */,
      "250" /* largest */, 1U /* file_size */);
  Add(2 /* level */, 4U /* file_number */, "100" /* smallest */,
      "149" /* largest */, 5000000000U /* file_size */);
  Add(2 /* level */, 5U /* file_number */, "150" /* smallest */,
      "201" /* largest */, 1U /* file_size */);
  Add(2 /* level */, 6U /* file_number */, "201" /* smallest */,
      "249" /* largest */, 1U /* file_size */, 0 /* smallest_seq */,
      0 /* largest_seq */);
  file_map_[6U].first->being_compacted = true;
  Add(3 /* level */, 7U /* file_number */, "100" /* smallest */,
      "149" /* largest */, 1U /* file_size */);

  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(1U, compaction->num_input_files(1));
  ASSERT_EQ(4U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(7U, compaction->input(1, 0)->fd.GetNumber());
}

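// The next three tests check the priority between L0 and L1: when L0 has the
// higher score, an L1->L2 compaction must not be scheduled in place of a
// blocked L0->L1 compaction.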
TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri1) {
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  mutable_cf_options_.max_bytes_for_level_base = 900000000U;

  // 6 L0 files, score 3.
  Add(0, 1U, "000", "400", 1U);
  Add(0, 2U, "001", "400", 1U, 0, 0);
  Add(0, 3U, "001", "400", 1000000000U, 0, 0);
  Add(0, 31U, "001", "400", 1000000000U, 0, 0);
  Add(0, 32U, "001", "400", 1000000000U, 0, 0);
  Add(0, 33U, "001", "400", 1000000000U, 0, 0);

  // L1 total size 2GB, score 2.2. If one file is being compacted, score 1.1.
  Add(1, 4U, "050", "300", 1000000000U, 0, 0);
  file_map_[4u].first->being_compacted = true;
  Add(1, 5U, "301", "350", 1000000000U, 0, 0);

  // L2 files overlap with the beginning and the end of the L1 key range.
  Add(2, 6U, "050", "100", 1U);
  Add(2, 7U, "300", "400", 1U);

  // No compaction should be scheduled if L0 has higher priority than L1,
  // but the L0->L1 compaction is blocked by a file in L1 being compacted.
  UpdateVersionStorageInfo();
  ASSERT_EQ(0, vstorage_->CompactionScoreLevel(0));
  ASSERT_EQ(1, vstorage_->CompactionScoreLevel(1));
  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() == nullptr);
}

TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri2) {
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  mutable_cf_options_.max_bytes_for_level_base = 900000000U;

  // 6 L0 files, score 3.
  Add(0, 1U, "000", "400", 1U);
  Add(0, 2U, "001", "400", 1U, 0, 0);
  Add(0, 3U, "001", "400", 1000000000U, 0, 0);
  Add(0, 31U, "001", "400", 1000000000U, 0, 0);
  Add(0, 32U, "001", "400", 1000000000U, 0, 0);
  Add(0, 33U, "001", "400", 1000000000U, 0, 0);

  // L1 total size 2GB, score 2.2. If one file is being compacted, score 1.1.
  Add(1, 4U, "050", "300", 1000000000U, 0, 0);
  Add(1, 5U, "301", "350", 1000000000U, 0, 0);

  // L2 files overlap with the beginning and the end of the L1 key range.
  Add(2, 6U, "050", "100", 1U);
  Add(2, 7U, "300", "400", 1U);

  // If no file in L1 is being compacted, L0->L1 compaction will be scheduled.
  UpdateVersionStorageInfo();  // being_compacted flag is cleared here.
  ASSERT_EQ(0, vstorage_->CompactionScoreLevel(0));
  ASSERT_EQ(1, vstorage_->CompactionScoreLevel(1));
  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
}

TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri3) {
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.level0_file_num_compaction_trigger = 2;
  mutable_cf_options_.max_bytes_for_level_base = 900000000U;

  // 6 L0 files, score 3.
  Add(0, 1U, "000", "400", 1U);
  Add(0, 2U, "001", "400", 1U, 0, 0);
  Add(0, 3U, "001", "400", 1000000000U, 0, 0);
  Add(0, 31U, "001", "400", 1000000000U, 0, 0);
  Add(0, 32U, "001", "400", 1000000000U, 0, 0);
  Add(0, 33U, "001", "400", 1000000000U, 0, 0);

  // L1 score more than 6.
  Add(1, 4U, "050", "300", 1000000000U, 0, 0);
  file_map_[4u].first->being_compacted = true;
  Add(1, 5U, "301", "350", 1000000000U, 0, 0);
  Add(1, 51U, "351", "400", 6000000000U, 0, 0);

  // L2 files overlap with the beginning and the end of the L1 key range.
  Add(2, 6U, "050", "100", 1U);
  Add(2, 7U, "300", "400", 1U);

  // If the L1 score is larger than the L0 score, the L1 compaction goes
  // through despite the pending L0 compaction.
  UpdateVersionStorageInfo();
  ASSERT_EQ(1, vstorage_->CompactionScoreLevel(0));
  ASSERT_EQ(0, vstorage_->CompactionScoreLevel(1));
  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
}

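// estimated_compaction_needed_bytes() is a rough estimate of how many bytes
// of compaction must run to bring every level back under its target size,
// assuming each overfull level merges its excess into the next level.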
TEST_F(CompactionPickerTest, EstimateCompactionBytesNeeded1) {
  int num_levels = ioptions_.num_levels;
  ioptions_.level_compaction_dynamic_level_bytes = false;
  mutable_cf_options_.level0_file_num_compaction_trigger = 4;
  mutable_cf_options_.max_bytes_for_level_base = 1000;
  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
  NewVersionStorage(num_levels, kCompactionStyleLevel);
  Add(0, 1U, "150", "200", 200);
  Add(0, 2U, "150", "200", 200);
  Add(0, 3U, "150", "200", 200);
  // Level 1 is over target by 200
  Add(1, 4U, "400", "500", 600);
  Add(1, 5U, "600", "700", 600);
  // Level 2 is less than target 10000 even after adding the size of level 1
  // Size ratio of L2/L1 is 9600 / 1200 = 8
  Add(2, 6U, "150", "200", 2500);
  Add(2, 7U, "201", "210", 2000);
  Add(2, 8U, "300", "310", 2600);
  Add(2, 9U, "400", "500", 2500);
  // Level 3 exceeds target 100,000 by 1000
  Add(3, 10U, "400", "500", 101000);
  // Level 4 exceeds target 1,000,000 by 900 after adding size from level 3
  // Size ratio L4/L3 is 9.9
  // After merge from L3, L4 size is 1000900
  Add(4, 11U, "400", "500", 999900);
  Add(5, 11U, "400", "500", 8007200);

  UpdateVersionStorageInfo();

  ASSERT_EQ(200u * 9u + 10900u + 900u * 9,
            vstorage_->estimated_compaction_needed_bytes());
}

TEST_F(CompactionPickerTest, EstimateCompactionBytesNeeded2) {
  int num_levels = ioptions_.num_levels;
  ioptions_.level_compaction_dynamic_level_bytes = false;
  mutable_cf_options_.level0_file_num_compaction_trigger = 3;
  mutable_cf_options_.max_bytes_for_level_base = 1000;
  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
  NewVersionStorage(num_levels, kCompactionStyleLevel);
  Add(0, 1U, "150", "200", 200);
  Add(0, 2U, "150", "200", 200);
  Add(0, 4U, "150", "200", 200);
  Add(0, 5U, "150", "200", 200);
  Add(0, 6U, "150", "200", 200);
  // Level 1 size will be 1400 after merging with L0
  Add(1, 7U, "400", "500", 200);
  Add(1, 8U, "600", "700", 200);
  // Level 2 is less than target 10000 even after adding the size of level 1
  Add(2, 9U, "150", "200", 9100);
  // Level 3 is over the target, but since level 4 is empty, we assume it will
  // be a trivial move.
  Add(3, 10U, "400", "500", 101000);

  UpdateVersionStorageInfo();

  // estimated L1->L2 merge: 400 * (9100.0 / 1400.0 + 1.0)
  ASSERT_EQ(1400u + 3000u, vstorage_->estimated_compaction_needed_bytes());
}

TEST_F(CompactionPickerTest, EstimateCompactionBytesNeeded3) {
  int num_levels = ioptions_.num_levels;
  ioptions_.level_compaction_dynamic_level_bytes = false;
  mutable_cf_options_.level0_file_num_compaction_trigger = 3;
  mutable_cf_options_.max_bytes_for_level_base = 1000;
  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
  NewVersionStorage(num_levels, kCompactionStyleLevel);
  Add(0, 1U, "150", "200", 2000);
  Add(0, 2U, "150", "200", 2000);
  Add(0, 4U, "150", "200", 2000);
  Add(0, 5U, "150", "200", 2000);
  Add(0, 6U, "150", "200", 1000);
  // Level 1 size will be 10000 after merging with L0
  Add(1, 7U, "400", "500", 500);
  Add(1, 8U, "600", "700", 500);

  Add(2, 9U, "150", "200", 10000);

  UpdateVersionStorageInfo();

  ASSERT_EQ(10000u + 18000u, vstorage_->estimated_compaction_needed_bytes());
}

TEST_F(CompactionPickerTest, EstimateCompactionBytesNeededDynamicLevel) {
  int num_levels = ioptions_.num_levels;
  ioptions_.level_compaction_dynamic_level_bytes = true;
  mutable_cf_options_.level0_file_num_compaction_trigger = 3;
  mutable_cf_options_.max_bytes_for_level_base = 1000;
  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
  NewVersionStorage(num_levels, kCompactionStyleLevel);

  // Set the last level size to 50000
  // num_levels - 1 target 5000
  // num_levels - 2 is the base level with target 1000 (rounded up to
  // max_bytes_for_level_base).
  Add(num_levels - 1, 10U, "400", "500", 50000);

  Add(0, 1U, "150", "200", 200);
  Add(0, 2U, "150", "200", 200);
  Add(0, 4U, "150", "200", 200);
  Add(0, 5U, "150", "200", 200);
  Add(0, 6U, "150", "200", 200);
  // num_levels - 3 is over target by 100 + 1000
  Add(num_levels - 3, 7U, "400", "500", 550);
  Add(num_levels - 3, 8U, "600", "700", 550);
  // num_levels - 2 is over target by 1100 + 200
  Add(num_levels - 2, 9U, "150", "200", 5200);

  UpdateVersionStorageInfo();

  // Merging to the second last level: (5200 / 2100 + 1) * 1100
  // Merging to the last level: (50000 / 6300 + 1) * 1300
  ASSERT_EQ(2100u + 3823u + 11617u,
            vstorage_->estimated_compaction_needed_bytes());
}

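// A compaction is bottommost for its key range only when no file in any
// level below the output level overlaps the compaction inputs.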
TEST_F(CompactionPickerTest, IsBottommostLevelTest) {
  // case 1: Higher levels are empty
  NewVersionStorage(6, kCompactionStyleLevel);
  Add(0, 1U, "a", "m");
  Add(0, 2U, "c", "z");
  Add(1, 3U, "d", "e");
  Add(1, 4U, "l", "p");
  Add(2, 5U, "g", "i");
  Add(2, 6U, "x", "z");
  UpdateVersionStorageInfo();
  SetCompactionInputFilesLevels(2, 1);
  AddToCompactionFiles(3U);
  AddToCompactionFiles(5U);
  bool result =
      Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
  ASSERT_TRUE(result);

  // case 2: Higher levels have no overlap
  NewVersionStorage(6, kCompactionStyleLevel);
  Add(0, 1U, "a", "m");
  Add(0, 2U, "c", "z");
  Add(1, 3U, "d", "e");
  Add(1, 4U, "l", "p");
  Add(2, 5U, "g", "i");
  Add(2, 6U, "x", "z");
  Add(3, 7U, "k", "p");
  Add(3, 8U, "t", "w");
  Add(4, 9U, "a", "b");
  Add(5, 10U, "c", "cc");
  UpdateVersionStorageInfo();
  SetCompactionInputFilesLevels(2, 1);
  AddToCompactionFiles(3U);
  AddToCompactionFiles(5U);
  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
  ASSERT_TRUE(result);

  // case 3.1: Higher levels (level 3) have overlap
  NewVersionStorage(6, kCompactionStyleLevel);
  Add(0, 1U, "a", "m");
  Add(0, 2U, "c", "z");
  Add(1, 3U, "d", "e");
  Add(1, 4U, "l", "p");
  Add(2, 5U, "g", "i");
  Add(2, 6U, "x", "z");
  Add(3, 7U, "e", "g");
  Add(3, 8U, "h", "k");
  Add(4, 9U, "a", "b");
  Add(5, 10U, "c", "cc");
  UpdateVersionStorageInfo();
  SetCompactionInputFilesLevels(2, 1);
  AddToCompactionFiles(3U);
  AddToCompactionFiles(5U);
  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
  ASSERT_FALSE(result);

  // case 3.2: Higher levels (level 5) have overlap
  DeleteVersionStorage();
  NewVersionStorage(6, kCompactionStyleLevel);
  Add(0, 1U, "a", "m");
  Add(0, 2U, "c", "z");
  Add(1, 3U, "d", "e");
  Add(1, 4U, "l", "p");
  Add(2, 5U, "g", "i");
  Add(2, 6U, "x", "z");
  Add(3, 7U, "j", "k");
  Add(3, 8U, "l", "m");
  Add(4, 9U, "a", "b");
  Add(5, 10U, "c", "cc");
  Add(5, 11U, "h", "k");
  Add(5, 12U, "y", "yy");
  Add(5, 13U, "z", "zz");
  UpdateVersionStorageInfo();
  SetCompactionInputFilesLevels(2, 1);
  AddToCompactionFiles(3U);
  AddToCompactionFiles(5U);
  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
  ASSERT_FALSE(result);

  // case 3.3: Higher levels (level 5) have overlap, but it's only overlapping
  // one key ("d")
  NewVersionStorage(6, kCompactionStyleLevel);
  Add(0, 1U, "a", "m");
  Add(0, 2U, "c", "z");
  Add(1, 3U, "d", "e");
  Add(1, 4U, "l", "p");
  Add(2, 5U, "g", "i");
  Add(2, 6U, "x", "z");
  Add(3, 7U, "j", "k");
  Add(3, 8U, "l", "m");
  Add(4, 9U, "a", "b");
  Add(5, 10U, "c", "cc");
  Add(5, 11U, "ccc", "d");
  Add(5, 12U, "y", "yy");
  Add(5, 13U, "z", "zz");
  UpdateVersionStorageInfo();
  SetCompactionInputFilesLevels(2, 1);
  AddToCompactionFiles(3U);
  AddToCompactionFiles(5U);
  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
  ASSERT_FALSE(result);

  // Level 0 files overlap
  NewVersionStorage(6, kCompactionStyleLevel);
  Add(0, 1U, "s", "t");
  Add(0, 2U, "a", "m");
  Add(0, 3U, "b", "z");
  Add(0, 4U, "e", "f");
  Add(5, 10U, "y", "z");
  UpdateVersionStorageInfo();
  SetCompactionInputFilesLevels(1, 0);
  AddToCompactionFiles(1U);
  AddToCompactionFiles(2U);
  AddToCompactionFiles(3U);
  AddToCompactionFiles(4U);
  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
  ASSERT_FALSE(result);

  // Level 0 files don't overlap
  NewVersionStorage(6, kCompactionStyleLevel);
  Add(0, 1U, "s", "t");
  Add(0, 2U, "a", "m");
  Add(0, 3U, "b", "k");
  Add(0, 4U, "e", "f");
  Add(5, 10U, "y", "z");
  UpdateVersionStorageInfo();
  SetCompactionInputFilesLevels(1, 0);
  AddToCompactionFiles(1U);
  AddToCompactionFiles(2U);
  AddToCompactionFiles(3U);
  AddToCompactionFiles(4U);
  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
  ASSERT_TRUE(result);

  // Level 1 files overlap
  NewVersionStorage(6, kCompactionStyleLevel);
  Add(0, 1U, "s", "t");
  Add(0, 2U, "a", "m");
  Add(0, 3U, "b", "k");
  Add(0, 4U, "e", "f");
  Add(1, 5U, "a", "m");
  Add(1, 6U, "n", "o");
  Add(1, 7U, "w", "y");
  Add(5, 10U, "y", "z");
  UpdateVersionStorageInfo();
  SetCompactionInputFilesLevels(2, 0);
  AddToCompactionFiles(1U);
  AddToCompactionFiles(2U);
  AddToCompactionFiles(3U);
  AddToCompactionFiles(4U);
  AddToCompactionFiles(5U);
  AddToCompactionFiles(6U);
  AddToCompactionFiles(7U);
  result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_);
  ASSERT_FALSE(result);

  DeleteVersionStorage();
}

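// max_compaction_bytes caps the total size of a single compaction; expansion
// of the input level stops once the cap would be exceeded.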
TEST_F(CompactionPickerTest, MaxCompactionBytesHit) {
  mutable_cf_options_.max_bytes_for_level_base = 1000000u;
  mutable_cf_options_.max_compaction_bytes = 800000u;
  ioptions_.level_compaction_dynamic_level_bytes = false;
  NewVersionStorage(6, kCompactionStyleLevel);
  // A compaction should be triggered and pick file 2 and 5.
  // It cannot expand because adding files 1 and 3 would make the compaction
  // size exceed mutable_cf_options_.max_compaction_bytes.
  Add(1, 1U, "100", "150", 300000U);
  Add(1, 2U, "151", "200", 300001U, 0, 0);
  Add(1, 3U, "201", "250", 300000U, 0, 0);
  Add(1, 4U, "251", "300", 300000U, 0, 0);
  Add(2, 5U, "100", "256", 1U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(1U, compaction->num_input_files(1));
  ASSERT_EQ(2U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(5U, compaction->input(1, 0)->fd.GetNumber());
}

TEST_F(CompactionPickerTest, MaxCompactionBytesNotHit) {
  mutable_cf_options_.max_bytes_for_level_base = 800000u;
  mutable_cf_options_.max_compaction_bytes = 1000000u;
  ioptions_.level_compaction_dynamic_level_bytes = false;
  NewVersionStorage(6, kCompactionStyleLevel);
  // A compaction should be triggered and pick file 2 and 5,
  // and it expands to files 1 and 3 too.
  Add(1, 1U, "100", "150", 300000U);
  Add(1, 2U, "151", "200", 300001U, 0, 0);
  Add(1, 3U, "201", "250", 300000U, 0, 0);
  Add(1, 4U, "251", "300", 300000U, 0, 0);
  Add(2, 5U, "000", "251", 1U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(2U, compaction->num_input_levels());
  ASSERT_EQ(3U, compaction->num_input_files(0));
  ASSERT_EQ(1U, compaction->num_input_files(1));
  ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
  ASSERT_EQ(3U, compaction->input(0, 2)->fd.GetNumber());
  ASSERT_EQ(5U, compaction->input(1, 0)->fd.GetNumber());
}

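// A trivial move is only possible when the picked input overlaps nothing in
// the output level, so the file can be moved down without being rewritten.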
TEST_F(CompactionPickerTest, IsTrivialMoveOn) {
  mutable_cf_options_.max_bytes_for_level_base = 10000u;
  mutable_cf_options_.max_compaction_bytes = 10001u;
  ioptions_.level_compaction_dynamic_level_bytes = false;
  NewVersionStorage(6, kCompactionStyleLevel);
  // A compaction should be triggered and pick file 2
  Add(1, 1U, "100", "150", 3000U);
  Add(1, 2U, "151", "200", 3001U);
  Add(1, 3U, "201", "250", 3000U);
  Add(1, 4U, "251", "300", 3000U);

  Add(3, 5U, "120", "130", 7000U);
  Add(3, 6U, "170", "180", 7000U);
  Add(3, 5U, "220", "230", 7000U);
  Add(3, 5U, "270", "280", 7000U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_TRUE(compaction->IsTrivialMove());
}

|
|
|
|
|
|
|
|
TEST_F(CompactionPickerTest, IsTrivialMoveOff) {
  mutable_cf_options_.max_bytes_for_level_base = 1000000u;
  mutable_cf_options_.max_compaction_bytes = 10000u;
  ioptions_.level_compaction_dynamic_level_bytes = false;
  NewVersionStorage(6, kCompactionStyleLevel);
  // A compaction should be triggered and pick all files from level 1.
  Add(1, 1U, "100", "150", 300000U, 0, 0);
  Add(1, 2U, "150", "200", 300000U, 0, 0);
  Add(1, 3U, "200", "250", 300000U, 0, 0);
  Add(1, 4U, "250", "300", 300000U, 0, 0);

  // Too much data overlaps on level 3 (12000 bytes > max_compaction_bytes),
  // so the compaction cannot be executed as a trivial move.
  Add(3, 5U, "120", "130", 6000U);
  Add(3, 6U, "140", "150", 6000U);
  UpdateVersionStorageInfo();

  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_FALSE(compaction->IsTrivialMove());
}
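
// A hypothetical sketch of the conditions the two trivial-move tests above
// exercise; Compaction::IsTrivialMove() is the authoritative check. In
// spirit, a picked file can simply be moved down when it is the only input,
// there is nothing to merge with in the output level, and its overlap with
// the level below the output stays within the compaction size budget.
bool LooksLikeTrivialMove(size_t num_input_files,
                          size_t num_output_level_files,
                          uint64_t overlapping_grandparent_bytes,
                          uint64_t max_compaction_bytes) {
  return num_input_files == 1 && num_output_level_files == 0 &&
         overlapping_grandparent_bytes <= max_compaction_bytes;
}
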
TEST_F(CompactionPickerTest, CacheNextCompactionIndex) {
  NewVersionStorage(6, kCompactionStyleLevel);
  mutable_cf_options_.max_compaction_bytes = 100000000000u;

  Add(1 /* level */, 1U /* file_number */, "100" /* smallest */,
      "149" /* largest */, 1000000000U /* file_size */);
  file_map_[1U].first->being_compacted = true;
  Add(1 /* level */, 2U /* file_number */, "150" /* smallest */,
      "199" /* largest */, 900000000U /* file_size */);
  Add(1 /* level */, 3U /* file_number */, "200" /* smallest */,
      "249" /* largest */, 800000000U /* file_size */);
  Add(1 /* level */, 4U /* file_number */, "250" /* smallest */,
      "299" /* largest */, 700000000U /* file_size */);
  Add(2 /* level */, 5U /* file_number */, "150" /* smallest */,
      "199" /* largest */, 1U /* file_size */);
  file_map_[5U].first->being_compacted = true;

  UpdateVersionStorageInfo();

  // File 1 is skipped because it is being compacted, and file 2 is skipped
  // because it overlaps busy file 5 on the output level, so file 3 is
  // picked; the next compaction index for level 1 is cached past the files
  // already examined.
  std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(1U, compaction->num_input_levels());
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(0U, compaction->num_input_files(1));
  ASSERT_EQ(3U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(2, vstorage_->NextCompactionIndex(1 /* level */));

  compaction.reset(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() != nullptr);
  ASSERT_EQ(1U, compaction->num_input_levels());
  ASSERT_EQ(1U, compaction->num_input_files(0));
  ASSERT_EQ(0U, compaction->num_input_files(1));
  ASSERT_EQ(4U, compaction->input(0, 0)->fd.GetNumber());
  ASSERT_EQ(3, vstorage_->NextCompactionIndex(1 /* level */));

  compaction.reset(level_compaction_picker.PickCompaction(
      cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
  ASSERT_TRUE(compaction.get() == nullptr);
  ASSERT_EQ(4, vstorage_->NextCompactionIndex(1 /* level */));
}
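
// A simplified sketch (assumed here, not VersionStorageInfo's actual
// implementation) of the index caching the test above verifies: the picker
// resumes scanning the level's files from the cached position, so files
// already found busy or overlapping a busy output file are not re-examined
// on the next pick.
int PickNextFileIndex(const std::vector<bool>& unsuitable,
                      int* next_compaction_index) {
  int i = *next_compaction_index;
  while (i < static_cast<int>(unsuitable.size()) && unsuitable[i]) {
    i++;  // skip files that cannot be picked; never revisit them
  }
  *next_compaction_index = i;  // cache where the next scan should resume
  return i < static_cast<int>(unsuitable.size()) ? i : -1;
}
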
} // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}