Merge options source_compaction_factor, max_grandparent_overlap_bytes and expanded_compaction_factor into max_compaction_bytes
Summary: To reduce the number of options, merge source_compaction_factor, max_grandparent_overlap_bytes and expanded_compaction_factor into max_compaction_bytes.

Test Plan: Add two new unit tests. Run all existing tests, including jtest.

Reviewers: yhchiang, igor, IslamAbdelRahman

Reviewed By: IslamAbdelRahman

Subscribers: leveldb, andrewkr, dhruba

Differential Revision: https://reviews.facebook.net/D59829
Parent: 4590b53a4b
Commit: 32149059f9
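For reference, a minimal sketch of what the migration looks like from a caller's point of view. The values below are illustrative only (not taken from the diff); the old factor-based assignments are shown commented out because those fields no longer exist after this change.

    #include "rocksdb/options.h"

    int main() {
      rocksdb::Options options;

      // Before this change, compaction size was bounded indirectly through
      // three multipliers of the per-level target file size (now removed):
      //   options.expanded_compaction_factor = 25;
      //   options.source_compaction_factor = 1;
      //   options.max_grandparent_overlap_factor = 10;

      // After this change, a single absolute byte budget covers all three
      // uses. A value of 0 is sanitized to target_file_size_base * 25.
      options.target_file_size_base = 64 << 20;  // 64MB, hypothetical value
      options.max_compaction_bytes = options.target_file_size_base * 25;
      return 0;
    }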
@@ -2,6 +2,7 @@
## Unreleased
### Public API Change
* CancelAllBackgroundWork() flushes all memtables for databases containing writes that have bypassed the WAL (writes issued with WriteOptions::disableWAL=true) before shutting down background threads.
* Merge options source_compaction_factor, max_grandparent_overlap_bytes and expanded_compaction_factor into max_compaction_bytes

### New Features
* Introduce NewClockCache, which is based on CLOCK algorithm with better concurrent performance in some cases. It can be used to replace the default LRU-based block cache and table cache. To use it, RocksDB needs to be linked with the TBB lib.
db/c.cc (16 lines changed)
@@ -1515,14 +1515,9 @@ void rocksdb_options_set_max_bytes_for_level_multiplier(
opt->rep.max_bytes_for_level_multiplier = n;
}

void rocksdb_options_set_expanded_compaction_factor(
rocksdb_options_t* opt, int n) {
opt->rep.expanded_compaction_factor = n;
}

void rocksdb_options_set_max_grandparent_overlap_factor(
rocksdb_options_t* opt, int n) {
opt->rep.max_grandparent_overlap_factor = n;
void rocksdb_options_set_max_compaction_bytes(rocksdb_options_t* opt,
uint64_t n) {
opt->rep.max_compaction_bytes = n;
}

void rocksdb_options_set_max_bytes_for_level_multiplier_additional(
@@ -1778,11 +1773,6 @@ void rocksdb_options_set_delete_obsolete_files_period_micros(
opt->rep.delete_obsolete_files_period_micros = v;
}

void rocksdb_options_set_source_compaction_factor(
rocksdb_options_t* opt, int n) {
opt->rep.expanded_compaction_factor = n;
}

void rocksdb_options_prepare_for_bulk_load(rocksdb_options_t* opt) {
opt->rep.PrepareForBulkLoad();
}
@@ -204,6 +204,10 @@ ColumnFamilyOptions SanitizeOptions(const DBOptions& db_options,
result.level0_stop_writes_trigger = std::numeric_limits<int>::max();
}

if (result.max_bytes_for_level_multiplier <= 0) {
result.max_bytes_for_level_multiplier = 1;
}

if (result.level0_file_num_compaction_trigger == 0) {
Warn(db_options.info_log.get(),
"level0_file_num_compaction_trigger cannot be 0");
@@ -262,6 +266,10 @@ ColumnFamilyOptions SanitizeOptions(const DBOptions& db_options,
}
}

if (result.max_compaction_bytes == 0) {
result.max_compaction_bytes = result.target_file_size_base * 25;
}

return result;
}
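A quick illustration of the new sanitization rule above, with hypothetical numbers (the struct is a stand-in for ColumnFamilyOptions, not the real type): with target_file_size_base at 64 MB, an unset max_compaction_bytes (0) is rewritten to 25 times that, i.e. 1600 MB.

    #include <cassert>
    #include <cstdint>

    // Minimal model of the sanitization step added above.
    struct FakeCFOptions {
      uint64_t target_file_size_base = 64ull << 20;  // 64 MB, hypothetical
      uint64_t max_compaction_bytes = 0;             // 0 means "pick a default"
    };

    int main() {
      FakeCFOptions result;
      if (result.max_compaction_bytes == 0) {
        result.max_compaction_bytes = result.target_file_size_base * 25;
      }
      assert(result.max_compaction_bytes == 1600ull << 20);  // 1600 MB
      return 0;
    }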
@@ -1,7 +1,8 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
// This source code is licensed under the BSD-style license found in
// the LICENSE file in the root directory of this source tree. An
// additional grant of patent rights can be found in the PATENTS file
// in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
@@ -1146,7 +1147,8 @@ TEST_F(ColumnFamilyTest, DifferentCompactionStyles) {
default_cf.num_levels = 3;
default_cf.write_buffer_size = 64 << 10; // 64KB
default_cf.target_file_size_base = 30 << 10;
default_cf.source_compaction_factor = 100;
default_cf.max_compaction_bytes = static_cast<uint64_t>(1) << 60;

BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -1218,7 +1220,7 @@ TEST_F(ColumnFamilyTest, MultipleManualCompactions) {
default_cf.num_levels = 3;
default_cf.write_buffer_size = 64 << 10; // 64KB
default_cf.target_file_size_base = 30 << 10;
default_cf.source_compaction_factor = 100;
default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -1317,7 +1319,7 @@ TEST_F(ColumnFamilyTest, AutomaticAndManualCompactions) {
default_cf.num_levels = 3;
default_cf.write_buffer_size = 64 << 10; // 64KB
default_cf.target_file_size_base = 30 << 10;
default_cf.source_compaction_factor = 100;
default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -1410,7 +1412,7 @@ TEST_F(ColumnFamilyTest, ManualAndAutomaticCompactions) {
default_cf.num_levels = 3;
default_cf.write_buffer_size = 64 << 10; // 64KB
default_cf.target_file_size_base = 30 << 10;
default_cf.source_compaction_factor = 100;
default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -1506,7 +1508,7 @@ TEST_F(ColumnFamilyTest, SameCFManualManualCompactions) {
default_cf.num_levels = 3;
default_cf.write_buffer_size = 64 << 10; // 64KB
default_cf.target_file_size_base = 30 << 10;
default_cf.source_compaction_factor = 100;
default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -1605,7 +1607,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactions) {
default_cf.num_levels = 3;
default_cf.write_buffer_size = 64 << 10; // 64KB
default_cf.target_file_size_base = 30 << 10;
default_cf.source_compaction_factor = 100;
default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -1695,7 +1697,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) {
default_cf.num_levels = 3;
default_cf.write_buffer_size = 64 << 10; // 64KB
default_cf.target_file_size_base = 30 << 10;
default_cf.source_compaction_factor = 100;
default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -1792,7 +1794,7 @@ TEST_F(ColumnFamilyTest, SameCFManualAutomaticConflict) {
default_cf.num_levels = 3;
default_cf.write_buffer_size = 64 << 10; // 64KB
default_cf.target_file_size_base = 30 << 10;
default_cf.source_compaction_factor = 100;
default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -1912,7 +1914,7 @@ TEST_F(ColumnFamilyTest, SameCFAutomaticManualCompactions) {
default_cf.num_levels = 3;
default_cf.write_buffer_size = 64 << 10; // 64KB
default_cf.target_file_size_base = 30 << 10;
default_cf.source_compaction_factor = 100;
default_cf.max_compaction_bytes = default_cf.target_file_size_base * 1100;
BlockBasedTableOptions table_options;
table_options.no_block_cache = true;
default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -143,8 +143,8 @@ Compaction::Compaction(VersionStorageInfo* vstorage,
const MutableCFOptions& _mutable_cf_options,
std::vector<CompactionInputFiles> _inputs,
int _output_level, uint64_t _target_file_size,
uint64_t _max_grandparent_overlap_bytes,
uint32_t _output_path_id, CompressionType _compression,
uint64_t _max_compaction_bytes, uint32_t _output_path_id,
CompressionType _compression,
std::vector<FileMetaData*> _grandparents,
bool _manual_compaction, double _score,
bool _deletion_compaction,
@@ -152,7 +152,7 @@ Compaction::Compaction(VersionStorageInfo* vstorage,
: start_level_(_inputs[0].level),
output_level_(_output_level),
max_output_file_size_(_target_file_size),
max_grandparent_overlap_bytes_(_max_grandparent_overlap_bytes),
max_compaction_bytes_(_max_compaction_bytes),
mutable_cf_options_(_mutable_cf_options),
input_version_(nullptr),
number_levels_(vstorage->num_levels()),
@@ -247,7 +247,7 @@ bool Compaction::IsTrivialMove() const {
return (start_level_ != output_level_ && num_input_levels() == 1 &&
input(0, 0)->fd.GetPathId() == output_path_id() &&
InputCompressionMatchesOutput() &&
TotalFileSize(grandparents_) <= max_grandparent_overlap_bytes_);
TotalFileSize(grandparents_) <= max_compaction_bytes_);
}

void Compaction::AddInputDeletions(VersionEdit* out_edit) {
@@ -37,7 +37,7 @@ class Compaction {
Compaction(VersionStorageInfo* input_version,
const MutableCFOptions& mutable_cf_options,
std::vector<CompactionInputFiles> inputs, int output_level,
uint64_t target_file_size, uint64_t max_grandparent_overlap_bytes,
uint64_t target_file_size, uint64_t max_compaction_bytes,
uint32_t output_path_id, CompressionType compression,
std::vector<FileMetaData*> grandparents,
bool manual_compaction = false, double score = -1,
@@ -229,9 +229,7 @@ class Compaction {
return grandparents_;
}

uint64_t max_grandparent_overlap_bytes() const {
return max_grandparent_overlap_bytes_;
}
uint64_t max_compaction_bytes() const { return max_compaction_bytes_; }

private:
// mark (or clear) all files that are being compacted
@@ -254,7 +252,7 @@ class Compaction {
const int start_level_;    // the lowest level to be compacted
const int output_level_;  // levels to which output files are stored
uint64_t max_output_file_size_;
uint64_t max_grandparent_overlap_bytes_;
uint64_t max_compaction_bytes_;
MutableCFOptions mutable_cf_options_;
Version* input_version_;
VersionEdit edit_;
@@ -99,6 +99,8 @@ struct CompactionJob::SubcompactionState {
}
}

uint64_t current_output_file_size;

// State during the subcompaction
uint64_t total_bytes;
uint64_t num_input_records;
@@ -121,6 +123,7 @@ struct CompactionJob::SubcompactionState {
end(_end),
outfile(nullptr),
builder(nullptr),
current_output_file_size(0),
total_bytes(0),
num_input_records(0),
num_output_records(0),
@@ -161,7 +164,7 @@ struct CompactionJob::SubcompactionState {

// Returns true iff we should stop building the current output
// before processing "internal_key".
bool ShouldStopBefore(const Slice& internal_key) {
bool ShouldStopBefore(const Slice& internal_key, uint64_t curr_file_size) {
const InternalKeyComparator* icmp =
&compaction->column_family_data()->internal_comparator();
const std::vector<FileMetaData*>& grandparents = compaction->grandparents();
@@ -182,7 +185,8 @@ struct CompactionJob::SubcompactionState {
}
seen_key = true;

if (overlapped_bytes > compaction->max_grandparent_overlap_bytes()) {
if (overlapped_bytes + curr_file_size >
compaction->max_compaction_bytes()) {
// Too much overlap for current output; start new output
overlapped_bytes = 0;
return true;
@@ -757,7 +761,8 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
if (end != nullptr &&
cfd->user_comparator()->Compare(c_iter->user_key(), *end) >= 0) {
break;
} else if (sub_compact->ShouldStopBefore(key) &&
} else if (sub_compact->ShouldStopBefore(
key, sub_compact->current_output_file_size) &&
sub_compact->builder != nullptr) {
status = FinishCompactionOutputFile(input->status(), sub_compact);
if (!status.ok()) {
@@ -782,6 +787,7 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
assert(sub_compact->builder != nullptr);
assert(sub_compact->current_output() != nullptr);
sub_compact->builder->Add(key, value);
sub_compact->current_output_file_size = sub_compact->builder->FileSize();
sub_compact->current_output()->meta.UpdateBoundaries(
key, c_iter->ikey().sequence);
sub_compact->num_output_records++;
@@ -840,7 +846,7 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
// during subcompactions (i.e. if output size, estimated by input size, is
// going to be 1.2MB and max_output_file_size = 1MB, prefer to have 0.6MB
// and 0.6MB instead of 1MB and 0.2MB)
if (sub_compact->builder->FileSize() >=
if (sub_compact->current_output_file_size >=
sub_compact->compaction->max_output_file_size()) {
status = FinishCompactionOutputFile(input->status(), sub_compact);
if (sub_compact->outputs.size() == 1) {
@@ -1021,6 +1027,7 @@ Status CompactionJob::FinishCompactionOutputFile(
}

sub_compact->builder.reset();
sub_compact->current_output_file_size = 0;
return s;
}
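The behavioral change in ShouldStopBefore above is that the running grandparent overlap is now compared, together with the size of the output file currently being built, against a single byte budget. A simplified standalone sketch of that cut decision (the real code walks grandparent file metadata with the internal key comparator; this model just sums pre-computed sizes):

    #include <cstdint>
    #include <vector>

    // Simplified model: each entry in `grandparent_sizes` is the size of a
    // grandparent (level+2) file the current key range has crossed into, and
    // `curr_file_size` is the size of the output file being built.
    bool ShouldStartNewOutputFile(const std::vector<uint64_t>& grandparent_sizes,
                                  uint64_t curr_file_size,
                                  uint64_t max_compaction_bytes) {
      uint64_t overlapped_bytes = 0;
      for (uint64_t sz : grandparent_sizes) {
        overlapped_bytes += sz;
      }
      // Old rule: overlapped_bytes > max_grandparent_overlap_bytes.
      // New rule: overlap plus the bytes already written to the current
      // output must stay under max_compaction_bytes.
      return overlapped_bytes + curr_file_size > max_compaction_bytes;
    }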
@@ -241,8 +241,8 @@ class CompactionJobTest : public testing::Test {

Compaction compaction(cfd->current()->storage_info(),
*cfd->GetLatestMutableCFOptions(),
compaction_input_files, 1, 1024 * 1024, 10, 0,
kNoCompression, {}, true);
compaction_input_files, 1, 1024 * 1024,
10 * 1024 * 1024, 0, kNoCompression, {}, true);
compaction.SetInputVersion(cfd->current());

LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, db_options_.info_log.get());
@@ -266,10 +266,6 @@ Compaction* CompactionPicker::FormCompaction(
const std::vector<CompactionInputFiles>& input_files, int output_level,
VersionStorageInfo* vstorage, const MutableCFOptions& mutable_cf_options,
uint32_t output_path_id) {
uint64_t max_grandparent_overlap_bytes =
output_level + 1 < vstorage->num_levels()
? mutable_cf_options.MaxGrandParentOverlapBytes(output_level + 1)
: std::numeric_limits<uint64_t>::max();
assert(input_files.size());

// TODO(rven ): we might be able to run concurrent level 0 compaction
@@ -278,10 +274,11 @@ Compaction* CompactionPicker::FormCompaction(
if ((input_files[0].level == 0) && !level0_compactions_in_progress_.empty()) {
return nullptr;
}
auto c = new Compaction(
vstorage, mutable_cf_options, input_files, output_level,
compact_options.output_file_size_limit, max_grandparent_overlap_bytes,
output_path_id, compact_options.compression, /* grandparents */ {}, true);
auto c =
new Compaction(vstorage, mutable_cf_options, input_files, output_level,
compact_options.output_file_size_limit,
mutable_cf_options.max_compaction_bytes, output_path_id,
compact_options.compression, /* grandparents */ {}, true);

// If it's level 0 compaction, make sure we don't execute any other level 0
// compactions in parallel
@@ -416,8 +413,7 @@ bool CompactionPicker::SetupOtherInputs(
const uint64_t inputs1_size =
TotalCompensatedFileSize(output_level_inputs->files);
const uint64_t expanded0_size = TotalCompensatedFileSize(expanded0.files);
uint64_t limit =
mutable_cf_options.ExpandedCompactionByteSizeLimit(input_level);
uint64_t limit = mutable_cf_options.max_compaction_bytes;
if (expanded0.size() > inputs->size() &&
inputs1_size + expanded0_size < limit &&
!FilesInCompaction(expanded0.files) &&
@@ -513,7 +509,7 @@ Compaction* CompactionPicker::CompactRange(
Compaction* c = new Compaction(
vstorage, mutable_cf_options, std::move(inputs), output_level,
mutable_cf_options.MaxFileSizeForLevel(output_level),
/* max_grandparent_overlap_bytes */ LLONG_MAX, output_path_id,
/* max_compaction_bytes */ LLONG_MAX, output_path_id,
GetCompressionType(ioptions_, vstorage, mutable_cf_options,
output_level, 1),
/* grandparents */ {}, /* is manual */ true);
@@ -551,8 +547,7 @@ Compaction* CompactionPicker::CompactRange(
// and we must not pick one file and drop another older file if the
// two files overlap.
if (input_level > 0) {
const uint64_t limit = mutable_cf_options.MaxFileSizeForLevel(input_level) *
mutable_cf_options.source_compaction_factor;
const uint64_t limit = mutable_cf_options.max_compaction_bytes;
uint64_t total = 0;
for (size_t i = 0; i + 1 < inputs.size(); ++i) {
uint64_t s = inputs[i]->compensated_file_size;
@@ -614,8 +609,7 @@ Compaction* CompactionPicker::CompactRange(
Compaction* compaction = new Compaction(
vstorage, mutable_cf_options, std::move(compaction_inputs), output_level,
mutable_cf_options.MaxFileSizeForLevel(output_level),
mutable_cf_options.MaxGrandParentOverlapBytes(input_level),
output_path_id,
mutable_cf_options.max_compaction_bytes, output_path_id,
GetCompressionType(ioptions_, vstorage, mutable_cf_options, output_level,
vstorage->base_level()),
std::move(grandparents), /* is manual compaction */ true);
@@ -1020,7 +1014,7 @@ Compaction* LevelCompactionPicker::PickCompaction(
auto c = new Compaction(
vstorage, mutable_cf_options, std::move(compaction_inputs), output_level,
mutable_cf_options.MaxFileSizeForLevel(output_level),
mutable_cf_options.MaxGrandParentOverlapBytes(level),
mutable_cf_options.max_compaction_bytes,
GetPathId(ioptions_, mutable_cf_options, output_level),
GetCompressionType(ioptions_, vstorage, mutable_cf_options, output_level,
vstorage->base_level()),
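In SetupOtherInputs above, the byte budget that caps how far the input set may be expanded is now max_compaction_bytes itself rather than a per-level multiple of the target file size. A rough sketch of that gate (names simplified; the real code also checks key ranges and files already being compacted):

    #include <cstdint>

    // Simplified expansion check: only grow the input-level file set if the
    // combined size of the expanded inputs and the output-level inputs stays
    // under the single max_compaction_bytes budget.
    bool MayExpandInputs(uint64_t expanded_inputs_size,
                         uint64_t output_level_inputs_size,
                         uint64_t max_compaction_bytes) {
      const uint64_t limit = max_compaction_bytes;  // was a per-level multiple
      return output_level_inputs_size + expanded_inputs_size < limit;
    }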
@@ -1061,6 +1061,57 @@ TEST_F(CompactionPickerTest, IsBottommostLevelTest) {
DeleteVersionStorage();
}

TEST_F(CompactionPickerTest, MaxCompactionBytesHit) {
mutable_cf_options_.max_bytes_for_level_base = 1000000u;
mutable_cf_options_.max_compaction_bytes = 800000u;
ioptions_.level_compaction_dynamic_level_bytes = false;
NewVersionStorage(6, kCompactionStyleLevel);
// A compaction should be triggered and pick file 2 and 5.
// It cannot expand because adding file 1 and 3 would make the compaction
// size exceed mutable_cf_options_.max_compaction_bytes.
Add(1, 1U, "100", "150", 300000U);
Add(1, 2U, "151", "200", 300001U, 0, 0);
Add(1, 3U, "201", "250", 300000U, 0, 0);
Add(1, 4U, "251", "300", 300000U, 0, 0);
Add(2, 5U, "160", "256", 1U);
UpdateVersionStorageInfo();

std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(2U, compaction->num_input_levels());
ASSERT_EQ(1U, compaction->num_input_files(0));
ASSERT_EQ(1U, compaction->num_input_files(1));
ASSERT_EQ(2U, compaction->input(0, 0)->fd.GetNumber());
ASSERT_EQ(5U, compaction->input(1, 0)->fd.GetNumber());
}

TEST_F(CompactionPickerTest, MaxCompactionBytesNotHit) {
mutable_cf_options_.max_bytes_for_level_base = 1000000u;
mutable_cf_options_.max_compaction_bytes = 1000000u;
ioptions_.level_compaction_dynamic_level_bytes = false;
NewVersionStorage(6, kCompactionStyleLevel);
// A compaction should be triggered and pick file 2 and 5,
// and it expands to file 1 and 3 too.
Add(1, 1U, "100", "150", 300000U);
Add(1, 2U, "151", "200", 300001U, 0, 0);
Add(1, 3U, "201", "250", 300000U, 0, 0);
Add(1, 4U, "251", "300", 300000U, 0, 0);
Add(2, 5U, "000", "233", 1U);
UpdateVersionStorageInfo();

std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_));
ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(2U, compaction->num_input_levels());
ASSERT_EQ(3U, compaction->num_input_files(0));
ASSERT_EQ(1U, compaction->num_input_files(1));
ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
ASSERT_EQ(3U, compaction->input(0, 2)->fd.GetNumber());
ASSERT_EQ(5U, compaction->input(1, 0)->fd.GetNumber());
}

}  // namespace rocksdb

int main(int argc, char** argv) {
@@ -137,7 +137,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
options.max_bytes_for_level_multiplier = 4;
options.max_background_compactions = 2;
options.num_levels = 5;
options.expanded_compaction_factor = 0; // Force not expanding in compactions
options.max_compaction_bytes = 0; // Force not expanding in compactions
BlockBasedTableOptions table_options;
table_options.block_size = 1024;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -223,8 +223,9 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) {
ASSERT_OK(dbfull()->SetOptions({
{"disable_auto_compactions", "true"},
}));
// Write about 600K more
for (int i = 0; i < 1500; i++) {
// Write about 650K more.
// Each file is about 11KB, with 9KB of data.
for (int i = 0; i < 1300; i++) {
ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))),
RandomString(&rnd, 380)));
}
@@ -294,7 +295,7 @@ TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) {
options.max_background_compactions = 1;
const int kNumLevels = 5;
options.num_levels = kNumLevels;
options.expanded_compaction_factor = 0; // Force not expanding in compactions
options.max_compaction_bytes = 1; // Force not expanding in compactions
BlockBasedTableOptions table_options;
table_options.block_size = 1024;
options.table_factory.reset(NewBlockBasedTableFactory(table_options));
@@ -4271,10 +4271,8 @@ TEST_F(DBTest, DynamicCompactionOptions) {
options.level0_file_num_compaction_trigger = 3;
options.level0_slowdown_writes_trigger = 4;
options.level0_stop_writes_trigger = 8;
options.max_grandparent_overlap_factor = 10;
options.expanded_compaction_factor = 25;
options.source_compaction_factor = 1;
options.target_file_size_base = k64KB;
options.max_compaction_bytes = options.target_file_size_base * 10;
options.target_file_size_multiplier = 1;
options.max_bytes_for_level_base = k128KB;
options.max_bytes_for_level_multiplier = 4;
@@ -5023,7 +5021,7 @@ TEST_F(DBTest, SuggestCompactRangeTest) {
options.compression = kNoCompression;
options.max_bytes_for_level_base = 450 << 10;
options.target_file_size_base = 98 << 10;
options.max_grandparent_overlap_factor = 1 << 20; // inf
options.max_compaction_bytes = static_cast<uint64_t>(1) << 60; // inf

Reopen(options);
@@ -1384,7 +1384,53 @@ TEST_P(PinL0IndexAndFilterBlocksTest, DisablePrefetchingNonL0IndexAndFilter) {

INSTANTIATE_TEST_CASE_P(PinL0IndexAndFilterBlocksTest,
PinL0IndexAndFilterBlocksTest, ::testing::Bool());

#ifndef ROCKSDB_LITE
TEST_F(DBTest2, MaxCompactionBytesTest) {
Options options = CurrentOptions();
options.memtable_factory.reset(
new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile));
options.compaction_style = kCompactionStyleLevel;
options.write_buffer_size = 200 << 10;
options.arena_block_size = 4 << 10;
options.level0_file_num_compaction_trigger = 4;
options.num_levels = 4;
options.compression = kNoCompression;
options.max_bytes_for_level_base = 450 << 10;
options.target_file_size_base = 100 << 10;
// Infinite for full compaction.
options.max_compaction_bytes = options.target_file_size_base * 100;

Reopen(options);

Random rnd(301);

for (int num = 0; num < 8; num++) {
GenerateNewRandomFile(&rnd);
}
CompactRangeOptions cro;
cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
ASSERT_EQ("0,0,8", FilesPerLevel(0));

// When compacting from Ln -> Ln+1, cut a file if the file overlaps with
// more than three files in Ln+1.
options.max_compaction_bytes = options.target_file_size_base * 3;
Reopen(options);

GenerateNewRandomFile(&rnd);
// Add three more small files that overlap with the previous file
for (int i = 0; i < 3; i++) {
Put("a", "z");
ASSERT_OK(Flush());
}
dbfull()->TEST_WaitForCompact();

// Output files to L1 are cut to three pieces, according to
// options.max_compaction_bytes
ASSERT_EQ("0,3,8", FilesPerLevel(0));
}

static void UniqueIdCallback(void* arg) {
int* result = reinterpret_cast<int*>(arg);
if (*result == -1) {
@@ -532,30 +532,12 @@ struct ColumnFamilyOptions {
// Dynamically changeable through SetOptions() API
std::vector<int> max_bytes_for_level_multiplier_additional;

// Maximum number of bytes in all compacted files. We avoid expanding
// the lower level file set of a compaction if it would make the
// total compaction cover more than
// (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
// We try to limit number of bytes in one compaction to be lower than this
// threshold. But it's not guaranteed.
// Value 0 will be sanitized.
//
// Dynamically changeable through SetOptions() API
int expanded_compaction_factor;

// Maximum number of bytes in all source files to be compacted in a
// single compaction run. We avoid picking too many files in the
// source level so that we do not exceed the total source bytes
// for compaction to exceed
// (source_compaction_factor * targetFileSizeLevel()) many bytes.
// Default:1, i.e. pick maxfilesize amount of data as the source of
// a compaction.
//
// Dynamically changeable through SetOptions() API
int source_compaction_factor;

// Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
// stop building a single file in a level->level+1 compaction.
//
// Dynamically changeable through SetOptions() API
int max_grandparent_overlap_factor;
// Default: result.target_file_size_base * 25
uint64_t max_compaction_bytes;

// DEPRECATED -- this options is no longer used
// Puts are delayed to options.delayed_write_rate when any level has a
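Since the replacement option is documented above as dynamically changeable, it can be adjusted at runtime as well as at open time. A hedged sketch, assuming max_compaction_bytes is registered as a mutable column-family option reachable through the string-based SetOptions() API (which the db_stress change later in this diff exercises); the 1GB value is arbitrary:

    #include <string>
    #include <unordered_map>
    #include "rocksdb/db.h"

    // Sketch: tighten the compaction byte budget on a running DB.
    rocksdb::Status SetCompactionByteBudget(rocksdb::DB* db) {
      return db->SetOptions(
          {{"max_compaction_bytes", std::to_string(1ull << 30)}});  // 1 GB
    }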
@@ -1531,74 +1531,24 @@ void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplier(

/*
* Class: org_rocksdb_Options
* Method: expandedCompactionFactor
* Method: maxCompactionBytes
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_expandedCompactionFactor(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->expanded_compaction_factor;
jlong Java_org_rocksdb_Options_maxCompactionBytes(JNIEnv* env, jobject jobj,
jlong jhandle) {
return static_cast<jlong>(
reinterpret_cast<rocksdb::Options*>(jhandle)->max_compaction_bytes);
}

/*
* Class: org_rocksdb_Options
* Method: setExpandedCompactionFactor
* Method: setMaxCompactionBytes
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setExpandedCompactionFactor(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jexpanded_compaction_factor) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->expanded_compaction_factor =
static_cast<int>(jexpanded_compaction_factor);
}

/*
* Class: org_rocksdb_Options
* Method: sourceCompactionFactor
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_sourceCompactionFactor(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->source_compaction_factor;
}

/*
* Class: org_rocksdb_Options
* Method: setSourceCompactionFactor
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setSourceCompactionFactor(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jsource_compaction_factor) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->source_compaction_factor =
static_cast<int>(jsource_compaction_factor);
}

/*
* Class: org_rocksdb_Options
* Method: maxGrandparentOverlapFactor
* Signature: (J)I
*/
jint Java_org_rocksdb_Options_maxGrandparentOverlapFactor(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(
jhandle)->max_grandparent_overlap_factor;
}

/*
* Class: org_rocksdb_Options
* Method: setMaxGrandparentOverlapFactor
* Signature: (JI)V
*/
void Java_org_rocksdb_Options_setMaxGrandparentOverlapFactor(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jmax_grandparent_overlap_factor) {
reinterpret_cast<rocksdb::Options*>(
jhandle)->max_grandparent_overlap_factor =
static_cast<int>(jmax_grandparent_overlap_factor);
void Java_org_rocksdb_Options_setMaxCompactionBytes(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_compaction_bytes) {
reinterpret_cast<rocksdb::Options*>(jhandle)->max_compaction_bytes =
static_cast<uint64_t>(jmax_compaction_bytes);
}

/*
@@ -2882,74 +2832,26 @@ void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplier(

/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: expandedCompactionFactor
* Method: maxCompactionBytes
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_expandedCompactionFactor(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->expanded_compaction_factor;
jlong Java_org_rocksdb_ColumnFamilyOptions_maxCompactionBytes(JNIEnv* env,
jobject jobj,
jlong jhandle) {
return static_cast<jlong>(
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_compaction_bytes);
}

/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setExpandedCompactionFactor
* Method: setMaxCompactionBytes
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setExpandedCompactionFactor(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jexpanded_compaction_factor) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->expanded_compaction_factor =
static_cast<int>(jexpanded_compaction_factor);
}

/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: sourceCompactionFactor
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_sourceCompactionFactor(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->source_compaction_factor;
}

/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setSourceCompactionFactor
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setSourceCompactionFactor(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jsource_compaction_factor) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->source_compaction_factor =
static_cast<int>(jsource_compaction_factor);
}

/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: maxGrandparentOverlapFactor
* Signature: (J)I
*/
jint Java_org_rocksdb_ColumnFamilyOptions_maxGrandparentOverlapFactor(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->max_grandparent_overlap_factor;
}

/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setMaxGrandparentOverlapFactor
* Signature: (JI)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setMaxGrandparentOverlapFactor(
JNIEnv* env, jobject jobj, jlong jhandle,
jint jmax_grandparent_overlap_factor) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
jhandle)->max_grandparent_overlap_factor =
static_cast<int>(jmax_grandparent_overlap_factor);
void Java_org_rocksdb_ColumnFamilyOptions_setMaxCompactionBytes(
JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_compaction_bytes) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
->max_compaction_bytes = static_cast<uint64_t>(jmax_compaction_bytes);
}

/*
@@ -364,39 +364,14 @@ public class ColumnFamilyOptions extends RocksObject
}

@Override
public ColumnFamilyOptions setExpandedCompactionFactor(
final int expandedCompactionFactor) {
setExpandedCompactionFactor(nativeHandle_, expandedCompactionFactor);
public ColumnFamilyOptions setMaxCompactionBytes(final long maxCompactionBytes) {
setMaxCompactionBytes(nativeHandle_, maxCompactionBytes);
return this;
}

@Override
public int expandedCompactionFactor() {
return expandedCompactionFactor(nativeHandle_);
}

@Override
public ColumnFamilyOptions setSourceCompactionFactor(
final int sourceCompactionFactor) {
setSourceCompactionFactor(nativeHandle_, sourceCompactionFactor);
return this;
}

@Override
public int sourceCompactionFactor() {
return sourceCompactionFactor(nativeHandle_);
}

@Override
public ColumnFamilyOptions setMaxGrandparentOverlapFactor(
final int maxGrandparentOverlapFactor) {
setMaxGrandparentOverlapFactor(nativeHandle_, maxGrandparentOverlapFactor);
return this;
}

@Override
public int maxGrandparentOverlapFactor() {
return maxGrandparentOverlapFactor(nativeHandle_);
public long maxCompactionBytes() {
return maxCompactionBytes(nativeHandle_);
}

@Override
@@ -803,15 +778,8 @@ public class ColumnFamilyOptions extends RocksObject
private native void setMaxBytesForLevelMultiplier(
long handle, int multiplier);
private native int maxBytesForLevelMultiplier(long handle);
private native void setExpandedCompactionFactor(
long handle, int expandedCompactionFactor);
private native int expandedCompactionFactor(long handle);
private native void setSourceCompactionFactor(
long handle, int sourceCompactionFactor);
private native int sourceCompactionFactor(long handle);
private native void setMaxGrandparentOverlapFactor(
long handle, int maxGrandparentOverlapFactor);
private native int maxGrandparentOverlapFactor(long handle);
private native void setMaxCompactionBytes(long handle, long maxCompactionBytes);
private native long maxCompactionBytes(long handle);
private native void setSoftRateLimit(
long handle, double softRateLimit);
private native double softRateLimit(long handle);
@@ -447,6 +447,88 @@ public interface ColumnFamilyOptionsInterface {
*/
boolean levelCompactionDynamicLevelBytes();

/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
*
* @param multiplier the ratio between the total size of level-(L+1)
* files and the total size of level-L files for all L.
* @return the reference to the current option.
* @see #setMaxBytesForLevelBase(long)
*/
Object setMaxBytesForLevelMultiplier(int multiplier);

/**
* The ratio between the total size of level-(L+1) files and the total
* size of level-L files for all L.
* DEFAULT: 10
*
* @return the ratio between the total size of level-(L+1) files and
* the total size of level-L files for all L.
* @see #maxBytesForLevelBase()
*/
int maxBytesForLevelMultiplier();

/**
* Maximum size of each compaction (not guaranteed)
*
* @param maxCompactionBytes the compaction size limit
* @return the reference to the current option.
*/
Object setMaxCompactionBytes(long maxCompactionBytes);

/**
* Control maximum size of each compaction (not guaranteed)
*
* @return compaction size threshold
*/
long maxCompactionBytes();

/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit ≤ hard_rate_limit. If this constraint does not
* hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @param softRateLimit the soft-rate-limit of a compaction score
* for put delay.
* @return the reference to the current option.
*/
Object setSoftRateLimit(double softRateLimit);

/**
* Puts are delayed 0-1 ms when any level has a compaction score that exceeds
* soft_rate_limit. This is ignored when == 0.0.
* CONSTRAINT: soft_rate_limit ≤ hard_rate_limit. If this constraint does not
* hold, RocksDB will set soft_rate_limit = hard_rate_limit
* Default: 0 (disabled)
*
* @return soft-rate-limit for put delay.
*/
double softRateLimit();

/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when ≤ 1.0.
* Default: 0 (disabled)
*
* @param hardRateLimit the hard-rate-limit of a compaction score for put
* delay.
* @return the reference to the current option.
*/
Object setHardRateLimit(double hardRateLimit);

/**
* Puts are delayed 1ms at a time when any level has a compaction score that
* exceeds hard_rate_limit. This is ignored when ≤ 1.0.
* Default: 0 (disabled)
*
* @return the hard-rate-limit of a compaction score for put delay.
*/
double hardRateLimit();

/**
* The maximum time interval a put will be stalled when hard_rate_limit
* is enforced. If 0, then there is no limit.
@@ -143,9 +143,7 @@ public class MutableColumnFamilyOptions {
level0_file_num_compaction_trigger(ValueType.INT),
level0_slowdown_writes_trigger(ValueType.INT),
level0_stop_writes_trigger(ValueType.INT),
max_grandparent_overlap_factor(ValueType.INT),
expanded_compaction_factor(ValueType.INT),
source_compaction_factor(ValueType.INT),
max_compaction_bytes(ValueType.LONG),
target_file_size_base(ValueType.LONG),
target_file_size_multiplier(ValueType.INT),
max_bytes_for_level_base(ValueType.LONG),
@@ -797,40 +795,15 @@ public class MutableColumnFamilyOptions {
}

@Override
public MutableColumnFamilyOptionsBuilder setMaxGrandparentOverlapFactor(
final int maxGrandparentOverlapFactor) {
return setInt(CompactionOption.max_grandparent_overlap_factor,
maxGrandparentOverlapFactor);
public MutableColumnFamilyOptionsBuilder setMaxCompactionBytes(final long maxCompactionBytes) {
return setLong(CompactionOption.max_compaction_bytes, maxCompactionBytes);
}

@Override
public int maxGrandparentOverlapFactor() {
return getInt(CompactionOption.max_grandparent_overlap_factor);
public long maxCompactionBytes() {
return getLong(CompactionOption.max_compaction_bytes);
}

@Override
public MutableColumnFamilyOptionsBuilder setExpandedCompactionFactor(
final int expandedCompactionFactor) {
return setInt(CompactionOption.expanded_compaction_factor,
expandedCompactionFactor);
}

@Override
public int expandedCompactionFactor() {
return getInt(CompactionOption.expanded_compaction_factor);
}

@Override
public MutableColumnFamilyOptionsBuilder setSourceCompactionFactor(
final int sourceCompactionFactor) {
return setInt(CompactionOption.source_compaction_factor,
sourceCompactionFactor);
}

@Override
public int sourceCompactionFactor() {
return getInt(CompactionOption.source_compaction_factor);
}

@Override
public MutableColumnFamilyOptionsBuilder setTargetFileSizeBase(
@@ -382,79 +382,25 @@ public interface MutableColumnFamilyOptionsInterface {
int level0StopWritesTrigger();

/**
* Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
* stop building a single file in a level->level+1 compaction.
* We try to limit number of bytes in one compaction to be lower than this
* threshold. But it's not guaranteed.
* Value 0 will be sanitized.
*
* @param maxGrandparentOverlapFactor maximum bytes of overlaps in
* "grandparent" level.
* @param maxCompactionBytes the maximum number of bytes in a compaction
* @return the reference to the current option.
* @see #maxCompactionBytes()
*/
MutableColumnFamilyOptionsInterface setMaxGrandparentOverlapFactor(
int maxGrandparentOverlapFactor);
MutableColumnFamilyOptionsInterface setMaxCompactionBytes(final long maxCompactionBytes);

/**
* Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
* stop building a single file in a level->level+1 compaction.
* We try to limit number of bytes in one compaction to be lower than this
* threshold. But it's not guaranteed.
* Value 0 will be sanitized.
*
* @return maximum bytes of overlaps in "grandparent" level.
* @return the maximum number of bytes in a compaction.
* @see #setMaxCompactionBytes(long)
*/
int maxGrandparentOverlapFactor();

/**
* Maximum number of bytes in all compacted files. We avoid expanding
* the lower level file set of a compaction if it would make the
* total compaction cover more than
* (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
*
* @param expandedCompactionFactor the maximum number of bytes in all
* compacted files.
* @return the reference to the current option.
* @see #setSourceCompactionFactor(int)
*/
MutableColumnFamilyOptionsInterface setExpandedCompactionFactor(
int expandedCompactionFactor);

/**
* Maximum number of bytes in all compacted files. We avoid expanding
* the lower level file set of a compaction if it would make the
* total compaction cover more than
* (expanded_compaction_factor * targetFileSizeLevel()) many bytes.
*
* @return the maximum number of bytes in all compacted files.
* @see #sourceCompactionFactor()
*/
int expandedCompactionFactor();

/**
* Maximum number of bytes in all source files to be compacted in a
* single compaction run. We avoid picking too many files in the
* source level so that we do not exceed the total source bytes
* for compaction to exceed
* (source_compaction_factor * targetFileSizeLevel()) many bytes.
* Default:1, i.e. pick maxfilesize amount of data as the source of
* a compaction.
*
* @param sourceCompactionFactor the maximum number of bytes in all
* source files to be compacted in a single compaction run.
* @return the reference to the current option.
* @see #setExpandedCompactionFactor(int)
*/
MutableColumnFamilyOptionsInterface setSourceCompactionFactor(
int sourceCompactionFactor);

/**
* Maximum number of bytes in all source files to be compacted in a
* single compaction run. We avoid picking too many files in the
* source level so that we do not exceed the total source bytes
* for compaction to exceed
* (source_compaction_factor * targetFileSizeLevel()) many bytes.
* Default:1, i.e. pick maxfilesize amount of data as the source of
* a compaction.
*
* @return the maximum number of bytes in all source files to be compacted.
* @see #expandedCompactionFactor()
*/
int sourceCompactionFactor();
long maxCompactionBytes();

/**
* The target file size for compaction.
@@ -907,37 +907,13 @@ public class Options extends RocksObject
}

@Override
public int expandedCompactionFactor() {
return expandedCompactionFactor(nativeHandle_);
public long maxCompactionBytes() {
return maxCompactionBytes(nativeHandle_);
}

@Override
public Options setExpandedCompactionFactor(
final int expandedCompactionFactor) {
setExpandedCompactionFactor(nativeHandle_, expandedCompactionFactor);
return this;
}

@Override
public int sourceCompactionFactor() {
return sourceCompactionFactor(nativeHandle_);
}

@Override
public Options setSourceCompactionFactor(int sourceCompactionFactor) {
setSourceCompactionFactor(nativeHandle_, sourceCompactionFactor);
return this;
}

@Override
public int maxGrandparentOverlapFactor() {
return maxGrandparentOverlapFactor(nativeHandle_);
}

@Override
public Options setMaxGrandparentOverlapFactor(
final int maxGrandparentOverlapFactor) {
setMaxGrandparentOverlapFactor(nativeHandle_, maxGrandparentOverlapFactor);
public Options setMaxCompactionBytes(final long maxCompactionBytes) {
setMaxCompactionBytes(nativeHandle_, maxCompactionBytes);
return this;
}

@@ -1398,15 +1374,8 @@ public class Options extends RocksObject
private native void setMaxBytesForLevelMultiplier(
long handle, int multiplier);
private native int maxBytesForLevelMultiplier(long handle);
private native void setExpandedCompactionFactor(
long handle, int expandedCompactionFactor);
private native int expandedCompactionFactor(long handle);
private native void setSourceCompactionFactor(
long handle, int sourceCompactionFactor);
private native int sourceCompactionFactor(long handle);
private native void setMaxGrandparentOverlapFactor(
long handle, int maxGrandparentOverlapFactor);
private native int maxGrandparentOverlapFactor(long handle);
private native void setMaxCompactionBytes(long handle, long maxCompactionBytes);
private native long maxCompactionBytes(long handle);
private native void setSoftRateLimit(
long handle, double softRateLimit);
private native double softRateLimit(long handle);
@@ -190,29 +190,11 @@ public class ColumnFamilyOptionsTest {
}

@Test
public void expandedCompactionFactor() {
public void maxCompactionBytes() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setExpandedCompactionFactor(intValue);
assertThat(opt.expandedCompactionFactor()).isEqualTo(intValue);
}
}

@Test
public void sourceCompactionFactor() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setSourceCompactionFactor(intValue);
assertThat(opt.sourceCompactionFactor()).isEqualTo(intValue);
}
}

@Test
public void maxGrandparentOverlapFactor() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final int intValue = rand.nextInt();
opt.setMaxGrandparentOverlapFactor(intValue);
assertThat(opt.maxGrandparentOverlapFactor()).isEqualTo(intValue);
final long longValue = rand.nextLong();
opt.setMaxCompactionBytes(longValue);
assertThat(opt.maxCompactionBytes()).isEqualTo(longValue);
}
}
@@ -153,29 +153,11 @@ public class OptionsTest {
}

@Test
public void expandedCompactionFactor() {
public void maxCompactionBytes() {
try (final Options opt = new Options()) {
final int intValue = rand.nextInt();
opt.setExpandedCompactionFactor(intValue);
assertThat(opt.expandedCompactionFactor()).isEqualTo(intValue);
}
}

@Test
public void sourceCompactionFactor() {
try (final Options opt = new Options()) {
final int intValue = rand.nextInt();
opt.setSourceCompactionFactor(intValue);
assertThat(opt.sourceCompactionFactor()).isEqualTo(intValue);
}
}

@Test
public void maxGrandparentOverlapFactor() {
try (final Options opt = new Options()) {
final int intValue = rand.nextInt();
opt.setMaxGrandparentOverlapFactor(intValue);
assertThat(opt.maxGrandparentOverlapFactor()).isEqualTo(intValue);
final long longValue = rand.nextLong();
opt.setMaxCompactionBytes(longValue);
assertThat(opt.maxCompactionBytes()).isEqualTo(longValue);
}
}
@@ -28,12 +28,8 @@ public class TtlDBTest {

@Test
public void ttlDBOpen() throws RocksDBException, InterruptedException {
try (final Options options = new Options()
.setCreateIfMissing(true)
.setMaxGrandparentOverlapFactor(0);
final TtlDB ttlDB = TtlDB.open(options,
dbFolder.getRoot().getAbsolutePath())
) {
try (final Options options = new Options().setCreateIfMissing(true).setMaxCompactionBytes(0);
final TtlDB ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath())) {
ttlDB.put("key".getBytes(), "value".getBytes());
assertThat(ttlDB.get("key".getBytes())).
isEqualTo("value".getBytes());
@@ -43,12 +39,8 @@ public class TtlDBTest {

@Test
public void ttlDBOpenWithTtl() throws RocksDBException, InterruptedException {
try (final Options options = new Options()
.setCreateIfMissing(true)
.setMaxGrandparentOverlapFactor(0);
final TtlDB ttlDB = TtlDB.open(options,
dbFolder.getRoot().getAbsolutePath(), 1, false);
) {
try (final Options options = new Options().setCreateIfMissing(true).setMaxCompactionBytes(0);
final TtlDB ttlDB = TtlDB.open(options, dbFolder.getRoot().getAbsolutePath(), 1, false);) {
ttlDB.put("key".getBytes(), "value".getBytes());
assertThat(ttlDB.get("key".getBytes())).
isEqualTo("value".getBytes());
@@ -733,9 +733,8 @@ DEFINE_uint64(
"If non-zero, db_bench will rate-limit the writes going into RocksDB. This "
"is the global rate in bytes/second.");

DEFINE_int32(max_grandparent_overlap_factor, 10, "Control maximum bytes of "
"overlaps in grandparent (i.e., level+2) before we stop building a"
" single file in a level->level+1 compaction.");
DEFINE_uint64(max_compaction_bytes, rocksdb::Options().max_compaction_bytes,
"Max bytes allowed in one compaction");

#ifndef ROCKSDB_LITE
DEFINE_bool(readonly, false, "Run read only benchmarks.");
@@ -743,10 +742,6 @@ DEFINE_bool(readonly, false, "Run read only benchmarks.");

DEFINE_bool(disable_auto_compactions, false, "Do not auto trigger compactions");

DEFINE_int32(source_compaction_factor, 1, "Cap the size of data in level-K for"
" a compaction run that compacts Level-K with Level-(K+1) (for"
" K >= 1)");

DEFINE_uint64(wal_ttl_seconds, 0, "Set the TTL for the WAL Files in seconds.");
DEFINE_uint64(wal_size_limit_MB, 0, "Set the size limit for the WAL Files"
" in MB.");
@@ -2862,10 +2857,8 @@ class Benchmark {
options.rate_limit_delay_max_milliseconds =
FLAGS_rate_limit_delay_max_milliseconds;
options.table_cache_numshardbits = FLAGS_table_cache_numshardbits;
options.max_grandparent_overlap_factor =
FLAGS_max_grandparent_overlap_factor;
options.max_compaction_bytes = FLAGS_max_compaction_bytes;
options.disable_auto_compactions = FLAGS_disable_auto_compactions;
options.source_compaction_factor = FLAGS_source_compaction_factor;
options.optimize_filters_for_hits = FLAGS_optimize_filters_for_hits;

// fill storage options
@@ -1089,23 +1089,11 @@ class StressTest {
ToString(FLAGS_level0_stop_writes_trigger + 2),
ToString(FLAGS_level0_stop_writes_trigger + 4),
}},
{"max_grandparent_overlap_factor",
{"max_compaction_bytes",
{
ToString(Options().max_grandparent_overlap_factor - 5),
ToString(Options().max_grandparent_overlap_factor),
ToString(Options().max_grandparent_overlap_factor + 5),
}},
{"expanded_compaction_factor",
{
ToString(Options().expanded_compaction_factor - 5),
ToString(Options().expanded_compaction_factor),
ToString(Options().expanded_compaction_factor + 5),
}},
{"source_compaction_factor",
{
ToString(Options().source_compaction_factor),
ToString(Options().source_compaction_factor * 2),
ToString(Options().source_compaction_factor * 4),
ToString(FLAGS_target_file_size_base * 5),
ToString(FLAGS_target_file_size_base * 15),
ToString(FLAGS_target_file_size_base * 100),
}},
{"target_file_size_base",
{
@ -55,12 +55,6 @@ uint64_t MutableCFOptions::MaxFileSizeForLevel(int level) const {
assert(level < (int)max_file_size.size());
return max_file_size[level];
}
uint64_t MutableCFOptions::MaxGrandParentOverlapBytes(int level) const {
return MaxFileSizeForLevel(level) * max_grandparent_overlap_factor;
}
uint64_t MutableCFOptions::ExpandedCompactionByteSizeLimit(int level) const {
return MaxFileSizeForLevel(level) * expanded_compaction_factor;
}

void MutableCFOptions::Dump(Logger* log) const {
// Memtable related options
@ -88,12 +82,8 @@ void MutableCFOptions::Dump(Logger* log) const {
level0_slowdown_writes_trigger);
Log(log, " level0_stop_writes_trigger: %d",
level0_stop_writes_trigger);
Log(log, " max_grandparent_overlap_factor: %d",
max_grandparent_overlap_factor);
Log(log, " expanded_compaction_factor: %d",
expanded_compaction_factor);
Log(log, " source_compaction_factor: %d",
source_compaction_factor);
Log(log, " max_compaction_bytes: %" PRIu64,
max_compaction_bytes);
Log(log, " target_file_size_base: %" PRIu64,
target_file_size_base);
Log(log, " target_file_size_multiplier: %d",
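The deleted helpers computed their limits per level as MaxFileSizeForLevel(level) times a factor; the merged option is a flat byte cap that no longer scales with the level's file size. A rough, hypothetical translation for tuning code being ported, using the old defaults that appear later in this diff (expanded_compaction_factor 25, max_grandparent_overlap_factor 10) and target_file_size_base as a stand-in for the per-level file size:

#include <algorithm>
#include <cstdint>

#include <rocksdb/options.h>

// Hypothetical helper, not part of RocksDB: approximate the old factor-based
// limits with the single max_compaction_bytes knob by taking the larger of
// the two derived budgets, so compactions are not cut shorter than before.
void ApproximateOldCompactionLimits(rocksdb::ColumnFamilyOptions* cf_opts,
                                    int old_expanded_compaction_factor,
                                    int old_max_grandparent_overlap_factor) {
  const uint64_t per_file = cf_opts->target_file_size_base;
  cf_opts->max_compaction_bytes =
      std::max(per_file * old_expanded_compaction_factor,
               per_file * old_max_grandparent_overlap_factor);
}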
@ -32,9 +32,7 @@ struct MutableCFOptions {
level0_slowdown_writes_trigger(options.level0_slowdown_writes_trigger),
level0_stop_writes_trigger(options.level0_stop_writes_trigger),
compaction_pri(options.compaction_pri),
max_grandparent_overlap_factor(options.max_grandparent_overlap_factor),
expanded_compaction_factor(options.expanded_compaction_factor),
source_compaction_factor(options.source_compaction_factor),
max_compaction_bytes(options.max_compaction_bytes),
target_file_size_base(options.target_file_size_base),
target_file_size_multiplier(options.target_file_size_multiplier),
max_bytes_for_level_base(options.max_bytes_for_level_base),
@ -67,9 +65,7 @@ struct MutableCFOptions {
level0_slowdown_writes_trigger(0),
level0_stop_writes_trigger(0),
compaction_pri(kByCompensatedSize),
max_grandparent_overlap_factor(0),
expanded_compaction_factor(0),
source_compaction_factor(0),
max_compaction_bytes(0),
target_file_size_base(0),
target_file_size_multiplier(0),
max_bytes_for_level_base(0),
@ -87,11 +83,6 @@ struct MutableCFOptions {

// Get the max file size in a given level.
uint64_t MaxFileSizeForLevel(int level) const;
// Returns maximum total overlap bytes with grandparent
// level (i.e., level+2) before we stop building a single
// file in level->level+1 compaction.
uint64_t MaxGrandParentOverlapBytes(int level) const;
uint64_t ExpandedCompactionByteSizeLimit(int level) const;
int MaxBytesMultiplerAdditional(int level) const {
if (level >=
static_cast<int>(max_bytes_for_level_multiplier_additional.size())) {
@ -119,9 +110,7 @@ struct MutableCFOptions {
int level0_slowdown_writes_trigger;
int level0_stop_writes_trigger;
CompactionPri compaction_pri;
int max_grandparent_overlap_factor;
int expanded_compaction_factor;
int source_compaction_factor;
uint64_t max_compaction_bytes;
uint64_t target_file_size_base;
int target_file_size_multiplier;
uint64_t max_bytes_for_level_base;
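Because max_compaction_bytes is carried in MutableCFOptions alongside the other per-column-family compaction settings, it can presumably be adjusted on a running database through DB::SetOptions, as in this sketch (the 256 MB value is arbitrary):

#include <rocksdb/db.h>

// Sketch only: raise the per-compaction byte budget on a live DB via the
// string-based SetOptions interface.
rocksdb::Status RaiseCompactionBudget(rocksdb::DB* db) {
  return db->SetOptions({{"max_compaction_bytes", "268435456"}});  // 256 MB
}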
@ -99,9 +99,7 @@ ColumnFamilyOptions::ColumnFamilyOptions()
level_compaction_dynamic_level_bytes(false),
max_bytes_for_level_multiplier(10),
max_bytes_for_level_multiplier_additional(num_levels, 1),
expanded_compaction_factor(25),
source_compaction_factor(1),
max_grandparent_overlap_factor(10),
max_compaction_bytes(0),
soft_rate_limit(0.0),
hard_rate_limit(0.0),
soft_pending_compaction_bytes_limit(64 * 1073741824ull),
@ -160,9 +158,7 @@ ColumnFamilyOptions::ColumnFamilyOptions(const Options& options)
max_bytes_for_level_multiplier(options.max_bytes_for_level_multiplier),
max_bytes_for_level_multiplier_additional(
options.max_bytes_for_level_multiplier_additional),
expanded_compaction_factor(options.expanded_compaction_factor),
source_compaction_factor(options.source_compaction_factor),
max_grandparent_overlap_factor(options.max_grandparent_overlap_factor),
max_compaction_bytes(options.max_compaction_bytes),
soft_rate_limit(options.soft_rate_limit),
soft_pending_compaction_bytes_limit(
options.soft_pending_compaction_bytes_limit),
@ -541,13 +537,8 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
}
Header(log, " Options.max_sequential_skip_in_iterations: %" PRIu64,
max_sequential_skip_in_iterations);
Header(log, " Options.expanded_compaction_factor: %d",
expanded_compaction_factor);
Header(log, " Options.source_compaction_factor: %d",
source_compaction_factor);
Header(log, " Options.max_grandparent_overlap_factor: %d",
max_grandparent_overlap_factor);

Header(log, " Options.max_compaction_bytes: %" PRIu64,
max_compaction_bytes);
Header(log,
" Options.arena_block_size: %" ROCKSDB_PRIszt,
arena_block_size);
@ -641,10 +632,9 @@ Options::PrepareForBulkLoad()
// manual compaction after all data is loaded into L0.
disable_auto_compactions = true;
disableDataSync = true;

// A manual compaction run should pick all files in L0 in
// a single compaction run.
source_compaction_factor = (1<<30);
max_compaction_bytes = (static_cast<uint64_t>(1) << 60);

// It is better to have only 2 levels, otherwise a manual
// compaction would compact at every possible level, thereby
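PrepareForBulkLoad now expresses "pick everything in one manual compaction" as an effectively unlimited byte budget (1 << 60) instead of a huge source_compaction_factor. A sketch of the bulk-load flow this serves, with an illustrative path and the ingest step elided:

#include <string>

#include <rocksdb/db.h>
#include <rocksdb/options.h>

rocksdb::Status BulkLoadThenCompact(const std::string& path) {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.PrepareForBulkLoad();  // disables auto compactions, lifts max_compaction_bytes

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, path, &db);
  if (!s.ok()) {
    return s;
  }

  // ... bulk ingest into L0 here ...

  // One manual compaction is now allowed to pick up everything at once.
  s = db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);
  delete db;
  return s;
}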
@ -593,11 +593,13 @@ bool ParseCompactionOptions(const std::string& name, const std::string& value,
} else if (name == "level0_stop_writes_trigger") {
new_options->level0_stop_writes_trigger = ParseInt(value);
} else if (name == "max_grandparent_overlap_factor") {
new_options->max_grandparent_overlap_factor = ParseInt(value);
// Deprecated
} else if (name == "max_compaction_bytes") {
new_options->max_compaction_bytes = ParseUint64(value);
} else if (name == "expanded_compaction_factor") {
new_options->expanded_compaction_factor = ParseInt(value);
// Deprecated
} else if (name == "source_compaction_factor") {
new_options->source_compaction_factor = ParseInt(value);
// Deprecated
} else if (name == "target_file_size_base") {
new_options->target_file_size_base = ParseInt(value);
} else if (name == "target_file_size_multiplier") {
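In the string/map parsing path the merged option takes a plain unsigned byte count, while the three retired names are still recognized but treated as deprecated. A sketch, assuming GetColumnFamilyOptionsFromString from rocksdb/convenience.h and illustrative values:

#include <rocksdb/convenience.h>
#include <rocksdb/options.h>

rocksdb::ColumnFamilyOptions ConfigureFromString() {
  rocksdb::ColumnFamilyOptions base;
  rocksdb::ColumnFamilyOptions out;
  // max_compaction_bytes is parsed with ParseUint64, so the value is bytes.
  rocksdb::Status s = rocksdb::GetColumnFamilyOptionsFromString(
      base,
      "target_file_size_base=67108864;max_compaction_bytes=1073741824",
      &out);
  return s.ok() ? out : base;
}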
@ -1479,12 +1481,7 @@ ColumnFamilyOptions BuildColumnFamilyOptions(
mutable_cf_options.level0_slowdown_writes_trigger;
cf_opts.level0_stop_writes_trigger =
mutable_cf_options.level0_stop_writes_trigger;
cf_opts.max_grandparent_overlap_factor =
mutable_cf_options.max_grandparent_overlap_factor;
cf_opts.expanded_compaction_factor =
mutable_cf_options.expanded_compaction_factor;
cf_opts.source_compaction_factor =
mutable_cf_options.source_compaction_factor;
cf_opts.max_compaction_bytes = mutable_cf_options.max_compaction_bytes;
cf_opts.target_file_size_base = mutable_cf_options.target_file_size_base;
cf_opts.target_file_size_multiplier =
mutable_cf_options.target_file_size_multiplier;
@ -382,9 +382,11 @@ static std::unordered_map<std::string, OptionTypeInfo> cf_options_type_info = {
{"soft_rate_limit",
{offsetof(struct ColumnFamilyOptions, soft_rate_limit),
OptionType::kDouble, OptionVerificationType::kNormal}},
{"max_compaction_bytes",
{offsetof(struct ColumnFamilyOptions, max_compaction_bytes),
OptionType::kUInt64T, OptionVerificationType::kNormal}},
{"expanded_compaction_factor",
{offsetof(struct ColumnFamilyOptions, expanded_compaction_factor),
OptionType::kInt, OptionVerificationType::kNormal}},
{0, OptionType::kInt, OptionVerificationType::kDeprecated}},
{"level0_file_num_compaction_trigger",
{offsetof(struct ColumnFamilyOptions, level0_file_num_compaction_trigger),
OptionType::kInt, OptionVerificationType::kNormal}},
@ -398,8 +400,7 @@ static std::unordered_map<std::string, OptionTypeInfo> cf_options_type_info = {
{offsetof(struct ColumnFamilyOptions, max_bytes_for_level_multiplier),
OptionType::kInt, OptionVerificationType::kNormal}},
{"max_grandparent_overlap_factor",
{offsetof(struct ColumnFamilyOptions, max_grandparent_overlap_factor),
OptionType::kInt, OptionVerificationType::kNormal}},
{0, OptionType::kInt, OptionVerificationType::kDeprecated}},
{"max_mem_compaction_level",
{offsetof(struct ColumnFamilyOptions, max_mem_compaction_level),
OptionType::kInt, OptionVerificationType::kDeprecated}},
@ -416,8 +417,7 @@ static std::unordered_map<std::string, OptionTypeInfo> cf_options_type_info = {
{offsetof(struct ColumnFamilyOptions, num_levels), OptionType::kInt,
OptionVerificationType::kNormal}},
{"source_compaction_factor",
{offsetof(struct ColumnFamilyOptions, source_compaction_factor),
OptionType::kInt, OptionVerificationType::kNormal}},
{0, OptionType::kInt, OptionVerificationType::kDeprecated}},
{"target_file_size_multiplier",
{offsetof(struct ColumnFamilyOptions, target_file_size_multiplier),
OptionType::kInt, OptionVerificationType::kNormal}},
@ -397,11 +397,10 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
"max_sequential_skip_in_iterations=4294971408;"
"arena_block_size=1893;"
"target_file_size_multiplier=35;"
"source_compaction_factor=54;"
"min_write_buffer_number_to_merge=9;"
"max_write_buffer_number=84;"
"write_buffer_size=1653;"
"max_grandparent_overlap_factor=64;"
"max_compaction_bytes=64;"
"max_bytes_for_level_multiplier=60;"
"memtable_factory=SkipListFactory;"
"compression=kNoCompression;"
@ -411,7 +410,6 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
"num_levels=99;"
"level0_slowdown_writes_trigger=22;"
"level0_file_num_compaction_trigger=14;"
"expanded_compaction_factor=34;"
"compaction_filter=urxcqstuwnCompactionFilter;"
"soft_rate_limit=530.615385;"
"soft_pending_compaction_bytes_limit=0;"
@ -69,9 +69,7 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
{"level_compaction_dynamic_level_bytes", "true"},
{"max_bytes_for_level_multiplier", "15"},
{"max_bytes_for_level_multiplier_additional", "16:17:18"},
{"expanded_compaction_factor", "19"},
{"source_compaction_factor", "20"},
{"max_grandparent_overlap_factor", "21"},
{"max_compaction_bytes", "21"},
{"soft_rate_limit", "1.1"},
{"hard_rate_limit", "2.1"},
{"hard_pending_compaction_bytes_limit", "211"},
@ -169,9 +167,7 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[0], 16);
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[1], 17);
ASSERT_EQ(new_cf_opt.max_bytes_for_level_multiplier_additional[2], 18);
ASSERT_EQ(new_cf_opt.expanded_compaction_factor, 19);
ASSERT_EQ(new_cf_opt.source_compaction_factor, 20);
ASSERT_EQ(new_cf_opt.max_grandparent_overlap_factor, 21);
ASSERT_EQ(new_cf_opt.max_compaction_bytes, 21);
ASSERT_EQ(new_cf_opt.soft_rate_limit, 1.1);
ASSERT_EQ(new_cf_opt.hard_pending_compaction_bytes_limit, 211);
ASSERT_EQ(new_cf_opt.arena_block_size, 22U);
@ -310,18 +310,15 @@ void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, Random* rnd) {
static_cast<double>(rnd->Uniform(10000)) / 20000.0;

// int options
cf_opt->expanded_compaction_factor = rnd->Uniform(100);
cf_opt->level0_file_num_compaction_trigger = rnd->Uniform(100);
cf_opt->level0_slowdown_writes_trigger = rnd->Uniform(100);
cf_opt->level0_stop_writes_trigger = rnd->Uniform(100);
cf_opt->max_bytes_for_level_multiplier = rnd->Uniform(100);
cf_opt->max_grandparent_overlap_factor = rnd->Uniform(100);
cf_opt->max_mem_compaction_level = rnd->Uniform(100);
cf_opt->max_write_buffer_number = rnd->Uniform(100);
cf_opt->max_write_buffer_number_to_maintain = rnd->Uniform(100);
cf_opt->min_write_buffer_number_to_merge = rnd->Uniform(100);
cf_opt->num_levels = rnd->Uniform(100);
cf_opt->source_compaction_factor = rnd->Uniform(100);
cf_opt->target_file_size_multiplier = rnd->Uniform(100);

// size_t options
@ -340,6 +337,8 @@ void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, Random* rnd) {
static const uint64_t uint_max = static_cast<uint64_t>(UINT_MAX);
cf_opt->max_sequential_skip_in_iterations = uint_max + rnd->Uniform(10000);
cf_opt->target_file_size_base = uint_max + rnd->Uniform(10000);
cf_opt->max_compaction_bytes =
cf_opt->target_file_size_base * rnd->Uniform(100);

// unsigned int options
cf_opt->rate_limit_delay_max_milliseconds = rnd->Uniform(10000);
@ -47,7 +47,7 @@ class TtlTest : public testing::Test {
options_.create_if_missing = true;
options_.env = env_.get();
// ensure that compaction is kicked in to always strip timestamp from kvs
options_.max_grandparent_overlap_factor = 0;
options_.max_compaction_bytes = 1;
// compaction should take place always from level0 for determinism
db_ttl_ = nullptr;
DestroyDB(dbname_, Options());
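The TTL test's replacement for max_grandparent_overlap_factor = 0 is a one-byte max_compaction_bytes, which the surrounding comments tie to keeping compactions kicking in deterministically so expired entries are stripped. A sketch of an equivalently configured DBWithTTL open, with an illustrative path and TTL:

#include <string>

#include <rocksdb/utilities/db_ttl.h>

rocksdb::Status OpenTinyCompactionTtlDb(const std::string& path,
                                        rocksdb::DBWithTTL** db) {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.max_compaction_bytes = 1;  // mirror the test's deliberately tiny budget
  return rocksdb::DBWithTTL::Open(options, path, db, /*ttl=*/1);
}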