Change some RocksDB default options
Summary: Change some RocksDB default options to make them friendlier to server workloads.

Test Plan: Run all existing tests.

Reviewers: yhchiang, IslamAbdelRahman

Reviewed By: IslamAbdelRahman

Subscribers: sumeet, muthu, benj, MarkCallaghan, igor, leveldb, andrewkr, dhruba

Differential Revision: https://reviews.facebook.net/D55941
parent a558830f8f
commit 2feafa3db9
DEFAULT_OPTIONS_HISTORY.md (new file, 6 additions)
@@ -0,0 +1,6 @@
+## Unreleased
+* options.write_buffer_size changes from 4MB to 64MB
+* options.target_file_size_base changes from 2MB to 64MB
+* options.max_bytes_for_level_base changes from 10MB to 256MB
+* options.soft_pending_compaction_bytes_limit changes from 0 (disabled) to 64GB
+* options.hard_pending_compaction_bytes_limit changes from 0 (disabled) to 256GB
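The list above is the substance of the change. As a quick illustration (not part of the commit), the snippet below checks the new defaults on a default-constructed ColumnFamilyOptions and shows how the previous sizes can still be pinned explicitly; the assertions assume a build that includes this change.

```cpp
#include <cassert>
#include "rocksdb/options.h"

int main() {
  rocksdb::ColumnFamilyOptions cf_opts;
  // New server-friendly defaults introduced by this commit.
  assert(cf_opts.write_buffer_size == 64u << 20);                          // was 4MB
  assert(cf_opts.target_file_size_base == 64u * 1048576);                  // was 2MB
  assert(cf_opts.max_bytes_for_level_base == 256u * 1048576);              // was 10MB
  assert(cf_opts.soft_pending_compaction_bytes_limit == 64ull * 1073741824);   // was 0 (off)
  assert(cf_opts.hard_pending_compaction_bytes_limit == 256ull * 1073741824);  // was 0 (off)

  // Workloads tuned around the old sizes can pin them by hand instead.
  cf_opts.write_buffer_size = 4 << 20;
  cf_opts.target_file_size_base = 2 * 1048576;
  cf_opts.max_bytes_for_level_base = 10 * 1048576;
  return 0;
}
```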
@@ -1,4 +1,8 @@
 # Rocksdb Change Log
+## Unreleased
+### Public API Change
+* Change some default options. Now default options will optimize for server-workloads. Also enable slowdown and full stop triggers for pending compaction bytes. These changes may cause sub-optimal performance or significant increase of resource usage. To avoid these risks, users can open existing RocksDB with options extracted from RocksDB option files. See https://github.com/facebook/rocksdb/wiki/RocksDB-Options-File for how to use RocksDB option files. Or you can call Options.OldDefaults() to recover old defaults. DEFAULT_OPTIONS_HISTORY.md will track change history of default options.
+
 ## 4.6.0 (3/10/2016)
 ### Public API Changes
 * Change default of BlockBasedTableOptions.format_version to 2. It means default DB created by 4.6 or up cannot be opened by RocksDB version 3.9 or earlier.
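For existing databases tuned around the old defaults, the release note offers two escape hatches: reopen with options extracted from an options file, or call Options::OldDefaults(). A minimal sketch of the second path, assuming a build that ships this commit; the database path is a placeholder.

```cpp
#include <string>
#include "rocksdb/db.h"
#include "rocksdb/options.h"

// Sketch only: recover the pre-change defaults before opening an existing DB.
rocksdb::DB* OpenWithOldDefaults(const std::string& path /* placeholder */) {
  rocksdb::Options options;
  options.OldDefaults();        // added by this commit; resets the changed fields
  options.create_if_missing = false;
  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, path, &db);
  return s.ok() ? db : nullptr;  // alternatively, load options from an options file
}
```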
@@ -195,6 +195,7 @@ TEST_F(CompactionPickerTest, LevelMaxScore) {
   NewVersionStorage(6, kCompactionStyleLevel);
   mutable_cf_options_.target_file_size_base = 10000000;
   mutable_cf_options_.target_file_size_multiplier = 10;
+  mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024;
   Add(0, 1U, "150", "200", 1000000000U);
   // Level 1 score 1.2
   Add(1, 66U, "150", "200", 6000000U);

@@ -491,6 +492,7 @@ TEST_F(CompactionPickerTest, CompactionPriMinOverlapping1) {
   NewVersionStorage(6, kCompactionStyleLevel);
   mutable_cf_options_.target_file_size_base = 10000000;
   mutable_cf_options_.target_file_size_multiplier = 10;
+  mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024;
   mutable_cf_options_.compaction_pri = kMinOverlappingRatio;

   Add(2, 6U, "150", "179", 50000000U);

@@ -517,6 +519,7 @@ TEST_F(CompactionPickerTest, CompactionPriMinOverlapping2) {
   NewVersionStorage(6, kCompactionStyleLevel);
   mutable_cf_options_.target_file_size_base = 10000000;
   mutable_cf_options_.target_file_size_multiplier = 10;
+  mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024;
   mutable_cf_options_.compaction_pri = kMinOverlappingRatio;

   Add(2, 6U, "150", "175",

@@ -544,20 +547,21 @@ TEST_F(CompactionPickerTest, CompactionPriMinOverlapping2) {

 TEST_F(CompactionPickerTest, CompactionPriMinOverlapping3) {
   NewVersionStorage(6, kCompactionStyleLevel);
-  mutable_cf_options_.target_file_size_base = 10000000;
-  mutable_cf_options_.target_file_size_multiplier = 10;
+  mutable_cf_options_.max_bytes_for_level_base = 10000000;
+  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
   mutable_cf_options_.compaction_pri = kMinOverlappingRatio;

   // file 7 and 8 over lap with the same file, but file 8 is smaller so
   // it will be picked.
-  Add(2, 6U, "150", "175", 60000000U); // Overlaps with file 26, 27
-  Add(2, 7U, "176", "200", 60000000U); // Overlaps with file 27
-  Add(2, 8U, "201", "300", 61000000U); // Overlaps with file 27
+  Add(2, 6U, "150", "167", 60000000U); // Overlaps with file 26, 27
+  Add(2, 7U, "168", "169", 60000000U); // Overlaps with file 27
+  Add(2, 8U, "201", "300", 61000000U); // Overlaps with file 28, but the file
+                                       // itself is larger. Should be picked.

   Add(3, 26U, "160", "165", 260000000U);
-  Add(3, 26U, "166", "170", 260000000U);
-  Add(3, 27U, "180", "400", 260000000U);
-  Add(3, 28U, "401", "500", 260000000U);
+  Add(3, 27U, "166", "170", 260000000U);
+  Add(3, 28U, "180", "400", 260000000U);
+  Add(3, 29U, "401", "500", 260000000U);
   UpdateVersionStorageInfo();

   std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
@@ -368,11 +368,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
 // entries are deleted. The compaction should create bunch of 'DeleteFile'
 // entries in VersionEdit, but none of the 'AddFile's.
 TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_filter_factory = std::make_shared<DeleteFilterFactory>();
   options.disable_auto_compactions = true;
   options.create_if_missing = true;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   // put some data

@@ -400,11 +399,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {

 TEST_F(DBTestCompactionFilter, CompactionFilterWithValueChange) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.num_levels = 3;
     options.compaction_filter_factory =
         std::make_shared<ChangeFilterFactory>();
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     // Write 100K+1 keys, these are written to a few files

@@ -467,8 +465,7 @@ TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
   PutFixed64(&three, 3);
   PutFixed64(&four, 4);

-  Options options;
-  options = CurrentOptions(options);
+  Options options = CurrentOptions();
   options.create_if_missing = true;
   options.merge_operator = MergeOperators::CreateUInt64AddOperator();
   options.num_levels = 3;

@@ -621,11 +618,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextCfId) {
 // Compaction filters should only be applied to records that are newer than the
 // latest snapshot. This test inserts records and applies a delete filter.
 TEST_F(DBTestCompactionFilter, CompactionFilterSnapshot) {
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_filter_factory = std::make_shared<DeleteFilterFactory>();
   options.disable_auto_compactions = true;
   options.create_if_missing = true;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   // Put some data.

@@ -659,11 +655,10 @@ TEST_F(DBTestCompactionFilter, CompactionFilterSnapshot) {
 // records newer than the snapshot will also be processed
 TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
   std::string five = ToString(5);
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_filter_factory = std::make_shared<DeleteISFilterFactory>();
   options.disable_auto_compactions = true;
   options.create_if_missing = true;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   // Put some data.
@@ -288,8 +288,7 @@ TEST_F(DBCompactionTest, SkipStatsUpdateTest) {
 }

 TEST_F(DBCompactionTest, TestTableReaderForCompaction) {
-  Options options;
-  options = CurrentOptions(options);
+  Options options = CurrentOptions();
   options.env = env_;
   options.new_table_reader_for_compaction_inputs = true;
   options.max_open_files = 100;

@@ -493,14 +492,13 @@ TEST_F(DBCompactionTest, DisableStatsUpdateReopen) {
 TEST_P(DBCompactionTestWithParam, CompactionTrigger) {
   const int kNumKeysPerFile = 100;

-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 110 << 10; // 110KB
   options.arena_block_size = 4 << 10;
   options.num_levels = 3;
   options.level0_file_num_compaction_trigger = 3;
   options.max_subcompactions = max_subcompactions_;
   options.memtable_factory.reset(new SpecialSkipListFactory(kNumKeysPerFile));
-  options = CurrentOptions(options);
   CreateAndReopenWithCF({"pikachu"}, options);

   Random rnd(301);

@@ -538,7 +536,7 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) {
   // and see number of compactions scheduled to be less than allowed.
   const int kNumKeysPerFile = 100;

-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 110 << 10; // 110KB
   options.arena_block_size = 4 << 10;
   options.num_levels = 3;

@@ -549,7 +547,6 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) {
   options.base_background_compactions = 1;
   options.max_background_compactions = 3;
   options.memtable_factory.reset(new SpecialSkipListFactory(kNumKeysPerFile));
-  options = CurrentOptions(options);

   // Block all threads in thread pool.
   const size_t kTotalTasks = 4;

@@ -632,10 +629,9 @@ TEST_F(DBCompactionTest, BGCompactionsAllowed) {
 }

 TEST_P(DBCompactionTestWithParam, CompactionsGenerateMultipleFiles) {
-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 100000000; // Large write buffer
   options.max_subcompactions = max_subcompactions_;
-  options = CurrentOptions(options);
   CreateAndReopenWithCF({"pikachu"}, options);

   Random rnd(301);

@@ -662,9 +658,8 @@ TEST_P(DBCompactionTestWithParam, CompactionsGenerateMultipleFiles) {

 TEST_F(DBCompactionTest, MinorCompactionsHappen) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.write_buffer_size = 10000;
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     const int N = 500;

@@ -689,7 +684,7 @@ TEST_F(DBCompactionTest, MinorCompactionsHappen) {
 }

 TEST_F(DBCompactionTest, ZeroSeqIdCompaction) {
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleLevel;
   options.level0_file_num_compaction_trigger = 3;

@@ -703,7 +698,6 @@ TEST_F(DBCompactionTest, ZeroSeqIdCompaction) {
   const size_t key_len =
       static_cast<size_t>(compact_opt.output_file_size_limit) / 5;

-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   std::vector<const Snapshot*> snaps;

@@ -747,10 +741,8 @@ TEST_F(DBCompactionTest, ZeroSeqIdCompaction) {
 // if the database is shutdown during the memtable compaction.
 TEST_F(DBCompactionTest, RecoverDuringMemtableCompaction) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.env = env_;
     options.write_buffer_size = 1000000;
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     // Trigger a long memtable compaction and reopen the database during it

@@ -774,10 +766,9 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) {
       [&](void* arg) { trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 100000000;
   options.max_subcompactions = max_subcompactions_;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   int32_t num_keys = 80;

@@ -1369,10 +1360,9 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveToLastLevelWithFiles) {
       [&](void* arg) { non_trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 100000000;
   options.max_subcompactions = max_subcompactions_;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   int32_t value_size = 10 * 1024; // 10 KB

@@ -1661,7 +1651,7 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) {
   int max_key_universal_insert = 600;

   // Stage 1: generate a db with level compaction
-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 110 << 10; // 110KB
   options.arena_block_size = 4 << 10;
   options.num_levels = 4;

@@ -1671,7 +1661,6 @@ TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) {
   options.target_file_size_base = 200 << 10; // 200KB
   options.target_file_size_multiplier = 1;
   options.max_subcompactions = max_subcompactions_;
-  options = CurrentOptions(options);
   CreateAndReopenWithCF({"pikachu"}, options);

   for (int i = 0; i <= max_key_level_insert; i++) {

@@ -2386,10 +2375,9 @@ TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) {
       [&](void* arg) { non_trivial_move++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 100000000;
   options.max_subcompactions = max_subcompactions_;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   int32_t value_size = 10 * 1024; // 10 KB

@@ -2479,14 +2467,14 @@ class CompactionPriTest : public DBTestBase,
 };

 TEST_P(CompactionPriTest, Test) {
-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 16 * 1024;
   options.compaction_pri = static_cast<CompactionPri>(compaction_pri_);
   options.hard_pending_compaction_bytes_limit = 256 * 1024;
   options.max_bytes_for_level_base = 64 * 1024;
   options.max_bytes_for_level_multiplier = 4;
   options.compression = kNoCompression;
-  options = CurrentOptions(options);

   DestroyAndReopen(options);

   Random rnd(301);
@@ -18,12 +18,11 @@ class DBTestInPlaceUpdate : public DBTestBase {

 TEST_F(DBTestInPlaceUpdate, InPlaceUpdate) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.create_if_missing = true;
     options.inplace_update_support = true;
     options.env = env_;
     options.write_buffer_size = 100000;
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     // Update key with values of smaller size

@@ -41,12 +40,11 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdate) {

 TEST_F(DBTestInPlaceUpdate, InPlaceUpdateLargeNewValue) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.create_if_missing = true;
     options.inplace_update_support = true;
     options.env = env_;
     options.write_buffer_size = 100000;
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     // Update key with values of larger size

@@ -64,7 +62,7 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateLargeNewValue) {

 TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerSize) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.create_if_missing = true;
     options.inplace_update_support = true;

@@ -72,7 +70,6 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerSize) {
     options.write_buffer_size = 100000;
     options.inplace_callback =
         rocksdb::DBTestInPlaceUpdate::updateInPlaceSmallerSize;
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     // Update key with values of smaller size

@@ -92,7 +89,7 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerSize) {

 TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerVarintSize) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.create_if_missing = true;
     options.inplace_update_support = true;

@@ -100,7 +97,6 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerVarintSize) {
     options.write_buffer_size = 100000;
     options.inplace_callback =
         rocksdb::DBTestInPlaceUpdate::updateInPlaceSmallerVarintSize;
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     // Update key with values of smaller varint size

@@ -120,7 +116,7 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerVarintSize) {

 TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackLargeNewValue) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.create_if_missing = true;
     options.inplace_update_support = true;

@@ -128,7 +124,6 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackLargeNewValue) {
     options.write_buffer_size = 100000;
     options.inplace_callback =
         rocksdb::DBTestInPlaceUpdate::updateInPlaceLargerSize;
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     // Update key with values of larger size

@@ -146,7 +141,7 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackLargeNewValue) {

 TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackNoAction) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.create_if_missing = true;
     options.inplace_update_support = true;

@@ -154,7 +149,6 @@ TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackNoAction) {
     options.write_buffer_size = 100000;
     options.inplace_callback =
         rocksdb::DBTestInPlaceUpdate::updateInPlaceNoAction;
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     // Callback function requests no actions from db
@@ -228,10 +228,9 @@ TEST_F(DBTest, MemEnvTest) {
 #endif  // ROCKSDB_LITE

 TEST_F(DBTest, WriteEmptyBatch) {
-  Options options;
+  Options options = CurrentOptions();
   options.env = env_;
   options.write_buffer_size = 100000;
-  options = CurrentOptions(options);
   CreateAndReopenWithCF({"pikachu"}, options);

   ASSERT_OK(Put(1, "foo", "bar"));

@@ -281,13 +280,12 @@ TEST_F(DBTest, ReadOnlyDB) {

 TEST_F(DBTest, CompactedDB) {
   const uint64_t kFileSize = 1 << 20;
-  Options options;
+  Options options = CurrentOptions();
   options.disable_auto_compactions = true;
   options.write_buffer_size = kFileSize;
   options.target_file_size_base = kFileSize;
   options.max_bytes_for_level_base = 1 << 30;
   options.compression = kNoCompression;
-  options = CurrentOptions(options);
   Reopen(options);
   // 1 L0 file, use CompactedDB if max_open_files = -1
   ASSERT_OK(Put("aaa", DummyString(kFileSize / 2, '1')));

@@ -764,11 +762,10 @@ TEST_F(DBTest, DISABLED_VeryLargeValue) {
   std::string key1(kKeySize, 'c');
   std::string key2(kKeySize, 'd');

-  Options options;
+  Options options = CurrentOptions();
   options.env = env_;
   options.write_buffer_size = 100000; // Small write buffer
   options.paranoid_checks = true;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   ASSERT_OK(Put("boo", "v1"));

@@ -811,10 +808,8 @@ TEST_F(DBTest, DISABLED_VeryLargeValue) {

 TEST_F(DBTest, GetFromImmutableLayer) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.env = env_;
     options.write_buffer_size = 100000; // Small write buffer
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     ASSERT_OK(Put(1, "foo", "v1"));

@@ -1916,11 +1911,9 @@ TEST_F(DBTest, Recover) {

 TEST_F(DBTest, RecoverWithTableHandle) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.create_if_missing = true;
     options.write_buffer_size = 100;
     options.disable_auto_compactions = true;
-    options = CurrentOptions(options);
     DestroyAndReopen(options);
     CreateAndReopenWithCF({"pikachu"}, options);

@@ -2368,10 +2361,9 @@ TEST_F(DBTest, CompressedCache) {
   // Iteration 4: both block cache and compressed cache, but DB is not
   // compressed
   for (int iter = 0; iter < 4; iter++) {
-    Options options;
+    Options options = CurrentOptions();
     options.write_buffer_size = 64*1024; // small write buffer
     options.statistics = rocksdb::CreateDBStatistics();
-    options = CurrentOptions(options);

     BlockBasedTableOptions table_options;
     switch (iter) {

@@ -2636,10 +2628,9 @@ TEST_F(DBTest, MinLevelToCompress2) {

 TEST_F(DBTest, RepeatedWritesToSameKey) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.env = env_;
     options.write_buffer_size = 100000; // Small write buffer
-    options = CurrentOptions(options);
     CreateAndReopenWithCF({"pikachu"}, options);

     // We must have at most one file per level except for level-0,

@@ -2716,11 +2707,10 @@ static bool Between(uint64_t val, uint64_t low, uint64_t high) {
 }

 TEST_F(DBTest, ApproximateSizesMemTable) {
-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 100000000; // Large write buffer
   options.compression = kNoCompression;
   options.create_if_missing = true;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   const int N = 128;

@@ -2823,11 +2813,10 @@ TEST_F(DBTest, ApproximateSizesMemTable) {

 TEST_F(DBTest, ApproximateSizes) {
   do {
-    Options options;
+    Options options = CurrentOptions();
     options.write_buffer_size = 100000000; // Large write buffer
     options.compression = kNoCompression;
     options.create_if_missing = true;
-    options = CurrentOptions(options);
     DestroyAndReopen(options);
     CreateAndReopenWithCF({"pikachu"}, options);

@@ -7843,8 +7832,7 @@ TEST_F(DBTest, MergeTestTime) {
   this->env_->addon_time_.store(0);
   this->env_->time_elapse_only_sleep_ = true;
   this->env_->no_sleep_ = true;
-  Options options;
-  options = CurrentOptions(options);
+  Options options = CurrentOptions();
   options.statistics = rocksdb::CreateDBStatistics();
   options.merge_operator.reset(new DelayedMergeOperator(this));
   DestroyAndReopen(options);

@@ -7884,8 +7872,7 @@ TEST_F(DBTest, MergeTestTime) {
 #ifndef ROCKSDB_LITE
 TEST_P(DBTestWithParam, MergeCompactionTimeTest) {
   SetPerfLevel(kEnableTime);
-  Options options;
-  options = CurrentOptions(options);
+  Options options = CurrentOptions();
   options.compaction_filter_factory = std::make_shared<KeepFilterFactory>();
   options.statistics = rocksdb::CreateDBStatistics();
   options.merge_operator.reset(new DelayedMergeOperator(this));

@@ -7904,14 +7891,13 @@ TEST_P(DBTestWithParam, MergeCompactionTimeTest) {
 }

 TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_filter_factory =
       std::make_shared<DelayFilterFactory>(this);
   options.disable_auto_compactions = true;
   options.create_if_missing = true;
   options.statistics = rocksdb::CreateDBStatistics();
   options.max_subcompactions = max_subcompactions_;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   // put some data

@@ -7953,9 +7939,8 @@ TEST_F(DBTest, TestLogCleanup) {

 #ifndef ROCKSDB_LITE
 TEST_F(DBTest, EmptyCompactedDB) {
-  Options options;
+  Options options = CurrentOptions();
   options.max_open_files = -1;
-  options = CurrentOptions(options);
   Close();
   ASSERT_OK(ReadOnlyReopen(options));
   Status s = Put("new", "value");

@@ -8191,9 +8176,8 @@ TEST_F(DBTest, AutomaticConflictsWithManualCompaction) {
 // Github issue #595
 // Large write batch with column families
 TEST_F(DBTest, LargeBatchWithColumnFamilies) {
-  Options options;
+  Options options = CurrentOptions();
   options.env = env_;
-  options = CurrentOptions(options);
   options.write_buffer_size = 100000; // Small write buffer
   CreateAndReopenWithCF({"pikachu"}, options);
   int64_t j = 0;

@@ -8287,11 +8271,10 @@ TEST_F(DBTest, DelayedWriteRate) {
   const int kEntriesPerMemTable = 100;
   const int kTotalFlushes = 20;

-  Options options;
+  Options options = CurrentOptions();
   env_->SetBackgroundThreads(1, Env::LOW);
   options.env = env_;
   env_->no_sleep_ = true;
-  options = CurrentOptions(options);
   options.write_buffer_size = 100000000;
   options.max_write_buffer_number = 256;
   options.max_background_compactions = 1;

@@ -8352,10 +8335,9 @@ TEST_F(DBTest, DelayedWriteRate) {
 }

 TEST_F(DBTest, HardLimit) {
-  Options options;
+  Options options = CurrentOptions();
   options.env = env_;
   env_->SetBackgroundThreads(1, Env::LOW);
-  options = CurrentOptions(options);
   options.max_write_buffer_number = 256;
   options.write_buffer_size = 110 << 10; // 110KB
   options.arena_block_size = 4 * 1024;

@@ -8403,9 +8385,8 @@ TEST_F(DBTest, HardLimit) {

 #ifndef ROCKSDB_LITE
 TEST_F(DBTest, SoftLimit) {
-  Options options;
+  Options options = CurrentOptions();
   options.env = env_;
-  options = CurrentOptions(options);
   options.write_buffer_size = 100000; // Small write buffer
   options.max_write_buffer_number = 256;
   options.level0_file_num_compaction_trigger = 1;

@@ -8521,9 +8502,8 @@ TEST_F(DBTest, SoftLimit) {
 }

 TEST_F(DBTest, LastWriteBufferDelay) {
-  Options options;
+  Options options = CurrentOptions();
   options.env = env_;
-  options = CurrentOptions(options);
   options.write_buffer_size = 100000;
   options.max_write_buffer_number = 4;
   options.delayed_write_rate = 20000;

@@ -10108,9 +10088,8 @@ INSTANTIATE_TEST_CASE_P(DBTestWithParam, DBTestWithParam,
                         ::testing::Bool()));

 TEST_F(DBTest, PauseBackgroundWorkTest) {
-  Options options;
+  Options options = CurrentOptions();
   options.write_buffer_size = 100000; // Small write buffer
-  options = CurrentOptions(options);
   Reopen(options);

   std::vector<std::thread> threads;
@@ -210,6 +210,8 @@ Options DBTestBase::CurrentOptions(
     const anon::OptionsOverride& options_override) {
   Options options;
   options.write_buffer_size = 4090 * 4096;
+  options.target_file_size_base = 2 * 1024 * 1024;
+  options.max_bytes_for_level_base = 10 * 1024 * 1024;
   return CurrentOptions(options, options_override);
 }

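This helper is why the test diffs elsewhere in the commit collapse `Options options; ... options = CurrentOptions(options);` into a single `Options options = CurrentOptions();`: CurrentOptions() now starts from small, test-friendly sizes (a roughly 16MB memtable, 2MB target files, a 10MB level-1 budget) before applying the per-test configuration. A hedged sketch of that idiom follows; the test name is hypothetical and assumes the DBTestBase fixture from db_test_util.h.

```cpp
// Hypothetical test illustrating the idiom the rest of this diff converts to.
TEST_F(DBTest, UsesCurrentOptionsSketch) {
  Options options = CurrentOptions();   // picks up the pinned test defaults
  options.write_buffer_size = 100000;   // per-test override, as in the tests above
  DestroyAndReopen(options);
  ASSERT_OK(Put("key", "value"));
  ASSERT_EQ("value", Get("key"));
}
```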
@@ -124,8 +124,7 @@ class DelayFilterFactory : public CompactionFilterFactory {
 // Make sure we don't trigger a problem if the trigger conditon is given
 // to be 0, which is invalid.
 TEST_P(DBTestUniversalCompaction, UniversalCompactionSingleSortedRun) {
-  Options options;
-  options = CurrentOptions(options);
+  Options options = CurrentOptions();

   options.compaction_style = kCompactionStyleUniversal;
   options.num_levels = num_levels_;

@@ -160,8 +159,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionSingleSortedRun) {
 }

 TEST_P(DBTestUniversalCompaction, OptimizeFiltersForHits) {
-  Options options;
-  options = CurrentOptions(options);
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.compaction_options_universal.size_ratio = 5;
   options.num_levels = num_levels_;

@@ -337,13 +335,12 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
 }

 TEST_P(DBTestUniversalCompaction, UniversalCompactionSizeAmplification) {
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.num_levels = num_levels_;
   options.write_buffer_size = 100 << 10; // 100KB
   options.target_file_size_base = 32 << 10; // 32KB
   options.level0_file_num_compaction_trigger = 3;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);

@@ -388,12 +385,12 @@ TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) {
   ChangeCompactOptions();
   Options options;
   options.create_if_missing = true;
-  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
   options.compaction_style = kCompactionStyleLevel;
   options.num_levels = 1;
   options.target_file_size_base = options.write_buffer_size;
   options.compression = kNoCompression;
   options = CurrentOptions(options);
+  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
   CreateAndReopenWithCF({"pikachu"}, options);
   ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal);
   Random rnd(301);

@@ -451,12 +448,11 @@ TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) {
 }

 TEST_P(DBTestUniversalCompaction, UniversalCompactionTargetLevel) {
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.write_buffer_size = 100 << 10; // 100KB
   options.num_levels = 7;
   options.disable_auto_compactions = true;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   // Generate 3 overlapping files

@@ -496,14 +492,13 @@ class DBTestUniversalCompactionMultiLevels
 };

 TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionMultiLevels) {
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.num_levels = num_levels_;
   options.write_buffer_size = 100 << 10; // 100KB
   options.level0_file_num_compaction_trigger = 8;
   options.max_background_compactions = 3;
   options.target_file_size_base = 32 * 1024;
-  options = CurrentOptions(options);
   CreateAndReopenWithCF({"pikachu"}, options);

   // Trigger compaction if size amplification exceeds 110%

@@ -539,7 +534,7 @@ TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionTrivialMove) {
       });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.compaction_options_universal.allow_trivial_move = true;
   options.num_levels = 3;

@@ -547,7 +542,6 @@ TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionTrivialMove) {
   options.level0_file_num_compaction_trigger = 3;
   options.max_background_compactions = 2;
   options.target_file_size_base = 32 * 1024;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);

@@ -586,7 +580,7 @@ class DBTestUniversalCompactionParallel :
 };

 TEST_P(DBTestUniversalCompactionParallel, UniversalCompactionParallel) {
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.num_levels = num_levels_;
   options.write_buffer_size = 1 << 10; // 1KB

@@ -595,7 +589,6 @@ TEST_P(DBTestUniversalCompactionParallel, UniversalCompactionParallel) {
   options.max_background_flushes = 3;
   options.target_file_size_base = 1 * 1024;
   options.compaction_options_universal.max_size_amplification_percent = 110;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);

@@ -652,7 +645,7 @@ INSTANTIATE_TEST_CASE_P(DBTestUniversalCompactionParallel,
                         ::testing::Bool()));

 TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) {
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.write_buffer_size = 105 << 10; // 105KB
   options.arena_block_size = 4 << 10; // 4KB

@@ -660,7 +653,6 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) {
   options.level0_file_num_compaction_trigger = 4;
   options.num_levels = num_levels_;
   options.compaction_options_universal.compression_size_percent = -1;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);

@@ -773,14 +765,13 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionCompressRatio1) {
     return;
   }

-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.write_buffer_size = 100 << 10; // 100KB
   options.target_file_size_base = 32 << 10; // 32KB
   options.level0_file_num_compaction_trigger = 2;
   options.num_levels = num_levels_;
   options.compaction_options_universal.compression_size_percent = 70;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   Random rnd(301);

@@ -841,14 +832,13 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionCompressRatio2) {
   if (!Snappy_Supported()) {
     return;
   }
-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.write_buffer_size = 100 << 10; // 100KB
   options.target_file_size_base = 32 << 10; // 32KB
   options.level0_file_num_compaction_trigger = 2;
   options.num_levels = num_levels_;
   options.compaction_options_universal.compression_size_percent = 95;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);

   Random rnd(301);

@@ -884,7 +874,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest1) {
       });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.compaction_options_universal.allow_trivial_move = true;
   options.num_levels = 2;

@@ -892,7 +882,6 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest1) {
   options.level0_file_num_compaction_trigger = 3;
   options.max_background_compactions = 1;
   options.target_file_size_base = 32 * 1024;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);

@@ -929,7 +918,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest2) {

   rocksdb::SyncPoint::GetInstance()->EnableProcessing();

-  Options options;
+  Options options = CurrentOptions();
   options.compaction_style = kCompactionStyleUniversal;
   options.compaction_options_universal.allow_trivial_move = true;
   options.num_levels = 15;

@@ -937,7 +926,6 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest2) {
   options.level0_file_num_compaction_trigger = 8;
   options.max_background_compactions = 4;
   options.target_file_size_base = 64 * 1024;
-  options = CurrentOptions(options);
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);

@@ -963,7 +951,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest2) {
 }

 TEST_P(DBTestUniversalCompaction, UniversalCompactionFourPaths) {
-  Options options;
+  Options options = CurrentOptions();
   options.db_paths.emplace_back(dbname_, 300 * 1024);
   options.db_paths.emplace_back(dbname_ + "_2", 300 * 1024);
   options.db_paths.emplace_back(dbname_ + "_3", 500 * 1024);

@@ -976,7 +964,6 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionFourPaths) {
   options.arena_block_size = 4 << 10;
   options.level0_file_num_compaction_trigger = 2;
   options.num_levels = 1;
-  options = CurrentOptions(options);

   std::vector<std::string> filenames;
   env_->GetChildren(options.db_paths[1].path, &filenames);

@@ -1163,7 +1150,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) {
   if (!Snappy_Supported()) {
     return;
   }
-  Options options;
+  Options options = CurrentOptions();
   options.db_paths.emplace_back(dbname_, 500 * 1024);
   options.db_paths.emplace_back(dbname_ + "_2", 1024 * 1024 * 1024);
   options.compaction_style = kCompactionStyleUniversal;

@@ -1175,7 +1162,6 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) {
   options.num_levels = 1;
   options.memtable_factory.reset(
       new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
-  options = CurrentOptions(options);

   std::vector<std::string> filenames;
   env_->GetChildren(options.db_paths[1].path, &filenames);
@@ -158,7 +158,15 @@ struct DbPath {
 struct Options;

 struct ColumnFamilyOptions {
+  // The function recovers options to a previous version. Only 4.6 or later
+  // versions are supported.
+  ColumnFamilyOptions* OldDefaults(int rocksdb_major_version = 4,
+                                   int rocksdb_minor_version = 6);
+
   // Some functions that make it easier to optimize RocksDB
+  // Use if if your DB is very small (like under 1GB) and you don't want to
+  // spend lots of memory for memtables.
+  ColumnFamilyOptions* OptimizeForSmallDb();

   // Use this if you don't need to keep the data sorted, i.e. you'll never use
   // an iterator, only Put() and Get() API calls
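Both new helpers return the object itself (see the implementations in util/options.cc further down), so they can be applied inline while building per-column-family options. A short, illustrative-only sketch; the wrapper function names are made up:

```cpp
#include "rocksdb/options.h"

// Produce column-family options with the 4.6-era sizes restored.
rocksdb::ColumnFamilyOptions OldStyleCfOptions() {
  rocksdb::ColumnFamilyOptions cf_opts;
  cf_opts.OldDefaults();         // 4MB memtable, 2MB files, 10MB level-1, limits off
  return cf_opts;
}

// Produce column-family options with the small-DB tuning added by this commit.
rocksdb::ColumnFamilyOptions SmallDbCfOptions() {
  rocksdb::ColumnFamilyOptions cf_opts;
  cf_opts.OptimizeForSmallDb();  // 2MB memtable and matching small level sizes
  return cf_opts;
}
```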
@@ -254,7 +262,7 @@ struct ColumnFamilyOptions {
   // Note that write_buffer_size is enforced per column family.
   // See db_write_buffer_size for sharing memory across column families.
   //
-  // Default: 4MB
+  // Default: 64MB
   //
   // Dynamically changeable through SetOptions() API
   size_t write_buffer_size;
@@ -400,7 +408,7 @@ struct ColumnFamilyOptions {
   // be 2MB, and each file on level 2 will be 20MB,
   // and each file on level-3 will be 200MB.
   //
-  // Default: 2MB.
+  // Default: 64MB.
   //
   // Dynamically changeable through SetOptions() API
   uint64_t target_file_size_base;
@@ -415,12 +423,12 @@ struct ColumnFamilyOptions {
   // max_bytes_for_level_base is the max total for level-1.
   // Maximum number of bytes for level L can be calculated as
   // (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1))
-  // For example, if max_bytes_for_level_base is 20MB, and if
+  // For example, if max_bytes_for_level_base is 200MB, and if
   // max_bytes_for_level_multiplier is 10, total data size for level-1
-  // will be 20MB, total file size for level-2 will be 200MB,
-  // and total file size for level-3 will be 2GB.
+  // will be 2GB, total file size for level-2 will be 20GB,
+  // and total file size for level-3 will be 200GB.
   //
-  // Default: 10MB.
+  // Default: 256MB.
   //
   // Dynamically changeable through SetOptions() API
   uint64_t max_bytes_for_level_base;
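The sizing rule quoted in that comment block is easy to tabulate. A small standalone sketch (the helper name is made up) using the new 256MB base and the default multiplier of 10:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>

// Target bytes for level L (L >= 1) per the comment above:
// max_bytes_for_level_base * max_bytes_for_level_multiplier^(L-1).
uint64_t MaxBytesForLevel(int level, uint64_t base, double multiplier) {
  return static_cast<uint64_t>(base * std::pow(multiplier, level - 1));
}

int main() {
  const uint64_t base = 256ull * 1048576;  // new default: 256MB
  for (int level = 1; level <= 3; ++level) {
    std::printf("L%d target: %llu bytes\n", level,
                static_cast<unsigned long long>(MaxBytesForLevel(level, base, 10)));
  }
  // Prints roughly 256MB, 2.5GB, 25GB. (level_compaction_dynamic_level_bytes,
  // not covered here, changes how these targets are derived.)
  return 0;
}
```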
@@ -538,13 +546,13 @@ struct ColumnFamilyOptions {
   // All writes will be slowed down to at least delayed_write_rate if estimated
   // bytes needed to be compaction exceed this threshold.
   //
-  // Default: 0 (disabled)
+  // Default: 64GB
   uint64_t soft_pending_compaction_bytes_limit;

   // All writes are stopped if estimated bytes needed to be compaction exceed
   // this threshold.
   //
-  // Default: 0 (disabled)
+  // Default: 256GB
   uint64_t hard_pending_compaction_bytes_limit;

   // DEPRECATED -- this options is no longer used
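These two limits are the slowdown and full-stop triggers the summary mentions; with this change they are on by default. A hedged sketch of opting back out for one column family, using the old "disabled" value from the removed comments above:

```cpp
#include "rocksdb/options.h"

// Restore the pre-change behaviour: no write slowdown or stop based on
// estimated pending compaction bytes.
rocksdb::ColumnFamilyOptions DisablePendingCompactionLimits() {
  rocksdb::ColumnFamilyOptions cf_opts;
  cf_opts.soft_pending_compaction_bytes_limit = 0;  // default is now 64GB
  cf_opts.hard_pending_compaction_bytes_limit = 0;  // default is now 256GB
  return cf_opts;
}
```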
@@ -795,6 +803,10 @@ struct ColumnFamilyOptions {
 };

 struct DBOptions {
+  // The function recovers options to the option as in version 4.6.
+  DBOptions* OldDefaults(int rocksdb_major_version = 4,
+                         int rocksdb_minor_version = 6);
+
   // Some functions that make it easier to optimize RocksDB

 #ifndef ROCKSDB_LITE
@@ -1303,6 +1315,10 @@ struct Options : public DBOptions, public ColumnFamilyOptions {
           const ColumnFamilyOptions& column_family_options)
       : DBOptions(db_options), ColumnFamilyOptions(column_family_options) {}

+  // The function recovers options to the option as in version 4.6.
+  Options* OldDefaults(int rocksdb_major_version = 4,
+                       int rocksdb_minor_version = 6);
+
   void Dump(Logger* log) const;

   void DumpCFOptions(Logger* log) const;
@@ -83,7 +83,7 @@ ColumnFamilyOptions::ColumnFamilyOptions()
       merge_operator(nullptr),
       compaction_filter(nullptr),
       compaction_filter_factory(nullptr),
-      write_buffer_size(4 << 20),
+      write_buffer_size(64 << 20),
       max_write_buffer_number(2),
       min_write_buffer_number_to_merge(1),
       max_write_buffer_number_to_maintain(0),
@@ -93,9 +93,9 @@ ColumnFamilyOptions::ColumnFamilyOptions()
       level0_file_num_compaction_trigger(4),
       level0_slowdown_writes_trigger(20),
       level0_stop_writes_trigger(24),
-      target_file_size_base(2 * 1048576),
+      target_file_size_base(64 * 1048576),
       target_file_size_multiplier(1),
-      max_bytes_for_level_base(10 * 1048576),
+      max_bytes_for_level_base(256 * 1048576),
       level_compaction_dynamic_level_bytes(false),
       max_bytes_for_level_multiplier(10),
       max_bytes_for_level_multiplier_additional(num_levels, 1),
@@ -104,8 +104,8 @@ ColumnFamilyOptions::ColumnFamilyOptions()
       max_grandparent_overlap_factor(10),
       soft_rate_limit(0.0),
       hard_rate_limit(0.0),
-      soft_pending_compaction_bytes_limit(0),
-      hard_pending_compaction_bytes_limit(0),
+      soft_pending_compaction_bytes_limit(64 * 1073741824ul),
+      hard_pending_compaction_bytes_limit(256 * 1073741824ul),
       rate_limit_delay_max_milliseconds(1000),
       arena_block_size(0),
       disable_auto_compactions(false),
@@ -663,8 +663,40 @@ Options::PrepareForBulkLoad()
   return this;
 }

-#ifndef ROCKSDB_LITE
+Options* Options::OldDefaults(int rocksdb_major_version,
+                              int rocksdb_minor_version) {
+  ColumnFamilyOptions::OldDefaults(rocksdb_major_version,
+                                   rocksdb_minor_version);
+  DBOptions::OldDefaults(rocksdb_major_version, rocksdb_minor_version);
+  return this;
+}
+
+DBOptions* DBOptions::OldDefaults(int rocksdb_major_version,
+                                  int rocksdb_minor_version) {
+  return this;
+}
+
+ColumnFamilyOptions* ColumnFamilyOptions::OldDefaults(
+    int rocksdb_major_version, int rocksdb_minor_version) {
+  write_buffer_size = 4 << 20;
+  target_file_size_base = 2 * 1048576;
+  max_bytes_for_level_base = 10 * 1048576;
+  soft_pending_compaction_bytes_limit = 0;
+  hard_pending_compaction_bytes_limit = 0;
+  return this;
+}
+
 // Optimization functions
+ColumnFamilyOptions* ColumnFamilyOptions::OptimizeForSmallDb() {
+  write_buffer_size = 2 << 20;
+  target_file_size_base = 2 * 1048576;
+  max_bytes_for_level_base = 10 * 1048576;
+  soft_pending_compaction_bytes_limit = 256 * 1048576;
+  hard_pending_compaction_bytes_limit = 1073741824ul;
+  return this;
+}
+
+#ifndef ROCKSDB_LITE
 ColumnFamilyOptions* ColumnFamilyOptions::OptimizeForPointLookup(
     uint64_t block_cache_size_mb) {
   prefix_extractor.reset(NewNoopTransform());
@@ -1270,6 +1270,26 @@ TEST_F(OptionsParserTest, DifferentDefault) {

   RocksDBOptionsParser parser;
   ASSERT_OK(parser.Parse(kOptionsFileName, env_.get()));
+
+  Options old_default_opts;
+  old_default_opts.OldDefaults();
+  ASSERT_EQ(10 * 1048576, old_default_opts.max_bytes_for_level_base);
+
+  Options old_default_opts46;
+  old_default_opts46.OldDefaults();
+  ASSERT_EQ(10 * 1048576, old_default_opts46.max_bytes_for_level_base);
+
+  ColumnFamilyOptions old_default_cf_opts;
+  old_default_cf_opts.OldDefaults();
+  ASSERT_EQ(2 * 1048576, old_default_cf_opts.target_file_size_base);
+
+  ColumnFamilyOptions old_default_cf_opts46;
+  old_default_cf_opts46.OldDefaults();
+  ASSERT_EQ(2 * 1048576, old_default_cf_opts46.target_file_size_base);
+
+  ColumnFamilyOptions cf_small_opts;
+  cf_small_opts.OptimizeForSmallDb();
+  ASSERT_EQ(2 << 20, cf_small_opts.write_buffer_size);
 }

 class OptionsSanityCheckTest : public OptionsParserTest {