unordered_write incompatible with max_successive_merges (#6284)

Summary:
unordered_write is incompatible with non-zero max_successive_merges. Although we check this at runtime, we currently don't prevent the user from setting this combination in options. This has led to stress test failures when this combination is tried via ::SetOptions.
The patch fixes that and also reverts the changes made by https://github.com/facebook/rocksdb/pull/6254, in which max_successive_merges was mistakenly declared incompatible with concurrent memtable writes (allow_concurrent_memtable_write).
Pull Request resolved: https://github.com/facebook/rocksdb/pull/6284

Differential Revision: D19356115

Pulled By: maysamyabandeh

fbshipit-source-id: f06dadec777622bd75f267361c022735cf8cecb6
Maysam Yabandeh 2020-01-10 16:51:34 -08:00 committed by Facebook Github Bot
parent 687119aeaf
commit eff5e076f5
6 changed files with 7 additions and 15 deletions
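For reference, a minimal sketch of the intended user-facing effect (not part of the diff below), assuming DB::Open routes column family options through ColumnFamilyData::ValidateOptions: opening with unordered_write together with a non-zero max_successive_merges should now be rejected up front with Status::InvalidArgument instead of failing later in the write path. The DB path is illustrative, and a real merge workload would also set options.merge_operator.

// Sketch only: demonstrates the option combination this patch rejects.
#include <cassert>
#include <iostream>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.unordered_write = true;     // incompatible with the next line
  options.max_successive_merges = 3;  // any non-zero value trips the new check

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/unordered_write_demo", &db);
  assert(!s.ok() && s.IsInvalidArgument());  // expected with this patch applied
  std::cout << s.ToString() << std::endl;
  return 0;
}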

@@ -160,11 +160,6 @@ Status CheckConcurrentWritesSupported(const ColumnFamilyOptions& cf_options) {
     return Status::InvalidArgument(
         "Memtable doesn't concurrent writes (allow_concurrent_memtable_write)");
   }
-  if (cf_options.max_successive_merges != 0) {
-    return Status::InvalidArgument(
-        "max_successive_merges > 0 is incompatible "
-        "with concurrent writes (allow_concurrent_memtable_write)");
-  }
   return Status::OK();
 }
@@ -1252,6 +1247,11 @@ Status ColumnFamilyData::ValidateOptions(
   if (s.ok() && db_options.allow_concurrent_memtable_write) {
     s = CheckConcurrentWritesSupported(cf_options);
   }
+  if (s.ok() && db_options.unordered_write &&
+      cf_options.max_successive_merges != 0) {
+    s = Status::InvalidArgument(
+        "max_successive_merges > 0 is incompatible with unordered_write");
+  }
   if (s.ok()) {
     s = CheckCFPathsSupported(db_options, cf_options);
   }

@@ -150,8 +150,6 @@ TEST_F(DBMergeOperatorTest, MergeErrorOnWrite) {
   options.create_if_missing = true;
   options.merge_operator.reset(new TestPutOperator());
   options.max_successive_merges = 3;
-  // allow_concurrent_memtable_write is incompatible with max_successive_merges
-  options.allow_concurrent_memtable_write = false;
   options.env = env_;
   Reopen(options);
   ASSERT_OK(Merge("k1", "v1"));

@@ -153,8 +153,6 @@ TEST_F(DBTest2, MaxSuccessiveMergesChangeWithDBRecovery) {
   options.create_if_missing = true;
   options.statistics = rocksdb::CreateDBStatistics();
   options.max_successive_merges = 3;
-  // allow_concurrent_memtable_write is incompatible with max_successive_merges
-  options.allow_concurrent_memtable_write = false;
   options.merge_operator = MergeOperators::CreatePutOperator();
   options.disable_auto_compactions = true;
   DestroyAndReopen(options);

@@ -78,9 +78,6 @@ std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
   options.create_if_missing = true;
   options.merge_operator = std::make_shared<CountMergeOperator>();
   options.max_successive_merges = max_successive_merges;
-  if (max_successive_merges > 0) {
-    options.allow_concurrent_memtable_write = false;
-  }
   Status s;
   DestroyDB(dbname, Options());
   // DBWithTTL is not supported in ROCKSDB_LITE

@@ -351,8 +351,7 @@ void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, DBOptions& db_options,
   // size_t options
   cf_opt->arena_block_size = rnd->Uniform(10000);
   cf_opt->inplace_update_num_locks = rnd->Uniform(10000);
-  cf_opt->max_successive_merges =
-      db_options.allow_concurrent_memtable_write ? 0 : rnd->Uniform(10000);
+  cf_opt->max_successive_merges = rnd->Uniform(10000);
   cf_opt->memtable_huge_page_size = rnd->Uniform(10000);
   cf_opt->write_buffer_size = rnd->Uniform(10000);

@@ -5820,7 +5820,7 @@ TEST_P(TransactionTest, DuplicateKeys) {
     }  // do_rollback
   }    // do_prepare
-  if (!options.unordered_write && !options.allow_concurrent_memtable_write) {
+  if (!options.unordered_write) {
     // Also test with max_successive_merges > 0. max_successive_merges will not
     // affect our algorithm for duplicate key insertion but we add the test to
     // verify that.