Split CompactionFilterWithValueChange
Summary: The test currently times out when run under tsan. This patch splits it into 4 tests.

Closes https://github.com/facebook/rocksdb/pull/3047

Differential Revision: D6106515

Pulled By: maysamyabandeh

fbshipit-source-id: 03a28cdf8b1c097be2361b1b0cc3dc1acf2b5d63
Commit 3ef55d2c7c (parent d75793d6b4)
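
For context on the mechanism: the split relies on gtest value-parameterized tests. A single TEST_P body is instantiated once per value passed to INSTANTIATE_TEST_CASE_P, so each option configuration becomes its own shorter-running test instead of one long do/while loop over configurations. A minimal standalone sketch of the pattern (fixture and test names here are illustrative, not taken from RocksDB):

    #include <gtest/gtest.h>

    // Hypothetical fixture: each generated test receives one int via GetParam().
    class SplitTest : public ::testing::TestWithParam<int> {};

    // One test body; gtest generates a separate test per parameter value.
    TEST_P(SplitTest, RunsOncePerParam) { EXPECT_GE(GetParam(), 0); }

    // Produces tests named Configs/SplitTest.RunsOncePerParam/{0,1,2,3}.
    // INSTANTIATE_TEST_CASE_P is the gtest-1.x macro used in this codebase;
    // newer gtest releases spell it INSTANTIATE_TEST_SUITE_P.
    INSTANTIATE_TEST_CASE_P(Configs, SplitTest, ::testing::Values(0, 1, 2, 3));

    int main(int argc, char** argv) {
      ::testing::InitGoogleTest(&argc, argv);
      return RUN_ALL_TESTS();
    }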
@@ -24,6 +24,36 @@ class DBTestCompactionFilter : public DBTestBase {
   DBTestCompactionFilter() : DBTestBase("/db_compaction_filter_test") {}
 };
 
+// Param variant of DBTestBase::ChangeCompactOptions
+class DBTestCompactionFilterWithCompactParam
+    : public DBTestCompactionFilter,
+      public ::testing::WithParamInterface<DBTestBase::OptionConfig> {
+ public:
+  DBTestCompactionFilterWithCompactParam() : DBTestCompactionFilter() {
+    option_config_ = GetParam();
+    Destroy(last_options_);
+    auto options = CurrentOptions();
+    if (option_config_ == kDefault || option_config_ == kUniversalCompaction ||
+        option_config_ == kUniversalCompactionMultiLevel) {
+      options.create_if_missing = true;
+    }
+    if (option_config_ == kLevelSubcompactions ||
+        option_config_ == kUniversalSubcompactions) {
+      assert(options.max_subcompactions > 1);
+    }
+    TryReopen(options);
+  }
+};
+
+INSTANTIATE_TEST_CASE_P(
+    DBTestCompactionFilterWithCompactOption,
+    DBTestCompactionFilterWithCompactParam,
+    ::testing::Values(DBTestBase::OptionConfig::kDefault,
+                      DBTestBase::OptionConfig::kUniversalCompaction,
+                      DBTestBase::OptionConfig::kUniversalCompactionMultiLevel,
+                      DBTestBase::OptionConfig::kLevelSubcompactions,
+                      DBTestBase::OptionConfig::kUniversalSubcompactions));
+
 class KeepFilter : public CompactionFilter {
  public:
   virtual bool Filter(int level, const Slice& key, const Slice& value,
@@ -439,65 +469,63 @@ TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {
 }
 #endif // ROCKSDB_LITE
 
-TEST_F(DBTestCompactionFilter, CompactionFilterWithValueChange) {
-  do {
+TEST_P(DBTestCompactionFilterWithCompactParam,
+       CompactionFilterWithValueChange) {
   Options options = CurrentOptions();
   options.num_levels = 3;
-  options.compaction_filter_factory =
-      std::make_shared<ChangeFilterFactory>();
+  options.compaction_filter_factory = std::make_shared<ChangeFilterFactory>();
   CreateAndReopenWithCF({"pikachu"}, options);
 
   // Write 100K+1 keys, these are written to a few files
   // in L0. We do this so that the current snapshot points
   // to the 100001 key.The compaction filter is not invoked
   // on keys that are visible via a snapshot because we
   // anyways cannot delete it.
   const std::string value(10, 'x');
   for (int i = 0; i < 100001; i++) {
     char key[100];
     snprintf(key, sizeof(key), "B%010d", i);
     Put(1, key, value);
   }
 
   // push all files to lower levels
   ASSERT_OK(Flush(1));
   if (option_config_ != kUniversalCompactionMultiLevel &&
       option_config_ != kUniversalSubcompactions) {
     dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
     dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
   } else {
     dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
                            nullptr);
   }
 
   // re-write all data again
   for (int i = 0; i < 100001; i++) {
     char key[100];
     snprintf(key, sizeof(key), "B%010d", i);
     Put(1, key, value);
   }
 
   // push all files to lower levels. This should
   // invoke the compaction filter for all 100000 keys.
   ASSERT_OK(Flush(1));
   if (option_config_ != kUniversalCompactionMultiLevel &&
       option_config_ != kUniversalSubcompactions) {
     dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
     dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
   } else {
     dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
                            nullptr);
   }
 
   // verify that all keys now have the new value that
   // was set by the compaction process.
   for (int i = 0; i < 100001; i++) {
     char key[100];
     snprintf(key, sizeof(key), "B%010d", i);
     std::string newvalue = Get(1, key);
     ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
   }
-  } while (ChangeCompactOptions());
 }
 
 TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
@@ -2023,12 +2023,13 @@ TEST_F(DBIteratorTest, TableFilter) {
 
   // Ensure the table_filter callback is called once for each table.
   {
-    std::set<uint64_t> unseen {1, 2, 3};
+    std::set<uint64_t> unseen{1, 2, 3};
     ReadOptions opts;
     opts.table_filter = [&](const TableProperties& props) {
       auto it = unseen.find(props.num_entries);
       if (it == unseen.end()) {
-        ADD_FAILURE() << "saw table properties with an unexpected " << props.num_entries << " entries";
+        ADD_FAILURE() << "saw table properties with an unexpected "
+                      << props.num_entries << " entries";
       } else {
         unseen.erase(it);
       }
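
Usage note (binary name assumed, not shown in this diff): with the instantiation prefix DBTestCompactionFilterWithCompactOption, the generated tests get names of the form DBTestCompactionFilterWithCompactOption/DBTestCompactionFilterWithCompactParam.CompactionFilterWithValueChange/<index>, so they can be run in isolation by passing a matching pattern such as 'DBTestCompactionFilterWithCompactOption/*' to gtest's --gtest_filter flag on the compaction filter test binary.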