Consider all compaction input files to compute the oldest ancestor time (#6279)
Summary:
Look at all compaction input files to compute the oldest ancestor time.

In https://github.com/facebook/rocksdb/issues/5992 we changed how the creation_time (aka oldest-ancestor-time) table property of compaction output files is computed: from max(creation-time-of-all-compaction-inputs) to min(creation-time-of-all-inputs). This exposed a bug where, during compaction, only the creation_time values of the L0 compaction inputs were examined and all other input levels were ignored. This PR fixes the issue. Due to this bug, some TTL compactions under level-style compaction might never have run.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/6279

Test Plan: Enhanced the unit tests to validate that the correct time is propagated to the compaction outputs.

Differential Revision: D19337812

Pulled By: sagar0

fbshipit-source-id: edf8a72f11e405e93032ff5f45590816debe0bb4
This commit is contained in:
parent eff5e076f5 · commit cfa585611d
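Conceptually, the bug and the fix reduce to the difference between the two functions below. This is a minimal self-contained C++ sketch, not RocksDB code: File, LevelFiles, and both function names are illustrative stand-ins (the real change, in Compaction::MinInputFileOldestAncesterTime, appears in the diff further down; the oldest_ancester_time spelling mirrors the actual RocksDB identifier).

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <limits>
#include <vector>

// Illustrative stand-ins for RocksDB's file metadata and per-level
// compaction inputs; not the real API.
struct File {
  uint64_t oldest_ancester_time;  // 0 means "no time recorded"
};
using LevelFiles = std::vector<File>;

// Buggy variant: only the first input level (typically L0) is scanned,
// so an older file pulled in from a deeper level cannot lower the result.
uint64_t MinAncestorTimeL0Only(const std::vector<LevelFiles>& inputs) {
  uint64_t min_time = std::numeric_limits<uint64_t>::max();
  for (const File& f : inputs[0]) {
    if (f.oldest_ancester_time != 0) {
      min_time = std::min(min_time, f.oldest_ancester_time);
    }
  }
  return min_time;
}

// Fixed variant: every input level participates in the minimum.
uint64_t MinAncestorTimeAllLevels(const std::vector<LevelFiles>& inputs) {
  uint64_t min_time = std::numeric_limits<uint64_t>::max();
  for (const LevelFiles& level : inputs) {
    for (const File& f : level) {
      if (f.oldest_ancester_time != 0) {
        min_time = std::min(min_time, f.oldest_ancester_time);
      }
    }
  }
  return min_time;
}

int main() {
  // Hypothetical compaction: two recent L0 inputs plus one much older
  // bottom-level input (times are arbitrary epoch seconds).
  std::vector<LevelFiles> inputs = {
      {{2000}, {2100}},  // L0 inputs
      {{1000}},          // deeper-level input: the true oldest ancestor
  };
  std::cout << "L0-only (buggy): " << MinAncestorTimeL0Only(inputs) << "\n"      // 2000
            << "all levels (fixed): " << MinAncestorTimeAllLevels(inputs) << "\n";  // 1000
}

A compaction output whose creation_time comes from the buggy result looks younger than its oldest input, which is exactly how TTL compactions could be postponed indefinitely.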
@@ -7,6 +7,7 @@
 * A new `OptimisticTransactionDBOptions` Option that allows users to configure occ validation policy. The default policy changes from kValidateSerial to kValidateParallel to reduce mutex contention.
 
 ### Bug Fixes
+* Fixed a bug where non-L0 compaction input files were not considered to compute the `creation_time` of new compaction outputs.
 * Fix a bug that can cause unnecessary bg thread to be scheduled(#6104).
 * Fix a bug in which a snapshot read could be affected by a DeleteRange after the snapshot (#6062).
 * Fix crash caused by concurrent CF iterations and drops(#6147).
@@ -545,11 +545,13 @@ bool Compaction::ShouldFormSubcompactions() const {
 
 uint64_t Compaction::MinInputFileOldestAncesterTime() const {
   uint64_t min_oldest_ancester_time = port::kMaxUint64;
-  for (const auto& file : inputs_[0].files) {
-    uint64_t oldest_ancester_time = file->TryGetOldestAncesterTime();
-    if (oldest_ancester_time != 0) {
-      min_oldest_ancester_time =
-          std::min(min_oldest_ancester_time, oldest_ancester_time);
+  for (const auto& level_files : inputs_) {
+    for (const auto& file : level_files.files) {
+      uint64_t oldest_ancester_time = file->TryGetOldestAncesterTime();
+      if (oldest_ancester_time != 0) {
+        min_oldest_ancester_time =
+            std::min(min_oldest_ancester_time, oldest_ancester_time);
+      }
     }
   }
   return min_oldest_ancester_time;
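The fix only widens the scan: the single loop over inputs_[0] becomes a nested loop over every input level. Everything else is unchanged; the minimum still starts at port::kMaxUint64, and files for which TryGetOldestAncesterTime() returns 0 (presumably meaning no time was recorded) are still skipped, so a compaction whose only inputs are L0 files behaves exactly as before.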
@@ -3607,6 +3607,15 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) {
     ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize)));
   }
   Flush();
+  // Get the first file's creation time. This will be the oldest file in the
+  // DB. Compactions involving this file's descendants should keep getting
+  // this time.
+  std::vector<std::vector<FileMetaData>> level_to_files;
+  dbfull()->TEST_GetFilesMetaData(dbfull()->DefaultColumnFamily(),
+                                  &level_to_files);
+  uint64_t oldest_time = level_to_files[0][0].oldest_ancester_time;
+  // Add 1 hour and do another flush.
+  env_->addon_time_.fetch_add(1 * 60 * 60);
  for (int i = 101; i <= 200; ++i) {
    ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize)));
  }
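The env_->addon_time_ calls above are the test's mock clock: a settable offset added to real time, so the test can age files by hours without sleeping. A minimal sketch of the pattern with hypothetical names (MockEnv and NowSeconds are not RocksDB's API):

#include <atomic>
#include <cstdint>
#include <ctime>

// Hypothetical mock-clock pattern, similar in spirit to the test's
// env_->addon_time_: wall-clock time plus an atomically adjustable offset.
struct MockEnv {
  std::atomic<int64_t> addon_time_{0};
  int64_t NowSeconds() const {
    return static_cast<int64_t>(std::time(nullptr)) + addon_time_.load();
  }
};

// Usage: advance perceived time by one hour without sleeping.
//   MockEnv env;
//   env.addon_time_.fetch_add(60 * 60);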
@@ -3614,11 +3623,13 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) {
   MoveFilesToLevel(6);
   ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel());
 
+  env_->addon_time_.fetch_add(1 * 60 * 60);
   // Add two L4 files with key ranges: [1 .. 50], [51 .. 150].
   for (int i = 1; i <= 50; ++i) {
     ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize)));
   }
   Flush();
+  env_->addon_time_.fetch_add(1 * 60 * 60);
   for (int i = 51; i <= 150; ++i) {
     ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize)));
   }
@@ -3626,6 +3637,7 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) {
   MoveFilesToLevel(4);
   ASSERT_EQ("0,0,0,0,2,0,2", FilesPerLevel());
 
+  env_->addon_time_.fetch_add(1 * 60 * 60);
   // Add one L1 file with key range: [26, 75].
   for (int i = 26; i <= 75; ++i) {
     ASSERT_OK(Put(Key(i), RandomString(&rnd, kValueSize)));
@@ -3667,6 +3679,10 @@ TEST_F(DBCompactionTest, LevelTtlCascadingCompactions) {
   ASSERT_EQ("1,0,0,0,0,0,1", FilesPerLevel());
   ASSERT_EQ(5, ttl_compactions);
 
+  dbfull()->TEST_GetFilesMetaData(dbfull()->DefaultColumnFamily(),
+                                  &level_to_files);
+  ASSERT_EQ(oldest_time, level_to_files[6][0].oldest_ancester_time);
+
   env_->addon_time_.fetch_add(25 * 60 * 60);
   ASSERT_OK(Put(Key(2), "1"));
   if (if_restart) {
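One connection worth spelling out: the stale minimum mattered because level TTL compactions trigger off a file's oldest ancestor time, so a creation_time that is too recent postpones expiry. A hedged sketch of that trigger, assuming the usual "age exceeds TTL" rule (illustrative only, not RocksDB's actual TTL code):

#include <cstdint>

// Illustrative TTL trigger (an assumption about the rule's shape, not
// RocksDB's real implementation): a file becomes a TTL-compaction
// candidate once its oldest ancestor time is more than ttl_seconds old.
bool IsTtlExpired(uint64_t now_seconds, uint64_t oldest_ancester_time,
                  uint64_t ttl_seconds) {
  if (oldest_ancester_time == 0) {
    return false;  // age unknown: never treat as expired
  }
  return now_seconds > oldest_ancester_time &&
         now_seconds - oldest_ancester_time > ttl_seconds;
}

In the test, env_->addon_time_.fetch_add(...) advances the mock clock in whole-hour steps so each flushed file carries a distinct, known creation time, and the final fetch_add(25 * 60 * 60) pushes the clock far enough past the files' ages (presumably beyond a 24-hour TTL configured earlier in the test) for the remaining TTL compactions to fire.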