Use CompactRangeOptions for CompactRange

Summary: This diff updates DB::CompactRange() to take a CompactRangeOptions struct instead of multiple parameters. The old CompactRange() overloads are still available but deprecated.

Test Plan:
make all check
make rocksdbjava
USE_CLANG=1 make all
OPT=-DROCKSDB_LITE make release

Reviewers: sdong, yhchiang, igor

Reviewed By: igor

Subscribers: dhruba

Differential Revision: https://reviews.facebook.net/D40209
This commit is contained in: commit 12e030a992 (parent c89369f57c)
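For call sites being migrated, the change in calling convention can be sketched as follows. The option field names come from the CompactRangeOptions struct added in this diff; the helper function and variable names are hypothetical:

#include <cstdint>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

// Hypothetical migration example: compact a key range of one column family
// into a specific db_path. `db` and `cf` are assumed to be owned by the caller.
rocksdb::Status CompactToPath(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
                              uint32_t path_id) {
  // Old, now-deprecated form packed the knobs into trailing parameters:
  //   db->CompactRange(cf, nullptr, nullptr, false /* change_level */,
  //                    -1 /* target_level */, path_id /* target_path_id */);
  // New form groups them into CompactRangeOptions:
  rocksdb::CompactRangeOptions options;
  options.target_path_id = path_id;
  return db->CompactRange(options, cf, nullptr, nullptr);
}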
@@ -16,6 +16,7 @@
 * options.hard_rate_limit is deprecated.
 * When options.soft_rate_limit or options.level0_slowdown_writes_trigger is triggered, the way to slow down writes is changed to: write rate to DB is limited to to options.delayed_write_rate.
 * DB::GetApproximateSizes() adds a parameter to allow the estimation to include data in mem table, with default to be not to include. It is now only supported in skip list mem table.
+* DB::CompactRange() now accept CompactRangeOptions instead of multiple paramters. CompactRangeOptions is defined in include/rocksdb/options.h.

 ## 3.11.0 (5/19/2015)
 ### New Features
db/c.cc

@@ -77,6 +77,7 @@ using rocksdb::BackupEngine;
 using rocksdb::BackupableDBOptions;
 using rocksdb::BackupInfo;
 using rocksdb::RestoreOptions;
+using rocksdb::CompactRangeOptions;

 using std::shared_ptr;

@@ -1006,6 +1007,7 @@ void rocksdb_compact_range(
     const char* limit_key, size_t limit_key_len) {
   Slice a, b;
   db->rep->CompactRange(
+      CompactRangeOptions(),
       // Pass nullptr Slice if corresponding "const char*" is nullptr
       (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
       (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));

@@ -1018,7 +1020,7 @@ void rocksdb_compact_range_cf(
     const char* limit_key, size_t limit_key_len) {
   Slice a, b;
   db->rep->CompactRange(
-      column_family->rep,
+      CompactRangeOptions(), column_family->rep,
       // Pass nullptr Slice if corresponding "const char*" is nullptr
       (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr),
       (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr));
@@ -215,11 +215,13 @@ class ColumnFamilyTest : public testing::Test {
   void CompactAll(int cf) {
-    ASSERT_OK(db_->CompactRange(handles_[cf], nullptr, nullptr));
+    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), handles_[cf], nullptr,
+                                nullptr));
   }

   void Compact(int cf, const Slice& start, const Slice& limit) {
-    ASSERT_OK(db_->CompactRange(handles_[cf], &start, &limit));
+    ASSERT_OK(
+        db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
   }
@@ -309,16 +309,18 @@ class CompactionJobStatsTest : public testing::Test {
   void Compact(int cf, const Slice& start, const Slice& limit,
                uint32_t target_path_id) {
-    ASSERT_OK(db_->CompactRange(handles_[cf], &start, &limit, false, -1,
-                                target_path_id));
+    CompactRangeOptions compact_options;
+    compact_options.target_path_id = target_path_id;
+    ASSERT_OK(db_->CompactRange(compact_options, handles_[cf], &start, &limit));
   }

   void Compact(int cf, const Slice& start, const Slice& limit) {
-    ASSERT_OK(db_->CompactRange(handles_[cf], &start, &limit));
+    ASSERT_OK(
+        db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
   }

   void Compact(const Slice& start, const Slice& limit) {
-    ASSERT_OK(db_->CompactRange(&start, &limit));
+    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &start, &limit));
   }
@@ -3748,7 +3748,7 @@ class Benchmark {
   void Compact(ThreadState* thread) {
     DB* db = SelectDB(thread);
-    db->CompactRange(nullptr, nullptr);
+    db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
   }
@@ -1328,11 +1328,10 @@ void DBImpl::NotifyOnFlushCompleted(
-Status DBImpl::CompactRange(ColumnFamilyHandle* column_family,
-                            const Slice* begin, const Slice* end,
-                            bool change_level, int target_level,
-                            uint32_t target_path_id) {
-  if (target_path_id >= db_options_.db_paths.size()) {
+Status DBImpl::CompactRange(const CompactRangeOptions& options,
+                            ColumnFamilyHandle* column_family,
+                            const Slice* begin, const Slice* end) {
+  if (options.target_path_id >= db_options_.db_paths.size()) {
     return Status::InvalidArgument("Invalid target path ID");
   }

@@ -1362,8 +1361,8 @@ Status DBImpl::CompactRange(ColumnFamilyHandle* column_family,
     // Always compact all files together.
     s = RunManualCompaction(cfd, ColumnFamilyData::kCompactAllLevels,
-                            cfd->NumberLevels() - 1, target_path_id, begin,
-                            end);
+                            cfd->NumberLevels() - 1, options.target_path_id,
+                            begin, end);
     final_output_level = cfd->NumberLevels() - 1;

@@ -1384,8 +1383,8 @@ Status DBImpl::CompactRange(ColumnFamilyHandle* column_family,
-      s = RunManualCompaction(cfd, level, output_level, target_path_id, begin,
-                              end);
+      s = RunManualCompaction(cfd, level, output_level, options.target_path_id,
+                              begin, end);
       if (!s.ok()) {
         break;
       }

@@ -1403,8 +1402,8 @@ Status DBImpl::CompactRange(ColumnFamilyHandle* column_family,
-  if (change_level) {
-    s = ReFitLevel(cfd, final_output_level, target_level);
+  if (options.change_level) {
+    s = ReFitLevel(cfd, final_output_level, options.target_level);
   }
   LogFlush(db_options_.info_log);
@@ -125,10 +125,9 @@ class DBImpl : public DB {
   using DB::CompactRange;
-  virtual Status CompactRange(ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end,
-                              bool change_level = false, int target_level = -1,
-                              uint32_t target_path_id = 0) override;
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice* begin, const Slice* end) override;

   using DB::CompactFiles;
@@ -58,10 +58,9 @@ class DBImplReadOnly : public DBImpl {
   using DBImpl::CompactRange;
-  virtual Status CompactRange(ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end,
-                              bool reduce_level = false, int target_level = -1,
-                              uint32_t target_path_id = 0) override {
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice* begin, const Slice* end) override {
     return Status::NotSupported("Not supported operation in read only mode.");
   }
db/db_test.cc

@@ -1092,16 +1092,18 @@ class DBTest : public testing::Test {
   void Compact(int cf, const Slice& start, const Slice& limit,
                uint32_t target_path_id) {
-    ASSERT_OK(db_->CompactRange(handles_[cf], &start, &limit, false, -1,
-                                target_path_id));
+    CompactRangeOptions compact_options;
+    compact_options.target_path_id = target_path_id;
+    ASSERT_OK(db_->CompactRange(compact_options, handles_[cf], &start, &limit));
   }

   void Compact(int cf, const Slice& start, const Slice& limit) {
-    ASSERT_OK(db_->CompactRange(handles_[cf], &start, &limit));
+    ASSERT_OK(
+        db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
   }

   void Compact(const Slice& start, const Slice& limit) {
-    ASSERT_OK(db_->CompactRange(&start, &limit));
+    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &start, &limit));
   }

@@ -1524,7 +1526,7 @@ TEST_F(DBTest, CompactedDB) {
-  db_->CompactRange(nullptr, nullptr);
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -2339,7 +2341,7 @@ TEST_F(DBTest, WholeKeyFilterProp) {
-  db_->CompactRange(nullptr, nullptr);
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -2362,7 +2364,7 @@ TEST_F(DBTest, WholeKeyFilterProp) {
-  db_->CompactRange(nullptr, nullptr);
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -3790,7 +3792,7 @@ TEST_F(DBTest, TrivialMoveOneFile) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -3855,7 +3857,7 @@ TEST_F(DBTest, TrivialMoveNonOverlappingFiles) {
-  db_->CompactRange(nullptr, nullptr);
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -3892,7 +3894,7 @@ TEST_F(DBTest, TrivialMoveNonOverlappingFiles) {
-  db_->CompactRange(nullptr, nullptr);
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -3944,7 +3946,10 @@ TEST_F(DBTest, TrivialMoveTargetLevel) {
-  ASSERT_OK(db_->CompactRange(nullptr, nullptr, true, 6));
+  CompactRangeOptions compact_options;
+  compact_options.change_level = true;
+  compact_options.target_level = 6;
+  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));

@@ -5423,17 +5428,13 @@ TEST_F(DBTest, ConvertCompactionStyle) {
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr, true /* reduce level */,
-                         0 /* reduce to level 0 */);
+  CompactRangeOptions compact_options;
+  compact_options.change_level = true;
+  compact_options.target_level = 0;
+  dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr);

-  for (int i = 0; i < options.num_levels; i++) {
-    int num = NumTableFilesAtLevel(i, 1);
-    if (i == 0) {
-      ASSERT_EQ(num, 1);
-    } else {
-      ASSERT_EQ(num, 0);
-    }
-  }
+  // Only 1 file in L0
+  ASSERT_EQ("1", FilesPerLevel(1));

@@ -5548,8 +5549,10 @@ TEST_F(DBTest, IncreaseUniversalCompactionNumLevels) {
   // Compact all to level 0
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr, true /* reduce level */,
-                         0 /* reduce to level 0 */);
+  CompactRangeOptions compact_options;
+  compact_options.change_level = true;
+  compact_options.target_level = 0;
+  dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr);

@@ -6021,7 +6024,7 @@ TEST_F(DBTest, CompactionFilterDeletesAll) {
-  ASSERT_OK(db_->CompactRange(nullptr, nullptr));
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

@@ -6062,7 +6065,8 @@ TEST_F(DBTest, CompactionFilterWithValueChange) {
-    dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                           nullptr);

@@ -6079,7 +6083,8 @@ TEST_F(DBTest, CompactionFilterWithValueChange) {
-    dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                           nullptr);

@@ -6120,7 +6125,7 @@ TEST_F(DBTest, CompactionFilterWithMergeOperator) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -6128,12 +6133,12 @@ TEST_F(DBTest, CompactionFilterWithMergeOperator) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
   ASSERT_OK(db_->Merge(WriteOptions(), "bar", two));
   ASSERT_OK(Flush());
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -6144,7 +6149,7 @@ TEST_F(DBTest, CompactionFilterWithMergeOperator) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -6157,7 +6162,7 @@ TEST_F(DBTest, CompactionFilterWithMergeOperator) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -6191,7 +6196,7 @@ TEST_F(DBTest, CompactionFilterContextManual) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -6939,7 +6944,8 @@ TEST_F(DBTest, CompactBetweenSnapshots) {
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                         nullptr);

@@ -6948,7 +6954,8 @@ TEST_F(DBTest, CompactBetweenSnapshots) {
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                         nullptr);

@@ -6959,7 +6966,8 @@ TEST_F(DBTest, CompactBetweenSnapshots) {
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                         nullptr);

@@ -7256,7 +7264,7 @@ TEST_F(DBTest, ManualCompaction) {
-  db_->CompactRange(handles_[1], nullptr, nullptr);
+  db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);

@@ -7294,7 +7302,9 @@ TEST_P(DBTestUniversalManualCompactionOutputPathId,
   // Full compaction to DB path 0
-  db_->CompactRange(handles_[1], nullptr, nullptr, false, -1, 1);
+  CompactRangeOptions compact_options;
+  compact_options.target_path_id = 1;
+  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);

@@ -7315,13 +7325,15 @@ TEST_P(DBTestUniversalManualCompactionOutputPathId,
   // Full compaction to DB path 0
-  db_->CompactRange(handles_[1], nullptr, nullptr, false, -1, 0);
+  compact_options.target_path_id = 0;
+  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
   // Fail when compacting to an invalid path ID
-  ASSERT_TRUE(db_->CompactRange(handles_[1], nullptr, nullptr, false, -1, 2)
+  compact_options.target_path_id = 2;
+  ASSERT_TRUE(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)
                   .IsInvalidArgument());

@@ -7378,7 +7390,9 @@ TEST_F(DBTest, ManualLevelCompactionOutputPathId) {
-  db_->CompactRange(handles_[1], nullptr, nullptr, false, 1, 1);
+  CompactRangeOptions compact_options;
+  compact_options.target_path_id = 1;
+  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);

@@ -7447,7 +7461,7 @@ TEST_F(DBTest, DBOpen_Change_NumLevels) {
-  db_->CompactRange(handles_[1], nullptr, nullptr);
+  db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);

@@ -7518,7 +7532,7 @@ TEST_F(DBTest, DropWrites) {
-    dbfull()->CompactRange(nullptr, nullptr);
+    dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -8076,7 +8090,8 @@ TEST_F(DBTest, CompactOnFlush) {
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                         nullptr);

@@ -8085,7 +8100,8 @@ TEST_F(DBTest, CompactOnFlush) {
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                         nullptr);

@@ -8094,7 +8110,8 @@ TEST_F(DBTest, CompactOnFlush) {
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                         nullptr);

@@ -8103,12 +8120,14 @@ TEST_F(DBTest, CompactOnFlush) {
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                         nullptr);
   // clear database
   Delete(1, "foo");
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                         nullptr);

@@ -8122,7 +8141,8 @@ TEST_F(DBTest, CompactOnFlush) {
   // clear database
   Delete(1, "foo");
-  dbfull()->CompactRange(handles_[1], nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                         nullptr);

@@ -9061,10 +9081,9 @@ class ModelDB: public DB {
   using DB::CompactRange;
-  virtual Status CompactRange(ColumnFamilyHandle* column_family,
-                              const Slice* start, const Slice* end,
-                              bool reduce_level, int target_level,
-                              uint32_t output_path_id) override {
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice* start, const Slice* end) override {
     return Status::NotSupported("Not supported operation.");
   }

@@ -9432,7 +9451,8 @@ void PrefixScanInit(DBTest *dbtest) {
-  dbtest->dbfull()->CompactRange(nullptr, nullptr);  // move to level 1
+  dbtest->dbfull()->CompactRange(CompactRangeOptions(), nullptr,
+                                 nullptr);  // move to level 1

@@ -9685,7 +9705,7 @@ TEST_F(DBTest, TailingIteratorIncomplete) {
-  ASSERT_OK(db_->CompactRange(nullptr, nullptr));
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

@@ -9910,7 +9930,7 @@ TEST_F(DBTest, ManagedTailingIteratorIncomplete) {
-  ASSERT_OK(db_->CompactRange(nullptr, nullptr));
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

@@ -10039,7 +10059,7 @@ TEST_F(DBTest, FIFOCompactionTest) {
-      ASSERT_OK(db_->CompactRange(nullptr, nullptr));
+      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

@@ -10760,7 +10780,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
   // Clean up L0
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -10818,7 +10838,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
   // Clean up memtable and L0
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -10839,7 +10859,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
   // Clean up memtable and L0
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -11077,7 +11097,7 @@ TEST_F(DBTest, PreShutdownManualCompaction) {
-  db_->CompactRange(handles_[1], nullptr, nullptr);
+  db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);

@@ -11349,7 +11369,7 @@ TEST_F(DBTest, DynamicLevelMaxBytesBase) {
   // Test compact range works
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -11542,7 +11562,7 @@ TEST_F(DBTest, DynamicLevelMaxBytesCompactRange) {
   // Compact against empty DB
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -11583,7 +11603,7 @@ TEST_F(DBTest, DynamicLevelMaxBytesCompactRange) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -11701,7 +11721,10 @@ TEST_F(DBTest, MigrateToDynamicLevelMaxBytesBase) {
   std::thread t([&]() {
-    dbfull()->CompactRange(nullptr, nullptr, true, options.num_levels - 1);
+    CompactRangeOptions compact_options;
+    compact_options.change_level = true;
+    compact_options.target_level = options.num_levels - 1;
+    dbfull()->CompactRange(compact_options, nullptr, nullptr);
     compaction_finished.store(true);
   });

@@ -12080,7 +12103,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -12106,7 +12129,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -12131,7 +12154,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -12147,7 +12170,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
-  dbfull()->CompactRange(nullptr, nullptr);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);

@@ -12924,7 +12947,7 @@ TEST_F(DBTest, FilterCompactionTimeTest) {
-  ASSERT_OK(db_->CompactRange(nullptr, nullptr));
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

@@ -13338,7 +13361,7 @@ TEST_F(DBTest, PromoteL0Failure) {
-  ASSERT_OK(db_->CompactRange(nullptr, nullptr));
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

@@ -13365,7 +13388,7 @@ TEST_F(DBTest, HugeNumberOfLevels) {
-  ASSERT_OK(db_->CompactRange(nullptr, nullptr));
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

@@ -13491,7 +13514,10 @@ TEST_F(DBTest, UniversalCompactionTargetLevel) {
   // Compact all files into 1 file and put it in L4
-  db_->CompactRange(nullptr, nullptr, true, 4);
+  CompactRangeOptions compact_options;
+  compact_options.change_level = true;
+  compact_options.target_level = 4;
+  db_->CompactRange(compact_options, nullptr, nullptr);

@@ -13516,7 +13542,7 @@ TEST_F(DBTest, SuggestCompactRangeNoTwoLevel0Compactions) {
-  db_->CompactRange(nullptr, nullptr);
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
@@ -201,8 +201,11 @@ TEST_F(DeleteFileTest, PurgeObsoleteFilesTest) {
   // 2 ssts, 1 manifest
   CheckFileTypeCounts(dbname_, 0, 2, 1);
   std::string first("0"), last("999999");
+  CompactRangeOptions compact_options;
+  compact_options.change_level = true;
+  compact_options.target_level = 2;
   Slice first_slice(first), last_slice(last);
-  db_->CompactRange(&first_slice, &last_slice, true, 2);
+  db_->CompactRange(compact_options, &first_slice, &last_slice);
   // 1 sst after compaction
   CheckFileTypeCounts(dbname_, 0, 1, 1);

@@ -211,7 +214,7 @@ TEST_F(DeleteFileTest, PurgeObsoleteFilesTest) {
   CreateTwoLevels();
   itr = db_->NewIterator(ReadOptions());
-  db_->CompactRange(&first_slice, &last_slice, true, 2);
+  db_->CompactRange(compact_options, &first_slice, &last_slice);
   // 3 sst after compaction with live iterator
   CheckFileTypeCounts(dbname_, 0, 3, 1);
   delete itr;
@@ -659,7 +659,7 @@ class FaultInjectionTest : public testing::Test {
     Build(write_options, 0, num_pre_sync);
     if (sync_use_compact_) {
-      db_->CompactRange(nullptr, nullptr);
+      db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
     }
     write_options.sync = false;
     Build(write_options, num_pre_sync, num_post_sync);
@@ -201,7 +201,8 @@ TEST_F(EventListenerTest, OnSingleDBCompactionTest) {
     const Slice kStart = "a";
     const Slice kEnd = "z";
-    ASSERT_OK(dbfull()->CompactRange(handles_[i], &kStart, &kEnd));
+    ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[i],
+                                     &kStart, &kEnd));
     dbfull()->TEST_WaitForFlushMemTable();
     dbfull()->TEST_WaitForCompact();
@@ -294,7 +294,7 @@ void testCounters(Counters& counters, DB* db, bool test_compaction) {
   cout << "Compaction started ...\n";
-  db->CompactRange(nullptr, nullptr);
+  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
   cout << "Compaction ended\n";

@@ -341,7 +341,7 @@ void testPartialMerge(Counters* counters, DB* db, size_t max_merge,
   db->Flush(o);
-  db->CompactRange(nullptr, nullptr);
+  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
   ASSERT_EQ(tmp_sum, counters->assert_get("b"));

@@ -360,7 +360,7 @@ void testPartialMerge(Counters* counters, DB* db, size_t max_merge,
   db->Flush(o);
-  db->CompactRange(nullptr, nullptr);
+  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
   ASSERT_EQ(tmp_sum, counters->assert_get("c"));

@@ -467,7 +467,7 @@ void runTest(int argc, const string& dbname, const bool use_ttl = false) {
   counters.add("test-key", 1);
-  db->CompactRange(nullptr, nullptr);
+  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
@@ -33,6 +33,7 @@ struct ReadOptions;
 struct WriteOptions;
 struct FlushOptions;
 struct CompactionOptions;
+struct CompactRangeOptions;
 struct TableProperties;
 class WriteBatch;
 class Env;

@@ -415,25 +416,42 @@ class DB {
   // begin==nullptr is treated as a key before all keys in the database.
   // end==nullptr is treated as a key after all keys in the database.
   // Therefore the following call will compact the entire database:
-  //    db->CompactRange(nullptr, nullptr);
+  //    db->CompactRange(options, nullptr, nullptr);
   // Note that after the entire database is compacted, all data are pushed
-  // down to the last level containing any data. If the total data size
-  // after compaction is reduced, that level might not be appropriate for
-  // hosting all the files. In this case, client could set change_level
-  // to true, to move the files back to the minimum level capable of holding
-  // the data set or a given level (specified by non-negative target_level).
-  // Compaction outputs should be placed in options.db_paths[target_path_id].
-  // Behavior is undefined if target_path_id is out of range.
-  virtual Status CompactRange(ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end,
-                              bool change_level = false, int target_level = -1,
-                              uint32_t target_path_id = 0) = 0;
-  virtual Status CompactRange(const Slice* begin, const Slice* end,
-                              bool change_level = false, int target_level = -1,
-                              uint32_t target_path_id = 0) {
-    return CompactRange(DefaultColumnFamily(), begin, end, change_level,
-                        target_level, target_path_id);
+  // down to the last level containing any data. If the total data size after
+  // compaction is reduced, that level might not be appropriate for hosting all
+  // the files. In this case, client could set options.change_level to true, to
+  // move the files back to the minimum level capable of holding the data set
+  // or a given level (specified by non-negative options.target_level).
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice* begin, const Slice* end) = 0;
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              const Slice* begin, const Slice* end) {
+    return CompactRange(options, DefaultColumnFamily(), begin, end);
   }
+
+  __attribute__((deprecated)) virtual Status
+  CompactRange(ColumnFamilyHandle* column_family, const Slice* begin,
+               const Slice* end, bool change_level = false,
+               int target_level = -1, uint32_t target_path_id = 0) {
+    CompactRangeOptions options;
+    options.change_level = change_level;
+    options.target_level = target_level;
+    options.target_path_id = target_path_id;
+    return CompactRange(options, column_family, begin, end);
+  }
+  __attribute__((deprecated)) virtual Status
+  CompactRange(const Slice* begin, const Slice* end,
+               bool change_level = false, int target_level = -1,
+               uint32_t target_path_id = 0) {
+    CompactRangeOptions options;
+    options.change_level = change_level;
+    options.target_level = target_level;
+    options.target_path_id = target_path_id;
+    return CompactRange(options, DefaultColumnFamily(), begin, end);
+  }

   virtual Status SetOptions(ColumnFamilyHandle* column_family,
       const std::unordered_map<std::string, std::string>& new_options) {
     return Status::NotSupported("Not implemented");
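As the updated comment above notes, compacting an entire database (default column family, full key range) is now spelled with an options struct. A minimal usage sketch, assuming `db` is an open rocksdb::DB*:

  rocksdb::Status s =
      db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);

The deprecated overloads shown in the hunk simply translate the old boolean/integer parameters into a CompactRangeOptions and forward to the new entry point, so old callers keep working during the transition.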
@@ -1237,6 +1237,19 @@ struct CompactionOptions {
       : compression(kSnappyCompression),
         output_file_size_limit(std::numeric_limits<uint64_t>::max()) {}
 };

+// CompactRangeOptions is used by CompactRange() call.
+struct CompactRangeOptions {
+  // If true, compacted files will be moved to the minimum level capable
+  // of holding the data or given level (specified non-negative target_level).
+  bool change_level = false;
+  // If change_level is true and target_level have non-negative value, compacted
+  // files will be moved to target_level.
+  int target_level = -1;
+  // Compaction outputs will be placed in options.db_paths[target_path_id].
+  // Behavior is undefined if target_path_id is out of range.
+  uint32_t target_path_id = 0;
+};
+
 }  // namespace rocksdb

 #endif  // STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
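A small sketch (an assumption for illustration, not code from the diff) of why the call sites later in this commit can simply pass CompactRangeOptions(): the in-class initializers above reproduce the old default arguments.

    #include <cassert>
    #include "rocksdb/options.h"

    int main() {
      rocksdb::CompactRangeOptions opts;   // default-constructed
      assert(opts.change_level == false);  // old default: change_level = false
      assert(opts.target_level == -1);     // old default: target_level = -1
      assert(opts.target_path_id == 0u);   // old default: target_path_id = 0
      return 0;
    }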
@@ -127,12 +127,10 @@ class StackableDB : public DB {
   }

   using DB::CompactRange;
-  virtual Status CompactRange(ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end,
-                              bool change_level = false, int target_level = -1,
-                              uint32_t target_path_id = 0) override {
-    return db_->CompactRange(column_family, begin, end, change_level,
-                             target_level, target_path_id);
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice* begin, const Slice* end) override {
+    return db_->CompactRange(options, column_family, begin, end);
   }

   using DB::CompactFiles;
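A knock-on effect of the StackableDB change above is that any wrapper class overriding the old virtual CompactRange() has to move to the new signature. A hedged sketch, with a hypothetical LoggingDB wrapper and the usual rocksdb/utilities/stackable_db.h include assumed:

    #include <cstdio>
    #include "rocksdb/utilities/stackable_db.h"

    // LoggingDB is a made-up example; only the override signature matters here.
    class LoggingDB : public rocksdb::StackableDB {
     public:
      explicit LoggingDB(rocksdb::DB* db) : rocksdb::StackableDB(db) {}

      using rocksdb::StackableDB::CompactRange;
      rocksdb::Status CompactRange(const rocksdb::CompactRangeOptions& options,
                                   rocksdb::ColumnFamilyHandle* column_family,
                                   const rocksdb::Slice* begin,
                                   const rocksdb::Slice* end) override {
        std::fprintf(stderr, "CompactRange forwarded to wrapped DB\n");
        return rocksdb::StackableDB::CompactRange(options, column_family,
                                                  begin, end);
      }
    };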
@@ -1476,13 +1476,15 @@ void rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db,
     jint jtarget_level, jint jtarget_path_id) {

   rocksdb::Status s;
+  rocksdb::CompactRangeOptions compact_options;
+  compact_options.change_level = jreduce_level;
+  compact_options.target_level = jtarget_level;
+  compact_options.target_path_id = static_cast<uint32_t>(jtarget_path_id);
   if (cf_handle != nullptr) {
-    s = db->CompactRange(cf_handle, nullptr, nullptr, jreduce_level,
-        jtarget_level, static_cast<uint32_t>(jtarget_path_id));
+    s = db->CompactRange(compact_options, cf_handle, nullptr, nullptr);
   } else {
     // backwards compatibility
-    s = db->CompactRange(nullptr, nullptr, jreduce_level,
-        jtarget_level, static_cast<uint32_t>(jtarget_path_id));
+    s = db->CompactRange(compact_options, nullptr, nullptr);
   }

   if (s.ok()) {
@@ -1533,13 +1535,15 @@ void rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db,
   const rocksdb::Slice end_slice(reinterpret_cast<char*>(end), jend_len);

   rocksdb::Status s;
+  rocksdb::CompactRangeOptions compact_options;
+  compact_options.change_level = jreduce_level;
+  compact_options.target_level = jtarget_level;
+  compact_options.target_path_id = static_cast<uint32_t>(jtarget_path_id);
   if (cf_handle != nullptr) {
-    s = db->CompactRange(cf_handle, &begin_slice, &end_slice, jreduce_level,
-        jtarget_level, static_cast<uint32_t>(jtarget_path_id));
+    s = db->CompactRange(compact_options, cf_handle, &begin_slice, &end_slice);
   } else {
     // backwards compatibility
-    s = db->CompactRange(&begin_slice, &end_slice, jreduce_level,
-        jtarget_level, static_cast<uint32_t>(jtarget_path_id));
+    s = db->CompactRange(compact_options, &begin_slice, &end_slice);
   }

   env->ReleaseByteArrayElements(jbegin, begin, JNI_ABORT);
@@ -441,7 +441,7 @@ void CompactorCommand::DoCommand() {
     end = new Slice(to_);
   }

-  db_->CompactRange(begin, end);
+  db_->CompactRange(CompactRangeOptions(), begin, end);
   exec_state_ = LDBCommandExecuteResult::Succeed("");

   delete begin;
@@ -519,7 +519,7 @@ void DBLoaderCommand::DoCommand() {
     cout << "Warning: " << bad_lines << " bad lines ignored." << endl;
   }
   if (compact_) {
-    db_->CompactRange(nullptr, nullptr);
+    db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
   }
 }

@@ -1204,7 +1204,7 @@ void ReduceDBLevelsCommand::DoCommand() {
   }
   // Compact the whole DB to put all files to the highest level.
   fprintf(stdout, "Compacting the db...\n");
-  db_->CompactRange(nullptr, nullptr);
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
   CloseDB();

   EnvOptions soptions;
@@ -1309,9 +1309,10 @@ void ChangeCompactionStyleCommand::DoCommand() {
           files_per_level.c_str());

   // manual compact into a single file and move the file to level 0
-  db_->CompactRange(nullptr, nullptr,
-                    true /* reduce level */,
-                    0 /* reduce to level 0 */);
+  CompactRangeOptions compact_options;
+  compact_options.change_level = true;
+  compact_options.target_level = 0;
+  db_->CompactRange(compact_options, nullptr, nullptr);

   // verify compaction result
   files_per_level = "";
@@ -77,7 +77,7 @@ TEST_F(ManualCompactionTest, CompactTouchesAllKeys) {
   db->Put(WriteOptions(), Slice("key4"), Slice("destroy"));

   Slice key4("key4");
-  db->CompactRange(nullptr, &key4);
+  db->CompactRange(CompactRangeOptions(), nullptr, &key4);
   Iterator* itr = db->NewIterator(ReadOptions());
   itr->SeekToFirst();
   ASSERT_TRUE(itr->Valid());
@@ -130,7 +130,7 @@ TEST_F(ManualCompactionTest, Test) {
   rocksdb::Slice greatest(end_key.data(), end_key.size());

   // commenting out the line below causes the example to work correctly
-  db->CompactRange(&least, &greatest);
+  db->CompactRange(CompactRangeOptions(), &least, &greatest);

   // count the keys
   rocksdb::Iterator* iter = db->NewIterator(rocksdb::ReadOptions());
@@ -54,10 +54,9 @@ class CompactedDBImpl : public DBImpl {
     return Status::NotSupported("Not supported in compacted db mode.");
   }
   using DBImpl::CompactRange;
-  virtual Status CompactRange(ColumnFamilyHandle* column_family,
-                              const Slice* begin, const Slice* end,
-                              bool change_level = false, int target_level = -1,
-                              uint32_t target_path_id = 0) override {
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice* begin, const Slice* end) override {
     return Status::NotSupported("Not supported in compacted db mode.");
   }

@@ -515,7 +515,7 @@ TEST_F(StringAppendOperatorTest, PersistentFlushAndCompaction) {
   slists.Append("c", "bbnagnagsx");
   slists.Append("a", "sa");
   slists.Append("b", "df");
-  db->CompactRange(nullptr, nullptr);
+  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
   slists.Get("a", &a);
   slists.Get("b", &b);
   slists.Get("c", &c);
@@ -536,7 +536,7 @@ TEST_F(StringAppendOperatorTest, PersistentFlushAndCompaction) {
   ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx\nrogosh");

   // Compact, Get
-  db->CompactRange(nullptr, nullptr);
+  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
   ASSERT_EQ(a, "x\nt\nr\nsa\ngh\njk");
   ASSERT_EQ(b, "y\n2\nmonkey\ndf\nl;");
   ASSERT_EQ(c, "asdasd\nasdasd\nbbnagnagsx\nrogosh");
@@ -544,7 +544,7 @@ TEST_F(StringAppendOperatorTest, PersistentFlushAndCompaction) {
   // Append, Flush, Compact, Get
   slists.Append("b", "afcg");
   db->Flush(rocksdb::FlushOptions());
-  db->CompactRange(nullptr, nullptr);
+  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
   slists.Get("b", &b);
   ASSERT_EQ(b, "y\n2\nmonkey\ndf\nl;\nafcg");
 }
@@ -589,7 +589,7 @@ class SpatialDBImpl : public SpatialDB {

     Status t = Flush(FlushOptions(), cfh);
     if (t.ok()) {
-      t = CompactRange(cfh, nullptr, nullptr);
+      t = CompactRange(CompactRangeOptions(), cfh, nullptr, nullptr);
     }

     {
@@ -168,9 +168,9 @@ class TtlTest : public testing::Test {
   // Runs a manual compaction
   void ManualCompact(ColumnFamilyHandle* cf = nullptr) {
     if (cf == nullptr) {
-      db_ttl_->CompactRange(nullptr, nullptr);
+      db_ttl_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
     } else {
-      db_ttl_->CompactRange(cf, nullptr, nullptr);
+      db_ttl_->CompactRange(CompactRangeOptions(), cf, nullptr, nullptr);
     }
   }
