Remove v1 RangeDelAggregator (#4778)
Summary: Now that v2 is fully functional, the v1 aggregator is removed. The v2 aggregator has been renamed from RangeDelAggregatorV2 to RangeDelAggregator.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/4778
Differential Revision: D13495930
Pulled By: abhimadan
fbshipit-source-id: 9d69500a60a283e79b6c4fa938fc68a8aa4d40d6
Parent: 311cd8cf2f
Commit: 81b6b09f6b
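The rename is mechanical: `ReadRangeDelAggregatorV2` and `CompactionRangeDelAggregatorV2` become `ReadRangeDelAggregator` and `CompactionRangeDelAggregator`, and `db/range_del_aggregator_v2.{h,cc}` is folded into `db/range_del_aggregator.{h,cc}`. As a minimal sketch of the two constructors touched throughout this diff (illustrative only, not part of the commit; `internal_comparator` and `existing_snapshots` stand in for the caller's state):

    #include "db/range_del_aggregator.h"  // formerly db/range_del_aggregator_v2.h

    // Read path: a single snapshot upper bound defines one visible stripe.
    rocksdb::ReadRangeDelAggregator read_agg(
        &internal_comparator, rocksdb::kMaxSequenceNumber /* upper_bound */);

    // Flush/compaction path: all live snapshots partition seqnums into stripes.
    rocksdb::CompactionRangeDelAggregator compact_agg(&internal_comparator,
                                                      existing_snapshots);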
CMakeLists.txt
@@ -504,7 +504,6 @@ set(SOURCES
         db/merge_helper.cc
         db/merge_operator.cc
         db/range_del_aggregator.cc
-        db/range_del_aggregator_v2.cc
         db/range_tombstone_fragmenter.cc
         db/repair.cc
         db/snapshot_impl.cc
@@ -908,7 +907,6 @@ if(WITH_TESTS)
         db/plain_table_db_test.cc
         db/prefix_test.cc
         db/range_del_aggregator_test.cc
-        db/range_del_aggregator_v2_test.cc
         db/range_tombstone_fragmenter_test.cc
         db/repair_test.cc
         db/table_properties_collector_test.cc
Makefile
@@ -543,7 +543,6 @@ TESTS = \
 	persistent_cache_test \
 	statistics_test \
 	lua_test \
-	range_del_aggregator_test \
 	lru_cache_test \
 	object_registry_test \
 	repair_test \
@@ -554,7 +553,7 @@ TESTS = \
 	trace_analyzer_test \
 	repeatable_thread_test \
 	range_tombstone_fragmenter_test \
-	range_del_aggregator_v2_test \
+	range_del_aggregator_test \
 	sst_file_reader_test \
 
 PARALLEL_TEST = \
@@ -1588,9 +1587,6 @@ repeatable_thread_test: util/repeatable_thread_test.o $(LIBOBJECTS) $(TESTHARNES
 range_tombstone_fragmenter_test: db/range_tombstone_fragmenter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
 	$(AM_LINK)
 
-range_del_aggregator_v2_test: db/range_del_aggregator_v2_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
-	$(AM_LINK)
-
 sst_file_reader_test: table/sst_file_reader_test.o $(LIBOBJECTS) $(TESTHARNESS)
 	$(AM_LINK)
TARGETS
@@ -124,7 +124,6 @@ cpp_library(
         "db/merge_helper.cc",
         "db/merge_operator.cc",
         "db/range_del_aggregator.cc",
-        "db/range_del_aggregator_v2.cc",
         "db/range_tombstone_fragmenter.cc",
         "db/repair.cc",
         "db/snapshot_impl.cc",
@@ -936,11 +935,6 @@ ROCKS_TESTS = [
         "db/range_del_aggregator_test.cc",
         "serial",
     ],
-    [
-        "range_del_aggregator_v2_test",
-        "db/range_del_aggregator_v2_test.cc",
-        "serial",
-    ],
     [
         "range_tombstone_fragmenter_test",
         "db/range_tombstone_fragmenter_test.cc",
db/builder.cc
@@ -18,7 +18,7 @@
 #include "db/event_helpers.h"
 #include "db/internal_stats.h"
 #include "db/merge_helper.h"
-#include "db/range_del_aggregator_v2.h"
+#include "db/range_del_aggregator.h"
 #include "db/table_cache.h"
 #include "db/version_edit.h"
 #include "monitoring/iostats_context_imp.h"
@@ -88,8 +88,8 @@ Status BuildTable(
   Status s;
   meta->fd.file_size = 0;
   iter->SeekToFirst();
-  std::unique_ptr<CompactionRangeDelAggregatorV2> range_del_agg(
-      new CompactionRangeDelAggregatorV2(&internal_comparator, snapshots));
+  std::unique_ptr<CompactionRangeDelAggregator> range_del_agg(
+      new CompactionRangeDelAggregator(&internal_comparator, snapshots));
   for (auto& range_del_iter : range_del_iters) {
     range_del_agg->AddTombstones(std::move(range_del_iter));
   }
db/column_family.cc
@@ -25,7 +25,7 @@
 #include "db/db_impl.h"
 #include "db/internal_stats.h"
 #include "db/job_context.h"
-#include "db/range_del_aggregator_v2.h"
+#include "db/range_del_aggregator.h"
 #include "db/table_properties_collector.h"
 #include "db/version_set.h"
 #include "db/write_controller.h"
@@ -945,7 +945,7 @@ Status ColumnFamilyData::RangesOverlapWithMemtables(
   ScopedArenaIterator memtable_iter(merge_iter_builder.Finish());
 
   auto read_seq = super_version->current->version_set()->LastSequence();
-  ReadRangeDelAggregatorV2 range_del_agg(&internal_comparator_, read_seq);
+  ReadRangeDelAggregator range_del_agg(&internal_comparator_, read_seq);
   auto* active_range_del_iter =
       super_version->mem->NewRangeTombstoneIterator(read_opts, read_seq);
   range_del_agg.AddTombstones(
db/compaction_iterator.cc
@@ -18,7 +18,7 @@ CompactionIterator::CompactionIterator(
     SequenceNumber earliest_write_conflict_snapshot,
     const SnapshotChecker* snapshot_checker, Env* env,
     bool report_detailed_time, bool expect_valid_internal_key,
-    CompactionRangeDelAggregatorV2* range_del_agg, const Compaction* compaction,
+    CompactionRangeDelAggregator* range_del_agg, const Compaction* compaction,
     const CompactionFilter* compaction_filter,
     const std::atomic<bool>* shutting_down,
     const SequenceNumber preserve_deletes_seqnum)
@@ -36,7 +36,7 @@ CompactionIterator::CompactionIterator(
     SequenceNumber earliest_write_conflict_snapshot,
     const SnapshotChecker* snapshot_checker, Env* env,
     bool report_detailed_time, bool expect_valid_internal_key,
-    CompactionRangeDelAggregatorV2* range_del_agg,
+    CompactionRangeDelAggregator* range_del_agg,
     std::unique_ptr<CompactionProxy> compaction,
     const CompactionFilter* compaction_filter,
     const std::atomic<bool>* shutting_down,
db/compaction_iterator.h
@@ -13,7 +13,7 @@
 #include "db/compaction_iteration_stats.h"
 #include "db/merge_helper.h"
 #include "db/pinned_iterators_manager.h"
-#include "db/range_del_aggregator_v2.h"
+#include "db/range_del_aggregator.h"
 #include "db/snapshot_checker.h"
 #include "options/cf_options.h"
 #include "rocksdb/compaction_filter.h"
@@ -64,7 +64,7 @@ class CompactionIterator {
       SequenceNumber earliest_write_conflict_snapshot,
       const SnapshotChecker* snapshot_checker, Env* env,
       bool report_detailed_time, bool expect_valid_internal_key,
-      CompactionRangeDelAggregatorV2* range_del_agg,
+      CompactionRangeDelAggregator* range_del_agg,
       const Compaction* compaction = nullptr,
       const CompactionFilter* compaction_filter = nullptr,
       const std::atomic<bool>* shutting_down = nullptr,
@@ -77,7 +77,7 @@ class CompactionIterator {
       SequenceNumber earliest_write_conflict_snapshot,
       const SnapshotChecker* snapshot_checker, Env* env,
       bool report_detailed_time, bool expect_valid_internal_key,
-      CompactionRangeDelAggregatorV2* range_del_agg,
+      CompactionRangeDelAggregator* range_del_agg,
       std::unique_ptr<CompactionProxy> compaction,
       const CompactionFilter* compaction_filter = nullptr,
       const std::atomic<bool>* shutting_down = nullptr,
@@ -141,7 +141,7 @@ class CompactionIterator {
   Env* env_;
   bool report_detailed_time_;
   bool expect_valid_internal_key_;
-  CompactionRangeDelAggregatorV2* range_del_agg_;
+  CompactionRangeDelAggregator* range_del_agg_;
   std::unique_ptr<CompactionProxy> compaction_;
   const CompactionFilter* compaction_filter_;
   const std::atomic<bool>* shutting_down_;
db/compaction_iterator_test.cc
@@ -228,8 +228,7 @@ class CompactionIteratorTest : public testing::TestWithParam<bool> {
     std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter(
         new FragmentedRangeTombstoneIterator(tombstone_list, icmp_,
                                              kMaxSequenceNumber));
-    range_del_agg_.reset(
-        new CompactionRangeDelAggregatorV2(&icmp_, snapshots_));
+    range_del_agg_.reset(new CompactionRangeDelAggregator(&icmp_, snapshots_));
     range_del_agg_->AddTombstones(std::move(range_del_iter));
 
     std::unique_ptr<CompactionIterator::CompactionProxy> compaction;
@@ -298,7 +297,7 @@ class CompactionIteratorTest : public testing::TestWithParam<bool> {
   std::unique_ptr<MergeHelper> merge_helper_;
   std::unique_ptr<LoggingForwardVectorIterator> iter_;
   std::unique_ptr<CompactionIterator> c_iter_;
-  std::unique_ptr<CompactionRangeDelAggregatorV2> range_del_agg_;
+  std::unique_ptr<CompactionRangeDelAggregator> range_del_agg_;
   std::unique_ptr<SnapshotChecker> snapshot_checker_;
   std::atomic<bool> shutting_down_{false};
   FakeCompaction* compaction_proxy_;
db/compaction_job.cc
@@ -36,7 +36,7 @@
 #include "db/memtable_list.h"
 #include "db/merge_context.h"
 #include "db/merge_helper.h"
-#include "db/range_del_aggregator_v2.h"
+#include "db/range_del_aggregator.h"
 #include "db/version_set.h"
 #include "monitoring/iostats_context_imp.h"
 #include "monitoring/perf_context_imp.h"
@@ -805,8 +805,8 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) {
 void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
   assert(sub_compact != nullptr);
   ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
-  CompactionRangeDelAggregatorV2 range_del_agg(&cfd->internal_comparator(),
-                                               existing_snapshots_);
+  CompactionRangeDelAggregator range_del_agg(&cfd->internal_comparator(),
+                                             existing_snapshots_);
 
   // Although the v2 aggregator is what the level iterator(s) know about,
   // the AddTombstones calls will be propagated down to the v1 aggregator.
@@ -1165,7 +1165,7 @@ void CompactionJob::RecordDroppedKeys(
 
 Status CompactionJob::FinishCompactionOutputFile(
     const Status& input_status, SubcompactionState* sub_compact,
-    CompactionRangeDelAggregatorV2* range_del_agg,
+    CompactionRangeDelAggregator* range_del_agg,
     CompactionIterationStats* range_del_out_stats,
     const Slice* next_table_min_key /* = nullptr */) {
   AutoThreadOperationStageUpdater stage_updater(
db/compaction_job.h
@@ -25,7 +25,7 @@
 #include "db/job_context.h"
 #include "db/log_writer.h"
 #include "db/memtable_list.h"
-#include "db/range_del_aggregator_v2.h"
+#include "db/range_del_aggregator.h"
 #include "db/version_edit.h"
 #include "db/write_controller.h"
 #include "db/write_thread.h"
@@ -104,7 +104,7 @@ class CompactionJob {
 
   Status FinishCompactionOutputFile(
       const Status& input_status, SubcompactionState* sub_compact,
-      CompactionRangeDelAggregatorV2* range_del_agg,
+      CompactionRangeDelAggregator* range_del_agg,
       CompactionIterationStats* range_del_out_stats,
       const Slice* next_table_min_key = nullptr);
   Status InstallCompactionResults(const MutableCFOptions& mutable_cf_options);
db/db_compaction_filter_test.cc
@@ -340,8 +340,8 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
   Arena arena;
   {
     InternalKeyComparator icmp(options.comparator);
-    ReadRangeDelAggregatorV2 range_del_agg(
-        &icmp, kMaxSequenceNumber /* upper_bound */);
+    ReadRangeDelAggregator range_del_agg(&icmp,
+                                         kMaxSequenceNumber /* upper_bound */);
     ScopedArenaIterator iter(dbfull()->NewInternalIterator(
         &arena, &range_del_agg, kMaxSequenceNumber, handles_[1]));
     iter->SeekToFirst();
@@ -430,8 +430,8 @@ TEST_F(DBTestCompactionFilter, CompactionFilter) {
   count = 0;
   {
     InternalKeyComparator icmp(options.comparator);
-    ReadRangeDelAggregatorV2 range_del_agg(
-        &icmp, kMaxSequenceNumber /* upper_bound */);
+    ReadRangeDelAggregator range_del_agg(&icmp,
+                                         kMaxSequenceNumber /* upper_bound */);
     ScopedArenaIterator iter(dbfull()->NewInternalIterator(
         &arena, &range_del_agg, kMaxSequenceNumber, handles_[1]));
     iter->SeekToFirst();
@@ -648,8 +648,8 @@ TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) {
   int total = 0;
   Arena arena;
   InternalKeyComparator icmp(options.comparator);
-  ReadRangeDelAggregatorV2 range_del_agg(&icmp,
-                                         kMaxSequenceNumber /* snapshots */);
+  ReadRangeDelAggregator range_del_agg(&icmp,
+                                       kMaxSequenceNumber /* snapshots */);
   ScopedArenaIterator iter(dbfull()->NewInternalIterator(
       &arena, &range_del_agg, kMaxSequenceNumber));
   iter->SeekToFirst();
db/db_impl.cc
@@ -45,7 +45,6 @@
 #include "db/memtable_list.h"
 #include "db/merge_context.h"
 #include "db/merge_helper.h"
-#include "db/range_del_aggregator.h"
 #include "db/range_tombstone_fragmenter.h"
 #include "db/table_cache.h"
 #include "db/table_properties_collector.h"
@@ -1033,7 +1032,7 @@ bool DBImpl::SetPreserveDeletesSequenceNumber(SequenceNumber seqnum) {
 }
 
 InternalIterator* DBImpl::NewInternalIterator(
-    Arena* arena, RangeDelAggregatorV2* range_del_agg, SequenceNumber sequence,
+    Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence,
     ColumnFamilyHandle* column_family) {
   ColumnFamilyData* cfd;
   if (column_family == nullptr) {
@@ -1150,10 +1149,12 @@ static void CleanupIteratorState(void* arg1, void* /*arg2*/) {
 }
 }  // namespace
 
-InternalIterator* DBImpl::NewInternalIterator(
-    const ReadOptions& read_options, ColumnFamilyData* cfd,
-    SuperVersion* super_version, Arena* arena,
-    RangeDelAggregatorV2* range_del_agg, SequenceNumber sequence) {
+InternalIterator* DBImpl::NewInternalIterator(const ReadOptions& read_options,
+                                              ColumnFamilyData* cfd,
+                                              SuperVersion* super_version,
+                                              Arena* arena,
+                                              RangeDelAggregator* range_del_agg,
+                                              SequenceNumber sequence) {
   InternalIterator* internal_iter;
   assert(arena != nullptr);
   assert(range_del_agg != nullptr);
db/db_impl.h
@@ -31,7 +31,7 @@
 #include "db/log_writer.h"
 #include "db/logs_with_prep_tracker.h"
 #include "db/pre_release_callback.h"
-#include "db/range_del_aggregator_v2.h"
+#include "db/range_del_aggregator.h"
 #include "db/read_callback.h"
 #include "db/snapshot_checker.h"
 #include "db/snapshot_impl.h"
@@ -375,8 +375,8 @@ class DBImpl : public DB {
   // The keys of this iterator are internal keys (see format.h).
   // The returned iterator should be deleted when no longer needed.
   InternalIterator* NewInternalIterator(
-      Arena* arena, RangeDelAggregatorV2* range_del_agg,
-      SequenceNumber sequence, ColumnFamilyHandle* column_family = nullptr);
+      Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence,
+      ColumnFamilyHandle* column_family = nullptr);
 
   LogsWithPrepTracker* logs_with_prep_tracker() {
     return &logs_with_prep_tracker_;
@@ -579,12 +579,9 @@ class DBImpl : public DB {
 
   const WriteController& write_controller() { return write_controller_; }
 
-  InternalIterator* NewInternalIterator(const ReadOptions&,
-                                        ColumnFamilyData* cfd,
-                                        SuperVersion* super_version,
-                                        Arena* arena,
-                                        RangeDelAggregatorV2* range_del_agg,
-                                        SequenceNumber sequence);
+  InternalIterator* NewInternalIterator(
+      const ReadOptions&, ColumnFamilyData* cfd, SuperVersion* super_version,
+      Arena* arena, RangeDelAggregator* range_del_agg, SequenceNumber sequence);
 
   // hollow transactions shell used for recovery.
   // these will then be passed to TransactionDB so that
db/db_iter.cc
@@ -9,7 +9,6 @@
 #include "db/db_impl.h"
 #include "db/db_iter.h"
 #include "db/merge_context.h"
-#include "db/range_del_aggregator.h"
 #include "monitoring/perf_context_imp.h"
 
 namespace rocksdb {
@@ -171,7 +171,7 @@ class DBIter final: public Iterator {
     iter_ = iter;
     iter_->SetPinnedItersMgr(&pinned_iters_mgr_);
   }
-  virtual ReadRangeDelAggregatorV2* GetRangeDelAggregator() {
+  virtual ReadRangeDelAggregator* GetRangeDelAggregator() {
     return &range_del_agg_;
   }
 
@@ -341,7 +341,7 @@ class DBIter final: public Iterator {
   const bool total_order_seek_;
   // List of operands for merge operator.
   MergeContext merge_context_;
-  ReadRangeDelAggregatorV2 range_del_agg_;
+  ReadRangeDelAggregator range_del_agg_;
   LocalStatistics local_stats_;
   PinnedIteratorsManager pinned_iters_mgr_;
   ReadCallback* read_callback_;
@@ -1479,7 +1479,7 @@ Iterator* NewDBIterator(Env* env, const ReadOptions& read_options,
 
 ArenaWrappedDBIter::~ArenaWrappedDBIter() { db_iter_->~DBIter(); }
 
-ReadRangeDelAggregatorV2* ArenaWrappedDBIter::GetRangeDelAggregator() {
+ReadRangeDelAggregator* ArenaWrappedDBIter::GetRangeDelAggregator() {
   return db_iter_->GetRangeDelAggregator();
 }
 
db/db_iter.h
@@ -12,7 +12,7 @@
 #include <string>
 #include "db/db_impl.h"
 #include "db/dbformat.h"
-#include "db/range_del_aggregator_v2.h"
+#include "db/range_del_aggregator.h"
 #include "options/cf_options.h"
 #include "rocksdb/db.h"
 #include "rocksdb/iterator.h"
@@ -48,7 +48,7 @@ class ArenaWrappedDBIter : public Iterator {
   // Get the arena to be used to allocate memory for DBIter to be wrapped,
   // as well as child iterators in it.
   virtual Arena* GetArena() { return &arena_; }
-  virtual ReadRangeDelAggregatorV2* GetRangeDelAggregator();
+  virtual ReadRangeDelAggregator* GetRangeDelAggregator();
 
   // Set the internal iterator wrapped inside the DB Iterator. Usually it is
   // a merging iterator.
db/db_memtable_test.cc
@@ -8,6 +8,7 @@
 
 #include "db/db_test_util.h"
 #include "db/memtable.h"
+#include "db/range_del_aggregator.h"
 #include "port/stack_trace.h"
 #include "rocksdb/memtablerep.h"
 #include "rocksdb/slice_transform.h"
@@ -135,7 +136,8 @@ TEST_F(DBMemTableTest, DuplicateSeq) {
   MergeContext merge_context;
   Options options;
   InternalKeyComparator ikey_cmp(options.comparator);
-  RangeDelAggregator range_del_agg(ikey_cmp, {} /* snapshots */);
+  ReadRangeDelAggregator range_del_agg(&ikey_cmp,
+                                       kMaxSequenceNumber /* upper_bound */);
 
   // Create a MemTable
   InternalKeyComparator cmp(BytewiseComparator());
db/db_test_util.cc
@@ -814,8 +814,8 @@ std::string DBTestBase::AllEntriesFor(const Slice& user_key, int cf) {
   Arena arena;
   auto options = CurrentOptions();
   InternalKeyComparator icmp(options.comparator);
-  ReadRangeDelAggregatorV2 range_del_agg(&icmp,
-                                         kMaxSequenceNumber /* upper_bound */);
+  ReadRangeDelAggregator range_del_agg(&icmp,
+                                       kMaxSequenceNumber /* upper_bound */);
   ScopedArenaIterator iter;
   if (cf == 0) {
     iter.set(dbfull()->NewInternalIterator(&arena, &range_del_agg,
|
||||
Arena arena;
|
||||
auto options = CurrentOptions();
|
||||
InternalKeyComparator icmp(options.comparator);
|
||||
ReadRangeDelAggregatorV2 range_del_agg(&icmp,
|
||||
kMaxSequenceNumber /* upper_bound */);
|
||||
ReadRangeDelAggregator range_del_agg(&icmp,
|
||||
kMaxSequenceNumber /* upper_bound */);
|
||||
// This should be defined after range_del_agg so that it destructs the
|
||||
// assigned iterator before it range_del_agg is already destructed.
|
||||
ScopedArenaIterator iter;
|
||||
@ -1437,8 +1437,8 @@ void DBTestBase::VerifyDBInternal(
|
||||
std::vector<std::pair<std::string, std::string>> true_data) {
|
||||
Arena arena;
|
||||
InternalKeyComparator icmp(last_options_.comparator);
|
||||
ReadRangeDelAggregatorV2 range_del_agg(&icmp,
|
||||
kMaxSequenceNumber /* upper_bound */);
|
||||
ReadRangeDelAggregator range_del_agg(&icmp,
|
||||
kMaxSequenceNumber /* upper_bound */);
|
||||
auto iter =
|
||||
dbfull()->NewInternalIterator(&arena, &range_del_agg, kMaxSequenceNumber);
|
||||
iter->SeekToFirst();
|
||||
|
db/forward_iterator.cc
@@ -15,7 +15,7 @@
 #include "db/db_iter.h"
 #include "db/dbformat.h"
 #include "db/job_context.h"
-#include "db/range_del_aggregator_v2.h"
+#include "db/range_del_aggregator.h"
 #include "db/range_tombstone_fragmenter.h"
 #include "rocksdb/env.h"
 #include "rocksdb/slice.h"
|
||||
delete file_iter_;
|
||||
}
|
||||
|
||||
ReadRangeDelAggregatorV2 range_del_agg(
|
||||
&cfd_->internal_comparator(), kMaxSequenceNumber /* upper_bound */);
|
||||
ReadRangeDelAggregator range_del_agg(&cfd_->internal_comparator(),
|
||||
kMaxSequenceNumber /* upper_bound */);
|
||||
file_iter_ = cfd_->table_cache()->NewIterator(
|
||||
read_options_, *(cfd_->soptions()), cfd_->internal_comparator(),
|
||||
*files_[file_index_],
|
||||
@ -610,8 +610,8 @@ void ForwardIterator::RebuildIterators(bool refresh_sv) {
|
||||
// New
|
||||
sv_ = cfd_->GetReferencedSuperVersion(&(db_->mutex_));
|
||||
}
|
||||
ReadRangeDelAggregatorV2 range_del_agg(&cfd_->internal_comparator(),
|
||||
kMaxSequenceNumber /* upper_bound */);
|
||||
ReadRangeDelAggregator range_del_agg(&cfd_->internal_comparator(),
|
||||
kMaxSequenceNumber /* upper_bound */);
|
||||
mutable_iter_ = sv_->mem->NewIterator(read_options_, &arena_);
|
||||
sv_->imm->AddIterators(read_options_, &imm_iters_, &arena_);
|
||||
if (!read_options_.ignore_range_deletions) {
|
||||
@ -669,8 +669,8 @@ void ForwardIterator::RenewIterators() {
|
||||
|
||||
mutable_iter_ = svnew->mem->NewIterator(read_options_, &arena_);
|
||||
svnew->imm->AddIterators(read_options_, &imm_iters_, &arena_);
|
||||
ReadRangeDelAggregatorV2 range_del_agg(&cfd_->internal_comparator(),
|
||||
kMaxSequenceNumber /* upper_bound */);
|
||||
ReadRangeDelAggregator range_del_agg(&cfd_->internal_comparator(),
|
||||
kMaxSequenceNumber /* upper_bound */);
|
||||
if (!read_options_.ignore_range_deletions) {
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter(
|
||||
svnew->mem->NewRangeTombstoneIterator(
|
||||
|
db/memtable_list.cc
@@ -159,7 +159,7 @@ bool MemTableListVersion::GetFromList(
 
 Status MemTableListVersion::AddRangeTombstoneIterators(
     const ReadOptions& read_opts, Arena* /*arena*/,
-    RangeDelAggregatorV2* range_del_agg) {
+    RangeDelAggregator* range_del_agg) {
   assert(range_del_agg != nullptr);
   for (auto& m : memlist_) {
     // Using kMaxSequenceNumber is OK because these are immutable memtables.
db/memtable_list.h
@@ -15,7 +15,7 @@
 #include "db/dbformat.h"
 #include "db/logs_with_prep_tracker.h"
 #include "db/memtable.h"
-#include "db/range_del_aggregator_v2.h"
+#include "db/range_del_aggregator.h"
 #include "monitoring/instrumented_mutex.h"
 #include "rocksdb/db.h"
 #include "rocksdb/iterator.h"
|
||||
}
|
||||
|
||||
Status AddRangeTombstoneIterators(const ReadOptions& read_opts, Arena* arena,
|
||||
RangeDelAggregatorV2* range_del_agg);
|
||||
RangeDelAggregator* range_del_agg);
|
||||
|
||||
void AddIterators(const ReadOptions& options,
|
||||
std::vector<InternalIterator*>* iterator_list,
|
||||
|
@@ -8,7 +8,6 @@
 #include <string>
 #include <vector>
 #include "db/merge_context.h"
-#include "db/range_del_aggregator.h"
 #include "db/version_set.h"
 #include "db/write_controller.h"
 #include "rocksdb/db.h"
db/merge_context.h
@@ -79,7 +79,8 @@ class MergeContext {
     return GetOperandsDirectionForward();
   }
 
-  // Return all the operands in the order as they were merged (passed to FullMerge or FullMergeV2)
+  // Return all the operands in the order as they were merged (passed to
+  // FullMerge or FullMergeV2)
   const std::vector<Slice>& GetOperandsDirectionForward() {
     if (!operand_list_) {
       return empty_operand_list;
@@ -89,7 +90,8 @@ class MergeContext {
     return *operand_list_;
   }
 
-  // Return all the operands in the reversed order relative to how they were merged (passed to FullMerge or FullMergeV2)
+  // Return all the operands in the reversed order relative to how they were
+  // merged (passed to FullMerge or FullMergeV2)
   const std::vector<Slice>& GetOperandsDirectionBackward() {
     if (!operand_list_) {
       return empty_operand_list;
db/merge_helper.cc
@@ -114,7 +114,7 @@ Status MergeHelper::TimedFullMerge(const MergeOperator* merge_operator,
 // TODO: Avoid the snapshot stripe map lookup in CompactionRangeDelAggregator
 // and just pass the StripeRep corresponding to the stripe being merged.
 Status MergeHelper::MergeUntil(InternalIterator* iter,
-                               CompactionRangeDelAggregatorV2* range_del_agg,
+                               CompactionRangeDelAggregator* range_del_agg,
                                const SequenceNumber stop_before,
                                const bool at_bottom) {
   // Get a copy of the internal key, before it's invalidated by iter->Next()
db/merge_helper.h
@@ -11,7 +11,7 @@
 
 #include "db/dbformat.h"
 #include "db/merge_context.h"
-#include "db/range_del_aggregator_v2.h"
+#include "db/range_del_aggregator.h"
 #include "db/snapshot_checker.h"
 #include "rocksdb/compaction_filter.h"
 #include "rocksdb/env.h"
@@ -78,7 +78,7 @@ class MergeHelper {
   //
   // REQUIRED: The first key in the input is not corrupted.
   Status MergeUntil(InternalIterator* iter,
-                    CompactionRangeDelAggregatorV2* range_del_agg = nullptr,
+                    CompactionRangeDelAggregator* range_del_agg = nullptr,
                     const SequenceNumber stop_before = 0,
                     const bool at_bottom = false);
 
db/range_del_aggregator.cc (file diff suppressed because it is too large)
db/range_del_aggregator.h
@@ -1,10 +1,12 @@
-// Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
+// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
 // This source code is licensed under both the GPLv2 (found in the
 // COPYING file in the root directory) and Apache 2.0 License
 // (found in the LICENSE.Apache file in the root directory).
 
 #pragma once
 
+#include <algorithm>
+#include <iterator>
+#include <list>
 #include <map>
 #include <set>
@@ -14,220 +16,422 @@
 #include "db/compaction_iteration_stats.h"
 #include "db/dbformat.h"
 #include "db/pinned_iterators_manager.h"
+#include "db/range_del_aggregator.h"
+#include "db/range_tombstone_fragmenter.h"
 #include "db/version_edit.h"
 #include "include/rocksdb/comparator.h"
 #include "include/rocksdb/types.h"
 #include "table/internal_iterator.h"
 #include "table/scoped_arena_iterator.h"
 #include "table/table_builder.h"
+#include "util/heap.h"
 #include "util/kv_map.h"
 
 namespace rocksdb {
 
-// RangeDelMaps maintain position across calls to ShouldDelete. The caller may
-// wish to specify a mode to optimize positioning the iterator during the next
-// call to ShouldDelete. The non-kFullScan modes are only available when
-// deletion collapsing is enabled.
-//
-// For example, if we invoke Next() on an iterator, kForwardTraversal should be
-// specified to advance one-by-one through deletions until one is found with its
-// interval containing the key. This will typically be faster than doing a full
-// binary search (kBinarySearch).
-enum class RangeDelPositioningMode {
-  kFullScan,  // used iff collapse_deletions_ == false
-  kForwardTraversal,
-  kBackwardTraversal,
-  kBinarySearch,
-};
+class TruncatedRangeDelIterator {
+ public:
+  TruncatedRangeDelIterator(
+      std::unique_ptr<FragmentedRangeTombstoneIterator> iter,
+      const InternalKeyComparator* icmp, const InternalKey* smallest,
+      const InternalKey* largest);
 
-// TruncatedRangeTombstones are a slight generalization of regular
-// RangeTombstones that can represent truncations caused by SST boundaries.
-// Instead of using user keys to represent the start and end keys, they instead
-// use internal keys, whose sequence number indicates the sequence number of
-// the smallest/largest SST key (in the case where a tombstone is untruncated,
-// the sequence numbers will be kMaxSequenceNumber for both start and end
-// keys). Like RangeTombstones, TruncatedRangeTombstone are also
-// end-key-exclusive.
-struct TruncatedRangeTombstone {
-  TruncatedRangeTombstone(const ParsedInternalKey& sk,
-                          const ParsedInternalKey& ek, SequenceNumber s)
-      : start_key_(sk), end_key_(ek), seq_(s) {}
+  bool Valid() const;
 
-  RangeTombstone Tombstone() const {
-    // The RangeTombstone returned here can cover less than the
-    // TruncatedRangeTombstone when its end key has a seqnum that is not
-    // kMaxSequenceNumber. Since this method is only used by RangeDelIterators
-    // (which in turn are only used during flush/compaction), we avoid this
-    // problem by using truncation boundaries spanning multiple SSTs, which
-    // are selected in a way that guarantee a clean break at the end key.
-    assert(end_key_.sequence == kMaxSequenceNumber);
-    return RangeTombstone(start_key_.user_key, end_key_.user_key, seq_);
-  }
+  void Next();
+  void Prev();
+
+  void InternalNext();
+
+  // Seeks to the tombstone with the highest visible sequence number that covers
+  // target (a user key). If no such tombstone exists, the position will be at
+  // the earliest tombstone that ends after target.
+  void Seek(const Slice& target);
+
+  // Seeks to the tombstone with the highest visible sequence number that covers
+  // target (a user key). If no such tombstone exists, the position will be at
+  // the latest tombstone that starts before target.
+  void SeekForPrev(const Slice& target);
+
+  void SeekToFirst();
+  void SeekToLast();
+
+  ParsedInternalKey start_key() const {
+    return (smallest_ == nullptr ||
+            icmp_->Compare(*smallest_, iter_->parsed_start_key()) <= 0)
+               ? iter_->parsed_start_key()
+               : *smallest_;
+  }
 
-  ParsedInternalKey start_key_;
-  ParsedInternalKey end_key_;
-  SequenceNumber seq_;
-};
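The new TruncatedRangeDelIterator above replaces the removed TruncatedRangeTombstone struct. A minimal usage sketch, not from the commit and based only on the declarations visible in this hunk (the function name and its inputs are illustrative assumptions):

    #include "db/range_del_aggregator.h"

    namespace rocksdb {

    // Sketch: iterate an SST file's range tombstones, truncated to the file's
    // key bounds (smallest/largest taken from the file's metadata).
    void DumpTruncatedTombstones(
        std::unique_ptr<FragmentedRangeTombstoneIterator> frag_iter,
        const InternalKeyComparator* icmp, const InternalKey* smallest,
        const InternalKey* largest) {
      TruncatedRangeDelIterator truncated(std::move(frag_iter), icmp, smallest,
                                          largest);
      for (truncated.SeekToFirst(); truncated.Valid(); truncated.Next()) {
        // start_key()/end_key() are clamped to [smallest, largest].
        ParsedInternalKey start = truncated.start_key();
        ParsedInternalKey end = truncated.end_key();
        SequenceNumber seq = truncated.seq();
        (void)start; (void)end; (void)seq;  // e.g., log or collect them
      }
    }

    }  // namespace rocksdb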
-// A RangeDelIterator iterates over range deletion tombstones.
-class RangeDelIterator {
- public:
-  virtual ~RangeDelIterator() = default;
-
-  virtual bool Valid() const = 0;
-  virtual void Next() = 0;
-  // NOTE: the Slice passed to this method must be a user key.
-  virtual void Seek(const Slice& target) = 0;
-  virtual void Seek(const ParsedInternalKey& target) = 0;
-  virtual RangeTombstone Tombstone() const = 0;
-};
-
-// A RangeDelMap keeps track of range deletion tombstones within a snapshot
-// stripe.
-//
-// RangeDelMaps are used internally by RangeDelAggregator. They are not intended
-// to be used directly.
-class RangeDelMap {
- public:
-  virtual ~RangeDelMap() = default;
-
-  virtual bool ShouldDelete(const ParsedInternalKey& parsed,
-                            RangeDelPositioningMode mode) = 0;
-  virtual bool IsRangeOverlapped(const ParsedInternalKey& start,
-                                 const ParsedInternalKey& end) = 0;
-  virtual void InvalidatePosition() = 0;
-
-  virtual size_t Size() const = 0;
-  bool IsEmpty() const { return Size() == 0; }
-
-  virtual void AddTombstone(TruncatedRangeTombstone tombstone) = 0;
-  virtual std::unique_ptr<RangeDelIterator> NewIterator() = 0;
-};
-// A RangeDelAggregator aggregates range deletion tombstones as they are
-// encountered in memtables/SST files. It provides methods that check whether a
-// key is covered by range tombstones or write the relevant tombstones to a new
-// SST file.
-class RangeDelAggregator {
- public:
-  // @param snapshots These are used to organize the tombstones into snapshot
-  //    stripes, which is the seqnum range between consecutive snapshots,
-  //    including the higher snapshot and excluding the lower one. Currently,
-  //    this is used by ShouldDelete() to prevent deletion of keys that are
-  //    covered by range tombstones in other snapshot stripes. This constructor
-  //    is used for writes (flush/compaction). All DB snapshots are provided
-  //    such that no keys are removed that are uncovered according to any DB
-  //    snapshot.
-  // Note this overload does not lazily initialize Rep.
-  RangeDelAggregator(const InternalKeyComparator& icmp,
-                     const std::vector<SequenceNumber>& snapshots,
-                     bool collapse_deletions = true);
-
-  // @param upper_bound Similar to snapshots above, except with a single
-  //    snapshot, which allows us to store the snapshot on the stack and defer
-  //    initialization of heap-allocating members (in Rep) until the first range
-  //    deletion is encountered. This constructor is used in case of reads (get/
-  //    iterator), for which only the user snapshot (upper_bound) is provided
-  //    such that the seqnum space is divided into two stripes. Only the older
-  //    stripe will be used by ShouldDelete().
-  RangeDelAggregator(const InternalKeyComparator& icmp,
-                     SequenceNumber upper_bound,
-                     bool collapse_deletions = false);
-
-  // Returns whether the key should be deleted, which is the case when it is
-  // covered by a range tombstone residing in the same snapshot stripe.
-  // @param mode If collapse_deletions_ is true, this dictates how we will find
-  //    the deletion whose interval contains this key. Otherwise, its
-  //    value must be kFullScan indicating linear scan from beginning.
-  bool ShouldDelete(
-      const ParsedInternalKey& parsed,
-      RangeDelPositioningMode mode = RangeDelPositioningMode::kFullScan) {
-    if (rep_ == nullptr) {
-      return false;
-    }
-    return ShouldDeleteImpl(parsed, mode);
+  ParsedInternalKey end_key() const {
+    return (largest_ == nullptr ||
+            icmp_->Compare(iter_->parsed_end_key(), *largest_) <= 0)
+               ? iter_->parsed_end_key()
+               : *largest_;
   }
-  bool ShouldDelete(
-      const Slice& internal_key,
-      RangeDelPositioningMode mode = RangeDelPositioningMode::kFullScan) {
-    if (rep_ == nullptr) {
-      return false;
-    }
-    return ShouldDeleteImpl(internal_key, mode);
-  }
-  bool ShouldDeleteImpl(const ParsedInternalKey& parsed,
-                        RangeDelPositioningMode mode);
-  bool ShouldDeleteImpl(const Slice& internal_key,
-                        RangeDelPositioningMode mode);
-
-  // Checks whether range deletions cover any keys between `start` and `end`,
-  // inclusive.
-  //
-  // @param start User key representing beginning of range to check for overlap.
-  // @param end User key representing end of range to check for overlap. This
-  //    argument is inclusive, so the existence of a range deletion covering
-  //    `end` causes this to return true.
-  bool IsRangeOverlapped(const Slice& start, const Slice& end);
+  SequenceNumber seq() const { return iter_->seq(); }
 
-  // Adds tombstones to the tombstone aggregation structure maintained by this
-  // object. Tombstones are truncated to smallest and largest. If smallest (or
-  // largest) is null, it is not used for truncation. When adding range
-  // tombstones present in an sstable, smallest and largest should be set to
-  // the smallest and largest keys from the sstable file metadata. Note that
-  // tombstones end keys are exclusive while largest is inclusive.
-  // @return non-OK status if any of the tombstone keys are corrupted.
-  Status AddTombstones(std::unique_ptr<InternalIterator> input,
-                       const InternalKey* smallest = nullptr,
-                       const InternalKey* largest = nullptr);
+  std::map<SequenceNumber, std::unique_ptr<TruncatedRangeDelIterator>>
+  SplitBySnapshot(const std::vector<SequenceNumber>& snapshots);
 
-  // Resets iterators maintained across calls to ShouldDelete(). This may be
-  // called when the tombstones change, or the owner may call explicitly, e.g.,
-  // if it's an iterator that just seeked to an arbitrary position. The effect
-  // of invalidation is that the following call to ShouldDelete() will binary
-  // search for its tombstone.
-  void InvalidateRangeDelMapPositions();
+  SequenceNumber upper_bound() const { return iter_->upper_bound(); }
 
-  bool IsEmpty();
-  bool AddFile(uint64_t file_number);
-
-  // Create a new iterator over the range deletion tombstones in all of the
-  // snapshot stripes in this aggregator. Tombstones are presented in start key
-  // order. Tombstones with the same start key are presented in arbitrary order.
-  //
-  // The iterator is invalidated after any call to AddTombstones. It is the
-  // caller's responsibility to avoid using invalid iterators.
-  std::unique_ptr<RangeDelIterator> NewIterator();
+  SequenceNumber lower_bound() const { return iter_->lower_bound(); }
 
  private:
-  // Maps snapshot seqnum -> map of tombstones that fall in that stripe, i.e.,
-  // their seqnums are greater than the next smaller snapshot's seqnum, and the
-  // corresponding index into the list of snapshots. Each entry is lazily
-  // initialized.
-  typedef std::map<SequenceNumber,
-                   std::pair<std::unique_ptr<RangeDelMap>, size_t>>
-      StripeMap;
+  std::unique_ptr<FragmentedRangeTombstoneIterator> iter_;
+  const InternalKeyComparator* icmp_;
+  const ParsedInternalKey* smallest_ = nullptr;
+  const ParsedInternalKey* largest_ = nullptr;
+  std::list<ParsedInternalKey> pinned_bounds_;
 
-  struct Rep {
-    std::vector<SequenceNumber> snapshots_;
-    StripeMap stripe_map_;
-    PinnedIteratorsManager pinned_iters_mgr_;
-    std::list<std::string> pinned_slices_;
-    std::set<uint64_t> added_files_;
+  const InternalKey* smallest_ikey_;
+  const InternalKey* largest_ikey_;
 };
+struct SeqMaxComparator {
+  bool operator()(const TruncatedRangeDelIterator* a,
+                  const TruncatedRangeDelIterator* b) const {
+    return a->seq() > b->seq();
+  }
+};
+
+struct StartKeyMinComparator {
+  explicit StartKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {}
+
+  bool operator()(const TruncatedRangeDelIterator* a,
+                  const TruncatedRangeDelIterator* b) const {
+    return icmp->Compare(a->start_key(), b->start_key()) > 0;
+  }
+
+  const InternalKeyComparator* icmp;
+};
+
+class ForwardRangeDelIterator {
+ public:
+  ForwardRangeDelIterator(
+      const InternalKeyComparator* icmp,
+      const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters);
+
+  bool ShouldDelete(const ParsedInternalKey& parsed);
+  void Invalidate();
+
+  void AddNewIter(TruncatedRangeDelIterator* iter,
+                  const ParsedInternalKey& parsed) {
+    iter->Seek(parsed.user_key);
+    PushIter(iter, parsed);
+    assert(active_iters_.size() == active_seqnums_.size());
+  }
+
+  size_t UnusedIdx() const { return unused_idx_; }
+  void IncUnusedIdx() { unused_idx_++; }
+
+ private:
+  using ActiveSeqSet =
+      std::multiset<TruncatedRangeDelIterator*, SeqMaxComparator>;
+
+  struct EndKeyMinComparator {
+    explicit EndKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {}
+
+    bool operator()(const ActiveSeqSet::const_iterator& a,
+                    const ActiveSeqSet::const_iterator& b) const {
+      return icmp->Compare((*a)->end_key(), (*b)->end_key()) > 0;
+    }
+
+    const InternalKeyComparator* icmp;
+  };
-  // Initializes rep_ lazily. This aggregator object is constructed for every
-  // read, so expensive members should only be created when necessary, i.e.,
-  // once the first range deletion is encountered.
-  void InitRep(const std::vector<SequenceNumber>& snapshots);
-
-  std::unique_ptr<RangeDelMap> NewRangeDelMap();
-  RangeDelMap* GetRangeDelMapIfExists(SequenceNumber seq);
-  RangeDelMap& GetRangeDelMap(SequenceNumber seq);
+
+  void PushIter(TruncatedRangeDelIterator* iter,
+                const ParsedInternalKey& parsed) {
+    if (!iter->Valid()) {
+      // The iterator has been fully consumed, so we don't need to add it to
+      // either of the heaps.
+      return;
+    }
+    int cmp = icmp_->Compare(parsed, iter->start_key());
+    if (cmp < 0) {
+      PushInactiveIter(iter);
+    } else {
+      PushActiveIter(iter);
+    }
+  }
-
-  SequenceNumber upper_bound_;
-  std::unique_ptr<Rep> rep_;
-  const InternalKeyComparator& icmp_;
-  // collapse range deletions so they're binary searchable
-  const bool collapse_deletions_;
+
+  void PushActiveIter(TruncatedRangeDelIterator* iter) {
+    auto seq_pos = active_seqnums_.insert(iter);
+    active_iters_.push(seq_pos);
+  }
+
+  TruncatedRangeDelIterator* PopActiveIter() {
+    auto active_top = active_iters_.top();
+    auto iter = *active_top;
+    active_iters_.pop();
+    active_seqnums_.erase(active_top);
+    return iter;
+  }
+
+  void PushInactiveIter(TruncatedRangeDelIterator* iter) {
+    inactive_iters_.push(iter);
+  }
+
+  TruncatedRangeDelIterator* PopInactiveIter() {
+    auto* iter = inactive_iters_.top();
+    inactive_iters_.pop();
+    return iter;
+  }
+
+  const InternalKeyComparator* icmp_;
+  const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters_;
+  size_t unused_idx_;
+  ActiveSeqSet active_seqnums_;
+  BinaryHeap<ActiveSeqSet::const_iterator, EndKeyMinComparator> active_iters_;
+  BinaryHeap<TruncatedRangeDelIterator*, StartKeyMinComparator> inactive_iters_;
 };
+class ReverseRangeDelIterator {
+ public:
+  ReverseRangeDelIterator(
+      const InternalKeyComparator* icmp,
+      const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters);
+
+  bool ShouldDelete(const ParsedInternalKey& parsed);
+  void Invalidate();
+
+  void AddNewIter(TruncatedRangeDelIterator* iter,
+                  const ParsedInternalKey& parsed) {
+    iter->SeekForPrev(parsed.user_key);
+    PushIter(iter, parsed);
+    assert(active_iters_.size() == active_seqnums_.size());
+  }
+
+  size_t UnusedIdx() const { return unused_idx_; }
+  void IncUnusedIdx() { unused_idx_++; }
+
+ private:
+  using ActiveSeqSet =
+      std::multiset<TruncatedRangeDelIterator*, SeqMaxComparator>;
+
+  struct EndKeyMaxComparator {
+    explicit EndKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {}
+
+    bool operator()(const TruncatedRangeDelIterator* a,
+                    const TruncatedRangeDelIterator* b) const {
+      return icmp->Compare(a->end_key(), b->end_key()) < 0;
+    }
+
+    const InternalKeyComparator* icmp;
+  };
+  struct StartKeyMaxComparator {
+    explicit StartKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {}
+
+    bool operator()(const ActiveSeqSet::const_iterator& a,
+                    const ActiveSeqSet::const_iterator& b) const {
+      return icmp->Compare((*a)->start_key(), (*b)->start_key()) < 0;
+    }
+
+    const InternalKeyComparator* icmp;
+  };
+
+  void PushIter(TruncatedRangeDelIterator* iter,
+                const ParsedInternalKey& parsed) {
+    if (!iter->Valid()) {
+      // The iterator has been fully consumed, so we don't need to add it to
+      // either of the heaps.
+    } else if (icmp_->Compare(iter->end_key(), parsed) <= 0) {
+      PushInactiveIter(iter);
+    } else {
+      PushActiveIter(iter);
+    }
+  }
+
+  void PushActiveIter(TruncatedRangeDelIterator* iter) {
+    auto seq_pos = active_seqnums_.insert(iter);
+    active_iters_.push(seq_pos);
+  }
+
+  TruncatedRangeDelIterator* PopActiveIter() {
+    auto active_top = active_iters_.top();
+    auto iter = *active_top;
+    active_iters_.pop();
+    active_seqnums_.erase(active_top);
+    return iter;
+  }
+
+  void PushInactiveIter(TruncatedRangeDelIterator* iter) {
+    inactive_iters_.push(iter);
+  }
+
+  TruncatedRangeDelIterator* PopInactiveIter() {
+    auto* iter = inactive_iters_.top();
+    inactive_iters_.pop();
+    return iter;
+  }
+
+  const InternalKeyComparator* icmp_;
+  const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters_;
+  size_t unused_idx_;
+  ActiveSeqSet active_seqnums_;
+  BinaryHeap<ActiveSeqSet::const_iterator, StartKeyMaxComparator> active_iters_;
+  BinaryHeap<TruncatedRangeDelIterator*, EndKeyMaxComparator> inactive_iters_;
+};
+enum class RangeDelPositioningMode { kForwardTraversal, kBackwardTraversal };
+class RangeDelAggregator {
+ public:
+  explicit RangeDelAggregator(const InternalKeyComparator* icmp)
+      : icmp_(icmp) {}
+  virtual ~RangeDelAggregator() {}
+
+  virtual void AddTombstones(
+      std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
+      const InternalKey* smallest = nullptr,
+      const InternalKey* largest = nullptr) = 0;
+
+  bool ShouldDelete(const Slice& key, RangeDelPositioningMode mode) {
+    ParsedInternalKey parsed;
+    if (!ParseInternalKey(key, &parsed)) {
+      return false;
+    }
+    return ShouldDelete(parsed, mode);
+  }
+  virtual bool ShouldDelete(const ParsedInternalKey& parsed,
+                            RangeDelPositioningMode mode) = 0;
+
+  virtual void InvalidateRangeDelMapPositions() = 0;
+
+  virtual bool IsEmpty() const = 0;
+
+  bool AddFile(uint64_t file_number) {
+    return files_seen_.insert(file_number).second;
+  }
+
+ protected:
+  class StripeRep {
+   public:
+    StripeRep(const InternalKeyComparator* icmp, SequenceNumber upper_bound,
+              SequenceNumber lower_bound)
+        : icmp_(icmp),
+          forward_iter_(icmp, &iters_),
+          reverse_iter_(icmp, &iters_),
+          upper_bound_(upper_bound),
+          lower_bound_(lower_bound) {}
+
+    void AddTombstones(std::unique_ptr<TruncatedRangeDelIterator> input_iter) {
+      iters_.push_back(std::move(input_iter));
+    }
+
+    bool IsEmpty() const { return iters_.empty(); }
+
+    bool ShouldDelete(const ParsedInternalKey& parsed,
+                      RangeDelPositioningMode mode);
+
+    void Invalidate() {
+      InvalidateForwardIter();
+      InvalidateReverseIter();
+    }
+
+    bool IsRangeOverlapped(const Slice& start, const Slice& end);
+
+   private:
+    bool InStripe(SequenceNumber seq) const {
+      return lower_bound_ <= seq && seq <= upper_bound_;
+    }
+
+    void InvalidateForwardIter() { forward_iter_.Invalidate(); }
+
+    void InvalidateReverseIter() { reverse_iter_.Invalidate(); }
+
+    const InternalKeyComparator* icmp_;
+    std::vector<std::unique_ptr<TruncatedRangeDelIterator>> iters_;
+    ForwardRangeDelIterator forward_iter_;
+    ReverseRangeDelIterator reverse_iter_;
+    SequenceNumber upper_bound_;
+    SequenceNumber lower_bound_;
+  };
+
+  const InternalKeyComparator* icmp_;
+
+ private:
+  std::set<uint64_t> files_seen_;
+};
+class ReadRangeDelAggregator : public RangeDelAggregator {
+ public:
+  ReadRangeDelAggregator(const InternalKeyComparator* icmp,
+                         SequenceNumber upper_bound)
+      : RangeDelAggregator(icmp),
+        rep_(icmp, upper_bound, 0 /* lower_bound */) {}
+  ~ReadRangeDelAggregator() override {}
+
+  using RangeDelAggregator::ShouldDelete;
+  void AddTombstones(
+      std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
+      const InternalKey* smallest = nullptr,
+      const InternalKey* largest = nullptr) override;
+
+  bool ShouldDelete(const ParsedInternalKey& parsed,
+                    RangeDelPositioningMode mode) override;
+
+  bool IsRangeOverlapped(const Slice& start, const Slice& end);
+
+  void InvalidateRangeDelMapPositions() override { rep_.Invalidate(); }
+
+  bool IsEmpty() const override { return rep_.IsEmpty(); }
+
+ private:
+  StripeRep rep_;
+};
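For orientation, a read-side sketch of the class above (not from the commit; `mem`, `read_opts`, `snapshot_seq`, `ikey`, and `icmp` are assumed to come from the surrounding read path, mirroring the column_family.cc hunk earlier in this diff):

    rocksdb::ReadRangeDelAggregator range_del_agg(&icmp,
                                                  snapshot_seq /* upper_bound */);
    range_del_agg.AddTombstones(
        std::unique_ptr<rocksdb::FragmentedRangeTombstoneIterator>(
            mem->NewRangeTombstoneIterator(read_opts, snapshot_seq)));

    rocksdb::ParsedInternalKey parsed;
    if (rocksdb::ParseInternalKey(ikey, &parsed) &&
        range_del_agg.ShouldDelete(
            parsed, rocksdb::RangeDelPositioningMode::kForwardTraversal)) {
      // The key is covered by a range tombstone visible at snapshot_seq; skip it.
    }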
+class CompactionRangeDelAggregator : public RangeDelAggregator {
+ public:
+  CompactionRangeDelAggregator(const InternalKeyComparator* icmp,
+                               const std::vector<SequenceNumber>& snapshots)
+      : RangeDelAggregator(icmp), snapshots_(&snapshots) {}
+  ~CompactionRangeDelAggregator() override {}
+
+  void AddTombstones(
+      std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
+      const InternalKey* smallest = nullptr,
+      const InternalKey* largest = nullptr) override;
+
+  using RangeDelAggregator::ShouldDelete;
+  bool ShouldDelete(const ParsedInternalKey& parsed,
+                    RangeDelPositioningMode mode) override;
+
+  bool IsRangeOverlapped(const Slice& start, const Slice& end);
+
+  void InvalidateRangeDelMapPositions() override {
+    for (auto& rep : reps_) {
+      rep.second.Invalidate();
+    }
+  }
+
+  bool IsEmpty() const override {
+    for (const auto& rep : reps_) {
+      if (!rep.second.IsEmpty()) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  // Creates an iterator over all the range tombstones in the aggregator, for
+  // use in compaction. Nullptr arguments indicate that the iterator range is
+  // unbounded.
+  // NOTE: the boundaries are used for optimization purposes to reduce the
+  // number of tombstones that are passed to the fragmenter; they do not
+  // guarantee that the resulting iterator only contains range tombstones that
+  // cover keys in the provided range. If required, these bounds must be
+  // enforced during iteration.
+  std::unique_ptr<FragmentedRangeTombstoneIterator> NewIterator(
+      const Slice* lower_bound = nullptr, const Slice* upper_bound = nullptr,
+      bool upper_bound_inclusive = false);
+
+ private:
+  std::vector<std::unique_ptr<TruncatedRangeDelIterator>> parent_iters_;
+  std::map<SequenceNumber, StripeRep> reps_;
+
+  const std::vector<SequenceNumber>* snapshots_;
+};
 
 }  // namespace rocksdb
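And a compaction-side sketch (again illustrative, not part of the commit; `cfd`, `existing_snapshots`, `range_del_iters`, and the output-file user-key bounds are assumed from the surrounding compaction job, as in the compaction_job.cc and builder.cc hunks above):

    rocksdb::CompactionRangeDelAggregator range_del_agg(
        &cfd->internal_comparator(), existing_snapshots);
    for (auto& range_del_iter : range_del_iters) {
      range_del_agg.AddTombstones(std::move(range_del_iter));
    }

    // When finishing an output file, emit the tombstones overlapping its range.
    // Per the NOTE above, the bounds only trim the candidate set; they are not
    // exact filters and must be enforced during iteration if required.
    std::unique_ptr<rocksdb::FragmentedRangeTombstoneIterator> tombstone_iter =
        range_del_agg.NewIterator(&lower_bound_user_key, &upper_bound_user_key,
                                  true /* upper_bound_inclusive */);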
db/range_del_aggregator_bench.cc
@@ -20,7 +20,6 @@ int main() {
 #include <vector>
 
 #include "db/range_del_aggregator.h"
-#include "db/range_del_aggregator_v2.h"
 #include "db/range_tombstone_fragmenter.h"
 #include "rocksdb/comparator.h"
 #include "rocksdb/env.h"
@@ -48,8 +47,6 @@ DEFINE_double(tombstone_width_mean, 100.0, "average range tombstone width");
 DEFINE_double(tombstone_width_stddev, 0.0,
               "standard deviation of range tombstone width");
 
-DEFINE_bool(use_collapsed, true, "use the collapsed range tombstone map");
-
 DEFINE_int32(seed, 0, "random number generator seed");
 
 DEFINE_int32(should_deletes_per_run, 1, "number of ShouldDelete calls per run");
@@ -57,8 +54,6 @@ DEFINE_int32(should_deletes_per_run, 1, "number of ShouldDelete calls per run");
 DEFINE_int32(add_tombstones_per_run, 1,
              "number of AddTombstones calls per run");
 
-DEFINE_bool(use_v2_aggregator, false, "benchmark RangeDelAggregatorV2");
-
 namespace {
 
 struct Stats {
@@ -187,14 +182,10 @@ int main(int argc, char** argv) {
         std::vector<rocksdb::PersistentRangeTombstone>(
             FLAGS_num_range_tombstones);
   }
-  auto mode = FLAGS_use_collapsed
-                  ? rocksdb::RangeDelPositioningMode::kForwardTraversal
-                  : rocksdb::RangeDelPositioningMode::kFullScan;
+  auto mode = rocksdb::RangeDelPositioningMode::kForwardTraversal;
 
   for (int i = 0; i < FLAGS_num_runs; i++) {
-    rocksdb::RangeDelAggregator range_del_agg(icmp, {} /* snapshots */,
-                                              FLAGS_use_collapsed);
-    rocksdb::ReadRangeDelAggregatorV2 range_del_agg_v2(
+    rocksdb::ReadRangeDelAggregator range_del_agg(
         &icmp, rocksdb::kMaxSequenceNumber /* upper_bound */);
 
     std::vector<std::unique_ptr<rocksdb::FragmentedRangeTombstoneList> >
@@ -223,17 +214,10 @@ int main(int argc, char** argv) {
             fragmented_range_tombstone_lists.back().get(), icmp,
             rocksdb::kMaxSequenceNumber));
 
-      if (FLAGS_use_v2_aggregator) {
-        rocksdb::StopWatchNano stop_watch_add_tombstones(
-            rocksdb::Env::Default(), true /* auto_start */);
-        range_del_agg_v2.AddTombstones(std::move(fragmented_range_del_iter));
-        stats.time_add_tombstones += stop_watch_add_tombstones.ElapsedNanos();
-      } else {
-        rocksdb::StopWatchNano stop_watch_add_tombstones(
-            rocksdb::Env::Default(), true /* auto_start */);
-        range_del_agg.AddTombstones(std::move(range_del_iter));
-        stats.time_add_tombstones += stop_watch_add_tombstones.ElapsedNanos();
-      }
+      rocksdb::StopWatchNano stop_watch_add_tombstones(rocksdb::Env::Default(),
+                                                       true /* auto_start */);
+      range_del_agg.AddTombstones(std::move(fragmented_range_del_iter));
+      stats.time_add_tombstones += stop_watch_add_tombstones.ElapsedNanos();
     }
 
     rocksdb::ParsedInternalKey parsed_key;
@@ -247,18 +231,10 @@ int main(int argc, char** argv) {
       std::string key_string = rocksdb::Key(first_key + j);
       parsed_key.user_key = key_string;
 
-      uint64_t call_time;
-      if (FLAGS_use_v2_aggregator) {
-        rocksdb::StopWatchNano stop_watch_should_delete(rocksdb::Env::Default(),
-                                                        true /* auto_start */);
-        range_del_agg_v2.ShouldDelete(parsed_key, mode);
-        call_time = stop_watch_should_delete.ElapsedNanos();
-      } else {
-        rocksdb::StopWatchNano stop_watch_should_delete(rocksdb::Env::Default(),
-                                                        true /* auto_start */);
-        range_del_agg.ShouldDelete(parsed_key, mode);
-        call_time = stop_watch_should_delete.ElapsedNanos();
-      }
+      rocksdb::StopWatchNano stop_watch_should_delete(rocksdb::Env::Default(),
+                                                      true /* auto_start */);
+      range_del_agg.ShouldDelete(parsed_key, mode);
+      uint64_t call_time = stop_watch_should_delete.ElapsedNanos();
 
       if (j == 0) {
         stats.time_first_should_delete += call_time;
db/range_del_aggregator_test.cc (file diff suppressed because it is too large)
db/range_del_aggregator_v2.cc
@@ -1,492 +0,0 @@
-// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
-// This source code is licensed under both the GPLv2 (found in the
-// COPYING file in the root directory) and Apache 2.0 License
-// (found in the LICENSE.Apache file in the root directory).
-
-#include "db/range_del_aggregator_v2.h"
-
-#include "db/compaction_iteration_stats.h"
-#include "db/dbformat.h"
-#include "db/pinned_iterators_manager.h"
-#include "db/range_del_aggregator.h"
-#include "db/range_tombstone_fragmenter.h"
-#include "db/version_edit.h"
-#include "include/rocksdb/comparator.h"
-#include "include/rocksdb/types.h"
-#include "table/internal_iterator.h"
-#include "table/scoped_arena_iterator.h"
-#include "table/table_builder.h"
-#include "util/heap.h"
-#include "util/kv_map.h"
-#include "util/vector_iterator.h"
-
-namespace rocksdb {
-
TruncatedRangeDelIterator::TruncatedRangeDelIterator(
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> iter,
|
||||
const InternalKeyComparator* icmp, const InternalKey* smallest,
|
||||
const InternalKey* largest)
|
||||
: iter_(std::move(iter)),
|
||||
icmp_(icmp),
|
||||
smallest_ikey_(smallest),
|
||||
largest_ikey_(largest) {
|
||||
if (smallest != nullptr) {
|
||||
pinned_bounds_.emplace_back();
|
||||
auto& parsed_smallest = pinned_bounds_.back();
|
||||
if (!ParseInternalKey(smallest->Encode(), &parsed_smallest)) {
|
||||
assert(false);
|
||||
}
|
||||
smallest_ = &parsed_smallest;
|
||||
}
|
||||
if (largest != nullptr) {
|
||||
pinned_bounds_.emplace_back();
|
||||
auto& parsed_largest = pinned_bounds_.back();
|
||||
if (!ParseInternalKey(largest->Encode(), &parsed_largest)) {
|
||||
assert(false);
|
||||
}
|
||||
if (parsed_largest.type == kTypeRangeDeletion &&
|
||||
parsed_largest.sequence == kMaxSequenceNumber) {
|
||||
// The file boundary has been artificially extended by a range tombstone.
|
||||
// We do not need to adjust largest to properly truncate range
|
||||
// tombstones that extend past the boundary.
|
||||
} else if (parsed_largest.sequence == 0) {
|
||||
// The largest key in the sstable has a sequence number of 0. Since we
|
||||
// guarantee that no internal keys with the same user key and sequence
|
||||
// number can exist in a DB, we know that the largest key in this sstable
|
||||
// cannot exist as the smallest key in the next sstable. This further
|
||||
// implies that no range tombstone in this sstable covers largest;
|
||||
// otherwise, the file boundary would have been artificially extended.
|
||||
//
|
||||
// Therefore, we will never truncate a range tombstone at largest, so we
|
||||
// can leave it unchanged.
|
||||
} else {
|
||||
// The same user key may straddle two sstable boundaries. To ensure that
|
||||
// the truncated end key can cover the largest key in this sstable, reduce
|
||||
// its sequence number by 1.
|
||||
parsed_largest.sequence -= 1;
|
||||
}
|
||||
largest_ = &parsed_largest;
|
||||
}
|
||||
}
|
||||
|
||||
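// Illustrative worked example (not part of the original source): suppose an
// sstable's largest key is ("k", seq=7, kTypeValue) and user key "k" spills
// into the next sstable. The truncated bound above becomes ("k", seq=6), so
// a tombstone in this file still covers ("k", 7) (which sorts before
// ("k", 6) in internal key order) but not the older copies of "k" that live
// in the next file.
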
bool TruncatedRangeDelIterator::Valid() const {
  return iter_->Valid() &&
         (smallest_ == nullptr ||
          icmp_->Compare(*smallest_, iter_->parsed_end_key()) < 0) &&
         (largest_ == nullptr ||
          icmp_->Compare(iter_->parsed_start_key(), *largest_) < 0);
}

void TruncatedRangeDelIterator::Next() { iter_->TopNext(); }

void TruncatedRangeDelIterator::Prev() { iter_->TopPrev(); }

void TruncatedRangeDelIterator::InternalNext() { iter_->Next(); }

// NOTE: target is a user key
void TruncatedRangeDelIterator::Seek(const Slice& target) {
  if (largest_ != nullptr &&
      icmp_->Compare(*largest_, ParsedInternalKey(target, kMaxSequenceNumber,
                                                  kTypeRangeDeletion)) <= 0) {
    iter_->Invalidate();
    return;
  }
  if (smallest_ != nullptr &&
      icmp_->user_comparator()->Compare(target, smallest_->user_key) < 0) {
    iter_->Seek(smallest_->user_key);
    return;
  }
  iter_->Seek(target);
}

// NOTE: target is a user key
void TruncatedRangeDelIterator::SeekForPrev(const Slice& target) {
  if (smallest_ != nullptr &&
      icmp_->Compare(ParsedInternalKey(target, 0, kTypeRangeDeletion),
                     *smallest_) < 0) {
    iter_->Invalidate();
    return;
  }
  if (largest_ != nullptr &&
      icmp_->user_comparator()->Compare(largest_->user_key, target) < 0) {
    iter_->SeekForPrev(largest_->user_key);
    return;
  }
  iter_->SeekForPrev(target);
}

void TruncatedRangeDelIterator::SeekToFirst() {
  if (smallest_ != nullptr) {
    iter_->Seek(smallest_->user_key);
    return;
  }
  iter_->SeekToTopFirst();
}

void TruncatedRangeDelIterator::SeekToLast() {
  if (largest_ != nullptr) {
    iter_->SeekForPrev(largest_->user_key);
    return;
  }
  iter_->SeekToTopLast();
}

std::map<SequenceNumber, std::unique_ptr<TruncatedRangeDelIterator>>
TruncatedRangeDelIterator::SplitBySnapshot(
    const std::vector<SequenceNumber>& snapshots) {
  using FragmentedIterPair =
      std::pair<const SequenceNumber,
                std::unique_ptr<FragmentedRangeTombstoneIterator>>;

  auto split_untruncated_iters = iter_->SplitBySnapshot(snapshots);
  std::map<SequenceNumber, std::unique_ptr<TruncatedRangeDelIterator>>
      split_truncated_iters;
  std::for_each(
      split_untruncated_iters.begin(), split_untruncated_iters.end(),
      [&](FragmentedIterPair& iter_pair) {
        std::unique_ptr<TruncatedRangeDelIterator> truncated_iter(
            new TruncatedRangeDelIterator(std::move(iter_pair.second), icmp_,
                                          smallest_ikey_, largest_ikey_));
        split_truncated_iters.emplace(iter_pair.first,
                                      std::move(truncated_iter));
      });
  return split_truncated_iters;
}

ForwardRangeDelIterator::ForwardRangeDelIterator(
    const InternalKeyComparator* icmp,
    const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters)
    : icmp_(icmp),
      iters_(iters),
      unused_idx_(0),
      active_seqnums_(SeqMaxComparator()),
      active_iters_(EndKeyMinComparator(icmp)),
      inactive_iters_(StartKeyMinComparator(icmp)) {}

bool ForwardRangeDelIterator::ShouldDelete(const ParsedInternalKey& parsed) {
  assert(iters_ != nullptr);
  // Move active iterators that end before parsed.
  while (!active_iters_.empty() &&
         icmp_->Compare((*active_iters_.top())->end_key(), parsed) <= 0) {
    TruncatedRangeDelIterator* iter = PopActiveIter();
    do {
      iter->Next();
    } while (iter->Valid() && icmp_->Compare(iter->end_key(), parsed) <= 0);
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }

  // Move inactive iterators that start before parsed.
  while (!inactive_iters_.empty() &&
         icmp_->Compare(inactive_iters_.top()->start_key(), parsed) <= 0) {
    TruncatedRangeDelIterator* iter = PopInactiveIter();
    while (iter->Valid() && icmp_->Compare(iter->end_key(), parsed) <= 0) {
      iter->Next();
    }
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }

  return active_seqnums_.empty()
             ? false
             : (*active_seqnums_.begin())->seq() > parsed.sequence;
}

void ForwardRangeDelIterator::Invalidate() {
  unused_idx_ = 0;
  active_iters_.clear();
  active_seqnums_.clear();
  inactive_iters_.clear();
}

ReverseRangeDelIterator::ReverseRangeDelIterator(
    const InternalKeyComparator* icmp,
    const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters)
    : icmp_(icmp),
      iters_(iters),
      unused_idx_(0),
      active_seqnums_(SeqMaxComparator()),
      active_iters_(StartKeyMaxComparator(icmp)),
      inactive_iters_(EndKeyMaxComparator(icmp)) {}

bool ReverseRangeDelIterator::ShouldDelete(const ParsedInternalKey& parsed) {
  assert(iters_ != nullptr);
  // Move active iterators that start after parsed.
  while (!active_iters_.empty() &&
         icmp_->Compare(parsed, (*active_iters_.top())->start_key()) < 0) {
    TruncatedRangeDelIterator* iter = PopActiveIter();
    do {
      iter->Prev();
    } while (iter->Valid() && icmp_->Compare(parsed, iter->start_key()) < 0);
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }

  // Move inactive iterators that end after parsed.
  while (!inactive_iters_.empty() &&
         icmp_->Compare(parsed, inactive_iters_.top()->end_key()) < 0) {
    TruncatedRangeDelIterator* iter = PopInactiveIter();
    while (iter->Valid() && icmp_->Compare(parsed, iter->start_key()) < 0) {
      iter->Prev();
    }
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }

  return active_seqnums_.empty()
             ? false
             : (*active_seqnums_.begin())->seq() > parsed.sequence;
}

void ReverseRangeDelIterator::Invalidate() {
  unused_idx_ = 0;
  active_iters_.clear();
  active_seqnums_.clear();
  inactive_iters_.clear();
}

bool RangeDelAggregatorV2::StripeRep::ShouldDelete(
    const ParsedInternalKey& parsed, RangeDelPositioningMode mode) {
  if (!InStripe(parsed.sequence) || IsEmpty()) {
    return false;
  }
  switch (mode) {
    case RangeDelPositioningMode::kForwardTraversal:
      InvalidateReverseIter();

      // Pick up previously unseen iterators.
      for (auto it = std::next(iters_.begin(), forward_iter_.UnusedIdx());
           it != iters_.end(); ++it, forward_iter_.IncUnusedIdx()) {
        auto& iter = *it;
        forward_iter_.AddNewIter(iter.get(), parsed);
      }

      return forward_iter_.ShouldDelete(parsed);
    case RangeDelPositioningMode::kBackwardTraversal:
      InvalidateForwardIter();

      // Pick up previously unseen iterators.
      for (auto it = std::next(iters_.begin(), reverse_iter_.UnusedIdx());
           it != iters_.end(); ++it, reverse_iter_.IncUnusedIdx()) {
        auto& iter = *it;
        reverse_iter_.AddNewIter(iter.get(), parsed);
      }

      return reverse_iter_.ShouldDelete(parsed);
    default:
      assert(false);
      return false;
  }
}

bool RangeDelAggregatorV2::StripeRep::IsRangeOverlapped(const Slice& start,
                                                        const Slice& end) {
  Invalidate();

  // Set the internal start/end keys so that:
  // - if start_ikey has the same user key and sequence number as the
  //   current end key, start_ikey will be considered greater; and
  // - if end_ikey has the same user key and sequence number as the current
  //   start key, end_ikey will be considered greater.
  ParsedInternalKey start_ikey(start, kMaxSequenceNumber,
                               static_cast<ValueType>(0));
  ParsedInternalKey end_ikey(end, 0, static_cast<ValueType>(0));
  for (auto& iter : iters_) {
    bool checked_candidate_tombstones = false;
    for (iter->SeekForPrev(start);
         iter->Valid() && icmp_->Compare(iter->start_key(), end_ikey) <= 0;
         iter->Next()) {
      checked_candidate_tombstones = true;
      if (icmp_->Compare(start_ikey, iter->end_key()) < 0 &&
          icmp_->Compare(iter->start_key(), end_ikey) <= 0) {
        return true;
      }
    }

    if (!checked_candidate_tombstones) {
      // Do an additional check for when the end of the range is the begin
      // key of a tombstone, which we missed earlier since SeekForPrev'ing
      // to the start was invalid.
      iter->SeekForPrev(end);
      if (iter->Valid() && icmp_->Compare(start_ikey, iter->end_key()) < 0 &&
          icmp_->Compare(iter->start_key(), end_ikey) <= 0) {
        return true;
      }
    }
  }
  return false;
}

void ReadRangeDelAggregatorV2::AddTombstones(
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
    const InternalKey* smallest, const InternalKey* largest) {
  if (input_iter == nullptr || input_iter->empty()) {
    return;
  }
  rep_.AddTombstones(
      std::unique_ptr<TruncatedRangeDelIterator>(new TruncatedRangeDelIterator(
          std::move(input_iter), icmp_, smallest, largest)));
}

bool ReadRangeDelAggregatorV2::ShouldDelete(const ParsedInternalKey& parsed,
                                            RangeDelPositioningMode mode) {
  return rep_.ShouldDelete(parsed, mode);
}

bool ReadRangeDelAggregatorV2::IsRangeOverlapped(const Slice& start,
                                                 const Slice& end) {
  InvalidateRangeDelMapPositions();
  return rep_.IsRangeOverlapped(start, end);
}

void CompactionRangeDelAggregatorV2::AddTombstones(
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
    const InternalKey* smallest, const InternalKey* largest) {
  if (input_iter == nullptr || input_iter->empty()) {
    return;
  }
  assert(input_iter->lower_bound() == 0);
  assert(input_iter->upper_bound() == kMaxSequenceNumber);
  parent_iters_.emplace_back(new TruncatedRangeDelIterator(
      std::move(input_iter), icmp_, smallest, largest));

  auto split_iters = parent_iters_.back()->SplitBySnapshot(*snapshots_);
  for (auto& split_iter : split_iters) {
    auto it = reps_.find(split_iter.first);
    if (it == reps_.end()) {
      bool inserted;
      SequenceNumber upper_bound = split_iter.second->upper_bound();
      SequenceNumber lower_bound = split_iter.second->lower_bound();
      std::tie(it, inserted) = reps_.emplace(
          split_iter.first, StripeRep(icmp_, upper_bound, lower_bound));
      assert(inserted);
    }
    assert(it != reps_.end());
    it->second.AddTombstones(std::move(split_iter.second));
  }
}

bool CompactionRangeDelAggregatorV2::ShouldDelete(
    const ParsedInternalKey& parsed, RangeDelPositioningMode mode) {
  auto it = reps_.lower_bound(parsed.sequence);
  if (it == reps_.end()) {
    return false;
  }
  return it->second.ShouldDelete(parsed, mode);
}

namespace {

class TruncatedRangeDelMergingIter : public InternalIterator {
 public:
  TruncatedRangeDelMergingIter(
      const InternalKeyComparator* icmp, const Slice* lower_bound,
      const Slice* upper_bound, bool upper_bound_inclusive,
      const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>& children)
      : icmp_(icmp),
        lower_bound_(lower_bound),
        upper_bound_(upper_bound),
        upper_bound_inclusive_(upper_bound_inclusive),
        heap_(StartKeyMinComparator(icmp)) {
    for (auto& child : children) {
      if (child != nullptr) {
        assert(child->lower_bound() == 0);
        assert(child->upper_bound() == kMaxSequenceNumber);
        children_.push_back(child.get());
      }
    }
  }

  bool Valid() const override {
    return !heap_.empty() && BeforeEndKey(heap_.top());
  }
  Status status() const override { return Status::OK(); }

  void SeekToFirst() override {
    heap_.clear();
    for (auto& child : children_) {
      if (lower_bound_ != nullptr) {
        child->Seek(*lower_bound_);
      } else {
        child->SeekToFirst();
      }
      if (child->Valid()) {
        heap_.push(child);
      }
    }
  }

  void Next() override {
    auto* top = heap_.top();
    top->InternalNext();
    if (top->Valid()) {
      heap_.replace_top(top);
    } else {
      heap_.pop();
    }
  }

  Slice key() const override {
    auto* top = heap_.top();
    cur_start_key_.Set(top->start_key().user_key, top->seq(),
                       kTypeRangeDeletion);
    return cur_start_key_.Encode();
  }

  Slice value() const override {
    auto* top = heap_.top();
    assert(top->end_key().sequence == kMaxSequenceNumber);
    return top->end_key().user_key;
  }

  // Unused InternalIterator methods
  void Prev() override { assert(false); }
  void Seek(const Slice& /* target */) override { assert(false); }
  void SeekForPrev(const Slice& /* target */) override { assert(false); }
  void SeekToLast() override { assert(false); }

 private:
  bool BeforeEndKey(const TruncatedRangeDelIterator* iter) const {
    if (upper_bound_ == nullptr) {
      return true;
    }
    int cmp = icmp_->user_comparator()->Compare(iter->start_key().user_key,
                                                *upper_bound_);
    return upper_bound_inclusive_ ? cmp <= 0 : cmp < 0;
  }

  const InternalKeyComparator* icmp_;
  const Slice* lower_bound_;
  const Slice* upper_bound_;
  bool upper_bound_inclusive_;
  BinaryHeap<TruncatedRangeDelIterator*, StartKeyMinComparator> heap_;
  std::vector<TruncatedRangeDelIterator*> children_;

  mutable InternalKey cur_start_key_;
};

}  // namespace

std::unique_ptr<FragmentedRangeTombstoneIterator>
CompactionRangeDelAggregatorV2::NewIterator(const Slice* lower_bound,
                                            const Slice* upper_bound,
                                            bool upper_bound_inclusive) {
  InvalidateRangeDelMapPositions();
  std::unique_ptr<TruncatedRangeDelMergingIter> merging_iter(
      new TruncatedRangeDelMergingIter(icmp_, lower_bound, upper_bound,
                                       upper_bound_inclusive, parent_iters_));

  // TODO: add tests where tombstone fragments can be outside of upper and lower
  // bound range
  auto fragmented_tombstone_list =
      std::make_shared<FragmentedRangeTombstoneList>(
          std::move(merging_iter), *icmp_, true /* for_compaction */,
          *snapshots_);

  return std::unique_ptr<FragmentedRangeTombstoneIterator>(
      new FragmentedRangeTombstoneIterator(
          fragmented_tombstone_list, *icmp_,
          kMaxSequenceNumber /* upper_bound */));
}

}  // namespace rocksdb
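For orientation, a minimal usage sketch of the read-path aggregator implemented above (illustrative only; `icmp`, `tombstone_iter`, `ukey`, and `seq` are placeholders, and the class names are the pre-rename ones from this file):

ReadRangeDelAggregatorV2 agg(&icmp, kMaxSequenceNumber /* upper_bound */);
agg.AddTombstones(std::move(tombstone_iter));  // one SST's fragmented tombstones
ParsedInternalKey lookup(ukey, seq, kTypeValue);
bool covered =
    agg.ShouldDelete(lookup, RangeDelPositioningMode::kForwardTraversal);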
@@ -1,436 +0,0 @@
// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once

#include <algorithm>
#include <iterator>
#include <list>
#include <map>
#include <set>
#include <string>
#include <vector>

#include "db/compaction_iteration_stats.h"
#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"
#include "db/range_del_aggregator.h"
#include "db/range_tombstone_fragmenter.h"
#include "db/version_edit.h"
#include "include/rocksdb/comparator.h"
#include "include/rocksdb/types.h"
#include "table/internal_iterator.h"
#include "table/scoped_arena_iterator.h"
#include "table/table_builder.h"
#include "util/heap.h"
#include "util/kv_map.h"

namespace rocksdb {

class TruncatedRangeDelIterator {
 public:
  TruncatedRangeDelIterator(
      std::unique_ptr<FragmentedRangeTombstoneIterator> iter,
      const InternalKeyComparator* icmp, const InternalKey* smallest,
      const InternalKey* largest);

  bool Valid() const;

  void Next();
  void Prev();

  void InternalNext();

  // Seeks to the tombstone with the highest visible sequence number that covers
  // target (a user key). If no such tombstone exists, the position will be at
  // the earliest tombstone that ends after target.
  void Seek(const Slice& target);

  // Seeks to the tombstone with the highest visible sequence number that covers
  // target (a user key). If no such tombstone exists, the position will be at
  // the latest tombstone that starts before target.
  void SeekForPrev(const Slice& target);

  void SeekToFirst();
  void SeekToLast();

  ParsedInternalKey start_key() const {
    return (smallest_ == nullptr ||
            icmp_->Compare(*smallest_, iter_->parsed_start_key()) <= 0)
               ? iter_->parsed_start_key()
               : *smallest_;
  }

  ParsedInternalKey end_key() const {
    return (largest_ == nullptr ||
            icmp_->Compare(iter_->parsed_end_key(), *largest_) <= 0)
               ? iter_->parsed_end_key()
               : *largest_;
  }

  SequenceNumber seq() const { return iter_->seq(); }

  std::map<SequenceNumber, std::unique_ptr<TruncatedRangeDelIterator>>
  SplitBySnapshot(const std::vector<SequenceNumber>& snapshots);

  SequenceNumber upper_bound() const { return iter_->upper_bound(); }

  SequenceNumber lower_bound() const { return iter_->lower_bound(); }

 private:
  std::unique_ptr<FragmentedRangeTombstoneIterator> iter_;
  const InternalKeyComparator* icmp_;
  const ParsedInternalKey* smallest_ = nullptr;
  const ParsedInternalKey* largest_ = nullptr;
  std::list<ParsedInternalKey> pinned_bounds_;

  const InternalKey* smallest_ikey_;
  const InternalKey* largest_ikey_;
};

struct SeqMaxComparator {
  bool operator()(const TruncatedRangeDelIterator* a,
                  const TruncatedRangeDelIterator* b) const {
    return a->seq() > b->seq();
  }
};

struct StartKeyMinComparator {
  explicit StartKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {}

  bool operator()(const TruncatedRangeDelIterator* a,
                  const TruncatedRangeDelIterator* b) const {
    return icmp->Compare(a->start_key(), b->start_key()) > 0;
  }

  const InternalKeyComparator* icmp;
};

class ForwardRangeDelIterator {
 public:
  ForwardRangeDelIterator(
      const InternalKeyComparator* icmp,
      const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters);

  bool ShouldDelete(const ParsedInternalKey& parsed);
  void Invalidate();

  void AddNewIter(TruncatedRangeDelIterator* iter,
                  const ParsedInternalKey& parsed) {
    iter->Seek(parsed.user_key);
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }

  size_t UnusedIdx() const { return unused_idx_; }
  void IncUnusedIdx() { unused_idx_++; }

 private:
  using ActiveSeqSet =
      std::multiset<TruncatedRangeDelIterator*, SeqMaxComparator>;

  struct EndKeyMinComparator {
    explicit EndKeyMinComparator(const InternalKeyComparator* c) : icmp(c) {}

    bool operator()(const ActiveSeqSet::const_iterator& a,
                    const ActiveSeqSet::const_iterator& b) const {
      return icmp->Compare((*a)->end_key(), (*b)->end_key()) > 0;
    }

    const InternalKeyComparator* icmp;
  };

  void PushIter(TruncatedRangeDelIterator* iter,
                const ParsedInternalKey& parsed) {
    if (!iter->Valid()) {
      // The iterator has been fully consumed, so we don't need to add it to
      // either of the heaps.
      return;
    }
    int cmp = icmp_->Compare(parsed, iter->start_key());
    if (cmp < 0) {
      PushInactiveIter(iter);
    } else {
      PushActiveIter(iter);
    }
  }

  void PushActiveIter(TruncatedRangeDelIterator* iter) {
    auto seq_pos = active_seqnums_.insert(iter);
    active_iters_.push(seq_pos);
  }

  TruncatedRangeDelIterator* PopActiveIter() {
    auto active_top = active_iters_.top();
    auto iter = *active_top;
    active_iters_.pop();
    active_seqnums_.erase(active_top);
    return iter;
  }

  void PushInactiveIter(TruncatedRangeDelIterator* iter) {
    inactive_iters_.push(iter);
  }

  TruncatedRangeDelIterator* PopInactiveIter() {
    auto* iter = inactive_iters_.top();
    inactive_iters_.pop();
    return iter;
  }

  const InternalKeyComparator* icmp_;
  const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters_;
  size_t unused_idx_;
  ActiveSeqSet active_seqnums_;
  BinaryHeap<ActiveSeqSet::const_iterator, EndKeyMinComparator> active_iters_;
  BinaryHeap<TruncatedRangeDelIterator*, StartKeyMinComparator> inactive_iters_;
};

class ReverseRangeDelIterator {
 public:
  ReverseRangeDelIterator(
      const InternalKeyComparator* icmp,
      const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters);

  bool ShouldDelete(const ParsedInternalKey& parsed);
  void Invalidate();

  void AddNewIter(TruncatedRangeDelIterator* iter,
                  const ParsedInternalKey& parsed) {
    iter->SeekForPrev(parsed.user_key);
    PushIter(iter, parsed);
    assert(active_iters_.size() == active_seqnums_.size());
  }

  size_t UnusedIdx() const { return unused_idx_; }
  void IncUnusedIdx() { unused_idx_++; }

 private:
  using ActiveSeqSet =
      std::multiset<TruncatedRangeDelIterator*, SeqMaxComparator>;

  struct EndKeyMaxComparator {
    explicit EndKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {}

    bool operator()(const TruncatedRangeDelIterator* a,
                    const TruncatedRangeDelIterator* b) const {
      return icmp->Compare(a->end_key(), b->end_key()) < 0;
    }

    const InternalKeyComparator* icmp;
  };
  struct StartKeyMaxComparator {
    explicit StartKeyMaxComparator(const InternalKeyComparator* c) : icmp(c) {}

    bool operator()(const ActiveSeqSet::const_iterator& a,
                    const ActiveSeqSet::const_iterator& b) const {
      return icmp->Compare((*a)->start_key(), (*b)->start_key()) < 0;
    }

    const InternalKeyComparator* icmp;
  };

  void PushIter(TruncatedRangeDelIterator* iter,
                const ParsedInternalKey& parsed) {
    if (!iter->Valid()) {
      // The iterator has been fully consumed, so we don't need to add it to
      // either of the heaps.
    } else if (icmp_->Compare(iter->end_key(), parsed) <= 0) {
      PushInactiveIter(iter);
    } else {
      PushActiveIter(iter);
    }
  }

  void PushActiveIter(TruncatedRangeDelIterator* iter) {
    auto seq_pos = active_seqnums_.insert(iter);
    active_iters_.push(seq_pos);
  }

  TruncatedRangeDelIterator* PopActiveIter() {
    auto active_top = active_iters_.top();
    auto iter = *active_top;
    active_iters_.pop();
    active_seqnums_.erase(active_top);
    return iter;
  }

  void PushInactiveIter(TruncatedRangeDelIterator* iter) {
    inactive_iters_.push(iter);
  }

  TruncatedRangeDelIterator* PopInactiveIter() {
    auto* iter = inactive_iters_.top();
    inactive_iters_.pop();
    return iter;
  }

  const InternalKeyComparator* icmp_;
  const std::vector<std::unique_ptr<TruncatedRangeDelIterator>>* iters_;
  size_t unused_idx_;
  ActiveSeqSet active_seqnums_;
  BinaryHeap<ActiveSeqSet::const_iterator, StartKeyMaxComparator> active_iters_;
  BinaryHeap<TruncatedRangeDelIterator*, EndKeyMaxComparator> inactive_iters_;
};

class RangeDelAggregatorV2 {
 public:
  explicit RangeDelAggregatorV2(const InternalKeyComparator* icmp)
      : icmp_(icmp) {}
  virtual ~RangeDelAggregatorV2() {}

  virtual void AddTombstones(
      std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
      const InternalKey* smallest = nullptr,
      const InternalKey* largest = nullptr) = 0;

  bool ShouldDelete(const Slice& key, RangeDelPositioningMode mode) {
    ParsedInternalKey parsed;
    if (!ParseInternalKey(key, &parsed)) {
      return false;
    }
    return ShouldDelete(parsed, mode);
  }
  virtual bool ShouldDelete(const ParsedInternalKey& parsed,
                            RangeDelPositioningMode mode) = 0;

  virtual void InvalidateRangeDelMapPositions() = 0;

  virtual bool IsEmpty() const = 0;

  bool AddFile(uint64_t file_number) {
    return files_seen_.insert(file_number).second;
  }

 protected:
  class StripeRep {
   public:
    StripeRep(const InternalKeyComparator* icmp, SequenceNumber upper_bound,
              SequenceNumber lower_bound)
        : icmp_(icmp),
          forward_iter_(icmp, &iters_),
          reverse_iter_(icmp, &iters_),
          upper_bound_(upper_bound),
          lower_bound_(lower_bound) {}

    void AddTombstones(std::unique_ptr<TruncatedRangeDelIterator> input_iter) {
      iters_.push_back(std::move(input_iter));
    }

    bool IsEmpty() const { return iters_.empty(); }

    bool ShouldDelete(const ParsedInternalKey& parsed,
                      RangeDelPositioningMode mode);

    void Invalidate() {
      InvalidateForwardIter();
      InvalidateReverseIter();
    }

    bool IsRangeOverlapped(const Slice& start, const Slice& end);

   private:
    bool InStripe(SequenceNumber seq) const {
      return lower_bound_ <= seq && seq <= upper_bound_;
    }

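    // Illustrative worked example (not part of the original source): with
    // snapshots {9, 19}, tombstones are partitioned into the stripes
    // [0, 9], [10, 19], and [20, kMaxSequenceNumber]. A tombstone fragment
    // with seq 15 lands in stripe [10, 19], so InStripe(15) holds only for
    // that stripe's rep, and the fragment can only cover keys whose sequence
    // numbers also fall in [10, 19].
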
    void InvalidateForwardIter() { forward_iter_.Invalidate(); }

    void InvalidateReverseIter() { reverse_iter_.Invalidate(); }

    const InternalKeyComparator* icmp_;
    std::vector<std::unique_ptr<TruncatedRangeDelIterator>> iters_;
    ForwardRangeDelIterator forward_iter_;
    ReverseRangeDelIterator reverse_iter_;
    SequenceNumber upper_bound_;
    SequenceNumber lower_bound_;
  };

  const InternalKeyComparator* icmp_;

 private:
  std::set<uint64_t> files_seen_;
};

class ReadRangeDelAggregatorV2 : public RangeDelAggregatorV2 {
 public:
  ReadRangeDelAggregatorV2(const InternalKeyComparator* icmp,
                           SequenceNumber upper_bound)
      : RangeDelAggregatorV2(icmp),
        rep_(icmp, upper_bound, 0 /* lower_bound */) {}
  ~ReadRangeDelAggregatorV2() override {}

  using RangeDelAggregatorV2::ShouldDelete;
  void AddTombstones(
      std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
      const InternalKey* smallest = nullptr,
      const InternalKey* largest = nullptr) override;

  bool ShouldDelete(const ParsedInternalKey& parsed,
                    RangeDelPositioningMode mode) override;

  bool IsRangeOverlapped(const Slice& start, const Slice& end);

  void InvalidateRangeDelMapPositions() override { rep_.Invalidate(); }

  bool IsEmpty() const override { return rep_.IsEmpty(); }

 private:
  StripeRep rep_;
};

class CompactionRangeDelAggregatorV2 : public RangeDelAggregatorV2 {
 public:
  CompactionRangeDelAggregatorV2(const InternalKeyComparator* icmp,
                                 const std::vector<SequenceNumber>& snapshots)
      : RangeDelAggregatorV2(icmp), snapshots_(&snapshots) {}
  ~CompactionRangeDelAggregatorV2() override {}

  void AddTombstones(
      std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter,
      const InternalKey* smallest = nullptr,
      const InternalKey* largest = nullptr) override;

  using RangeDelAggregatorV2::ShouldDelete;
  bool ShouldDelete(const ParsedInternalKey& parsed,
                    RangeDelPositioningMode mode) override;

  bool IsRangeOverlapped(const Slice& start, const Slice& end);

  void InvalidateRangeDelMapPositions() override {
    for (auto& rep : reps_) {
      rep.second.Invalidate();
    }
  }

  bool IsEmpty() const override {
    for (const auto& rep : reps_) {
      if (!rep.second.IsEmpty()) {
        return false;
      }
    }
    return true;
  }

  // Creates an iterator over all the range tombstones in the aggregator, for
  // use in compaction. Nullptr arguments indicate that the iterator range is
  // unbounded.
  // NOTE: the boundaries are used for optimization purposes to reduce the
  // number of tombstones that are passed to the fragmenter; they do not
  // guarantee that the resulting iterator only contains range tombstones that
  // cover keys in the provided range. If required, these bounds must be
  // enforced during iteration.
  std::unique_ptr<FragmentedRangeTombstoneIterator> NewIterator(
      const Slice* lower_bound = nullptr, const Slice* upper_bound = nullptr,
      bool upper_bound_inclusive = false);

 private:
  std::vector<std::unique_ptr<TruncatedRangeDelIterator>> parent_iters_;
  std::map<SequenceNumber, StripeRep> reps_;

  const std::vector<SequenceNumber>* snapshots_;
};

}  // namespace rocksdb
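A sketch of how a compaction might drain the aggregator via NewIterator (illustrative only; `compaction_agg` is a placeholder, and per the NOTE above the bounds merely prune tombstones, so the caller still enforces them):

Slice lower("b");
Slice upper("m");
auto tombstone_iter =
    compaction_agg.NewIterator(&lower, &upper, false /* upper_bound_inclusive */);
for (tombstone_iter->SeekToFirst(); tombstone_iter->Valid();
     tombstone_iter->Next()) {
  // Emit tombstone_iter->start_key(), end_key(), and seq() into the output
  // table, clamping to [lower, upper) as needed.
}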
@ -1,709 +0,0 @@
|
||||
// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
|
||||
// This source code is licensed under both the GPLv2 (found in the
|
||||
// COPYING file in the root directory) and Apache 2.0 License
|
||||
// (found in the LICENSE.Apache file in the root directory).
|
||||
|
||||
#include "db/range_del_aggregator_v2.h"
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "db/db_test_util.h"
|
||||
#include "db/dbformat.h"
|
||||
#include "db/range_tombstone_fragmenter.h"
|
||||
#include "util/testutil.h"
|
||||
|
||||
namespace rocksdb {
|
||||
|
||||
class RangeDelAggregatorV2Test : public testing::Test {};
|
||||
|
||||
namespace {
|
||||
|
||||
static auto bytewise_icmp = InternalKeyComparator(BytewiseComparator());
|
||||
|
||||
std::unique_ptr<InternalIterator> MakeRangeDelIter(
|
||||
const std::vector<RangeTombstone>& range_dels) {
|
||||
std::vector<std::string> keys, values;
|
||||
for (const auto& range_del : range_dels) {
|
||||
auto key_and_value = range_del.Serialize();
|
||||
keys.push_back(key_and_value.first.Encode().ToString());
|
||||
values.push_back(key_and_value.second.ToString());
|
||||
}
|
||||
return std::unique_ptr<test::VectorIterator>(
|
||||
new test::VectorIterator(keys, values));
|
||||
}
|
||||
|
||||
std::vector<std::unique_ptr<FragmentedRangeTombstoneList>>
|
||||
MakeFragmentedTombstoneLists(
|
||||
const std::vector<std::vector<RangeTombstone>>& range_dels_list) {
|
||||
std::vector<std::unique_ptr<FragmentedRangeTombstoneList>> fragment_lists;
|
||||
for (const auto& range_dels : range_dels_list) {
|
||||
auto range_del_iter = MakeRangeDelIter(range_dels);
|
||||
fragment_lists.emplace_back(new FragmentedRangeTombstoneList(
|
||||
std::move(range_del_iter), bytewise_icmp));
|
||||
}
|
||||
return fragment_lists;
|
||||
}
|
||||
|
||||
struct TruncatedIterScanTestCase {
|
||||
ParsedInternalKey start;
|
||||
ParsedInternalKey end;
|
||||
SequenceNumber seq;
|
||||
};
|
||||
|
||||
struct TruncatedIterSeekTestCase {
|
||||
Slice target;
|
||||
ParsedInternalKey start;
|
||||
ParsedInternalKey end;
|
||||
SequenceNumber seq;
|
||||
bool invalid;
|
||||
};
|
||||
|
||||
struct ShouldDeleteTestCase {
|
||||
ParsedInternalKey lookup_key;
|
||||
bool result;
|
||||
};
|
||||
|
||||
struct IsRangeOverlappedTestCase {
|
||||
Slice start;
|
||||
Slice end;
|
||||
bool result;
|
||||
};
|
||||
|
||||
ParsedInternalKey UncutEndpoint(const Slice& s) {
|
||||
return ParsedInternalKey(s, kMaxSequenceNumber, kTypeRangeDeletion);
|
||||
}
|
||||
|
||||
ParsedInternalKey InternalValue(const Slice& key, SequenceNumber seq) {
|
||||
return ParsedInternalKey(key, seq, kTypeValue);
|
||||
}
|
||||
|
||||
void VerifyIterator(
|
||||
TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp,
|
||||
const std::vector<TruncatedIterScanTestCase>& expected_range_dels) {
|
||||
// Test forward iteration.
|
||||
iter->SeekToFirst();
|
||||
for (size_t i = 0; i < expected_range_dels.size(); i++, iter->Next()) {
|
||||
ASSERT_TRUE(iter->Valid());
|
||||
EXPECT_EQ(0, icmp.Compare(iter->start_key(), expected_range_dels[i].start));
|
||||
EXPECT_EQ(0, icmp.Compare(iter->end_key(), expected_range_dels[i].end));
|
||||
EXPECT_EQ(expected_range_dels[i].seq, iter->seq());
|
||||
}
|
||||
EXPECT_FALSE(iter->Valid());
|
||||
|
||||
// Test reverse iteration.
|
||||
iter->SeekToLast();
|
||||
std::vector<TruncatedIterScanTestCase> reverse_expected_range_dels(
|
||||
expected_range_dels.rbegin(), expected_range_dels.rend());
|
||||
for (size_t i = 0; i < reverse_expected_range_dels.size();
|
||||
i++, iter->Prev()) {
|
||||
ASSERT_TRUE(iter->Valid());
|
||||
EXPECT_EQ(0, icmp.Compare(iter->start_key(),
|
||||
reverse_expected_range_dels[i].start));
|
||||
EXPECT_EQ(
|
||||
0, icmp.Compare(iter->end_key(), reverse_expected_range_dels[i].end));
|
||||
EXPECT_EQ(reverse_expected_range_dels[i].seq, iter->seq());
|
||||
}
|
||||
EXPECT_FALSE(iter->Valid());
|
||||
}
|
||||
|
||||
void VerifySeek(TruncatedRangeDelIterator* iter,
|
||||
const InternalKeyComparator& icmp,
|
||||
const std::vector<TruncatedIterSeekTestCase>& test_cases) {
|
||||
for (const auto& test_case : test_cases) {
|
||||
iter->Seek(test_case.target);
|
||||
if (test_case.invalid) {
|
||||
ASSERT_FALSE(iter->Valid());
|
||||
} else {
|
||||
ASSERT_TRUE(iter->Valid());
|
||||
EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start));
|
||||
EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end));
|
||||
EXPECT_EQ(test_case.seq, iter->seq());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void VerifySeekForPrev(
|
||||
TruncatedRangeDelIterator* iter, const InternalKeyComparator& icmp,
|
||||
const std::vector<TruncatedIterSeekTestCase>& test_cases) {
|
||||
for (const auto& test_case : test_cases) {
|
||||
iter->SeekForPrev(test_case.target);
|
||||
if (test_case.invalid) {
|
||||
ASSERT_FALSE(iter->Valid());
|
||||
} else {
|
||||
ASSERT_TRUE(iter->Valid());
|
||||
EXPECT_EQ(0, icmp.Compare(iter->start_key(), test_case.start));
|
||||
EXPECT_EQ(0, icmp.Compare(iter->end_key(), test_case.end));
|
||||
EXPECT_EQ(test_case.seq, iter->seq());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void VerifyShouldDelete(RangeDelAggregatorV2* range_del_agg,
|
||||
const std::vector<ShouldDeleteTestCase>& test_cases) {
|
||||
for (const auto& test_case : test_cases) {
|
||||
EXPECT_EQ(
|
||||
test_case.result,
|
||||
range_del_agg->ShouldDelete(
|
||||
test_case.lookup_key, RangeDelPositioningMode::kForwardTraversal));
|
||||
}
|
||||
for (auto it = test_cases.rbegin(); it != test_cases.rend(); ++it) {
|
||||
const auto& test_case = *it;
|
||||
EXPECT_EQ(
|
||||
test_case.result,
|
||||
range_del_agg->ShouldDelete(
|
||||
test_case.lookup_key, RangeDelPositioningMode::kBackwardTraversal));
|
||||
}
|
||||
}
|
||||
|
||||
void VerifyIsRangeOverlapped(
|
||||
ReadRangeDelAggregatorV2* range_del_agg,
|
||||
const std::vector<IsRangeOverlappedTestCase>& test_cases) {
|
||||
for (const auto& test_case : test_cases) {
|
||||
EXPECT_EQ(test_case.result,
|
||||
range_del_agg->IsRangeOverlapped(test_case.start, test_case.end));
|
||||
}
|
||||
}
|
||||
|
||||
void CheckIterPosition(const RangeTombstone& tombstone,
|
||||
const FragmentedRangeTombstoneIterator* iter) {
|
||||
// Test InternalIterator interface.
|
||||
EXPECT_EQ(tombstone.start_key_, ExtractUserKey(iter->key()));
|
||||
EXPECT_EQ(tombstone.end_key_, iter->value());
|
||||
EXPECT_EQ(tombstone.seq_, iter->seq());
|
||||
|
||||
// Test FragmentedRangeTombstoneIterator interface.
|
||||
EXPECT_EQ(tombstone.start_key_, iter->start_key());
|
||||
EXPECT_EQ(tombstone.end_key_, iter->end_key());
|
||||
EXPECT_EQ(tombstone.seq_, GetInternalKeySeqno(iter->key()));
|
||||
}
|
||||
|
||||
void VerifyFragmentedRangeDels(
|
||||
FragmentedRangeTombstoneIterator* iter,
|
||||
const std::vector<RangeTombstone>& expected_tombstones) {
|
||||
iter->SeekToFirst();
|
||||
for (size_t i = 0; i < expected_tombstones.size(); i++, iter->Next()) {
|
||||
ASSERT_TRUE(iter->Valid());
|
||||
CheckIterPosition(expected_tombstones[i], iter);
|
||||
}
|
||||
EXPECT_FALSE(iter->Valid());
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, EmptyTruncatedIter) {
|
||||
auto range_del_iter = MakeRangeDelIter({});
|
||||
FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
|
||||
bytewise_icmp);
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
|
||||
kMaxSequenceNumber));
|
||||
|
||||
TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
|
||||
nullptr);
|
||||
|
||||
iter.SeekToFirst();
|
||||
ASSERT_FALSE(iter.Valid());
|
||||
|
||||
iter.SeekToLast();
|
||||
ASSERT_FALSE(iter.Valid());
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, UntruncatedIter) {
|
||||
auto range_del_iter =
|
||||
MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
|
||||
FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
|
||||
bytewise_icmp);
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
|
||||
kMaxSequenceNumber));
|
||||
|
||||
TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
|
||||
nullptr);
|
||||
|
||||
VerifyIterator(&iter, bytewise_icmp,
|
||||
{{UncutEndpoint("a"), UncutEndpoint("e"), 10},
|
||||
{UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{UncutEndpoint("j"), UncutEndpoint("n"), 4}});
|
||||
|
||||
VerifySeek(
|
||||
&iter, bytewise_icmp,
|
||||
{{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10},
|
||||
{"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4},
|
||||
{"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
|
||||
{"", UncutEndpoint("a"), UncutEndpoint("e"), 10}});
|
||||
|
||||
VerifySeekForPrev(
|
||||
&iter, bytewise_icmp,
|
||||
{{"d", UncutEndpoint("a"), UncutEndpoint("e"), 10},
|
||||
{"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{"n", UncutEndpoint("j"), UncutEndpoint("n"), 4},
|
||||
{"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, UntruncatedIterWithSnapshot) {
|
||||
auto range_del_iter =
|
||||
MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
|
||||
FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
|
||||
bytewise_icmp);
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
|
||||
9 /* snapshot */));
|
||||
|
||||
TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp, nullptr,
|
||||
nullptr);
|
||||
|
||||
VerifyIterator(&iter, bytewise_icmp,
|
||||
{{UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{UncutEndpoint("j"), UncutEndpoint("n"), 4}});
|
||||
|
||||
VerifySeek(
|
||||
&iter, bytewise_icmp,
|
||||
{{"d", UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{"ia", UncutEndpoint("j"), UncutEndpoint("n"), 4},
|
||||
{"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
|
||||
{"", UncutEndpoint("e"), UncutEndpoint("g"), 8}});
|
||||
|
||||
VerifySeekForPrev(
|
||||
&iter, bytewise_icmp,
|
||||
{{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
|
||||
{"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{"n", UncutEndpoint("j"), UncutEndpoint("n"), 4},
|
||||
{"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, TruncatedIterPartiallyCutTombstones) {
|
||||
auto range_del_iter =
|
||||
MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
|
||||
FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
|
||||
bytewise_icmp);
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
|
||||
kMaxSequenceNumber));
|
||||
|
||||
InternalKey smallest("d", 7, kTypeValue);
|
||||
InternalKey largest("m", 9, kTypeValue);
|
||||
TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp,
|
||||
&smallest, &largest);
|
||||
|
||||
VerifyIterator(&iter, bytewise_icmp,
|
||||
{{InternalValue("d", 7), UncutEndpoint("e"), 10},
|
||||
{UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{UncutEndpoint("j"), InternalValue("m", 8), 4}});
|
||||
|
||||
VerifySeek(
|
||||
&iter, bytewise_icmp,
|
||||
{{"d", InternalValue("d", 7), UncutEndpoint("e"), 10},
|
||||
{"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{"ia", UncutEndpoint("j"), InternalValue("m", 8), 4},
|
||||
{"n", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
|
||||
{"", InternalValue("d", 7), UncutEndpoint("e"), 10}});
|
||||
|
||||
VerifySeekForPrev(
|
||||
&iter, bytewise_icmp,
|
||||
{{"d", InternalValue("d", 7), UncutEndpoint("e"), 10},
|
||||
{"e", UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{"ia", UncutEndpoint("e"), UncutEndpoint("g"), 8},
|
||||
{"n", UncutEndpoint("j"), InternalValue("m", 8), 4},
|
||||
{"", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, TruncatedIterFullyCutTombstones) {
|
||||
auto range_del_iter =
|
||||
MakeRangeDelIter({{"a", "e", 10}, {"e", "g", 8}, {"j", "n", 4}});
|
||||
FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
|
||||
bytewise_icmp);
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
|
||||
kMaxSequenceNumber));
|
||||
|
||||
InternalKey smallest("f", 7, kTypeValue);
|
||||
InternalKey largest("i", 9, kTypeValue);
|
||||
TruncatedRangeDelIterator iter(std::move(input_iter), &bytewise_icmp,
|
||||
&smallest, &largest);
|
||||
|
||||
VerifyIterator(&iter, bytewise_icmp,
|
||||
{{InternalValue("f", 7), UncutEndpoint("g"), 8}});
|
||||
|
||||
VerifySeek(
|
||||
&iter, bytewise_icmp,
|
||||
{{"d", InternalValue("f", 7), UncutEndpoint("g"), 8},
|
||||
{"f", InternalValue("f", 7), UncutEndpoint("g"), 8},
|
||||
{"j", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */}});
|
||||
|
||||
VerifySeekForPrev(
|
||||
&iter, bytewise_icmp,
|
||||
{{"d", UncutEndpoint(""), UncutEndpoint(""), 0, true /* invalid */},
|
||||
{"f", InternalValue("f", 7), UncutEndpoint("g"), 8},
|
||||
{"j", InternalValue("f", 7), UncutEndpoint("g"), 8}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, SingleIterInAggregator) {
|
||||
auto range_del_iter = MakeRangeDelIter({{"a", "e", 10}, {"c", "g", 8}});
|
||||
FragmentedRangeTombstoneList fragment_list(std::move(range_del_iter),
|
||||
bytewise_icmp);
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(&fragment_list, bytewise_icmp,
|
||||
kMaxSequenceNumber));
|
||||
|
||||
ReadRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, kMaxSequenceNumber);
|
||||
range_del_agg.AddTombstones(std::move(input_iter));
|
||||
|
||||
VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), false},
|
||||
{InternalValue("b", 9), true},
|
||||
{InternalValue("d", 9), true},
|
||||
{InternalValue("e", 7), true},
|
||||
{InternalValue("g", 7), false}});
|
||||
|
||||
VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
|
||||
{"_", "a", true},
|
||||
{"a", "c", true},
|
||||
{"d", "f", true},
|
||||
{"g", "l", false}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, MultipleItersInAggregator) {
|
||||
auto fragment_lists = MakeFragmentedTombstoneLists(
|
||||
{{{"a", "e", 10}, {"c", "g", 8}},
|
||||
{{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});
|
||||
|
||||
ReadRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, kMaxSequenceNumber);
|
||||
for (const auto& fragment_list : fragment_lists) {
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
|
||||
kMaxSequenceNumber));
|
||||
range_del_agg.AddTombstones(std::move(input_iter));
|
||||
}
|
||||
|
||||
VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), true},
|
||||
{InternalValue("b", 19), false},
|
||||
{InternalValue("b", 9), true},
|
||||
{InternalValue("d", 9), true},
|
||||
{InternalValue("e", 7), true},
|
||||
{InternalValue("g", 7), false},
|
||||
{InternalValue("h", 24), true},
|
||||
{InternalValue("i", 24), false},
|
||||
{InternalValue("ii", 14), true},
|
||||
{InternalValue("j", 14), false}});
|
||||
|
||||
VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
|
||||
{"_", "a", true},
|
||||
{"a", "c", true},
|
||||
{"d", "f", true},
|
||||
{"g", "l", true},
|
||||
{"x", "y", false}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, MultipleItersInAggregatorWithUpperBound) {
|
||||
auto fragment_lists = MakeFragmentedTombstoneLists(
|
||||
{{{"a", "e", 10}, {"c", "g", 8}},
|
||||
{{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});
|
||||
|
||||
ReadRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, 19);
|
||||
for (const auto& fragment_list : fragment_lists) {
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
|
||||
19 /* snapshot */));
|
||||
range_del_agg.AddTombstones(std::move(input_iter));
|
||||
}
|
||||
|
||||
VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), false},
|
||||
{InternalValue("a", 9), true},
|
||||
{InternalValue("b", 9), true},
|
||||
{InternalValue("d", 9), true},
|
||||
{InternalValue("e", 7), true},
|
||||
{InternalValue("g", 7), false},
|
||||
{InternalValue("h", 24), false},
|
||||
{InternalValue("i", 24), false},
|
||||
{InternalValue("ii", 14), true},
|
||||
{InternalValue("j", 14), false}});
|
||||
|
||||
VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
|
||||
{"_", "a", true},
|
||||
{"a", "c", true},
|
||||
{"d", "f", true},
|
||||
{"g", "l", true},
|
||||
{"x", "y", false}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, MultipleTruncatedItersInAggregator) {
|
||||
auto fragment_lists = MakeFragmentedTombstoneLists(
|
||||
{{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}});
|
||||
std::vector<std::pair<InternalKey, InternalKey>> iter_bounds = {
|
||||
{InternalKey("a", 4, kTypeValue),
|
||||
InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)},
|
||||
{InternalKey("m", 20, kTypeValue),
|
||||
InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)},
|
||||
{InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}};
|
||||
|
||||
ReadRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, 19);
|
||||
for (size_t i = 0; i < fragment_lists.size(); i++) {
|
||||
const auto& fragment_list = fragment_lists[i];
|
||||
const auto& bounds = iter_bounds[i];
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
|
||||
19 /* snapshot */));
|
||||
range_del_agg.AddTombstones(std::move(input_iter), &bounds.first,
|
||||
&bounds.second);
|
||||
}
|
||||
|
||||
VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 10), false},
|
||||
{InternalValue("a", 9), false},
|
||||
{InternalValue("a", 4), true},
|
||||
{InternalValue("m", 10), false},
|
||||
{InternalValue("m", 9), true},
|
||||
{InternalValue("x", 10), false},
|
||||
{InternalValue("x", 9), false},
|
||||
{InternalValue("x", 5), true},
|
||||
{InternalValue("z", 9), false}});
|
||||
|
||||
VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
|
||||
{"_", "a", true},
|
||||
{"a", "n", true},
|
||||
{"l", "x", true},
|
||||
{"w", "z", true},
|
||||
{"zzz", "zz", false},
|
||||
{"zz", "zzz", false}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, MultipleTruncatedItersInAggregatorSameLevel) {
|
||||
auto fragment_lists = MakeFragmentedTombstoneLists(
|
||||
{{{"a", "z", 10}}, {{"a", "z", 10}}, {{"a", "z", 10}}});
|
||||
std::vector<std::pair<InternalKey, InternalKey>> iter_bounds = {
|
||||
{InternalKey("a", 4, kTypeValue),
|
||||
InternalKey("m", kMaxSequenceNumber, kTypeRangeDeletion)},
|
||||
{InternalKey("m", 20, kTypeValue),
|
||||
InternalKey("x", kMaxSequenceNumber, kTypeRangeDeletion)},
|
||||
{InternalKey("x", 5, kTypeValue), InternalKey("zz", 30, kTypeValue)}};
|
||||
|
||||
ReadRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, 19);
|
||||
|
||||
auto add_iter_to_agg = [&](size_t i) {
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(fragment_lists[i].get(),
|
||||
bytewise_icmp, 19 /* snapshot */));
|
||||
range_del_agg.AddTombstones(std::move(input_iter), &iter_bounds[i].first,
|
||||
&iter_bounds[i].second);
|
||||
};
|
||||
|
||||
add_iter_to_agg(0);
|
||||
VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 10), false},
|
||||
{InternalValue("a", 9), false},
|
||||
{InternalValue("a", 4), true}});
|
||||
|
||||
add_iter_to_agg(1);
|
||||
VerifyShouldDelete(&range_del_agg, {{InternalValue("m", 10), false},
|
||||
{InternalValue("m", 9), true}});
|
||||
|
||||
add_iter_to_agg(2);
|
||||
VerifyShouldDelete(&range_del_agg, {{InternalValue("x", 10), false},
|
||||
{InternalValue("x", 9), false},
|
||||
{InternalValue("x", 5), true},
|
||||
{InternalValue("z", 9), false}});
|
||||
|
||||
VerifyIsRangeOverlapped(&range_del_agg, {{"", "_", false},
|
||||
{"_", "a", true},
|
||||
{"a", "n", true},
|
||||
{"l", "x", true},
|
||||
{"w", "z", true},
|
||||
{"zzz", "zz", false},
|
||||
{"zz", "zzz", false}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorNoSnapshots) {
|
||||
auto fragment_lists = MakeFragmentedTombstoneLists(
|
||||
{{{"a", "e", 10}, {"c", "g", 8}},
|
||||
{{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});
|
||||
|
||||
std::vector<SequenceNumber> snapshots;
|
||||
CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots);
|
||||
for (const auto& fragment_list : fragment_lists) {
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
|
||||
kMaxSequenceNumber));
|
||||
range_del_agg.AddTombstones(std::move(input_iter));
|
||||
}
|
||||
|
||||
VerifyShouldDelete(&range_del_agg, {{InternalValue("a", 19), true},
|
||||
{InternalValue("b", 19), false},
|
||||
{InternalValue("b", 9), true},
|
||||
{InternalValue("d", 9), true},
|
||||
{InternalValue("e", 7), true},
|
||||
{InternalValue("g", 7), false},
|
||||
{InternalValue("h", 24), true},
|
||||
{InternalValue("i", 24), false},
|
||||
{InternalValue("ii", 14), true},
|
||||
{InternalValue("j", 14), false}});
|
||||
|
||||
auto range_del_compaction_iter = range_del_agg.NewIterator();
|
||||
VerifyFragmentedRangeDels(range_del_compaction_iter.get(), {{"a", "b", 20},
|
||||
{"b", "c", 10},
|
||||
{"c", "e", 10},
|
||||
{"e", "g", 8},
|
||||
{"h", "i", 25},
|
||||
{"ii", "j", 15}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorWithSnapshots) {
|
||||
auto fragment_lists = MakeFragmentedTombstoneLists(
|
||||
{{{"a", "e", 10}, {"c", "g", 8}},
|
||||
{{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});
|
||||
|
||||
std::vector<SequenceNumber> snapshots{9, 19};
|
||||
CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots);
|
||||
for (const auto& fragment_list : fragment_lists) {
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
|
||||
kMaxSequenceNumber));
|
||||
range_del_agg.AddTombstones(std::move(input_iter));
|
||||
}
|
||||
|
||||
VerifyShouldDelete(
|
||||
&range_del_agg,
|
||||
{
|
||||
{InternalValue("a", 19), false}, // [10, 19]
|
||||
{InternalValue("a", 9), false}, // [0, 9]
|
||||
{InternalValue("b", 9), false}, // [0, 9]
|
||||
{InternalValue("d", 9), false}, // [0, 9]
|
||||
{InternalValue("d", 7), true}, // [0, 9]
|
||||
{InternalValue("e", 7), true}, // [0, 9]
|
||||
{InternalValue("g", 7), false}, // [0, 9]
|
||||
{InternalValue("h", 24), true}, // [20, kMaxSequenceNumber]
|
||||
{InternalValue("i", 24), false}, // [20, kMaxSequenceNumber]
|
||||
{InternalValue("ii", 14), true}, // [10, 19]
|
||||
{InternalValue("j", 14), false} // [10, 19]
|
||||
});
|
||||
|
||||
auto range_del_compaction_iter = range_del_agg.NewIterator();
|
||||
VerifyFragmentedRangeDels(range_del_compaction_iter.get(), {{"a", "b", 20},
|
||||
{"a", "b", 10},
|
||||
{"b", "c", 10},
|
||||
{"c", "e", 10},
|
||||
{"c", "e", 8},
|
||||
{"e", "g", 8},
|
||||
{"h", "i", 25},
|
||||
{"ii", "j", 15}});
|
||||
}
|
||||
|
||||
TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorEmptyIteratorLeft) {
|
||||
auto fragment_lists = MakeFragmentedTombstoneLists(
|
||||
{{{"a", "e", 10}, {"c", "g", 8}},
|
||||
{{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});
|
||||
|
||||
std::vector<SequenceNumber> snapshots{9, 19};
|
||||
CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots);
|
||||
for (const auto& fragment_list : fragment_lists) {
|
||||
std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
|
||||
new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
|
||||
kMaxSequenceNumber));
|
||||
range_del_agg.AddTombstones(std::move(input_iter));
|
||||
}
|
||||
|
||||
Slice start("_");
|
||||
Slice end("__");
|
||||
}

TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorEmptyIteratorRight) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("p");
  Slice end("q");
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {});
}
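
// Both bounds lie to the right of the last tombstone end ("p" > "j"), so the
// bounded iterator is empty whether or not the end key is inclusive.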

TEST_F(RangeDelAggregatorV2Test, CompactionAggregatorBoundedIterator) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "e", 10}, {"c", "g", 8}},
       {{"a", "b", 20}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("bb");
  Slice end("e");
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(),
                            {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(
      range_del_compaction_iter2.get(),
      {{"a", "c", 10}, {"c", "e", 10}, {"c", "e", 8}, {"e", "g", 8}});
}
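
// The bounded iterator returns whole fragments that overlap [start, end):
// ["a", "c") @ 10 is emitted in full even though the bound starts at "bb",
// and end_key_inclusive=true additionally pulls in ["e", "g") @ 8, the
// fragment beginning at the end key.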

TEST_F(RangeDelAggregatorV2Test,
       CompactionAggregatorBoundedIteratorExtraFragments) {
  auto fragment_lists = MakeFragmentedTombstoneLists(
      {{{"a", "d", 10}, {"c", "g", 8}},
       {{"b", "c", 20}, {"d", "f", 30}, {"h", "i", 25}, {"ii", "j", 15}}});

  std::vector<SequenceNumber> snapshots{9, 19};
  CompactionRangeDelAggregatorV2 range_del_agg(&bytewise_icmp, snapshots);
  for (const auto& fragment_list : fragment_lists) {
    std::unique_ptr<FragmentedRangeTombstoneIterator> input_iter(
        new FragmentedRangeTombstoneIterator(fragment_list.get(), bytewise_icmp,
                                             kMaxSequenceNumber));
    range_del_agg.AddTombstones(std::move(input_iter));
  }

  Slice start("bb");
  Slice end("e");
  auto range_del_compaction_iter1 =
      range_del_agg.NewIterator(&start, &end, false /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter1.get(), {{"a", "b", 10},
                                                               {"b", "c", 20},
                                                               {"b", "c", 10},
                                                               {"c", "d", 10},
                                                               {"c", "d", 8},
                                                               {"d", "f", 30},
                                                               {"d", "f", 8},
                                                               {"f", "g", 8}});

  auto range_del_compaction_iter2 =
      range_del_agg.NewIterator(&start, &end, true /* end_key_inclusive */);
  VerifyFragmentedRangeDels(range_del_compaction_iter2.get(), {{"a", "b", 10},
                                                               {"b", "c", 20},
                                                               {"b", "c", 10},
                                                               {"c", "d", 10},
                                                               {"c", "d", 8},
                                                               {"d", "f", 30},
                                                               {"d", "f", 8},
                                                               {"f", "g", 8}});
}
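
// The "extra" fragments come from splitting at other tombstones' boundaries:
// ["a", "d") @ 10 is emitted as ["a", "b"), ["b", "c"), and ["c", "d")
// pieces because of ["b", "c") @ 20, while ["c", "g") @ 8 is split at "d"
// and "f" by ["a", "d") @ 10 and ["d", "f") @ 30.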

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
@ -185,7 +185,7 @@ Status TableCache::FindTable(const EnvOptions& env_options,
InternalIterator* TableCache::NewIterator(
    const ReadOptions& options, const EnvOptions& env_options,
    const InternalKeyComparator& icomparator, const FileMetaData& file_meta,
    RangeDelAggregatorV2* range_del_agg, const SliceTransform* prefix_extractor,
    RangeDelAggregator* range_del_agg, const SliceTransform* prefix_extractor,
    TableReader** table_reader_ptr, HistogramImpl* file_read_hist,
    bool for_compaction, Arena* arena, bool skip_filters, int level,
    const InternalKey* smallest_compaction_key,

@ -15,7 +15,7 @@
#include <stdint.h>

#include "db/dbformat.h"
#include "db/range_del_aggregator_v2.h"
#include "db/range_del_aggregator.h"
#include "options/cf_options.h"
#include "port/port.h"
#include "rocksdb/cache.h"

@ -52,7 +52,7 @@ class TableCache {
  InternalIterator* NewIterator(
      const ReadOptions& options, const EnvOptions& toptions,
      const InternalKeyComparator& internal_comparator,
      const FileMetaData& file_meta, RangeDelAggregatorV2* range_del_agg,
      const FileMetaData& file_meta, RangeDelAggregator* range_del_agg,
      const SliceTransform* prefix_extractor = nullptr,
      TableReader** table_reader_ptr = nullptr,
      HistogramImpl* file_read_hist = nullptr, bool for_compaction = false,

@ -459,7 +459,7 @@ class LevelIterator final : public InternalIterator {
      const EnvOptions& env_options, const InternalKeyComparator& icomparator,
      const LevelFilesBrief* flevel, const SliceTransform* prefix_extractor,
      bool should_sample, HistogramImpl* file_read_hist, bool for_compaction,
      bool skip_filters, int level, RangeDelAggregatorV2* range_del_agg,
      bool skip_filters, int level, RangeDelAggregator* range_del_agg,
      const std::vector<AtomicCompactionUnitBoundary>* compaction_boundaries =
          nullptr)
      : table_cache_(table_cache),

@ -571,7 +571,7 @@ class LevelIterator final : public InternalIterator {
  bool skip_filters_;
  size_t file_index_;
  int level_;
  RangeDelAggregatorV2* range_del_agg_;
  RangeDelAggregator* range_del_agg_;
  IteratorWrapper file_iter_;  // May be nullptr
  PinnedIteratorsManager* pinned_iters_mgr_;

@ -985,7 +985,7 @@ double VersionStorageInfo::GetEstimatedCompressionRatioAtLevel(
void Version::AddIterators(const ReadOptions& read_options,
                           const EnvOptions& soptions,
                           MergeIteratorBuilder* merge_iter_builder,
                           RangeDelAggregatorV2* range_del_agg) {
                           RangeDelAggregator* range_del_agg) {
  assert(storage_info_.finalized_);

  for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) {

@ -998,7 +998,7 @@ void Version::AddIteratorsForLevel(const ReadOptions& read_options,
                                   const EnvOptions& soptions,
                                   MergeIteratorBuilder* merge_iter_builder,
                                   int level,
                                   RangeDelAggregatorV2* range_del_agg) {
                                   RangeDelAggregator* range_del_agg) {
  assert(storage_info_.finalized_);
  if (level >= storage_info_.num_non_empty_levels()) {
    // This is an empty level

@ -1057,8 +1057,8 @@ Status Version::OverlapWithLevelIterator(const ReadOptions& read_options,

  Arena arena;
  Status status;
  ReadRangeDelAggregatorV2 range_del_agg(&icmp,
                                         kMaxSequenceNumber /* upper_bound */);
  ReadRangeDelAggregator range_del_agg(&icmp,
                                       kMaxSequenceNumber /* upper_bound */);

  *overlap = false;

@ -4328,7 +4328,7 @@ void VersionSet::AddLiveFiles(std::vector<FileDescriptor>* live_list) {
}

InternalIterator* VersionSet::MakeInputIterator(
    const Compaction* c, RangeDelAggregatorV2* range_del_agg,
    const Compaction* c, RangeDelAggregator* range_del_agg,
    const EnvOptions& env_options_compactions) {
  auto cfd = c->column_family_data();
  ReadOptions read_options;

@ -34,7 +34,7 @@
#include "db/dbformat.h"
#include "db/file_indexer.h"
#include "db/log_reader.h"
#include "db/range_del_aggregator_v2.h"
#include "db/range_del_aggregator.h"
#include "db/read_callback.h"
#include "db/table_cache.h"
#include "db/version_builder.h"

@ -538,11 +538,11 @@ class Version {
  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
  void AddIterators(const ReadOptions&, const EnvOptions& soptions,
                    MergeIteratorBuilder* merger_iter_builder,
                    RangeDelAggregatorV2* range_del_agg);
                    RangeDelAggregator* range_del_agg);

  void AddIteratorsForLevel(const ReadOptions&, const EnvOptions& soptions,
                            MergeIteratorBuilder* merger_iter_builder,
                            int level, RangeDelAggregatorV2* range_del_agg);
                            int level, RangeDelAggregator* range_del_agg);

  Status OverlapWithLevelIterator(const ReadOptions&, const EnvOptions&,
                                  const Slice& smallest_user_key,

@ -935,7 +935,7 @@ class VersionSet {
  // Create an iterator that reads over the compaction inputs for "*c".
  // The caller should delete the iterator when no longer needed.
  InternalIterator* MakeInputIterator(
      const Compaction* c, RangeDelAggregatorV2* range_del_agg,
      const Compaction* c, RangeDelAggregator* range_del_agg,
      const EnvOptions& env_options_compactions);

  // Add all files listed in any live version to *live.
2
src.mk
2
src.mk
@ -44,7 +44,6 @@ LIB_SOURCES = \
  db/merge_helper.cc \
  db/merge_operator.cc \
  db/range_del_aggregator.cc \
  db/range_del_aggregator_v2.cc \
  db/range_tombstone_fragmenter.cc \
  db/repair.cc \
  db/snapshot_impl.cc \

@ -335,7 +334,6 @@ MAIN_SOURCES = \
  db/repair_test.cc \
  db/range_del_aggregator_test.cc \
  db/range_del_aggregator_bench.cc \
  db/range_del_aggregator_v2_test.cc \
  db/range_tombstone_fragmenter_test.cc \
  db/table_properties_collector_test.cc \
  db/util_merge_operators_test.cc \

@ -19,8 +19,8 @@ Status GetAllKeyVersions(DB* db, Slice begin_key, Slice end_key,

  DBImpl* idb = static_cast<DBImpl*>(db->GetRootDB());
  auto icmp = InternalKeyComparator(idb->GetOptions().comparator);
  ReadRangeDelAggregatorV2 range_del_agg(&icmp,
                                         kMaxSequenceNumber /* upper_bound */);
  ReadRangeDelAggregator range_del_agg(&icmp,
                                       kMaxSequenceNumber /* upper_bound */);
  Arena arena;
  ScopedArenaIterator iter(
      idb->NewInternalIterator(&arena, &range_del_agg, kMaxSequenceNumber));
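
Taken together, the hunks above are a mechanical rename: read paths now construct ReadRangeDelAggregator and compaction paths CompactionRangeDelAggregator. A minimal sketch of the read-path construction, mirroring the GetAllKeyVersions hunk above (the wrapping function and the bytewise comparator are illustrative assumptions, not part of this diff):

    #include "db/dbformat.h"
    #include "db/range_del_aggregator.h"
    #include "rocksdb/comparator.h"

    namespace rocksdb {

    // Illustrative sketch only: build a read-path aggregator whose visibility
    // is capped at kMaxSequenceNumber, as in the call sites changed above.
    void ReadAggregatorSketch() {
      InternalKeyComparator icmp(BytewiseComparator());
      ReadRangeDelAggregator range_del_agg(&icmp,
                                           kMaxSequenceNumber /* upper_bound */);
      // Fragmented tombstone iterators from memtables/SSTs would be added via:
      //   range_del_agg.AddTombstones(std::move(tombstone_iter));
    }

    }  // namespace rocksdb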