457f77b9ff
Summary: The old RangeDelAggregator did expensive pre-processing work to create a collapsed, binary-searchable representation of range tombstones. With FragmentedRangeTombstoneIterator, much of this work is now unnecessary. RangeDelAggregatorV2 takes advantage of this by seeking in each iterator to find a covering tombstone in ShouldDelete, while doing minimal work in AddTombstones. The old RangeDelAggregator is still used during flush/compaction for now, though RangeDelAggregatorV2 will support those uses in a future PR.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/4649
Differential Revision: D13146964
Pulled By: abhimadan
fbshipit-source-id: be29a4c020fc440500c137216fcc1cf529571eb3
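As a rough sketch of the seek-based check described above (illustrative only; the helper name and signature below are assumptions, not RangeDelAggregatorV2's actual interface), a point key can be tested against a set of fragmented tombstone iterators from the header below roughly as follows:

// Hypothetical helper, shown only to illustrate the idea; not RocksDB API.
// Each fragmented iterator seeks to the tombstone stack covering the user key
// and reports the newest tombstone seqnum visible in the snapshot.
bool KeyIsCoveredByRangeTombstone(
    const ParsedInternalKey& parsed,
    const std::vector<FragmentedRangeTombstoneIterator*>& tombstone_iters) {
  for (FragmentedRangeTombstoneIterator* iter : tombstone_iters) {
    if (iter->MaxCoveringTombstoneSeqnum(parsed.user_key) > parsed.sequence) {
      return true;  // a newer visible range tombstone deletes this key
    }
  }
  return false;
}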
// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once

#include <list>
#include <memory>
#include <string>
#include <vector>

#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"
#include "rocksdb/status.h"
#include "table/internal_iterator.h"

namespace rocksdb {

struct FragmentedRangeTombstoneList {
 public:
  // A compact representation of a "stack" of range tombstone fragments, which
  // start and end at the same user keys but have different sequence numbers.
  // The members seq_start_idx and seq_end_idx are intended to be parameters to
  // seq_iter().
  struct RangeTombstoneStack {
    RangeTombstoneStack(const Slice& start, const Slice& end, size_t start_idx,
                        size_t end_idx)
        : start_key(start),
          end_key(end),
          seq_start_idx(start_idx),
          seq_end_idx(end_idx) {}

    Slice start_key;
    Slice end_key;
    size_t seq_start_idx;
    size_t seq_end_idx;
  };
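  // Example (illustrative): a stack holding the fragments [c, f)@8 and
  // [c, f)@5 has start_key = "c", end_key = "f", and {seq_start_idx,
  // seq_end_idx} delimiting the half-open range of entries {8, 5} (newest
  // first) in the shared sequence number vector, so that
  // seq_iter(seq_start_idx)..seq_iter(seq_end_idx) visits exactly this
  // stack's seqnums.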
  FragmentedRangeTombstoneList(
      std::unique_ptr<InternalIterator> unfragmented_tombstones,
      const InternalKeyComparator& icmp, bool one_time_use,
      SequenceNumber snapshot = kMaxSequenceNumber);

  std::vector<RangeTombstoneStack>::const_iterator begin() const {
    return tombstones_.begin();
  }

  std::vector<RangeTombstoneStack>::const_iterator end() const {
    return tombstones_.end();
  }

  std::vector<SequenceNumber>::const_iterator seq_iter(size_t idx) const {
    return std::next(tombstone_seqs_.begin(), idx);
  }

  std::vector<SequenceNumber>::const_iterator seq_begin() const {
    return tombstone_seqs_.begin();
  }

  std::vector<SequenceNumber>::const_iterator seq_end() const {
    return tombstone_seqs_.end();
  }

  bool empty() const { return tombstones_.size() == 0; }

 private:
  // Given an ordered range tombstone iterator unfragmented_tombstones,
  // "fragment" the tombstones into non-overlapping pieces, and store them in
  // tombstones_ and tombstone_seqs_.
  void FragmentTombstones(
      std::unique_ptr<InternalIterator> unfragmented_tombstones,
      const InternalKeyComparator& icmp, bool one_time_use,
      SequenceNumber snapshot = kMaxSequenceNumber);
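  // Example (illustrative): fragmenting the overlapping tombstones [a, e)@5
  // and [c, g)@3 produces the non-overlapping fragments [a, c) -> {5},
  // [c, e) -> {5, 3}, and [e, g) -> {3}; the fragment key ranges are stored
  // in tombstones_ and their seqnums are stored contiguously in
  // tombstone_seqs_.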

  std::vector<RangeTombstoneStack> tombstones_;
  std::vector<SequenceNumber> tombstone_seqs_;
  std::list<std::string> pinned_slices_;
  PinnedIteratorsManager pinned_iters_mgr_;
};

// FragmentedRangeTombstoneIterator converts an InternalIterator of a range-del
// meta block into an iterator over non-overlapping tombstone fragments. The
// tombstone fragmentation process should be more efficient than the range
// tombstone collapsing algorithm in RangeDelAggregator because this leverages
// the internal key ordering already provided by the input iterator, if
// applicable (when the iterator is unsorted, a new sorted iterator is created
// before proceeding). If there are few overlaps, creating a
// FragmentedRangeTombstoneIterator should be O(n), while the RangeDelAggregator
// tombstone collapsing is always O(n log n).
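//
// Example usage (an illustrative sketch; the variable names below are
// assumptions, not part of this API):
//   FragmentedRangeTombstoneList fragments(std::move(range_del_iter), icmp,
//                                          false /* one_time_use */);
//   FragmentedRangeTombstoneIterator frag_iter(&fragments, snapshot, icmp);
//   SequenceNumber covering_seq =
//       frag_iter.MaxCoveringTombstoneSeqnum(user_key);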
class FragmentedRangeTombstoneIterator : public InternalIterator {
 public:
  FragmentedRangeTombstoneIterator(
      const FragmentedRangeTombstoneList* tombstones, SequenceNumber snapshot,
      const InternalKeyComparator& icmp);
  FragmentedRangeTombstoneIterator(
      const std::shared_ptr<const FragmentedRangeTombstoneList>& tombstones,
      SequenceNumber snapshot, const InternalKeyComparator& icmp);

  void SeekToFirst() override;
  void SeekToLast() override;

  void SeekToTopFirst();
  void SeekToTopLast();

  // NOTE: Seek and SeekForPrev do not behave in the way InternalIterator
  // seeking should behave. This is OK because they are not currently used, but
  // eventually FragmentedRangeTombstoneIterator should no longer implement
  // InternalIterator.
  //
  // Seeks to the range tombstone that covers target at a seqnum in the
  // snapshot. If no such tombstone exists, seek to the earliest tombstone in
  // the snapshot that ends after target.
  void Seek(const Slice& target) override;
  // Seeks to the range tombstone that covers target at a seqnum in the
  // snapshot. If no such tombstone exists, seek to the latest tombstone in the
  // snapshot that starts before target.
  void SeekForPrev(const Slice& target) override;
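  // Example for Seek (illustrative): with fragments [a, c) -> {8, 5} and
  // [e, g) -> {4} and a snapshot at seqnum 6, Seek("b") positions the
  // iterator at [a, c)@5 (seqnum 8 is not visible in the snapshot), while
  // Seek("d") positions it at [e, g)@4, since no visible tombstone covers "d".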

  void Next() override;
  void Prev() override;

  void TopNext();
  void TopPrev();

  bool Valid() const override;
  Slice key() const override {
    MaybePinKey();
    return current_start_key_.Encode();
  }
  Slice value() const override { return pos_->end_key; }
  bool IsKeyPinned() const override { return false; }
  bool IsValuePinned() const override { return true; }
  Status status() const override { return Status::OK(); }

  bool empty() const { return tombstones_->empty(); }
  void Invalidate() {
    pos_ = tombstones_->end();
    seq_pos_ = tombstones_->seq_end();
  }

  // TODO: implement properly
  RangeTombstone tombstone() const {
    return RangeTombstone(start_key(), end_key(), seq());
  }
  Slice start_key() const { return pos_->start_key; }
  Slice end_key() const { return pos_->end_key; }
  SequenceNumber seq() const { return *seq_pos_; }
  ParsedInternalKey parsed_start_key() const {
    return ParsedInternalKey(pos_->start_key, kMaxSequenceNumber,
                             kTypeRangeDeletion);
  }
  ParsedInternalKey parsed_end_key() const {
    return ParsedInternalKey(pos_->end_key, kMaxSequenceNumber,
                             kTypeRangeDeletion);
  }
  ParsedInternalKey internal_key() const {
    return ParsedInternalKey(pos_->start_key, *seq_pos_, kTypeRangeDeletion);
  }

  // Returns the sequence number of the newest range tombstone visible in the
  // snapshot that covers user_key, or 0 if no visible tombstone covers it.
  SequenceNumber MaxCoveringTombstoneSeqnum(const Slice& user_key);

 private:
  using RangeTombstoneStack = FragmentedRangeTombstoneList::RangeTombstoneStack;

  struct RangeTombstoneStackStartComparator {
    explicit RangeTombstoneStackStartComparator(const Comparator* c) : cmp(c) {}

    bool operator()(const RangeTombstoneStack& a,
                    const RangeTombstoneStack& b) const {
      return cmp->Compare(a.start_key, b.start_key) < 0;
    }

    bool operator()(const RangeTombstoneStack& a, const Slice& b) const {
      return cmp->Compare(a.start_key, b) < 0;
    }

    bool operator()(const Slice& a, const RangeTombstoneStack& b) const {
      return cmp->Compare(a, b.start_key) < 0;
    }

    const Comparator* cmp;
  };

  struct RangeTombstoneStackEndComparator {
    explicit RangeTombstoneStackEndComparator(const Comparator* c) : cmp(c) {}

    bool operator()(const RangeTombstoneStack& a,
                    const RangeTombstoneStack& b) const {
      return cmp->Compare(a.end_key, b.end_key) < 0;
    }

    bool operator()(const RangeTombstoneStack& a, const Slice& b) const {
      return cmp->Compare(a.end_key, b) < 0;
    }

    bool operator()(const Slice& a, const RangeTombstoneStack& b) const {
      return cmp->Compare(a, b.end_key) < 0;
    }

    const Comparator* cmp;
  };

  void MaybePinKey() const {
    if (pos_ != tombstones_->end() && seq_pos_ != tombstones_->seq_end() &&
        (pinned_pos_ != pos_ || pinned_seq_pos_ != seq_pos_)) {
      current_start_key_.Set(pos_->start_key, *seq_pos_, kTypeRangeDeletion);
      pinned_pos_ = pos_;
      pinned_seq_pos_ = seq_pos_;
    }
  }

  void SeekToCoveringTombstone(const Slice& key);
  void SeekForPrevToCoveringTombstone(const Slice& key);
  void ScanForwardToVisibleTombstone();
  void ScanBackwardToVisibleTombstone();
  bool ValidPos() const {
    return Valid() && seq_pos_ != tombstones_->seq_iter(pos_->seq_end_idx);
  }

  const RangeTombstoneStackStartComparator tombstone_start_cmp_;
  const RangeTombstoneStackEndComparator tombstone_end_cmp_;
  const Comparator* ucmp_;
  std::shared_ptr<const FragmentedRangeTombstoneList> tombstones_ref_;
  const FragmentedRangeTombstoneList* tombstones_;
  SequenceNumber snapshot_;
  std::vector<RangeTombstoneStack>::const_iterator pos_;
  std::vector<SequenceNumber>::const_iterator seq_pos_;
  mutable std::vector<RangeTombstoneStack>::const_iterator pinned_pos_;
  mutable std::vector<SequenceNumber>::const_iterator pinned_seq_pos_;
  mutable InternalKey current_start_key_;
};

}  // namespace rocksdb