Revert "Merging iterator to avoid child iterator reseek for some cases (#5286)"

This reverts commit 9fad3e21eb.
sdong 2019-10-01 16:48:35 -07:00
parent fbf178ec68
commit 56cb92bec6
7 changed files with 9 additions and 101 deletions


@@ -1,4 +1,7 @@
# Rocksdb Change Log
## Unreleased
* Revert the feature "Merging iterator to avoid child iterator reseek for some cases (#5286)" since it might cause wrong results when reseek happens with a different iterator upper bound.
## 6.3.5 (9/17/2019)
* Fix a bug introduced in 6.3 which could cause wrong results in a corner case when prefix bloom filter is used and the iterator is reseeked.
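For context, the "different iterator upper bound" case above refers to a caller that reuses one iterator while changing the Slice that ReadOptions::iterate_upper_bound points to between Seek() calls, the pattern the removed merging-iterator comment further down also mentions. Below is a minimal sketch of that usage against the public RocksDB API; the path and keys are arbitrary and it is not part of this commit.

#include <cassert>
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"

// Sketch of the usage pattern the revert protects (path/keys are arbitrary).
int main() {
  rocksdb::DB* db = nullptr;
  rocksdb::Options opts;
  opts.create_if_missing = true;
  assert(rocksdb::DB::Open(opts, "/tmp/upper_bound_demo", &db).ok());
  assert(db->Put(rocksdb::WriteOptions(), "a", "v").ok());
  assert(db->Put(rocksdb::WriteOptions(), "c", "v").ok());

  rocksdb::ReadOptions ro;
  rocksdb::Slice upper("b");
  ro.iterate_upper_bound = &upper;  // the iterator keeps a pointer to this Slice
  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(ro));

  it->Seek("a");  // "c" is excluded by the bound "b"
  upper = rocksdb::Slice("d");  // bound changed in place between seeks
  it->Seek("a");  // reseek: with the reverted optimization a child iterator
                  // positioned under the old bound could skip its real Seek(),
                  // so results might not reflect the new bound; after the
                  // revert every child is always re-seeked.
  for (; it->Valid(); it->Next()) {
    // expect to see "a" and "c" under the widened bound
  }
  it.reset();  // release the iterator before closing the DB
  delete db;
  return 0;
}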
@@ -58,7 +61,6 @@
* Fix a bug caused by secondary not skipping the beginning of new MANIFEST.
* On DB open, delete WAL trash files left behind in wal_dir
## 6.2.0 (4/30/2019)
### New Features
* Add an option `strict_bytes_per_sync` that causes a file-writing thread to block rather than exceed the limit on bytes pending writeback specified by `bytes_per_sync` or `wal_bytes_per_sync`.
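For context, a sketch of how the `strict_bytes_per_sync` option above is typically set alongside the existing sync pacing options; the byte limits are arbitrary examples and this is not part of this commit.

#include "rocksdb/options.h"

// Example values only; tune per workload.
rocksdb::Options MakeOptionsWithStrictSync() {
  rocksdb::Options options;
  options.bytes_per_sync = 1 << 20;      // request a sync for every ~1MB written to SST files
  options.wal_bytes_per_sync = 1 << 20;  // same pacing for the WAL
  // With strict_bytes_per_sync, the writing thread blocks rather than letting
  // the amount of dirty, not-yet-synced data exceed the limits above.
  options.strict_bytes_per_sync = true;
  return options;
}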


@@ -2548,75 +2548,6 @@ TEST_P(DBIteratorTest, AvoidReseekLevelIterator) {
  SyncPoint::GetInstance()->DisableProcessing();
}
TEST_P(DBIteratorTest, AvoidReseekChildIterator) {
  Options options = CurrentOptions();
  options.compression = CompressionType::kNoCompression;
  BlockBasedTableOptions table_options;
  table_options.block_size = 800;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  Reopen(options);
  Random rnd(301);
  std::string random_str = RandomString(&rnd, 180);
  ASSERT_OK(Put("1", random_str));
  ASSERT_OK(Put("2", random_str));
  ASSERT_OK(Put("3", random_str));
  ASSERT_OK(Put("4", random_str));
  ASSERT_OK(Put("8", random_str));
  ASSERT_OK(Put("9", random_str));
  ASSERT_OK(Flush());
  ASSERT_OK(Put("5", random_str));
  ASSERT_OK(Put("6", random_str));
  ASSERT_OK(Put("7", random_str));
  ASSERT_OK(Flush());
  // These two keys will be kept in memtable.
  ASSERT_OK(Put("0", random_str));
  ASSERT_OK(Put("8", random_str));
  int num_iter_wrapper_seek = 0;
  SyncPoint::GetInstance()->SetCallBack(
      "IteratorWrapper::Seek:0",
      [&](void* /*arg*/) { num_iter_wrapper_seek++; });
  SyncPoint::GetInstance()->EnableProcessing();
  {
    std::unique_ptr<Iterator> iter(NewIterator(ReadOptions()));
    iter->Seek("1");
    ASSERT_TRUE(iter->Valid());
    // DBIter always wraps internal iterator with IteratorWrapper,
    // and in merging iterator each child iterator will be wrapped
    // with IteratorWrapper.
    ASSERT_EQ(4, num_iter_wrapper_seek);
    // child position: 1 and 5
    num_iter_wrapper_seek = 0;
    iter->Seek("2");
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(3, num_iter_wrapper_seek);
    // child position: 2 and 5
    num_iter_wrapper_seek = 0;
    iter->Seek("6");
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(4, num_iter_wrapper_seek);
    // child position: 8 and 6
    num_iter_wrapper_seek = 0;
    iter->Seek("7");
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(3, num_iter_wrapper_seek);
    // child position: 8 and 7
    num_iter_wrapper_seek = 0;
    iter->Seek("5");
    ASSERT_TRUE(iter->Valid());
    ASSERT_EQ(4, num_iter_wrapper_seek);
  }
  SyncPoint::GetInstance()->DisableProcessing();
}
INSTANTIATE_TEST_CASE_P(DBIteratorTestInstance, DBIteratorTest,
                        testing::Values(true, false));


@@ -858,8 +858,7 @@ class LevelIterator final : public InternalIterator {
bool skip_filters, int level, RangeDelAggregator* range_del_agg,
const std::vector<AtomicCompactionUnitBoundary>* compaction_boundaries =
nullptr)
: InternalIterator(false),
table_cache_(table_cache),
: table_cache_(table_cache),
read_options_(read_options),
env_options_(env_options),
icomparator_(icomparator),


@@ -628,8 +628,7 @@ class BlockBasedTableIterator : public InternalIteratorBase<TValue> {
BlockType block_type, bool key_includes_seq = true,
bool index_key_is_full = true,
bool for_compaction = false)
: InternalIteratorBase<TValue>(false),
table_(table),
: table_(table),
read_options_(read_options),
icomp_(icomp),
user_comparator_(icomp.user_comparator()),


@@ -20,8 +20,7 @@ class PinnedIteratorsManager;
template <class TValue>
class InternalIteratorBase : public Cleanable {
 public:
  InternalIteratorBase() : is_mutable_(true) {}
  InternalIteratorBase(bool _is_mutable) : is_mutable_(_is_mutable) {}
  InternalIteratorBase() {}
  virtual ~InternalIteratorBase() {}
  // An iterator is either positioned at a key/value pair, or
@@ -120,7 +119,6 @@ class InternalIteratorBase : public Cleanable {
  virtual Status GetProperty(std::string /*prop_name*/, std::string* /*prop*/) {
    return Status::NotSupported("");
  }
  bool is_mutable() const { return is_mutable_; }
 protected:
  void SeekForPrevImpl(const Slice& target, const Comparator* cmp) {
@@ -132,7 +130,6 @@ class InternalIteratorBase : public Cleanable {
      Prev();
    }
  }
  bool is_mutable_;
 private:
  // No copying allowed


@@ -69,12 +69,7 @@ class IteratorWrapperBase {
    assert(!valid_ || iter_->status().ok());
  }
  void Prev() { assert(iter_); iter_->Prev(); Update(); }
  void Seek(const Slice& k) {
    TEST_SYNC_POINT("IteratorWrapper::Seek:0");
    assert(iter_);
    iter_->Seek(k);
    Update();
  }
  void Seek(const Slice& k) { assert(iter_); iter_->Seek(k); Update(); }
  void SeekForPrev(const Slice& k) {
    assert(iter_);
    iter_->SeekForPrev(k);


@@ -127,29 +127,14 @@ class MergingIterator : public InternalIterator {
  }
  void Seek(const Slice& target) override {
    bool is_increasing_reseek = false;
    if (current_ != nullptr && direction_ == kForward && status_.ok() &&
        !prefix_seek_mode_ && comparator_->Compare(target, key()) >= 0) {
      is_increasing_reseek = true;
    }
    ClearHeaps();
    status_ = Status::OK();
    for (auto& child : children_) {
      // If upper bound never changes, we can skip Seek() for
      // the !Valid() case too, but people do hack the code to change
      // upper bound between Seek(), so it's not a good idea to break
      // the API.
      // If DBIter is used on top of merging iterator, we probably
      // can skip mutable child iterators if they are invalid too,
      // but it's a less clean API. We can optimize for it later if
      // needed.
      if (!is_increasing_reseek || !child.Valid() ||
          comparator_->Compare(target, child.key()) > 0 ||
          child.iter()->is_mutable()) {
      {
        PERF_TIMER_GUARD(seek_child_seek_time);
        child.Seek(target);
        PERF_COUNTER_ADD(seek_child_seek_count, 1);
      }
      PERF_COUNTER_ADD(seek_child_seek_count, 1);
      if (child.Valid()) {
        assert(child.status().ok());
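For context, a simplified standalone sketch of the reverted idea: on a forward, non-decreasing reseek, skip a child Seek() when that child is already positioned at or past the new target. This is not RocksDB code; it ignores mutable children, prefix seek mode, and iterator upper bounds, and the upper bound interaction is exactly what made the real optimization unsafe.

#include <map>
#include <string>
#include <vector>

// Toy merging view over sorted children (std::map stands in for child
// iterators). Mirrors the spirit of the reverted MergingIterator::Seek().
struct MergingSketch {
  using Child = std::map<std::string, std::string>;

  explicit MergingSketch(std::vector<const Child*> cs)
      : children_(std::move(cs)), pos_(children_.size()) {
    for (size_t i = 0; i < children_.size(); ++i) pos_[i] = children_[i]->end();
  }

  void Seek(const std::string& target) {
    // "Increasing reseek": the view is positioned and the new target is not
    // behind the current merged key.
    const std::string* cur = key();
    bool is_increasing_reseek = (cur != nullptr && target >= *cur);
    for (size_t i = 0; i < children_.size(); ++i) {
      bool child_valid = (pos_[i] != children_[i]->end());
      // Re-seek the child unless it is already at a key >= target during an
      // increasing reseek; skipping that work is what the optimization saved.
      if (!is_increasing_reseek || !child_valid || pos_[i]->first < target) {
        pos_[i] = children_[i]->lower_bound(target);
      }
    }
  }

  // Smallest key among valid children, or nullptr if none.
  const std::string* key() const {
    const std::string* best = nullptr;
    for (size_t i = 0; i < children_.size(); ++i) {
      if (pos_[i] == children_[i]->end()) continue;
      if (best == nullptr || pos_[i]->first < *best) best = &pos_[i]->first;
    }
    return best;
  }

  std::vector<const Child*> children_;
  std::vector<Child::const_iterator> pos_;
};

With children {1, 2, 3, 4, 8, 9} and {5, 6, 7}, Seek("1") positions them at 1 and 5, and a later Seek("2") only re-seeks the first child; that saved child seek is what the removed DBIteratorTest.AvoidReseekChildIterator counted through the IteratorWrapper::Seek:0 sync point.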