e45673dece
Summary:
Context: Index type `kBinarySearchWithFirstKey` added the ability for the sst file iterator to sometimes report a key from the index without reading the corresponding data block. This is useful when sst blocks are cut at some meaningful boundaries (e.g. one block per key prefix), and many seeks land between blocks (e.g. for each prefix, the ranges of keys in different sst files are nearly disjoint, so a typical seek needs to read a data block from only one file even if all files have the prefix).

But this added a new error condition, which rocksdb code was really not equipped to deal with: `InternalIterator::value()` may fail with an IO error or Status::Incomplete, but it's just a method returning a Slice, with no way to report an error instead. Before this PR, this type of error wasn't handled at all (an empty slice was returned), and the kBinarySearchWithFirstKey implementation was considered a prototype. Now that we (LogDevice) have experimented with kBinarySearchWithFirstKey for a while and confirmed that it's really useful, this PR adds the missing error handling.

It's a pretty inconvenient situation implementation-wise. The error needs to be reported from InternalIterator when trying to access the value. But there are ~700 call sites of `InternalIterator::value()`, most of which either can't hit the error condition (because the iterator is reading from a memtable or from an index or something) or wouldn't benefit from deferred loading of the value (e.g. a compaction iterator that reads all values anyway). Adding error handling to all these call sites would needlessly bloat the code.

So instead I made the deferred value loading optional: only the call sites that may use deferred loading have to call the new method `PrepareValue()` before calling `value()`. The feature is enabled with a new bool argument `allow_unprepared_value` to a bunch of methods that create iterators (it wouldn't make sense to put it in ReadOptions because it's completely internal to iterators, with virtually no user-visible effect). Lmk if you have better ideas.

Note that the deferred value loading only happens for *internal* iterators. The user-visible iterator (DBIter) always prepares the value before returning from Seek/Next/etc. We could go further and add an API to defer that value loading too, but that's most likely not useful for LogDevice, so it doesn't seem worth the complexity for now.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/6621

Test Plan: make -j5 check. Will also deploy to some logdevice test clusters and look at stats.

Reviewed By: siying

Differential Revision: D20786930

Pulled By: al13n321

fbshipit-source-id: 6da77d918bad3780522e918f17f4d5513d3e99ee
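For illustration, here is a minimal sketch (not part of this diff; the helper name CountValueBytes is hypothetical) of how a call site that opted into allow_unprepared_value is expected to consume an internal iterator, using the PrepareValue()/status() contract described above:

#include <cassert>
#include <cstdint>

#include "rocksdb/status.h"
#include "table/internal_iterator.h"

namespace ROCKSDB_NAMESPACE {

// Hypothetical helper: sums the sizes of all values. With
// allow_unprepared_value, key() is usable right after a seek/next, but
// value() may not be loaded yet; PrepareValue() performs the deferred
// data-block read when needed.
Status CountValueBytes(InternalIterator* iter, uint64_t* bytes) {
  *bytes = 0;
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    if (!iter->PrepareValue()) {
      // PrepareValue() failed: the iterator is invalidated and status()
      // carries the error (e.g. an IO error or Status::Incomplete).
      assert(!iter->Valid());
      return iter->status();
    }
    *bytes += iter->value().size();
  }
  return iter->status();
}

}  // namespace ROCKSDB_NAMESPACE

The IteratorWrapperBase changes below mirror this contract: the wrapper tracks whether the value has been prepared for the current entry and clears valid_ when PrepareValue() fails, so the error surfaces through status().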
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <set>

#include "table/internal_iterator.h"
#include "test_util/sync_point.h"

namespace ROCKSDB_NAMESPACE {

// An internal wrapper class with an interface similar to Iterator that
// caches the valid() and key() results for an underlying iterator.
// This can help avoid virtual function calls and also gives better
// cache locality.
template <class TValue = Slice>
class IteratorWrapperBase {
 public:
  IteratorWrapperBase() : iter_(nullptr), valid_(false) {}
  explicit IteratorWrapperBase(InternalIteratorBase<TValue>* _iter)
      : iter_(nullptr) {
    Set(_iter);
  }
  ~IteratorWrapperBase() {}
  InternalIteratorBase<TValue>* iter() const { return iter_; }

  // Set the underlying Iterator to _iter and return
  // the previous underlying Iterator.
  InternalIteratorBase<TValue>* Set(InternalIteratorBase<TValue>* _iter) {
    InternalIteratorBase<TValue>* old_iter = iter_;

    iter_ = _iter;
    if (iter_ == nullptr) {
      valid_ = false;
    } else {
      Update();
    }
    return old_iter;
  }

  void DeleteIter(bool is_arena_mode) {
    if (iter_) {
      if (!is_arena_mode) {
        delete iter_;
      } else {
        iter_->~InternalIteratorBase<TValue>();
      }
    }
  }

  // Iterator interface methods
  bool Valid() const { return valid_; }
  Slice key() const {
    assert(Valid());
    return result_.key;
  }
  TValue value() const {
    assert(Valid());
    return iter_->value();
  }
  // Methods below require iter() != nullptr
  Status status() const {
    assert(iter_);
    return iter_->status();
  }
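  // Loads the value if it hasn't been loaded yet (i.e. if the iterator was
  // created with allow_unprepared_value = true and the value is still
  // deferred). Returns true on success. On failure, invalidates the
  // iterator; the error is then available through status().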
  bool PrepareValue() {
    assert(Valid());
    if (result_.value_prepared) {
      return true;
    }
    if (iter_->PrepareValue()) {
      result_.value_prepared = true;
      return true;
    }

    assert(!iter_->Valid());
    valid_ = false;
    return false;
  }
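  // NextAndGetResult() advances the underlying iterator and fills result_
  // (the cached key, the upper-bound flag and the value_prepared flag) in a
  // single virtual call.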
  void Next() {
    assert(iter_);
    valid_ = iter_->NextAndGetResult(&result_);
    assert(!valid_ || iter_->status().ok());
  }
  void Prev() {
    assert(iter_);
    iter_->Prev();
    Update();
  }
  void Seek(const Slice& k) {
    assert(iter_);
    iter_->Seek(k);
    Update();
  }
  void SeekForPrev(const Slice& k) {
    assert(iter_);
    iter_->SeekForPrev(k);
    Update();
  }
  void SeekToFirst() {
    assert(iter_);
    iter_->SeekToFirst();
    Update();
  }
  void SeekToLast() {
    assert(iter_);
    iter_->SeekToLast();
    Update();
  }

  bool MayBeOutOfLowerBound() {
    assert(Valid());
    return iter_->MayBeOutOfLowerBound();
  }

  bool MayBeOutOfUpperBound() {
    assert(Valid());
    return result_.may_be_out_of_upper_bound;
  }

  void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) {
    assert(iter_);
    iter_->SetPinnedItersMgr(pinned_iters_mgr);
  }
  bool IsKeyPinned() const {
    assert(Valid());
    return iter_->IsKeyPinned();
  }
  bool IsValuePinned() const {
    assert(Valid());
    return iter_->IsValuePinned();
  }

  bool IsValuePrepared() const {
    return result_.value_prepared;
  }

 private:
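  // Re-syncs the cached state with the underlying iterator: caches the key,
  // conservatively assumes the result may be out of the upper bound, and
  // marks the value as not yet prepared.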
  void Update() {
    valid_ = iter_->Valid();
    if (valid_) {
      assert(iter_->status().ok());
      result_.key = iter_->key();
      result_.may_be_out_of_upper_bound = true;
      result_.value_prepared = false;
    }
  }

  InternalIteratorBase<TValue>* iter_;
  IterateResult result_;
  bool valid_;
};

using IteratorWrapper = IteratorWrapperBase<Slice>;

class Arena;

// Return an empty iterator (yields nothing) allocated from arena.
template <class TValue = Slice>
extern InternalIteratorBase<TValue>* NewEmptyInternalIterator(Arena* arena);

}  // namespace ROCKSDB_NAMESPACE