rocksdb/table/iterator_wrapper.h
Andrew Kryczka 75d3b6fdf0 Redesign block cache pinning API (#7520)
Summary:
The old flag-based APIs (`BlockBasedTableOptions::pin_l0_filter_and_index_blocks_in_cache` and `BlockBasedTableOptions::pin_top_level_index_and_filter`) were insufficient for our needs. For example, it was impossible to pin only unpartitioned meta-blocks, which could be useful to prevent block cache contention when turning on dictionary compression or during a migration to partitioned indexes/filters. It was also impossible to pin all meta-blocks in memory while having predictable memory usage via block cache. If we had continued adding flags to address these scenarios, they would have overlapped significantly, causing confusion. Instead, this PR deprecates the flags and starts a new API with non-overlapping options.
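
For illustration, a minimal sketch of configuring the replacement API. The `metadata_cache_options` / `PinningTier` names are those added by this PR in `include/rocksdb/table.h`; the `MakeOptions()` helper and the particular tiers chosen here are illustrative, not part of the change:

```cpp
#include "rocksdb/options.h"
#include "rocksdb/table.h"

// Illustrative helper (not part of this PR): pin all unpartitioned
// meta-blocks and top-level index blocks, while index/filter partitions
// are pinned only for flushed (and similarly small) files.
rocksdb::Options MakeOptions() {
  rocksdb::BlockBasedTableOptions bbto;
  bbto.metadata_cache_options.unpartitioned_pinning =
      rocksdb::PinningTier::kAll;
  bbto.metadata_cache_options.top_level_index_pinning =
      rocksdb::PinningTier::kAll;
  bbto.metadata_cache_options.partition_pinning =
      rocksdb::PinningTier::kFlushedAndSimilar;

  rocksdb::Options options;
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(bbto));
  return options;
}
```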

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7520

Test Plan:
- new unit test
- added new options to stress/crash test and ran for a while: `$ python tools/db_crashtest.py blackbox --simple --max_key=1000000 -write_buffer_size=1048576 -target_file_size_base=1048576 -max_bytes_for_level_base=4194304 --interval=10 -value_size_mult=33 -column_families=1 -reopen=0`

Reviewed By: pdillinger

Differential Revision: D24200034

Pulled By: ajkr

fbshipit-source-id: 3fa7cfc71e7960f7a867511dd6ae5834dd73b13e
2020-10-11 14:58:24 -07:00


// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once

#include <set>

#include "table/internal_iterator.h"
#include "test_util/sync_point.h"

namespace ROCKSDB_NAMESPACE {

// An internal wrapper class with an interface similar to Iterator that caches
// the valid() and key() results for an underlying iterator.
// This can help avoid virtual function calls and also gives better
// cache locality.
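//
// A sketch of typical use (illustrative only; `table_iter` stands in for a
// hypothetical InternalIterator* obtained elsewhere):
//
//   IteratorWrapper wrapper(table_iter);
//   for (wrapper.SeekToFirst(); wrapper.Valid(); wrapper.Next()) {
//     // key() is served from the cached result; no extra virtual call.
//     Consume(wrapper.key(), wrapper.value());  // hypothetical consumer
//   }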
template <class TValue = Slice>
class IteratorWrapperBase {
 public:
  IteratorWrapperBase() : iter_(nullptr), valid_(false) {}
  explicit IteratorWrapperBase(InternalIteratorBase<TValue>* _iter)
      : iter_(nullptr) {
    Set(_iter);
  }
  ~IteratorWrapperBase() {}
  InternalIteratorBase<TValue>* iter() const { return iter_; }

  // Set the underlying Iterator to _iter and return the previous underlying
  // Iterator. Note that this does not delete the previous iterator.
  InternalIteratorBase<TValue>* Set(InternalIteratorBase<TValue>* _iter) {
    InternalIteratorBase<TValue>* old_iter = iter_;

    iter_ = _iter;
    if (iter_ == nullptr) {
      valid_ = false;
    } else {
      Update();
    }
    return old_iter;
  }

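  // Destroy the wrapped iterator, if any. In arena mode the iterator was
  // placement-constructed in arena memory, so only its destructor runs and
  // the arena reclaims the storage.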
  void DeleteIter(bool is_arena_mode) {
    if (iter_) {
      if (!is_arena_mode) {
        delete iter_;
      } else {
        iter_->~InternalIteratorBase<TValue>();
      }
    }
  }

  // Iterator interface methods
  bool Valid() const { return valid_; }
  Slice key() const {
    assert(Valid());
    return result_.key;
  }
  TValue value() const {
    assert(Valid());
    return iter_->value();
  }

  // Methods below require iter() != nullptr
  Status status() const {
    assert(iter_);
    return iter_->status();
  }
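  // Force loading of the value if the underlying iterator defers it. On
  // failure the underlying iterator is invalid, and this wrapper becomes
  // invalid as well.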
  bool PrepareValue() {
    assert(Valid());
    if (result_.value_prepared) {
      return true;
    }
    if (iter_->PrepareValue()) {
      result_.value_prepared = true;
      return true;
    }
    assert(!iter_->Valid());
    valid_ = false;
    return false;
  }
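  // Advance via the underlying iterator's NextAndGetResult(), which folds
  // Next() + Valid() + key() into a single virtual call and refreshes the
  // cached result.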
  void Next() {
    assert(iter_);
    valid_ = iter_->NextAndGetResult(&result_);
    assert(!valid_ || iter_->status().ok());
  }
  bool NextAndGetResult(IterateResult* result) {
    assert(iter_);
    valid_ = iter_->NextAndGetResult(&result_);
    *result = result_;
    assert(!valid_ || iter_->status().ok());
    return valid_;
  }
  void Prev() {
    assert(iter_);
    iter_->Prev();
    Update();
  }
  void Seek(const Slice& k) {
    assert(iter_);
    iter_->Seek(k);
    Update();
  }
  void SeekForPrev(const Slice& k) {
    assert(iter_);
    iter_->SeekForPrev(k);
    Update();
  }
  void SeekToFirst() {
    assert(iter_);
    iter_->SeekToFirst();
    Update();
  }
  void SeekToLast() {
    assert(iter_);
    iter_->SeekToLast();
    Update();
  }
  bool MayBeOutOfLowerBound() {
    assert(Valid());
    return iter_->MayBeOutOfLowerBound();
  }
  IterBoundCheck UpperBoundCheckResult() {
    assert(Valid());
    return result_.bound_check_result;
  }
  void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) {
    assert(iter_);
    iter_->SetPinnedItersMgr(pinned_iters_mgr);
  }
  bool IsKeyPinned() const {
    assert(Valid());
    return iter_->IsKeyPinned();
  }
  bool IsValuePinned() const {
    assert(Valid());
    return iter_->IsValuePinned();
  }
  bool IsValuePrepared() const { return result_.value_prepared; }
  Slice user_key() const {
    assert(Valid());
    return iter_->user_key();
  }

 private:
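  // Refresh the cached validity and key after a repositioning operation. The
  // bound-check and value-preparation states are reset because they refer to
  // the previous position.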
  void Update() {
    valid_ = iter_->Valid();
    if (valid_) {
      assert(iter_->status().ok());
      result_.key = iter_->key();
      result_.bound_check_result = IterBoundCheck::kUnknown;
      result_.value_prepared = false;
    }
  }

  InternalIteratorBase<TValue>* iter_;
  IterateResult result_;
  bool valid_;
};

using IteratorWrapper = IteratorWrapperBase<Slice>;

class Arena;
// Return an empty iterator (yields nothing) allocated from arena.
template <class TValue = Slice>
extern InternalIteratorBase<TValue>* NewEmptyInternalIterator(Arena* arena);

}  // namespace ROCKSDB_NAMESPACE