// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "table/merger.h"

#include <vector>

#include "rocksdb/comparator.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "table/iter_heap.h"
#include "table/iterator_wrapper.h"
#include "util/arena.h"
#include "util/heap.h"
#include "util/stop_watch.h"
#include "util/sync_point.h"
#include "util/perf_context_imp.h"
#include "util/autovector.h"

namespace rocksdb {
// Without anonymous namespace here, we fail the warning -Wmissing-prototypes
namespace {
typedef BinaryHeap<IteratorWrapper*, MaxIteratorComparator> MergerMaxIterHeap;
typedef BinaryHeap<IteratorWrapper*, MinIteratorComparator> MergerMinIterHeap;
}  // namespace

const size_t kNumIterReserve = 4;
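
// MergingIterator performs a k-way merge over its child iterators: valid
// children sit in a binary heap ordered by the user comparator, current_
// caches the heap top, and each Next()/Prev() step is O(log k) heap work.
// A minimal usage sketch (the child iterator and comparator names below are
// hypothetical):
//
//   Iterator* children[] = {memtable_iter, l0_iter, l1_iter};
//   Iterator* merged = NewMergingIterator(icmp, children, 3, nullptr);
//   for (merged->SeekToFirst(); merged->Valid(); merged->Next()) {
//     // keys from all three children come out in icmp order
//   }
//   delete merged;  // non-arena mode, so plain delete is fine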
class MergingIterator : public Iterator {
 public:
  MergingIterator(const Comparator* comparator, Iterator** children, int n,
                  bool is_arena_mode)
      : is_arena_mode_(is_arena_mode),
        comparator_(comparator),
        current_(nullptr),
        direction_(kForward),
        minHeap_(comparator_) {
    children_.resize(n);
    for (int i = 0; i < n; i++) {
      children_[i].Set(children[i]);
    }
    for (auto& child : children_) {
      if (child.Valid()) {
        minHeap_.push(&child);
      }
    }
    current_ = CurrentForward();
  }
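
  // AddIterator() lets MergeIteratorBuilder grow the child set incrementally
  // after construction; each new child is appended to children_ and, if it is
  // valid, pushed into the min-heap so that current_ stays consistent.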
  virtual void AddIterator(Iterator* iter) {
    assert(direction_ == kForward);
    children_.emplace_back(iter);
    // Take a reference, not a copy: the heap must point at the wrapper that
    // lives inside children_, not at a temporary.
    auto& new_wrapper = children_.back();
    if (new_wrapper.Valid()) {
      minHeap_.push(&new_wrapper);
      current_ = CurrentForward();
    }
  }

  virtual ~MergingIterator() {
    for (auto& child : children_) {
      child.DeleteIter(is_arena_mode_);
    }
  }

  virtual bool Valid() const override { return (current_ != nullptr); }

  virtual void SeekToFirst() override {
    ClearHeaps();
    for (auto& child : children_) {
      child.SeekToFirst();
      if (child.Valid()) {
        minHeap_.push(&child);
      }
    }
    direction_ = kForward;
    current_ = CurrentForward();
  }

  virtual void SeekToLast() override {
    ClearHeaps();
    InitMaxHeap();
    for (auto& child : children_) {
      child.SeekToLast();
      if (child.Valid()) {
        maxHeap_->push(&child);
      }
    }
    direction_ = kReverse;
    current_ = CurrentReverse();
  }
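
  // Seek() re-positions every child and rebuilds the min-heap from scratch,
  // so seeking over k children costs O(k log k) heap pushes on top of the
  // child seeks themselves, whereas Next()/Prev() only touch the heap top
  // and cost O(log k).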
  virtual void Seek(const Slice& target) override {
    ClearHeaps();
    for (auto& child : children_) {
      {
        PERF_TIMER_GUARD(seek_child_seek_time);
        child.Seek(target);
      }
      PERF_COUNTER_ADD(seek_child_seek_count, 1);

      if (child.Valid()) {
        PERF_TIMER_GUARD(seek_min_heap_time);
        minHeap_.push(&child);
      }
    }
    direction_ = kForward;
    {
      PERF_TIMER_GUARD(seek_min_heap_time);
      current_ = CurrentForward();
    }
  }

  virtual void Next() override {
    assert(Valid());

    // Ensure that all children are positioned after key().
    // If we are moving in the forward direction, it is already
    // true for all of the non-current children since current_ is
    // the smallest child and key() == current_->key().
    if (direction_ != kForward) {
      // Otherwise, advance the non-current children.  We advance current_
      // just after the if-block.
      ClearHeaps();
      for (auto& child : children_) {
        if (&child != current_) {
          child.Seek(key());
          if (child.Valid() &&
              comparator_->Compare(key(), child.key()) == 0) {
            child.Next();
          }
        }
        if (child.Valid()) {
          minHeap_.push(&child);
        }
      }
      direction_ = kForward;
      // The loop advanced all non-current children to be > key() so current_
      // should still be strictly the smallest key.
      assert(current_ == CurrentForward());
    }
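
    // Example of the direction switch above, with hypothetical keys: given
    // children A = {1, 3, 5} and B = {2, 3, 4}, and current_ == A positioned
    // at 3 after reverse iteration, the block above re-seeks B to 3 and steps
    // it past the equal key to 4.  The min-heap then holds {A@3, B@4}; the
    // code below advances A to 5 and makes B@4 the new current_.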

    // For the heap modifications below to be correct, current_ must be the
    // current top of the heap.
    assert(current_ == CurrentForward());

    // current_ points to the current record.  Move the iterator forward.
    current_->Next();
    if (current_->Valid()) {
      // current is still valid after the Next() call above.  Call
      // replace_top() to restore the heap property.  When the same child
      // iterator yields a sequence of keys, this is cheap.
      minHeap_.replace_top(current_);
    } else {
      // current stopped being valid, remove it from the heap.
      minHeap_.pop();
    }
    current_ = CurrentForward();
  }

  virtual void Prev() override {
    assert(Valid());
    // Ensure that all children are positioned before key().
    // If we are moving in the reverse direction, it is already
    // true for all of the non-current children since current_ is
    // the largest child and key() == current_->key().
    if (direction_ != kReverse) {
      // Otherwise, retreat the non-current children.  We retreat current_
      // just after the if-block.
      ClearHeaps();
      InitMaxHeap();
      for (auto& child : children_) {
        if (&child != current_) {
          child.Seek(key());
          if (child.Valid()) {
            // Child is at first entry >= key().  Step back one to be < key()
            TEST_SYNC_POINT_CALLBACK("MergeIterator::Prev:BeforePrev", &child);
            child.Prev();
          } else {
            // Child has no entries >= key().  Position at last entry.
            TEST_SYNC_POINT("MergeIterator::Prev:BeforeSeekToLast");
            child.SeekToLast();
          }
        }
        if (child.Valid()) {
          maxHeap_->push(&child);
        }
      }
      direction_ = kReverse;
      // Note that we don't do assert(current_ == CurrentReverse()) here
      // because it is possible to have some keys larger than the seek-key
      // inserted between Seek() and SeekToLast(), which makes current_ not
      // equal to CurrentReverse().
      current_ = CurrentReverse();
    }

    // For the heap modifications below to be correct, current_ must be the
    // current top of the heap.
    assert(current_ == CurrentReverse());

    current_->Prev();
    if (current_->Valid()) {
      // current is still valid after the Prev() call above.  Call
      // replace_top() to restore the heap property.  When the same child
      // iterator yields a sequence of keys, this is cheap.
      maxHeap_->replace_top(current_);
    } else {
      // current stopped being valid, remove it from the heap.
      maxHeap_->pop();
    }
    current_ = CurrentReverse();
  }

  virtual Slice key() const override {
    assert(Valid());
    return current_->key();
  }

  virtual Slice value() const override {
    assert(Valid());
    return current_->value();
  }

  virtual Status status() const override {
    Status s;
    for (auto& child : children_) {
      s = child.status();
      if (!s.ok()) {
        break;
      }
    }
    return s;
  }

 private:
  // Clears heaps for both directions, used when changing direction or seeking
  void ClearHeaps();
  // Ensures that maxHeap_ is initialized when starting to go in the reverse
  // direction
  void InitMaxHeap();

  bool is_arena_mode_;
  const Comparator* comparator_;
  autovector<IteratorWrapper, kNumIterReserve> children_;

  // Cached pointer to child iterator with the current key, or nullptr if no
  // child iterators are valid.  This is the top of minHeap_ or maxHeap_
  // depending on the direction.
  IteratorWrapper* current_;
  // Which direction is the iterator moving?
  enum Direction {
    kForward,
    kReverse
  };
  Direction direction_;
  MergerMinIterHeap minHeap_;
  // Max heap is used for reverse iteration, which is way less common than
  // forward.  Lazily initialize it to save memory.
  std::unique_ptr<MergerMaxIterHeap> maxHeap_;

  IteratorWrapper* CurrentForward() const {
    assert(direction_ == kForward);
    return !minHeap_.empty() ? minHeap_.top() : nullptr;
  }

  IteratorWrapper* CurrentReverse() const {
    assert(direction_ == kReverse);
    assert(maxHeap_);
    return !maxHeap_->empty() ? maxHeap_->top() : nullptr;
  }
};

void MergingIterator::ClearHeaps() {
  minHeap_.clear();
  if (maxHeap_) {
    maxHeap_->clear();
  }
}

void MergingIterator::InitMaxHeap() {
  if (!maxHeap_) {
    maxHeap_.reset(new MergerMaxIterHeap(comparator_));
  }
}
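
// NewMergingIterator() below returns the single child directly when n == 1
// (no merge overhead) and an empty iterator when n == 0.  When an Arena is
// supplied, the MergingIterator is placement-new'd into the arena with
// is_arena_mode set; the destructor then hands that flag to
// IteratorWrapper::DeleteIter(), which is expected to destroy arena-allocated
// children in place rather than calling delete on them.  In arena mode the
// merging iterator itself must not be deleted directly either; its storage is
// owned by the arena.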
Iterator* NewMergingIterator(const Comparator* cmp, Iterator** list, int n,
                             Arena* arena) {
  assert(n >= 0);
  if (n == 0) {
    return NewEmptyIterator(arena);
  } else if (n == 1) {
    return list[0];
  } else {
    if (arena == nullptr) {
      return new MergingIterator(cmp, list, n, false);
    } else {
      auto mem = arena->AllocateAligned(sizeof(MergingIterator));
      return new (mem) MergingIterator(cmp, list, n, true);
    }
  }
}

MergeIteratorBuilder::MergeIteratorBuilder(const Comparator* comparator,
                                           Arena* a)
    : first_iter(nullptr), use_merging_iter(false), arena(a) {
  auto mem = arena->AllocateAligned(sizeof(MergingIterator));
  merge_iter = new (mem) MergingIterator(comparator, nullptr, 0, true);
}
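
// The builder defers using the arena-allocated MergingIterator until a second
// child is added: with zero or one AddIterator() call, Finish() hands back the
// single child (or nullptr) directly, so the common single-source case pays no
// merge overhead.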
void MergeIteratorBuilder::AddIterator(Iterator* iter) {
  if (!use_merging_iter && first_iter != nullptr) {
    merge_iter->AddIterator(first_iter);
    use_merging_iter = true;
  }
  if (use_merging_iter) {
    merge_iter->AddIterator(iter);
  } else {
    first_iter = iter;
  }
}

Iterator* MergeIteratorBuilder::Finish() {
  if (!use_merging_iter) {
    return first_iter;
  } else {
    auto ret = merge_iter;
    merge_iter = nullptr;
    return ret;
  }
}

}  // namespace rocksdb