rocksdb/table/iterator.cc
Maysam Yabandeh 0712d541d1 Delegate Cleanables
Summary:
Cleanable objects perform their registered cleanups when they are
destructed. Sometimes, however, we would rather delay this cleanup, e.g.,
while gathering merge operands. The current approach is to create the
Cleanable object on the heap (instead of on the stack) and to delay
deleting it.

By allowing a Cleanable to delegate its cleanups to another Cleanable
object, we can delay the cleanup without having to create the Cleanable
object on the heap and keep it around. This patch applies this technique
to the cleanups of BlockIter and shows improved performance on some
in-memory benchmarks: +1.8% for the merge workload and +6.4% for the
non-merge workload when a merge operator is specified.
https://our.intern.facebook.com/intern/tasks?t=15168163
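
As an illustrative sketch of the intended usage (the identifiers
`ReleaseBlock` and `block` are hypothetical, not taken from the patch),
delegation lets a stack-allocated iterator hand its pending cleanups to a
longer-lived Cleanable:

  Cleanable pinned;                    // outlives the iterator
  {
    BlockIter iter;                    // stack-allocated
    iter.RegisterCleanup(&ReleaseBlock, block, nullptr);
    iter.DelegateCleanupsTo(&pinned);  // move, do not run, the cleanup
  }                                    // iter destructed; nothing released yet
  // ... consume the merge operands that point into `block` ...
  // `pinned` destructed here; ReleaseBlock(block, nullptr) runs now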

Non-merge benchmark:
TEST_TMPDIR=/dev/shm/v100nocomp/ ./db_bench --benchmarks=fillrandom
--num=1000000 -value_size=100 -compression_type=none

Reading random with no merge operator specified:
TEST_TMPDIR=/dev/shm/v100nocomp/ ./db_bench
--benchmarks="read
Closes https://github.com/facebook/rocksdb/pull/1711

Differential Revision: D4361163

Pulled By: maysamyabandeh

fbshipit-source-id: 9801e07
2016-12-29 15:54:19 -08:00


// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "rocksdb/iterator.h"
#include "table/internal_iterator.h"
#include "table/iterator_wrapper.h"
#include "util/arena.h"
namespace rocksdb {

Cleanable::Cleanable() {
  cleanup_.function = nullptr;
  cleanup_.next = nullptr;
}

Cleanable::~Cleanable() { DoCleanup(); }

void Cleanable::Reset() {
  DoCleanup();
  cleanup_.function = nullptr;
  cleanup_.next = nullptr;
}

void Cleanable::DoCleanup() {
  if (cleanup_.function != nullptr) {
    (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2);
    for (Cleanup* c = cleanup_.next; c != nullptr;) {
      (*c->function)(c->arg1, c->arg2);
      Cleanup* next = c->next;
      delete c;
      c = next;
    }
  }
}
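
// Illustrative usage of RegisterCleanup (a sketch; `Block` and `DeleteBlock`
// are hypothetical, not part of this file): a caller can tie a heap
// allocation's lifetime to a Cleanable so that DoCleanup() frees it:
//
//   static void DeleteBlock(void* arg1, void* /*arg2*/) {
//     delete reinterpret_cast<Block*>(arg1);
//   }
//   ...
//   cleanable->RegisterCleanup(&DeleteBlock, block, nullptr);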
// If the entire linked list were on the heap, we could have simply attached
// one linked list to another. However, the head is an embedded object, to
// avoid the cost of creating objects for the most common use case, when the
// Cleanable has only one Cleanup to do. We could put everything on the heap
// if benchmarks show no negative impact on performance.
// Also, we need to iterate over the linked list since there is no pointer to
// the tail. We could add a tail pointer, but maintaining it might negatively
// impact performance for the common case of one cleanup, where the tail
// pointer is not needed. Again, benchmarks could clarify that.
// Even without a tail pointer we could iterate over the list, find the tail,
// and update only that node, without inserting the Cleanups one by one. This
// however would be redundant when the source Cleanable has only one or a few
// Cleanups, which is the case most of the time.
// TODO(myabandeh): if the list is too long we should maintain a tail pointer
// and have the entire list (minus the head, which has to be inserted
// separately) merged with the target linked list at once.
void Cleanable::DelegateCleanupsTo(Cleanable* other) {
  assert(other != nullptr);
  if (cleanup_.function == nullptr) {
    return;
  }
  Cleanup* c = &cleanup_;
  other->RegisterCleanup(c->function, c->arg1, c->arg2);
  c = c->next;
  while (c != nullptr) {
    Cleanup* next = c->next;
    other->RegisterCleanup(c);
    c = next;
  }
  cleanup_.function = nullptr;
  cleanup_.next = nullptr;
}
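
// Illustrative effect of DelegateCleanupsTo (a sketch derived from the code
// above, with hypothetical cleanup functions f1 and f2):
//
//   a.RegisterCleanup(f1, a1, a2);  // stored in a's embedded head node
//   a.RegisterCleanup(f2, b1, b2);  // stored in a heap-allocated node
//   a.DelegateCleanupsTo(&b);
//
// Afterwards `a` is empty (its destructor runs no cleanups) and `b` will run
// both f1 and f2 when it is destructed or Reset.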
void Cleanable::RegisterCleanup(Cleanable::Cleanup* c) {
  assert(c != nullptr);
  if (cleanup_.function == nullptr) {
    cleanup_.function = c->function;
    cleanup_.arg1 = c->arg1;
    cleanup_.arg2 = c->arg2;
    delete c;
  } else {
    c->next = cleanup_.next;
    cleanup_.next = c;
  }
}
void Cleanable::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
  assert(func != nullptr);
  Cleanup* c;
  if (cleanup_.function == nullptr) {
    c = &cleanup_;
  } else {
    c = new Cleanup;
    c->next = cleanup_.next;
    cleanup_.next = c;
  }
  c->function = func;
  c->arg1 = arg1;
  c->arg2 = arg2;
}
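
// Note on ordering (derived from the two RegisterCleanup overloads above):
// after registering f1, f2, f3 in that order, the list is
//
//   head{f1} -> node{f3} -> node{f2}
//
// because new nodes are spliced in right after the embedded head. DoCleanup()
// therefore runs f1, then f3, then f2; callers should not depend on a
// particular ordering among registered cleanups.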
Status Iterator::GetProperty(std::string prop_name, std::string* prop) {
  if (prop == nullptr) {
    return Status::InvalidArgument("prop is nullptr");
  }
  if (prop_name == "rocksdb.iterator.is-key-pinned") {
    *prop = "0";
    return Status::OK();
  }
  return Status::InvalidArgument("Unidentified property.");
}
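
// Illustrative call (a sketch):
//
//   std::string is_pinned;
//   Status s = iter->GetProperty("rocksdb.iterator.is-key-pinned", &is_pinned);
//   // For this base implementation s.ok() holds and is_pinned == "0";
//   // subclasses may override GetProperty to report "1" when keys are pinned.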
namespace {

class EmptyIterator : public Iterator {
 public:
  explicit EmptyIterator(const Status& s) : status_(s) {}
  virtual bool Valid() const override { return false; }
  virtual void Seek(const Slice& target) override {}
  virtual void SeekForPrev(const Slice& target) override {}
  virtual void SeekToFirst() override {}
  virtual void SeekToLast() override {}
  virtual void Next() override { assert(false); }
  virtual void Prev() override { assert(false); }
  Slice key() const override {
    assert(false);
    return Slice();
  }
  Slice value() const override {
    assert(false);
    return Slice();
  }
  virtual Status status() const override { return status_; }

 private:
  Status status_;
};
class EmptyInternalIterator : public InternalIterator {
 public:
  explicit EmptyInternalIterator(const Status& s) : status_(s) {}
  virtual bool Valid() const override { return false; }
  virtual void Seek(const Slice& target) override {}
  virtual void SeekForPrev(const Slice& target) override {}
  virtual void SeekToFirst() override {}
  virtual void SeekToLast() override {}
  virtual void Next() override { assert(false); }
  virtual void Prev() override { assert(false); }
  Slice key() const override {
    assert(false);
    return Slice();
  }
  Slice value() const override {
    assert(false);
    return Slice();
  }
  virtual Status status() const override { return status_; }

 private:
  Status status_;
};

}  // namespace
Iterator* NewEmptyIterator() { return new EmptyIterator(Status::OK()); }

Iterator* NewErrorIterator(const Status& status) {
  return new EmptyIterator(status);
}

InternalIterator* NewEmptyInternalIterator() {
  return new EmptyInternalIterator(Status::OK());
}
InternalIterator* NewEmptyInternalIterator(Arena* arena) {
  if (arena == nullptr) {
    return NewEmptyInternalIterator();
  } else {
    auto mem = arena->AllocateAligned(sizeof(EmptyInternalIterator));
    return new (mem) EmptyInternalIterator(Status::OK());
  }
}
InternalIterator* NewErrorInternalIterator(const Status& status) {
  return new EmptyInternalIterator(status);
}

InternalIterator* NewErrorInternalIterator(const Status& status, Arena* arena) {
  if (arena == nullptr) {
    return NewErrorInternalIterator(status);
  } else {
    auto mem = arena->AllocateAligned(sizeof(EmptyInternalIterator));
    return new (mem) EmptyInternalIterator(status);
  }
}
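
// Note on the Arena overloads above (a usage sketch, not from this file):
// objects constructed via placement new into an Arena must not be freed with
// `delete`; run the destructor explicitly if needed and let the Arena reclaim
// the memory:
//
//   Arena arena;
//   InternalIterator* it = NewEmptyInternalIterator(&arena);
//   ...
//   it->~InternalIterator();  // no delete; memory is owned by `arena`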
} // namespace rocksdb