// Commit 0712d541d1: Allow Cleanables to delegate their cleanups to another
// Cleanable object so that cleaning can be delayed (e.g. while gathering merge
// operands) without having to create the Cleanable on the heap and keep it
// around. Applied here to the cleanups of BlockIter; improved in-memory
// benchmarks: +1.8% for the merge workload, +6.4% for the non-merge workload
// when a merge operator is specified.
// See https://github.com/facebook/rocksdb/pull/1711 (Differential Revision
// D4361163, pulled by maysamyabandeh).
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
#pragma once
#include <algorithm>
#include <memory>
#include <utility>
#include <vector>

#include "table/internal_iterator.h"

namespace rocksdb {
// PinnedIteratorsManager will be notified whenever we need to pin an Iterator
|
|
// and it will be responsible for deleting pinned Iterators when they are
|
|
// not needed anymore.
|
|
class PinnedIteratorsManager : public Cleanable {
|
|
public:
|
|
PinnedIteratorsManager() : pinning_enabled(false) {}
|
|
~PinnedIteratorsManager() {
|
|
if (pinning_enabled) {
|
|
ReleasePinnedData();
|
|
}
|
|
}
|
|
|
|
// Enable Iterators pinning
|
|
void StartPinning() {
|
|
assert(pinning_enabled == false);
|
|
pinning_enabled = true;
|
|
}
|
|
|
|
// Is pinning enabled ?
|
|
bool PinningEnabled() { return pinning_enabled; }
|
|
|
|
// Take ownership of iter and delete it when ReleasePinnedData() is called
|
|
void PinIterator(InternalIterator* iter, bool arena = false) {
|
|
if (arena) {
|
|
PinPtr(iter, &PinnedIteratorsManager::ReleaseArenaInternalIterator);
|
|
} else {
|
|
PinPtr(iter, &PinnedIteratorsManager::ReleaseInternalIterator);
|
|
}
|
|
}
|
|
|
|
typedef void (*ReleaseFunction)(void* arg1);
|
|
void PinPtr(void* ptr, ReleaseFunction release_func) {
|
|
assert(pinning_enabled);
|
|
if (ptr == nullptr) {
|
|
return;
|
|
}
|
|
pinned_ptrs_.emplace_back(ptr, release_func);
|
|
}
|
|
|
|
// Release pinned Iterators
|
|
inline void ReleasePinnedData() {
|
|
assert(pinning_enabled == true);
|
|
pinning_enabled = false;
|
|
|
|
// Remove duplicate pointers
|
|
std::sort(pinned_ptrs_.begin(), pinned_ptrs_.end());
|
|
auto unique_end = std::unique(pinned_ptrs_.begin(), pinned_ptrs_.end());
|
|
|
|
for (auto i = pinned_ptrs_.begin(); i != unique_end; ++i) {
|
|
void* ptr = i->first;
|
|
ReleaseFunction release_func = i->second;
|
|
release_func(ptr);
|
|
}
|
|
pinned_ptrs_.clear();
|
|
// Also do cleanups from the base Cleanable
|
|
Cleanable::Reset();
|
|
}
|
|
|
|
private:
|
|
static void ReleaseInternalIterator(void* ptr) {
|
|
delete reinterpret_cast<InternalIterator*>(ptr);
|
|
}
|
|
|
|
static void ReleaseArenaInternalIterator(void* ptr) {
|
|
reinterpret_cast<InternalIterator*>(ptr)->~InternalIterator();
|
|
}
|
|
|
|
bool pinning_enabled;
|
|
std::vector<std::pair<void*, ReleaseFunction>> pinned_ptrs_;
|
|
};
} // namespace rocksdb
|