49623f9c8e
Summary:
**Context:**
Through heap profiling, we discovered that `BlockBasedTableReader` objects can accumulate and lead to high memory usage (e.g., with `max_open_files = -1`). This memory is currently not tracked, not constrained, and not evictable from the block cache. As a first step to improve this, and similar to https://github.com/facebook/rocksdb/pull/8428, this PR tracks an estimate of each `BlockBasedTableReader` object's memory in the block cache and fails future table-reader creation if the memory usage would exceed the cache's available space at the time of creation.

**Summary:**
- Approximate the memory usage of the big memory users (`BlockBasedTable::Rep` and `TableProperties`) in addition to the already-estimated ones (filter block / index block / uncompression dictionary)
- Charge all of these memory usages to the block cache on `BlockBasedTable::Open()` and release them on `~BlockBasedTable()`, as there is no memory usage fluctuation of concern in between
- Refactor CacheReservationManager (and its call sites) to add the concurrency support needed by BlockBasedTable in this PR

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9748

Test Plan:
- New unit tests
- db bench: `OpenDb`: **-0.52% in ms**
  - Setup: `./db_bench -benchmarks=fillseq -db=/dev/shm/testdb -disable_auto_compactions=1 -write_buffer_size=1048576`
  - Repeated runs with pre-change (feature off) and post-change (feature on), benchmarking `OpenDb`: `./db_bench -benchmarks=readrandom -use_existing_db=1 -db=/dev/shm/testdb -reserve_table_reader_memory=true (remove this when running w/o feature) -file_opening_threads=3 -open_files=-1 -report_open_timing=true | egrep 'OpenDb:'`

#-run | (feature-off) avg milliseconds | std milliseconds | (feature-on) avg milliseconds | std milliseconds | change (%)
-- | -- | -- | -- | -- | --
10 | 11.4018 | 5.95173 | 9.47788 | 1.57538 | -16.87382694
20 | 9.23746 | 0.841053 | 9.32377 | 1.14074 | 0.9343477536
40 | 9.0876 | 0.671129 | 9.35053 | 1.11713 | 2.893283155
80 | 9.72514 | 2.28459 | 9.52013 | 1.0894 | -2.108041632
160 | 9.74677 | 0.991234 | 9.84743 | 1.73396 | 1.032752389
320 | 10.7297 | 5.11555 | 10.547 | 1.97692 | **-1.70275031**
640 | 11.7092 | 2.36565 | 11.7869 | 2.69377 | **0.6635807741**

- db bench on writes with the cost charged to cache in WriteBufferManager (just in case this PR's CRM refactoring accidentally slows down anything in WBM): `fillseq`: **+0.54% in micros/op**
  `./db_bench -benchmarks=fillseq -db=/dev/shm/testdb -disable_auto_compactions=1 -cost_write_buffer_to_cache=true -write_buffer_size=10000000000 | egrep 'fillseq'`

#-run | (pre-PR) avg micros/op | std micros/op | (post-PR) avg micros/op | std micros/op | change (%)
-- | -- | -- | -- | -- | --
10 | 6.15 | 0.260187 | 6.289 | 0.371192 | 2.260162602
20 | 7.28025 | 0.465402 | 7.37255 | 0.451256 | 1.267813605
40 | 7.06312 | 0.490654 | 7.13803 | 0.478676 | **1.060579461**
80 | 7.14035 | 0.972831 | 7.14196 | 0.92971 | **0.02254791432**

- filter bench: `bloom filter`: **-0.78% in ns/key**
  `./filter_bench -impl=2 -quick -reserve_table_builder_memory=true | grep 'Build avg'`

#-run | (pre-PR) avg ns/key | std ns/key | (post-PR) avg ns/key | std ns/key | change (%)
-- | -- | -- | -- | -- | --
10 | 26.4369 | 0.442182 | 26.3273 | 0.422919 | **-0.4145720565**
20 | 26.4451 | 0.592787 | 26.1419 | 0.62451 | **-1.1465262**

- Crash test: `python3 tools/db_crashtest.py blackbox --reserve_table_reader_memory=1 --cache_size=1` killed as normal

Reviewed By: ajkr

Differential Revision: D35136549

Pulled By: hx235

fbshipit-source-id: 146978858d0f900f43f4eb09bfd3e83195e3be28
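The reservation flow described above (charge an estimate on `BlockBasedTable::Open()`, release it when the reader is destroyed) is built on the `CacheReservationManagerImpl` defined in the file below. Here is a minimal sketch of how a caller might use it; the function name, the size parameter, and the error handling are illustrative only, not the actual `BlockBasedTable` code:

```cpp
#include <cstddef>
#include <memory>

#include "cache/cache_reservation_manager.h"
#include "rocksdb/cache.h"
#include "rocksdb/status.h"

using namespace ROCKSDB_NAMESPACE;

// Hypothetical helper: reserve an approximation of a table reader's memory
// in the block cache, failing if the cache does not have room for it.
Status ReserveTableReaderMemory(
    const std::shared_ptr<Cache>& block_cache, std::size_t approx_reader_mem,
    std::unique_ptr<CacheReservationManager::CacheReservationHandle>* handle) {
  // The manager must be owned by a std::shared_ptr because
  // MakeCacheReservation() uses shared_from_this() to keep the manager alive
  // for as long as the returned handle exists.
  auto mgr = std::make_shared<
      CacheReservationManagerImpl<CacheEntryRole::kBlockBasedTableReader>>(
      block_cache, false /* delayed_decrease */);

  // Inserts dummy entries into the cache to account for approx_reader_mem.
  // A non-OK status means the cache is already too full, so the caller can
  // fail the table-reader creation.
  Status s = mgr->MakeCacheReservation(approx_reader_mem, handle);

  // Destroying *handle later releases the reservation via
  // ReleaseCacheReservation() in the handle's destructor.
  return s;
}
```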
184 lines
6.5 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "cache/cache_reservation_manager.h"

#include <cassert>
#include <cstddef>
#include <cstring>
#include <memory>

#include "cache/cache_entry_roles.h"
#include "rocksdb/cache.h"
#include "rocksdb/slice.h"
#include "rocksdb/status.h"
#include "table/block_based/reader_common.h"
#include "util/coding.h"

namespace ROCKSDB_NAMESPACE {
template <CacheEntryRole R>
CacheReservationManagerImpl<R>::CacheReservationHandle::CacheReservationHandle(
    std::size_t incremental_memory_used,
    std::shared_ptr<CacheReservationManagerImpl> cache_res_mgr)
    : incremental_memory_used_(incremental_memory_used) {
  assert(cache_res_mgr);
  cache_res_mgr_ = cache_res_mgr;
}

template <CacheEntryRole R>
CacheReservationManagerImpl<
    R>::CacheReservationHandle::~CacheReservationHandle() {
  Status s = cache_res_mgr_->ReleaseCacheReservation(incremental_memory_used_);
  s.PermitUncheckedError();
}

template <CacheEntryRole R>
CacheReservationManagerImpl<R>::CacheReservationManagerImpl(
    std::shared_ptr<Cache> cache, bool delayed_decrease)
    : delayed_decrease_(delayed_decrease),
      cache_allocated_size_(0),
      memory_used_(0) {
  assert(cache != nullptr);
  cache_ = cache;
}

template <CacheEntryRole R>
CacheReservationManagerImpl<R>::~CacheReservationManagerImpl() {
  for (auto* handle : dummy_handles_) {
    cache_->Release(handle, true);
  }
}

template <CacheEntryRole R>
Status CacheReservationManagerImpl<R>::UpdateCacheReservation(
    std::size_t new_mem_used) {
  memory_used_ = new_mem_used;
  std::size_t cur_cache_allocated_size =
      cache_allocated_size_.load(std::memory_order_relaxed);
  if (new_mem_used == cur_cache_allocated_size) {
    return Status::OK();
  } else if (new_mem_used > cur_cache_allocated_size) {
    Status s = IncreaseCacheReservation(new_mem_used);
    return s;
  } else {
    // In delayed decrease mode, we don't decrease cache reservation
    // until the memory usage is less than 3/4 of what we reserve
    // in the cache.
    // We do this because
    // (1) Dummy entry insertion is expensive in block cache
    // (2) Delayed releasing previously inserted dummy entries can save such
    // expensive dummy entry insertion on memory increase in the near future,
    // which is likely to happen when the memory usage is greater than or equal
    // to 3/4 of what we reserve
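    //
    // For example, if kSizeDummyEntry were 256KB (illustrative value) and 4
    // dummy entries (1MB) were currently reserved, a drop of memory usage to
    // 800KB (>= 3/4 * 1MB) would keep all 4 entries under delayed decrease,
    // while a drop to 700KB would release one entry, leaving 768KB reserved
    // (the smallest multiple of kSizeDummyEntry >= 700KB).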
    if (delayed_decrease_ && new_mem_used >= cur_cache_allocated_size / 4 * 3) {
      return Status::OK();
    } else {
      Status s = DecreaseCacheReservation(new_mem_used);
      return s;
    }
  }
}

template <CacheEntryRole R>
Status CacheReservationManagerImpl<R>::MakeCacheReservation(
    std::size_t incremental_memory_used,
    std::unique_ptr<CacheReservationManager::CacheReservationHandle>* handle) {
  assert(handle);
  Status s =
      UpdateCacheReservation(GetTotalMemoryUsed() + incremental_memory_used);
  (*handle).reset(new CacheReservationManagerImpl::CacheReservationHandle(
      incremental_memory_used,
      std::enable_shared_from_this<
          CacheReservationManagerImpl<R>>::shared_from_this()));
  return s;
}

template <CacheEntryRole R>
Status CacheReservationManagerImpl<R>::ReleaseCacheReservation(
    std::size_t incremental_memory_used) {
  assert(GetTotalMemoryUsed() >= incremental_memory_used);
  std::size_t updated_total_mem_used =
      GetTotalMemoryUsed() - incremental_memory_used;
  Status s = UpdateCacheReservation(updated_total_mem_used);
  return s;
}

template <CacheEntryRole R>
Status CacheReservationManagerImpl<R>::IncreaseCacheReservation(
    std::size_t new_mem_used) {
  Status return_status = Status::OK();
  while (new_mem_used > cache_allocated_size_.load(std::memory_order_relaxed)) {
    Cache::Handle* handle = nullptr;
    return_status = cache_->Insert(GetNextCacheKey(), nullptr, kSizeDummyEntry,
                                   GetNoopDeleterForRole<R>(), &handle);

    if (return_status != Status::OK()) {
      return return_status;
    }

    dummy_handles_.push_back(handle);
    cache_allocated_size_ += kSizeDummyEntry;
  }
  return return_status;
}

template <CacheEntryRole R>
Status CacheReservationManagerImpl<R>::DecreaseCacheReservation(
    std::size_t new_mem_used) {
  Status return_status = Status::OK();

  // Decrease to the smallest multiple of kSizeDummyEntry that is greater than
  // or equal to new_mem_used. We use addition instead of checking
  // new_mem_used <= cache_allocated_size_.load(std::memory_order_relaxed) -
  // kSizeDummyEntry to avoid underflow of size_t when cache_allocated_size_ = 0.
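  // (For example, when cache_allocated_size_ == 0, computing
  // cache_allocated_size_ - kSizeDummyEntry would wrap around to a huge
  // std::size_t value, whereas new_mem_used + kSizeDummyEntry <= 0 is simply
  // false, so the loop below correctly does nothing.)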
  while (new_mem_used + kSizeDummyEntry <=
         cache_allocated_size_.load(std::memory_order_relaxed)) {
    assert(!dummy_handles_.empty());
    auto* handle = dummy_handles_.back();
    cache_->Release(handle, true);
    dummy_handles_.pop_back();
    cache_allocated_size_ -= kSizeDummyEntry;
  }
  return return_status;
}

template <CacheEntryRole R>
std::size_t CacheReservationManagerImpl<R>::GetTotalReservedCacheSize() {
  return cache_allocated_size_.load(std::memory_order_relaxed);
}

template <CacheEntryRole R>
std::size_t CacheReservationManagerImpl<R>::GetTotalMemoryUsed() {
  return memory_used_;
}

template <CacheEntryRole R>
Slice CacheReservationManagerImpl<R>::GetNextCacheKey() {
  // Calling this function will have the side-effect of changing the
  // underlying cache_key_ that is shared among other keys generated from this
  // function. Therefore please make sure the previous keys are saved/copied
  // before calling this function.
  cache_key_ = CacheKey::CreateUniqueForCacheLifetime(cache_.get());
  return cache_key_.AsSlice();
}

template <CacheEntryRole R>
Cache::DeleterFn CacheReservationManagerImpl<R>::TEST_GetNoopDeleterForRole() {
  return GetNoopDeleterForRole<R>();
}

template class CacheReservationManagerImpl<
    CacheEntryRole::kBlockBasedTableReader>;
template class CacheReservationManagerImpl<
    CacheEntryRole::kCompressionDictionaryBuildingBuffer>;
template class CacheReservationManagerImpl<CacheEntryRole::kFilterConstruction>;
template class CacheReservationManagerImpl<CacheEntryRole::kMisc>;
template class CacheReservationManagerImpl<CacheEntryRole::kWriteBuffer>;
}  // namespace ROCKSDB_NAMESPACE