WriteBufferManager doesn't cost to cache if no limit is set (#4695)

Summary:
WriteBufferManager is not invoked when allocating memory for a memtable if the limit is not set, even when a cache is passed in. This is inconsistent with what the comment says. Fix it.
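For context, here is a minimal standalone sketch of the configuration this fix targets, using the public RocksDB C++ API (the cache size, DB path, and key/value are illustrative, not from this PR): a WriteBufferManager constructed with a zero limit but with a cache attached should still charge memtable allocations to that cache.

#include <memory>

#include "rocksdb/cache.h"
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/write_buffer_manager.h"

int main() {
  // LRU block cache that should be billed for memtable memory.
  std::shared_ptr<rocksdb::Cache> cache =
      rocksdb::NewLRUCache(rocksdb::LRUCacheOptions(10 << 20, 1, false, 0.0));

  rocksdb::Options options;
  options.create_if_missing = true;
  // buffer_size == 0 means no write buffer limit, but a cache is supplied,
  // so allocations should still be costed to the cache -- the behavior this
  // commit fixes.
  options.write_buffer_manager =
      std::make_shared<rocksdb::WriteBufferManager>(0, cache);

  rocksdb::DB* db = nullptr;
  // Illustrative path, not from this PR.
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/wbm_no_limit_demo", &db);
  if (s.ok()) {
    db->Put(rocksdb::WriteOptions(), "foo", "bar");
    // cache->GetUsage() now reflects the memtable reservations.
    delete db;
  }
  return 0;
}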
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4695

Differential Revision: D13112722

Pulled By: siying

fbshipit-source-id: 0b27eef63867f679cd06033ea56907c0569597f4
Siying Dong 2018-11-18 16:51:15 -08:00 committed by Facebook Github Bot
parent 9d6d4867ab
commit 13579e8c5a
4 changed files with 31 additions and 9 deletions


@@ -454,6 +454,22 @@ TEST_F(DBTest2, SharedWriteBufferLimitAcrossDB) {
   rocksdb::SyncPoint::GetInstance()->DisableProcessing();
 }
+TEST_F(DBTest2, TestWriteBufferNoLimitWithCache) {
+  Options options = CurrentOptions();
+  options.arena_block_size = 4096;
+  std::shared_ptr<Cache> cache =
+      NewLRUCache(LRUCacheOptions(10000000, 1, false, 0.0));
+  options.write_buffer_size = 50000;  // this is never hit
+  // Use a write buffer total size so that the soft limit is about
+  // 105000.
+  options.write_buffer_manager.reset(new WriteBufferManager(0, cache));
+  Reopen(options);
+  ASSERT_OK(Put("foo", "bar"));
+  // One dummy entry is 1MB.
+  ASSERT_GT(cache->GetUsage(), 500000);
+}
 namespace {
 void ValidateKeyExistence(DB* db, const std::vector<Slice>& keys_must_exist,
                           const std::vector<Slice>& keys_must_not_exist) {


@@ -69,12 +69,13 @@ MemTable::MemTable(const InternalKeyComparator& cmp,
       refs_(0),
       kArenaBlockSize(OptimizeBlockSize(moptions_.arena_block_size)),
       mem_tracker_(write_buffer_manager),
-      arena_(
-          moptions_.arena_block_size,
-          (write_buffer_manager != nullptr && write_buffer_manager->enabled())
-              ? &mem_tracker_
-              : nullptr,
-          mutable_cf_options.memtable_huge_page_size),
+      arena_(moptions_.arena_block_size,
+             (write_buffer_manager != nullptr &&
+              (write_buffer_manager->enabled() ||
+               write_buffer_manager->cost_to_cache()))
+                 ? &mem_tracker_
+                 : nullptr,
+             mutable_cf_options.memtable_huge_page_size),
       table_(ioptions.memtable_factory->CreateMemTableRep(
           comparator_, &arena_, mutable_cf_options.prefix_extractor.get(),
           ioptions.info_log, column_family_id)),


@@ -30,6 +30,8 @@ class WriteBufferManager {
   bool enabled() const { return buffer_size_ != 0; }
+  bool cost_to_cache() const { return cache_rep_ != nullptr; }
   // Only valid if enabled()
   size_t memory_usage() const {
     return memory_used_.load(std::memory_order_relaxed);
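The same gating condition now appears in both MemTable's arena construction (above) and in AllocTracker (below): memtable memory is tracked whenever the WriteBufferManager either enforces a limit or bills a cache. A hedged distillation of that predicate, using a hypothetical helper name that is not part of RocksDB:

#include "rocksdb/write_buffer_manager.h"

// Hypothetical helper (illustration only): allocations should be reported to
// the WriteBufferManager when it either enforces a buffer limit (enabled())
// or charges an external cache (cost_to_cache()).
inline bool ShouldTrackMemtableMemory(const rocksdb::WriteBufferManager* wbm) {
  return wbm != nullptr && (wbm->enabled() || wbm->cost_to_cache());
}

Before this change, the cost_to_cache() half of the condition was missing, so a manager constructed with only a cache never saw the allocations.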


@@ -24,7 +24,8 @@ AllocTracker::~AllocTracker() { FreeMem(); }
 void AllocTracker::Allocate(size_t bytes) {
   assert(write_buffer_manager_ != nullptr);
-  if (write_buffer_manager_->enabled()) {
+  if (write_buffer_manager_->enabled() ||
+      write_buffer_manager_->cost_to_cache()) {
     bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
     write_buffer_manager_->ReserveMem(bytes);
   }
@@ -32,7 +33,8 @@ void AllocTracker::Allocate(size_t bytes) {
 void AllocTracker::DoneAllocating() {
   if (write_buffer_manager_ != nullptr && !done_allocating_) {
-    if (write_buffer_manager_->enabled()) {
+    if (write_buffer_manager_->enabled() ||
+        write_buffer_manager_->cost_to_cache()) {
       write_buffer_manager_->ScheduleFreeMem(
           bytes_allocated_.load(std::memory_order_relaxed));
     } else {
@@ -47,7 +49,8 @@ void AllocTracker::FreeMem() {
     DoneAllocating();
   }
   if (write_buffer_manager_ != nullptr && !freed_) {
-    if (write_buffer_manager_->enabled()) {
+    if (write_buffer_manager_->enabled() ||
+        write_buffer_manager_->cost_to_cache()) {
       write_buffer_manager_->FreeMem(
           bytes_allocated_.load(std::memory_order_relaxed));
     } else {