rocksdb/memtable/write_buffer_manager_test.cc
Siying Dong 95b0e89b5d Improve write buffer manager (and allow the size to be tracked in block cache)
Summary:
Improve write buffer manager in several ways:
1. Size is tracked when an arena block is allocated rather than on every allocation, so the manager better reflects actual memory usage while keeping the tracking overhead slightly lower.
2. Memtable flush is now triggered when usage reaches 7/8 of the memory cap, instead of 100%, which also makes 100% much harder to hit.
3. A cache object can be passed into the buffer manager, and the memory allocated by memtables is charged against it. This lets users keep a single memory cap across the block cache and memtables (see the sketch below).
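
A minimal usage sketch of (3), assuming the standard Options::write_buffer_manager and BlockBasedTableOptions::block_cache fields (illustrative only, not part of this test):

  // Share one 1GB cache budget between the block cache and all memtables.
  std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024 * 1024);
  BlockBasedTableOptions table_options;
  table_options.block_cache = cache;
  Options options;
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  // Cap memtable memory at 50MB, charged against the same cache.
  options.write_buffer_manager.reset(
      new WriteBufferManager(50 * 1024 * 1024, cache));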
Closes https://github.com/facebook/rocksdb/pull/2350

Differential Revision: D5110648

Pulled By: siying

fbshipit-source-id: b4238113094bf22574001e446b5d88523ba00017
2017-06-02 14:26:56 -07:00

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
// This source code is also licensed under the GPLv2 license found in the
// COPYING file in the root directory of this source tree.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "rocksdb/write_buffer_manager.h"
#include "util/testharness.h"
namespace rocksdb {
class WriteBufferManagerTest : public testing::Test {};
#ifndef ROCKSDB_LITE
TEST_F(WriteBufferManagerTest, ShouldFlush) {
  // A write buffer manager of size 10MB
  std::unique_ptr<WriteBufferManager> wbf(
      new WriteBufferManager(10 * 1024 * 1024));

  wbf->ReserveMem(8 * 1024 * 1024);
  ASSERT_FALSE(wbf->ShouldFlush());
  // Reaching 9MB (90% of the cap) crosses the 7/8 trigger and hits the
  // flush condition
  wbf->ReserveMem(1 * 1024 * 1024);
  ASSERT_TRUE(wbf->ShouldFlush());
  // Scheduling memory for freeing releases the flush condition
  wbf->ScheduleFreeMem(1 * 1024 * 1024);
  ASSERT_FALSE(wbf->ShouldFlush());
  wbf->ReserveMem(2 * 1024 * 1024);
  ASSERT_TRUE(wbf->ShouldFlush());
  wbf->ScheduleFreeMem(5 * 1024 * 1024);
  // The hard limit is still hit: total usage reached the full cap
  ASSERT_TRUE(wbf->ShouldFlush());
  wbf->FreeMem(10 * 1024 * 1024);
  ASSERT_FALSE(wbf->ShouldFlush());
}
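
// The assertions above are consistent with a two-part trigger of roughly the
// following shape. This is an illustrative sketch under assumed names, not
// RocksDB's actual implementation:
struct ShouldFlushSketch {
  size_t buffer_size;    // the hard cap passed to the constructor
  size_t memory_used;    // grown by ReserveMem(), shrunk by FreeMem()
  size_t memory_active;  // like memory_used, but shrunk as soon as
                         // ScheduleFreeMem() is called
  bool ShouldFlush() const {
    if (memory_active > buffer_size / 8 * 7) {
      return true;  // soft trigger: mutable usage crossed 7/8 of the cap
    }
    if (memory_used >= buffer_size && memory_active >= buffer_size / 2) {
      return true;  // hard limit: cap reached while usage is mostly mutable
    }
    return false;
  }
};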
TEST_F(WriteBufferManagerTest, CacheCost) {
  // 1GB cache
  std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024 * 1024, 4);
  // A write buffer manager of size 50MB
  std::unique_ptr<WriteBufferManager> wbf(
      new WriteBufferManager(50 * 1024 * 1024, cache));

  // Reserving 1.5MB charges 2MB to the cache (rounded up to 1MB chunks)
  wbf->ReserveMem(1536 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 2 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 2 * 1024 * 1024 + 10000);

  // Allocate another 2MB
  wbf->ReserveMem(2 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 4 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 4 * 1024 * 1024 + 10000);

  // Allocate another 20MB
  wbf->ReserveMem(20 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 24 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 24 * 1024 * 1024 + 10000);

  // Freeing 2MB does not change the cache cost
  wbf->FreeMem(2 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 24 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 24 * 1024 * 1024 + 10000);
  ASSERT_FALSE(wbf->ShouldFlush());

  // Allocate another 30MB
  wbf->ReserveMem(30 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 52 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 52 * 1024 * 1024 + 10000);
  ASSERT_TRUE(wbf->ShouldFlush());
  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->ScheduleFreeMem(20 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 52 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 52 * 1024 * 1024 + 10000);
  // A flush is still needed, since the hard limit is hit
  ASSERT_TRUE(wbf->ShouldFlush());

  // Freeing 20MB releases 1MB from the cache
  wbf->FreeMem(20 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 51 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 51 * 1024 * 1024 + 10000);
  ASSERT_FALSE(wbf->ShouldFlush());

  // Each free releases another 1MB while usage stays below 3/4 of the charge
  wbf->FreeMem(16 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 50 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 50 * 1024 * 1024 + 10000);
  wbf->FreeMem(16 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 49 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 49 * 1024 * 1024 + 10000);

  // Reserving 2MB does not change the cache cost, as it is already covered
  wbf->ReserveMem(2 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 49 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 49 * 1024 * 1024 + 10000);
  wbf->FreeMem(16 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 48 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 48 * 1024 * 1024 + 10000);

  // Destroying the write buffer manager should free everything
  wbf.reset();
  ASSERT_LT(cache->GetPinnedUsage(), 1024 * 1024);
}
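
// Sketch of the dummy-entry accounting the assertions above rely on. All
// names here are assumptions for illustration, not the actual
// WriteBufferManager internals: memory is charged to the cache in 1MB
// chunks by pinning valueless entries, and each free releases at most one
// chunk once usage drops below 3/4 of the charged amount.
struct DummyEntrySketch {
  static const size_t kChunkSize = 1024 * 1024;  // assumed 1MB per entry
  static const size_t kMaxChunks = 1024;         // sketch-only capacity

  explicit DummyEntrySketch(Cache* c) : cache(c) {}

  static void NoopDeleter(const Slice& /*key*/, void* /*value*/) {}

  // Pin enough dummy entries to cover mem_used, rounding up to 1MB.
  void Charge(size_t mem_used) {
    while (num_handles < kMaxChunks && num_handles * kChunkSize < mem_used) {
      std::string key = "__wbm_dummy_" + std::to_string(next_key_id++);
      Cache::Handle* handle = nullptr;
      cache->Insert(key, nullptr /* value */, kChunkSize, NoopDeleter,
                    &handle);
      handles[num_handles++] = handle;
    }
  }

  // Unpin one dummy entry per call while usage is under 3/4 of the charge.
  void FreeOneIfOvercharged(size_t mem_used) {
    if (num_handles > 0 && mem_used < num_handles * kChunkSize / 4 * 3) {
      cache->Release(handles[--num_handles]);
    }
  }

  Cache* cache;
  Cache::Handle* handles[kMaxChunks];
  size_t num_handles = 0;
  uint64_t next_key_id = 0;
};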
TEST_F(WriteBufferManagerTest, NoCapCacheCost) {
  // 1GB cache
  std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024 * 1024, 4);
  // A write buffer manager with no size cap, but still costing into the cache
  std::unique_ptr<WriteBufferManager> wbf(new WriteBufferManager(0, cache));

  // Reserve 10MB; roughly 10MB is charged to the cache
  wbf->ReserveMem(10 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 10 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 10 * 1024 * 1024 + 10000);
  // With no cap, the flush condition never triggers
  ASSERT_FALSE(wbf->ShouldFlush());

  wbf->FreeMem(9 * 1024 * 1024);
  // Each free releases another 1MB chunk until about 1MB remains charged
  for (int i = 0; i < 10; i++) {
    wbf->FreeMem(16 * 1024);
  }
  ASSERT_GE(cache->GetPinnedUsage(), 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 1024 * 1024 + 10000);
}
#endif  // ROCKSDB_LITE

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}