// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/write_buffer_manager.h"

#include "test_util/testharness.h"

namespace ROCKSDB_NAMESPACE {

// Test fixture for WriteBufferManager; no shared state is needed.
class WriteBufferManagerTest : public testing::Test {};

#ifndef ROCKSDB_LITE
// Size of one dummy entry that WriteBufferManager pins in the block cache to
// charge memtable memory (256KB). The tests below compare
// dummy_entries_in_cache_usage() against multiples of this value.
const size_t kSizeDummyEntry = 256 * 1024;

// Verifies ShouldFlush() behavior against the manager's hard limit:
// it triggers at 90% of the buffer size, is relieved by scheduling frees,
// and is suppressed once more than half the data is already being flushed.
TEST_F(WriteBufferManagerTest, ShouldFlush) {
  // A write buffer manager of size 10MB
  std::unique_ptr<WriteBufferManager> wbf(
      new WriteBufferManager(10 * 1024 * 1024));

  wbf->ReserveMem(8 * 1024 * 1024);
  ASSERT_FALSE(wbf->ShouldFlush());
  // 90% of the hard limit will hit the condition
  wbf->ReserveMem(1 * 1024 * 1024);
  ASSERT_TRUE(wbf->ShouldFlush());
  // Scheduling for freeing will release the condition
  wbf->ScheduleFreeMem(1 * 1024 * 1024);
  ASSERT_FALSE(wbf->ShouldFlush());

  wbf->ReserveMem(2 * 1024 * 1024);
  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->ScheduleFreeMem(4 * 1024 * 1024);
  // 11MB total, 6MB mutable. hard limit still hit
  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->ScheduleFreeMem(2 * 1024 * 1024);
  // 11MB total, 4MB mutable. hard limit is still hit but won't flush because
  // more than half the data is already being flushed.
  ASSERT_FALSE(wbf->ShouldFlush());

  wbf->ReserveMem(4 * 1024 * 1024);
  // 15 MB total, 8MB mutable.
  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->FreeMem(7 * 1024 * 1024);
  // 9MB total, 8MB mutable.
  ASSERT_FALSE(wbf->ShouldFlush());
}

// Verifies that memory reserved through the manager is mirrored in the block
// cache as pinned dummy entries, rounded up in 256KB (kSizeDummyEntry) units,
// and that frees release dummy entries lazily.
TEST_F(WriteBufferManagerTest, CacheCost) {
  LRUCacheOptions co;
  // 1GB cache
  co.capacity = 1024 * 1024 * 1024;
  co.num_shard_bits = 4;
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  std::shared_ptr<Cache> cache = NewLRUCache(co);
  // A write buffer manager of size 50MB
  std::unique_ptr<WriteBufferManager> wbf(
      new WriteBufferManager(50 * 1024 * 1024, cache));

  // Allocate 333KB will allocate 512KB
  wbf->ReserveMem(333 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 2 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 2 * 256 * 1024 + 10000);
  // 2 dummy entries are added for size 333 KB.
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 2 * kSizeDummyEntry);

  // Allocate another 512KB
  wbf->ReserveMem(512 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 4 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 4 * 256 * 1024 + 10000);
  // 2 more dummy entries are added for size 512 KB.
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 4 * kSizeDummyEntry);

  // Allocate another 10MB
  wbf->ReserveMem(10 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 11 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 11 * 1024 * 1024 + 10000);
  // 40 more entries are added for size 10 * 1024 * 1024.
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 44 * kSizeDummyEntry);

  // Free 1MB will not cause any change in cache cost
  wbf->FreeMem(1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 11 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 11 * 1024 * 1024 + 10000);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 44 * kSizeDummyEntry);
  ASSERT_FALSE(wbf->ShouldFlush());

  // Allocate another 41MB
  wbf->ReserveMem(41 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 51 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 51 * 1024 * 1024 + 10000);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 204 * kSizeDummyEntry);
  ASSERT_TRUE(wbf->ShouldFlush());

  ASSERT_TRUE(wbf->ShouldFlush());

  wbf->ScheduleFreeMem(20 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 51 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 51 * 1024 * 1024 + 10000);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 204 * kSizeDummyEntry);
  // Still need flush as the hard limit hits
  ASSERT_TRUE(wbf->ShouldFlush());

  // Free 20MB will release 256KB from cache
  wbf->FreeMem(20 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 51 * 1024 * 1024 - 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 51 * 1024 * 1024 - 256 * 1024 + 10000);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 203 * kSizeDummyEntry);

  ASSERT_FALSE(wbf->ShouldFlush());

  // Every free will release 256KB if still not hit 3/4
  wbf->FreeMem(16 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 51 * 1024 * 1024 - 2 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 51 * 1024 * 1024 - 2 * 256 * 1024 + 10000);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 202 * kSizeDummyEntry);

  wbf->FreeMem(16 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 51 * 1024 * 1024 - 3 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 51 * 1024 * 1024 - 3 * 256 * 1024 + 10000);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 201 * kSizeDummyEntry);

  // Reserve 512KB will not cause any change in cache cost
  wbf->ReserveMem(512 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 51 * 1024 * 1024 - 3 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 51 * 1024 * 1024 - 3 * 256 * 1024 + 10000);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 201 * kSizeDummyEntry);

  wbf->FreeMem(16 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 51 * 1024 * 1024 - 4 * 256 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 51 * 1024 * 1024 - 4 * 256 * 1024 + 10000);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 200 * kSizeDummyEntry);

  // Destroying the write buffer manager should free everything
  wbf.reset();
  ASSERT_LT(cache->GetPinnedUsage(), 1024 * 1024);
}

// Verifies cache charging when the manager has no buffer-size cap
// (constructed with buffer size 0): memory is still mirrored as pinned
// dummy entries and ShouldFlush() does not trigger here.
TEST_F(WriteBufferManagerTest, NoCapCacheCost) {
  // 1GB cache
  std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024 * 1024, 4);
  // A write buffer manager with no size limit, charging memory to `cache`.
  std::unique_ptr<WriteBufferManager> wbf(new WriteBufferManager(0, cache));
  // Reserving 10MB pins 40 dummy entries (40 * 256KB) in the cache.
  wbf->ReserveMem(10 * 1024 * 1024);
  ASSERT_GE(cache->GetPinnedUsage(), 10 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 10 * 1024 * 1024 + 10000);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 40 * kSizeDummyEntry);
  ASSERT_FALSE(wbf->ShouldFlush());

  wbf->FreeMem(9 * 1024 * 1024);
  for (int i = 0; i < 40; i++) {
    wbf->FreeMem(4 * 1024);
  }
  // ~1MB (less than 160KB extra) remains reserved: 4 dummy entries left.
  ASSERT_GE(cache->GetPinnedUsage(), 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 1024 * 1024 + 10000);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 4 * kSizeDummyEntry);
}

// Verifies behavior when the cache is at strict capacity: dummy-entry
// accounting keeps counting reservations even when some cache inserts fail,
// and pinned usage stays bounded by the cache capacity.
TEST_F(WriteBufferManagerTest, CacheFull) {
  // 12MB cache size with strict capacity
  LRUCacheOptions lo;
  lo.capacity = 12 * 1024 * 1024;
  lo.num_shard_bits = 0;
  lo.strict_capacity_limit = true;
  std::shared_ptr<Cache> cache = NewLRUCache(lo);
  std::unique_ptr<WriteBufferManager> wbf(new WriteBufferManager(0, cache));
  wbf->ReserveMem(10 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 40 * kSizeDummyEntry);
  size_t prev_pinned = cache->GetPinnedUsage();
  ASSERT_GE(prev_pinned, 10 * 1024 * 1024);

  // Some insert will fail
  wbf->ReserveMem(10 * 1024 * 1024);
  ASSERT_LE(cache->GetPinnedUsage(), 12 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 80 * kSizeDummyEntry);

  // Increase capacity so next insert will succeed
  cache->SetCapacity(30 * 1024 * 1024);
  wbf->ReserveMem(10 * 1024 * 1024);
  ASSERT_GT(cache->GetPinnedUsage(), 20 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 120 * kSizeDummyEntry);

  // Gradually release 20 MB
  for (int i = 0; i < 40; i++) {
    wbf->FreeMem(512 * 1024);
  }
  ASSERT_GE(cache->GetPinnedUsage(), 10 * 1024 * 1024);
  ASSERT_LT(cache->GetPinnedUsage(), 20 * 1024 * 1024);
  ASSERT_EQ(wbf->dummy_entries_in_cache_usage(), 95 * kSizeDummyEntry);
}

#endif // ROCKSDB_LITE
|
2020-02-20 21:07:53 +01:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2017-06-02 23:13:59 +02:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
|
|
|
|
::testing::InitGoogleTest(&argc, argv);
|
|
|
|
return RUN_ALL_TESTS();
|
|
|
|
}
|