// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/cache.h"

#include <forward_list>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

#include "cache/clock_cache.h"
#include "cache/lru_cache.h"
#include "test_util/testharness.h"
#include "util/coding.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

// Conversions between numeric keys/values and the types expected by Cache.
static std::string EncodeKey(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}
static int DecodeKey(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}

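// The helpers above round-trip: DecodeKey(EncodeKey(k)) == k and
// DecodeValue(EncodeValue(v)) == v, so the tests below can speak in plain
// ints while the Cache API sees Slices and void pointers.
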
const std::string kLRU = "lru";
const std::string kClock = "clock";

void dumbDeleter(const Slice& /*key*/, void* /*value*/) {}

void eraseDeleter(const Slice& /*key*/, void* value) {
  Cache* cache = reinterpret_cast<Cache*>(value);
  cache->Erase("foo");
}

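// dumbDeleter is a no-op; eraseDeleter re-enters the cache passed in as the
// entry's value and erases "foo", which EraseFromDeleter below uses to check
// that erasing from inside a deleter callback is safe.
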
class CacheTest : public testing::TestWithParam<std::string> {
 public:
  static CacheTest* current_;

  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<Cache> cache2_;

  CacheTest()
      : cache_(NewCache(kCacheSize, kNumShardBits, false)),
        cache2_(NewCache(kCacheSize2, kNumShardBits2, false)) {
    current_ = this;
  }

  ~CacheTest() override {}

  std::shared_ptr<Cache> NewCache(size_t capacity) {
    auto type = GetParam();
    if (type == kLRU) {
      return NewLRUCache(capacity);
    }
    if (type == kClock) {
      return NewClockCache(capacity);
    }
    return nullptr;
  }

  std::shared_ptr<Cache> NewCache(
      size_t capacity, int num_shard_bits, bool strict_capacity_limit,
      CacheMetadataChargePolicy charge_policy = kDontChargeCacheMetadata) {
    auto type = GetParam();
    if (type == kLRU) {
      LRUCacheOptions co;
      co.capacity = capacity;
      co.num_shard_bits = num_shard_bits;
      co.strict_capacity_limit = strict_capacity_limit;
      co.high_pri_pool_ratio = 0;
      co.metadata_charge_policy = charge_policy;
      return NewLRUCache(co);
    }
    if (type == kClock) {
      return NewClockCache(capacity, num_shard_bits, strict_capacity_limit,
                           charge_policy);
    }
    return nullptr;
  }

  int Lookup(std::shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  void Insert(std::shared_ptr<Cache> cache, int key, int value,
              int charge = 1) {
    EXPECT_OK(cache->Insert(EncodeKey(key), EncodeValue(value), charge,
                            &CacheTest::Deleter));
  }

  void Erase(std::shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  int Lookup(int key) {
    return Lookup(cache_, key);
  }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) {
    Erase(cache_, key);
  }

  int Lookup2(int key) {
    return Lookup(cache2_, key);
  }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) {
    Erase(cache2_, key);
  }
};
CacheTest* CacheTest::current_;

class LRUCacheTest : public CacheTest {};

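// The tests below run once per cache type named in the test parameter ("lru"
// or "clock"). The fixture's Lookup() returns the decoded value on a hit and
// -1 on a miss, and Deleter() records every deletion in deleted_keys_ /
// deleted_values_.
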
TEST_P(CacheTest, UsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 0, false, kFullChargeCacheMetadata);
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  size_t usage = 0;
  char value[10] = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                            dumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    kv_size, dumbDeleter));
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
    ASSERT_LT(usage, precise_cache->GetUsage());
  }

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());

  // make sure the cache will be overloaded
  for (uint64_t i = 1; i < kCapacity; ++i) {
    auto key = ToString(i);
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                            dumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    key.size() + 5, dumbDeleter));
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_GT(kCapacity, precise_cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, precise_cache->GetUsage());
}

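// In UsageTest above and PinnedUsageTest below, precise_cache is configured
// with kFullChargeCacheMetadata, so its reported usage also includes
// per-entry metadata overhead; that is why it is compared with ASSERT_LT
// rather than ASSERT_EQ against the raw key+value charge.
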
TEST_P(CacheTest, PinnedUsageTest) {
  // cache is std::shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 200000;
  auto cache = NewCache(kCapacity, 8, false, kDontChargeCacheMetadata);
  auto precise_cache = NewCache(kCapacity, 8, false, kFullChargeCacheMetadata);

  size_t pinned_usage = 0;
  char value[10] = "abcdef";

  std::forward_list<Cache::Handle*> unreleased_handles;
  std::forward_list<Cache::Handle*> unreleased_handles_in_precise_cache;

  // Add entries. Unpin some of them after insertion. Then, pin some of them
  // again. Check GetPinnedUsage().
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    Cache::Handle* handle;
    Cache::Handle* handle_in_precise_cache;
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), kv_size,
                            dumbDeleter, &handle));
    assert(handle);
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    kv_size, dumbDeleter,
                                    &handle_in_precise_cache));
    assert(handle_in_precise_cache);
    pinned_usage += kv_size;
    ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
    ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    if (i % 2 == 0) {
      cache->Release(handle);
      precise_cache->Release(handle_in_precise_cache);
      pinned_usage -= kv_size;
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    } else {
      unreleased_handles.push_front(handle);
      unreleased_handles_in_precise_cache.push_front(handle_in_precise_cache);
    }
    if (i % 3 == 0) {
      unreleased_handles.push_front(cache->Lookup(key));
      auto x = precise_cache->Lookup(key);
      assert(x);
      unreleased_handles_in_precise_cache.push_front(x);
      // If i % 2 == 0, then the entry was unpinned before Lookup, so pinned
      // usage increased
      if (i % 2 == 0) {
        pinned_usage += kv_size;
      }
      ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
      ASSERT_LT(pinned_usage, precise_cache->GetPinnedUsage());
    }
  }
  auto precise_cache_pinned_usage = precise_cache->GetPinnedUsage();
  ASSERT_LT(pinned_usage, precise_cache_pinned_usage);

  // check that overloading the cache does not change the pinned usage
  for (uint64_t i = 1; i < 2 * kCapacity; ++i) {
    auto key = ToString(i);
    ASSERT_OK(cache->Insert(key, reinterpret_cast<void*>(value), key.size() + 5,
                            dumbDeleter));
    ASSERT_OK(precise_cache->Insert(key, reinterpret_cast<void*>(value),
                                    key.size() + 5, dumbDeleter));
  }
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(pinned_usage, cache->GetPinnedUsage());
  ASSERT_EQ(precise_cache_pinned_usage, precise_cache->GetPinnedUsage());

  // release handles for pinned entries to prevent memory leaks
  for (auto handle : unreleased_handles) {
    cache->Release(handle);
  }
  for (auto handle : unreleased_handles_in_precise_cache) {
    precise_cache->Release(handle);
  }
  ASSERT_EQ(0, cache->GetPinnedUsage());
  ASSERT_EQ(0, precise_cache->GetPinnedUsage());
  cache->EraseUnRefEntries();
  precise_cache->EraseUnRefEntries();
  ASSERT_EQ(0, cache->GetUsage());
  ASSERT_EQ(0, precise_cache->GetUsage());
}

TEST_P(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}

TEST_P(CacheTest, InsertSameKey) {
  Insert(1, 1);
  Insert(1, 2);
  ASSERT_EQ(2, Lookup(1));
}

TEST_P(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}

TEST_P(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);

  // Frequently used entry must be kept around
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}

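// Looking up key 100 on every iteration above keeps it marked as recently
// used, so it survives the churn while the untouched key 200 gets evicted.
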
TEST_P(CacheTest, ExternalRefPinsEntries) {
  Insert(100, 101);
  Cache::Handle* h = cache_->Lookup(EncodeKey(100));
  ASSERT_TRUE(cache_->Ref(h));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h)));
  ASSERT_EQ(1U, cache_->GetUsage());

  for (int i = 0; i < 3; ++i) {
    if (i > 0) {
      // First release (i == 1) corresponds to Ref(), second release (i == 2)
      // corresponds to Lookup(). Then, since all external refs are released,
      // the below insertions should push out the cache entry.
      cache_->Release(h);
    }
    // double cache size because the usage bit in block cache prevents 100 from
    // being evicted in the first kCacheSize iterations
    for (int j = 0; j < 2 * kCacheSize + 100; j++) {
      Insert(1000 + j, 2000 + j);
    }
    if (i < 2) {
      ASSERT_EQ(101, Lookup(100));
    }
  }
  ASSERT_EQ(-1, Lookup(100));
}

TEST_P(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert many more entries than the Cache capacity
  for (int i = 0; i < kCacheSize * 2; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check whether the entries inserted at the beginning are evicted.
  // Ones without an extra ref are evicted; those with one are not.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Clean up all the handles
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}

TEST_P(CacheTest, EvictEmptyCache) {
  // Insert an item larger than capacity to trigger eviction on an empty cache.
  auto cache = NewCache(1, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 10, dumbDeleter));
}

TEST_P(CacheTest, EraseFromDeleter) {
  // Use a deleter which erases another item from the cache, i.e. which
  // re-enters the cache at that point.
  std::shared_ptr<Cache> cache = NewCache(10, 0, false);
  ASSERT_OK(cache->Insert("foo", nullptr, 1, dumbDeleter));
  ASSERT_OK(cache->Insert("bar", cache.get(), 1, eraseDeleter));
  cache->Erase("bar");
  ASSERT_EQ(nullptr, cache->Lookup("foo"));
  ASSERT_EQ(nullptr, cache->Lookup("bar"));
}

TEST_P(CacheTest, ErasedHandleState) {
  // insert a key and get two handles
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}

TEST_P(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}

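// The cache accounts by total charge rather than entry count, so the
// combined weight of the surviving entries stays within roughly 10% of
// capacity, which is what the final ASSERT_LE above verifies.
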
TEST_P(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}

class Value {
 public:
  explicit Value(size_t v) : v_(v) { }

  size_t v_;
};

namespace {
void deleter(const Slice& /*key*/, void* value) {
  delete static_cast<Value*>(value);
}
}  // namespace

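// The capacity tests below insert heap-allocated Value objects rather than
// the int-encoded values used above; the deleter in the anonymous namespace
// frees them when entries are evicted or erased.
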
TEST_P(CacheTest, ReleaseAndErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle, true);
  ASSERT_TRUE(erased);
  // This tests that the deleter has been called
  ASSERT_EQ(1U, deleted_keys_.size());
}

TEST_P(CacheTest, ReleaseWithoutErase) {
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  Cache::Handle* handle;
  Status s = cache->Insert(EncodeKey(100), EncodeValue(100), 1,
                           &CacheTest::Deleter, &handle);
  ASSERT_TRUE(s.ok());
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(1U, cache->GetUsage());
  ASSERT_EQ(0U, deleted_keys_.size());
  auto erased = cache->Release(handle);
  ASSERT_FALSE(erased);
  // This tests that the deleter is not called. When the cache has free
  // capacity it is not expected to immediately erase released items.
  ASSERT_EQ(0U, deleted_keys_.size());
}

TEST_P(CacheTest, SetCapacity) {
  // test1: increase capacity
  // create a cache with capacity 5, insert 5 elements, then increase capacity
  // to 10; the returned capacity should be 10 and usage should be 5
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries, but not releasing.
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // test2: decrease capacity
  // insert 5 more elements into the cache, then release 5,
  // then decrease capacity to 7; the final capacity should be 7
  // and usage should be 7
  for (size_t i = 5; i < 10; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (size_t i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  cache->SetCapacity(7);
  ASSERT_EQ(7, cache->GetCapacity());
  ASSERT_EQ(7, cache->GetUsage());

  // release remaining 5 to keep valgrind happy
  for (size_t i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // Make sure this doesn't crash or upset ASAN/valgrind
  cache->DisownData();
}

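// SetStrictCapacityLimit below runs on the LRUCacheTest fixture. With the
// strict limit enabled, a cache full of pinned entries rejects further
// inserts with Status::Incomplete instead of growing past its capacity.
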
TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
  // test1: set the flag to false. Insert more keys than capacity. See if they
  // all go through.
  std::shared_ptr<Cache> cache = NewCache(5, 0, false);
  std::vector<Cache::Handle*> handles(10);
  Status s;
  for (size_t i = 0; i < 10; i++) {
    std::string key = ToString(i + 1);
    s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  ASSERT_EQ(10, cache->GetUsage());

  // test2: set the flag to true. Insert and check if it fails.
  std::string extra_key = "extra";
  Value* extra_value = new Value(0);
  cache->SetStrictCapacityLimit(true);
  Cache::Handle* handle;
  s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  ASSERT_EQ(10, cache->GetUsage());

  for (size_t i = 0; i < 10; i++) {
    cache->Release(handles[i]);
  }

  // test3: init with flag being true.
  std::shared_ptr<Cache> cache2 = NewCache(5, 0, true);
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    s = cache2->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_OK(s);
    ASSERT_NE(nullptr, handles[i]);
  }
  s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
  ASSERT_TRUE(s.IsIncomplete());
  ASSERT_EQ(nullptr, handle);
  // test insert without handle
  s = cache2->Insert(extra_key, extra_value, 1, &deleter);
  // As if the key had been inserted into the cache but evicted immediately.
  ASSERT_OK(s);
  ASSERT_EQ(5, cache2->GetUsage());
  ASSERT_EQ(nullptr, cache2->Lookup(extra_key));

  for (size_t i = 0; i < 5; i++) {
    cache2->Release(handles[i]);
  }
}

TEST_P(CacheTest, OverCapacity) {
  size_t n = 10;

  // an LRUCache with n entries and one shard only
  std::shared_ptr<Cache> cache = NewCache(n, 0, false);

  std::vector<Cache::Handle*> handles(n + 1);

  // Insert n+1 entries, but not releasing.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    Status s = cache->Insert(key, new Value(i + 1), 1, &deleter, &handles[i]);
    ASSERT_TRUE(s.ok());
  }

  // Guess what's in the cache now?
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (size_t i = 0; i < n + 1; i++) {
    cache->Release(handles[i]);
  }
  // Make sure eviction is triggered.
  cache->SetCapacity(n);

  // cache is under capacity now since elements were released
  ASSERT_EQ(n, cache->GetUsage());

  // element 0 is evicted and the rest are there.
  // This is consistent with the LRU policy since element 0 was released first.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    if (h) {
      ASSERT_NE(i, 0U);
      cache->Release(h);
    } else {
      ASSERT_EQ(i, 0U);
    }
  }
}

namespace {
std::vector<std::pair<int, int>> legacy_callback_state;
void legacy_callback(void* value, size_t charge) {
  legacy_callback_state.push_back(
      {DecodeValue(value), static_cast<int>(charge)});
}
};

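// legacy_callback records (value, charge) pairs via the old-style
// ApplyToAllCacheEntries callback signature; the tests that drive it
// presumably appear later in the file, beyond this excerpt.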
```
Complete in 92.030 s; Rough parallel ops/sec = 869285
Thread ops/sec = 54458
Operation latency (ns):
Count: 80000000 Average: 11298.1027 StdDev: 42.18
Min: 0 Median: 7722.0822 Max: 6398720
Percentiles: P50: 7722.08 P75: 14294.68 P99: 47522.95 P99.9: 85292.16 P99.99: 228077.78
------------------------------------------------------
[ 0, 1 ] 109 0.000% 0.000%
( 2900, 4400 ] 793 0.001% 0.001%
( 4400, 6600 ] 34054563 42.568% 42.569% #########
( 6600, 9900 ] 17482646 21.853% 64.423% ####
( 9900, 14000 ] 7908180 9.885% 74.308% ##
( 14000, 22000 ] 15032072 18.790% 93.098% ####
( 22000, 33000 ] 3237834 4.047% 97.145% #
( 33000, 50000 ] 1736882 2.171% 99.316%
( 50000, 75000 ] 446851 0.559% 99.875%
( 75000, 110000 ] 68251 0.085% 99.960%
( 110000, 170000 ] 18592 0.023% 99.983%
( 170000, 250000 ] 7200 0.009% 99.992%
( 250000, 380000 ] 3334 0.004% 99.997%
( 380000, 570000 ] 1393 0.002% 99.998%
( 570000, 860000 ] 700 0.001% 99.999%
( 860000, 1200000 ] 293 0.000% 100.000%
( 1200000, 1900000 ] 196 0.000% 100.000%
( 1900000, 2900000 ] 69 0.000% 100.000%
( 2900000, 4300000 ] 32 0.000% 100.000%
( 4300000, 6500000 ] 10 0.000% 100.000%
```
New, gather_stats=true, 1 second delay between scans. Scans take about
1 second here so it's spending about 50% time scanning. Still the effect on
ops/sec and latency seems to be in the noise. Median thread ops/sec of 5 runs:
```
Complete in 91.890 s; Rough parallel ops/sec = 870608
Thread ops/sec = 54551
Operation latency (ns):
Count: 80000000 Average: 11311.2629 StdDev: 45.28
Min: 0 Median: 7686.5458 Max: 10018340
Percentiles: P50: 7686.55 P75: 14481.95 P99: 47232.60 P99.9: 79230.18 P99.99: 232998.86
------------------------------------------------------
[ 0, 1 ] 71 0.000% 0.000%
( 2900, 4400 ] 291 0.000% 0.000%
( 4400, 6600 ] 34492060 43.115% 43.116% #########
( 6600, 9900 ] 16727328 20.909% 64.025% ####
( 9900, 14000 ] 7845828 9.807% 73.832% ##
( 14000, 22000 ] 15510654 19.388% 93.220% ####
( 22000, 33000 ] 3216533 4.021% 97.241% #
( 33000, 50000 ] 1680859 2.101% 99.342%
( 50000, 75000 ] 439059 0.549% 99.891%
( 75000, 110000 ] 60540 0.076% 99.967%
( 110000, 170000 ] 14649 0.018% 99.985%
( 170000, 250000 ] 5242 0.007% 99.991%
( 250000, 380000 ] 3260 0.004% 99.995%
( 380000, 570000 ] 1599 0.002% 99.997%
( 570000, 860000 ] 1043 0.001% 99.999%
( 860000, 1200000 ] 471 0.001% 99.999%
( 1200000, 1900000 ] 275 0.000% 100.000%
( 1900000, 2900000 ] 143 0.000% 100.000%
( 2900000, 4300000 ] 60 0.000% 100.000%
( 4300000, 6500000 ] 27 0.000% 100.000%
( 6500000, 9800000 ] 7 0.000% 100.000%
( 9800000, 14000000 ] 1 0.000% 100.000%
Gather stats latency (us):
Count: 46 Average: 980387.5870 StdDev: 60911.18
Min: 879155 Median: 1033777.7778 Max: 1261431
Percentiles: P50: 1033777.78 P75: 1120666.67 P99: 1261431.00 P99.9: 1261431.00 P99.99: 1261431.00
------------------------------------------------------
( 860000, 1200000 ] 45 97.826% 97.826% ####################
( 1200000, 1900000 ] 1 2.174% 100.000%
Most recent cache entry stats:
Number of entries: 1295133
Total charge: 9.88 GB
Average key size: 23.4982
Average charge: 8.00 KB
Unique deleters: 3
```
Reviewed By: mrambacher
Differential Revision: D28295742
Pulled By: pdillinger
fbshipit-source-id: bbc4a552f91ba0fe10e5cc025c42cef5a81f2b95
2021-05-12 01:16:11 +02:00
|
|
|
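// Verifies that the legacy ApplyToAllCacheEntries API visits every inserted
// entry exactly once, passing each entry's value and charge to the callback.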
TEST_P(CacheTest, ApplyToAllCacheEntriesTest) {
  std::vector<std::pair<int, int>> inserted;
  legacy_callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(legacy_callback, true);

  std::sort(inserted.begin(), inserted.end());
  std::sort(legacy_callback_state.begin(), legacy_callback_state.end());
  ASSERT_EQ(inserted.size(), legacy_callback_state.size());
  for (size_t i = 0; i < inserted.size(); ++i) {
    EXPECT_EQ(inserted[i], legacy_callback_state[i]);
  }
}

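// Exercises the newer Cache::ApplyToAllEntries API, which also passes each
// entry's key and deleter to the callback.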
TEST_P(CacheTest, ApplyToAllEntriesTest) {
  std::vector<std::string> callback_state;
  const auto callback = [&](const Slice& key, void* value, size_t charge,
                            Cache::DeleterFn deleter) {
    callback_state.push_back(ToString(DecodeKey(key)) + "," +
                             ToString(DecodeValue(value)) + "," +
                             ToString(charge));
    assert(deleter == &CacheTest::Deleter);
  };

  std::vector<std::string> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back(ToString(i) + "," + ToString(i * 2) + "," +
                       ToString(i + 1));
  }
  cache_->ApplyToAllEntries(callback, /*opts*/ {});

  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_EQ(inserted.size(), callback_state.size());
  for (size_t i = 0; i < inserted.size(); ++i) {
    EXPECT_EQ(inserted[i], callback_state[i]);
  }
}

TEST_P(CacheTest, ApplyToAllEntriesDuringResize) {
  // This is a mini-stress test of ApplyToAllEntries, to ensure
  // items in the cache that are neither added nor removed
  // during ApplyToAllEntries are counted exactly once.

  // Insert some entries that we expect to be seen exactly once
  // during iteration.
  constexpr int kSpecialCharge = 2;
  constexpr int kNotSpecialCharge = 1;
  constexpr int kSpecialCount = 100;
  for (int i = 0; i < kSpecialCount; ++i) {
    Insert(i, i * 2, kSpecialCharge);
  }

  // For callback
  int special_count = 0;
  const auto callback = [&](const Slice&, void*, size_t charge,
                            Cache::DeleterFn) {
    if (charge == static_cast<size_t>(kSpecialCharge)) {
      ++special_count;
    }
  };

  // Start counting
  std::thread apply_thread([&]() {
    // Use small average_entries_per_lock to make the problem difficult
    Cache::ApplyToAllEntriesOptions opts;
    opts.average_entries_per_lock = 2;
    cache_->ApplyToAllEntries(callback, opts);
  });

  // In parallel, add more entries, enough to cause resize but not enough
  // to cause ejections
  for (int i = kSpecialCount * 1; i < kSpecialCount * 6; ++i) {
    Insert(i, i * 2, kNotSpecialCharge);
  }

  apply_thread.join();
  ASSERT_EQ(special_count, kSpecialCount);
}

TEST_P(CacheTest, DefaultShardBits) {
  // The default number of shard bits is derived from the cache capacity:
  // 16MB should yield 5 shard bits (32 shards).
  std::shared_ptr<Cache> cache = NewCache(16 * 1024L * 1024L);
  ShardedCache* sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(5, sc->GetNumShardBits());

  // A capacity just below the minimum shard size yields a single shard.
  cache = NewLRUCache(511 * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(0, sc->GetNumShardBits());

  // Very large capacities are capped at 6 shard bits (64 shards) by default.
  cache = NewLRUCache(1024L * 1024L * 1024L, -1, true);
  sc = dynamic_cast<ShardedCache*>(cache.get());
  ASSERT_EQ(6, sc->GetNumShardBits());
}

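// The handle accessors should report the value, charge, and deleter that the
// entry was inserted with.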
TEST_P(CacheTest, GetChargeAndDeleter) {
  Insert(1, 2);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(1));
  ASSERT_EQ(2, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1, cache_->GetCharge(h1));
  ASSERT_EQ(&CacheTest::Deleter, cache_->GetDeleter(h1));
  cache_->Release(h1);
}

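// Instantiate the parameterized test suites for the available cache
// implementations; the clock cache variant is only included when
// SUPPORT_CLOCK_CACHE is defined.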
#ifdef SUPPORT_CLOCK_CACHE
std::shared_ptr<Cache> (*new_clock_cache_func)(
    size_t, int, bool, CacheMetadataChargePolicy) = NewClockCache;
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest,
                        testing::Values(kLRU, kClock));
#else
INSTANTIATE_TEST_CASE_P(CacheTestInstance, CacheTest, testing::Values(kLRU));
#endif  // SUPPORT_CLOCK_CACHE

INSTANTIATE_TEST_CASE_P(CacheTestInstance, LRUCacheTest, testing::Values(kLRU));

}  // namespace ROCKSDB_NAMESPACE

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}