// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/cache.h"

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

#include "util/coding.h"
#include "util/string_util.h"
#include "util/testharness.h"

namespace rocksdb {

// Conversions between numeric keys/values and the types expected by Cache.
static std::string EncodeKey(int k) {
  std::string result;
  PutFixed32(&result, k);
  return result;
}
static int DecodeKey(const Slice& k) {
  assert(k.size() == 4);
  return DecodeFixed32(k.data());
}
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) {
  return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}
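
// Test fixture. It owns two sharded LRU caches of different capacities and
// records every (key, value) pair handed to the deleter, so tests can check
// exactly when entries are destroyed.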
class CacheTest : public testing::Test {
 public:
  static CacheTest* current_;

  static void Deleter(const Slice& key, void* v) {
    current_->deleted_keys_.push_back(DecodeKey(key));
    current_->deleted_values_.push_back(DecodeValue(v));
  }

  static const int kCacheSize = 1000;
  static const int kNumShardBits = 4;

  static const int kCacheSize2 = 100;
  static const int kNumShardBits2 = 2;

  std::vector<int> deleted_keys_;
  std::vector<int> deleted_values_;
  shared_ptr<Cache> cache_;
  shared_ptr<Cache> cache2_;

  CacheTest() :
      cache_(NewLRUCache(kCacheSize, kNumShardBits)),
      cache2_(NewLRUCache(kCacheSize2, kNumShardBits2)) {
    current_ = this;
  }

  ~CacheTest() {
  }

  int Lookup(shared_ptr<Cache> cache, int key) {
    Cache::Handle* handle = cache->Lookup(EncodeKey(key));
    const int r = (handle == nullptr) ? -1 : DecodeValue(cache->Value(handle));
    if (handle != nullptr) {
      cache->Release(handle);
    }
    return r;
  }

  void Insert(shared_ptr<Cache> cache, int key, int value, int charge = 1) {
    cache->Release(cache->Insert(EncodeKey(key), EncodeValue(value), charge,
                                 &CacheTest::Deleter));
  }

  void Erase(shared_ptr<Cache> cache, int key) {
    cache->Erase(EncodeKey(key));
  }

  int Lookup(int key) {
    return Lookup(cache_, key);
  }

  void Insert(int key, int value, int charge = 1) {
    Insert(cache_, key, value, charge);
  }

  void Erase(int key) {
    Erase(cache_, key);
  }

  int Lookup2(int key) {
    return Lookup(cache2_, key);
  }

  void Insert2(int key, int value, int charge = 1) {
    Insert(cache2_, key, value, charge);
  }

  void Erase2(int key) {
    Erase(cache2_, key);
  }
};

CacheTest* CacheTest::current_;
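
// A deleter that does nothing, for tests that only care about usage
// accounting and never inspect the deleted values.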
namespace {
void dumbDeleter(const Slice& key, void* value) { }
}  // namespace
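
// GetUsage() should track the sum of the inserted charges exactly while the
// cache is below capacity, and stay close to (but below) the capacity once
// entries start getting evicted.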
TEST_F(CacheTest, UsageTest) {
  // cache is shared_ptr and will be automatically cleaned up.
  const uint64_t kCapacity = 100000;
  auto cache = NewLRUCache(kCapacity, 8);

  size_t usage = 0;
  const char* value = "abcdef";
  // make sure everything will be cached
  for (int i = 1; i < 100; ++i) {
    std::string key(i, 'a');
    auto kv_size = key.size() + 5;
    cache->Release(
        cache->Insert(key, (void*)value, kv_size, dumbDeleter));
    usage += kv_size;
    ASSERT_EQ(usage, cache->GetUsage());
  }

  // make sure the cache will be overloaded
  for (uint64_t i = 1; i < kCapacity; ++i) {
    auto key = ToString(i);
    cache->Release(
        cache->Insert(key, (void*)value, key.size() + 5, dumbDeleter));
  }

  // the usage should be close to the capacity
  ASSERT_GT(kCapacity, cache->GetUsage());
  ASSERT_LT(kCapacity * 0.95, cache->GetUsage());
}
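
// Inserting a value under an existing key should replace the old value and
// immediately invoke the deleter for it, since no handle pins the old entry.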
TEST_F(CacheTest, HitAndMiss) {
  ASSERT_EQ(-1, Lookup(100));

  Insert(100, 101);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(200, 201);
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  Insert(100, 102);
  ASSERT_EQ(102, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(-1, Lookup(300));

  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
}

TEST_F(CacheTest, Erase) {
  Erase(200);
  ASSERT_EQ(0U, deleted_keys_.size());

  Insert(100, 101);
  Insert(200, 201);
  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(201, Lookup(200));
  ASSERT_EQ(1U, deleted_keys_.size());
}
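
// An entry that is still referenced through a handle must stay alive (and
// keep counting towards GetUsage()) even after it is replaced or erased; it
// is only deleted when the last handle is released.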
TEST_F(CacheTest, EntriesArePinned) {
  Insert(100, 101);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(101, DecodeValue(cache_->Value(h1)));
  ASSERT_EQ(1U, cache_->GetUsage());

  Insert(100, 102);
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(102, DecodeValue(cache_->Value(h2)));
  ASSERT_EQ(0U, deleted_keys_.size());
  ASSERT_EQ(2U, cache_->GetUsage());

  cache_->Release(h1);
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[0]);
  ASSERT_EQ(101, deleted_values_[0]);
  ASSERT_EQ(1U, cache_->GetUsage());

  Erase(100);
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(1U, deleted_keys_.size());
  ASSERT_EQ(1U, cache_->GetUsage());

  cache_->Release(h2);
  ASSERT_EQ(2U, deleted_keys_.size());
  ASSERT_EQ(100, deleted_keys_[1]);
  ASSERT_EQ(102, deleted_values_[1]);
  ASSERT_EQ(0U, cache_->GetUsage());
}
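
// A frequently looked-up entry should survive a flood of more than
// kCacheSize insertions, while an untouched entry of the same age is evicted.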
TEST_F(CacheTest, EvictionPolicy) {
  Insert(100, 101);
  Insert(200, 201);

  // Frequently used entry must be kept around
  for (int i = 0; i < kCacheSize + 100; i++) {
    Insert(1000 + i, 2000 + i);
    ASSERT_EQ(2000 + i, Lookup(1000 + i));
    ASSERT_EQ(101, Lookup(100));
  }
  ASSERT_EQ(101, Lookup(100));
  ASSERT_EQ(-1, Lookup(200));
}
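
// Entries that are pinned by an outstanding handle must not be evicted, even
// when the cache is flooded far past its capacity; unpinned entries of the
// same age are evicted as usual.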
TEST_F(CacheTest, EvictionPolicyRef) {
  Insert(100, 101);
  Insert(101, 102);
  Insert(102, 103);
  Insert(103, 104);
  Insert(200, 101);
  Insert(201, 102);
  Insert(202, 103);
  Insert(203, 104);
  Cache::Handle* h201 = cache_->Lookup(EncodeKey(200));
  Cache::Handle* h202 = cache_->Lookup(EncodeKey(201));
  Cache::Handle* h203 = cache_->Lookup(EncodeKey(202));
  Cache::Handle* h204 = cache_->Lookup(EncodeKey(203));
  Insert(300, 101);
  Insert(301, 102);
  Insert(302, 103);
  Insert(303, 104);

  // Insert many more entries than the cache capacity.
  for (int i = 0; i < kCacheSize + 100; i++) {
    Insert(1000 + i, 2000 + i);
  }

  // Check whether the entries inserted at the beginning are evicted.
  // Those without an extra reference are evicted; those with one are not.
  ASSERT_EQ(-1, Lookup(100));
  ASSERT_EQ(-1, Lookup(101));
  ASSERT_EQ(-1, Lookup(102));
  ASSERT_EQ(-1, Lookup(103));

  ASSERT_EQ(-1, Lookup(300));
  ASSERT_EQ(-1, Lookup(301));
  ASSERT_EQ(-1, Lookup(302));
  ASSERT_EQ(-1, Lookup(303));

  ASSERT_EQ(101, Lookup(200));
  ASSERT_EQ(102, Lookup(201));
  ASSERT_EQ(103, Lookup(202));
  ASSERT_EQ(104, Lookup(203));

  // Clean up all the handles.
  cache_->Release(h201);
  cache_->Release(h202);
  cache_->Release(h203);
  cache_->Release(h204);
}
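
// Erase() removes the entry from the cache's table right away, so lookups
// miss, but the value itself is not deleted until every outstanding handle
// has been released.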
TEST_F(CacheTest, ErasedHandleState) {
  // insert a key and get two handles
  Insert(100, 1000);
  Cache::Handle* h1 = cache_->Lookup(EncodeKey(100));
  Cache::Handle* h2 = cache_->Lookup(EncodeKey(100));
  ASSERT_EQ(h1, h2);
  ASSERT_EQ(DecodeValue(cache_->Value(h1)), 1000);
  ASSERT_EQ(DecodeValue(cache_->Value(h2)), 1000);

  // delete the key from the cache
  Erase(100);
  // can no longer find in the cache
  ASSERT_EQ(-1, Lookup(100));

  // release one handle
  cache_->Release(h1);
  // still can't find in cache
  ASSERT_EQ(-1, Lookup(100));

  cache_->Release(h2);
}

TEST_F(CacheTest, HeavyEntries) {
  // Add a bunch of light and heavy entries and then count the combined
  // size of items still in the cache, which must be approximately the
  // same as the total capacity.
  const int kLight = 1;
  const int kHeavy = 10;
  int added = 0;
  int index = 0;
  while (added < 2 * kCacheSize) {
    const int weight = (index & 1) ? kLight : kHeavy;
    Insert(index, 1000 + index, weight);
    added += weight;
    index++;
  }

  int cached_weight = 0;
  for (int i = 0; i < index; i++) {
    const int weight = (i & 1 ? kLight : kHeavy);
    int r = Lookup(i);
    if (r >= 0) {
      cached_weight += weight;
      ASSERT_EQ(1000 + i, r);
    }
  }
  ASSERT_LE(cached_weight, kCacheSize + kCacheSize / 10);
}

TEST_F(CacheTest, NewId) {
  uint64_t a = cache_->NewId();
  uint64_t b = cache_->NewId();
  ASSERT_NE(a, b);
}
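
// A heap-allocated value type for the tests below. Its destructor logs, so
// the test output shows when entries are actually destroyed.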
class Value {
 private:
  size_t v_;
 public:
  explicit Value(size_t v) : v_(v) { }

  ~Value() { std::cout << v_ << " is destructed\n"; }
};

namespace {
void deleter(const Slice& key, void* value) {
  delete static_cast<Value*>(value);
}
}  // namespace
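
// SetCapacity() adjusts the limit in both directions: growing never drops
// entries, while shrinking evicts unreferenced entries until usage fits the
// new capacity. Entries pinned by a handle cannot be evicted.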
TEST_F(CacheTest, SetCapacity) {
  // test1: increase capacity
  // Create a cache with capacity 5, insert 5 elements, then increase the
  // capacity to 10; the returned capacity should be 10 and usage should
  // stay at 5.
  std::shared_ptr<Cache> cache = NewLRUCache(5, 0);
  std::vector<Cache::Handle*> handles(10);
  // Insert 5 entries, but do not release the handles.
  for (size_t i = 0; i < 5; i++) {
    std::string key = ToString(i + 1);
    handles[i] = cache->Insert(key, new Value(i + 1), 1, &deleter);
  }
  ASSERT_EQ(5U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());
  cache->SetCapacity(10);
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(5U, cache->GetUsage());

  // test2: decrease capacity
  // Insert 5 more elements, release the first 5, then decrease the capacity
  // to 7; the final capacity and usage should both be 7.
  for (size_t i = 5; i < 10; i++) {
    std::string key = ToString(i + 1);
    handles[i] = cache->Insert(key, new Value(i + 1), 1, &deleter);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  for (size_t i = 0; i < 5; i++) {
    cache->Release(handles[i]);
  }
  ASSERT_EQ(10U, cache->GetCapacity());
  ASSERT_EQ(10U, cache->GetUsage());
  cache->SetCapacity(7);
  ASSERT_EQ(7U, cache->GetCapacity());
  ASSERT_EQ(7U, cache->GetUsage());

  // release remaining 5 to keep valgrind happy
  for (size_t i = 5; i < 10; i++) {
    cache->Release(handles[i]);
  }
}
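
// If every entry is pinned, nothing can be evicted and the cache is allowed
// to grow past its capacity; once the handles are released, eviction brings
// usage back under the limit.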
TEST_F(CacheTest, OverCapacity) {
  size_t n = 10;

  // an LRUCache with capacity n and a single shard
  std::shared_ptr<Cache> cache = NewLRUCache(n, 0);

  std::vector<Cache::Handle*> handles(n + 1);

  // Insert n+1 entries, but do not release the handles.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    handles[i] = cache->Insert(key, new Value(i + 1), 1, &deleter);
  }

  // Guess what's in the cache now?
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    std::cout << key << (h ? " found\n" : " not found\n");
    ASSERT_TRUE(h != nullptr);
    if (h) cache->Release(h);
  }

  // the cache is over capacity since nothing could be evicted
  ASSERT_EQ(n + 1U, cache->GetUsage());
  for (size_t i = 0; i < n + 1; i++) {
    cache->Release(handles[i]);
  }

  // usage is back within capacity now that the handles were released
  ASSERT_EQ(n, cache->GetUsage());

  // Element 0 is evicted and the rest are still there. This is consistent
  // with the LRU policy, since element 0 was released first.
  for (size_t i = 0; i < n + 1; i++) {
    std::string key = ToString(i + 1);
    auto h = cache->Lookup(key);
    if (h) {
      ASSERT_NE(i, 0U);
      cache->Release(h);
    } else {
      ASSERT_EQ(i, 0U);
    }
  }
}

namespace {
std::vector<std::pair<int, int>> callback_state;
void callback(void* entry, size_t charge) {
  callback_state.push_back({DecodeValue(entry), static_cast<int>(charge)});
}
}  // namespace
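
// ApplyToAllCacheEntries() should invoke the callback once per cached entry
// with that entry's value and charge; the callback above records what it saw
// so the test can compare it against what was inserted.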
TEST_F(CacheTest, ApplyToAllCacheEntriesTest) {
  std::vector<std::pair<int, int>> inserted;
  callback_state.clear();

  for (int i = 0; i < 10; ++i) {
    Insert(i, i * 2, i + 1);
    inserted.push_back({i * 2, i + 1});
  }
  cache_->ApplyToAllCacheEntries(callback, true);

  std::sort(inserted.begin(), inserted.end());
  std::sort(callback_state.begin(), callback_state.end());
  ASSERT_TRUE(inserted == callback_state);
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}