Task 6532943: RocksDB - SetCapacity() can dynamically change cache capacity if feasible

Summary:
When the new capacity is larger than the existing capacity, simply update the capacity to the new value.
When the new capacity is less than the existing capacity but more than the current usage, simply update the capacity to the new value.
When the new capacity is less than both the existing capacity and the current usage, try to purge entries from the LRU list, where feasible, until usage < capacity.
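In outline, the three cases reduce to the following sketch (hypothetical names; LruEmpty() and EvictOldest() stand in for the real LRU bookkeeping):

void SetCapacity(size_t new_capacity) {
  capacity_ = new_capacity;  // cases 1 and 2: recording the new value is enough
  // case 3: usage exceeds the new capacity, so purge LRU entries where feasible
  while (usage_ > capacity_ && !LruEmpty()) {
    EvictOldest();
  }
}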

Test Plan: Created unit tests in cache_test.cc

Reviewers: sdong, rven, yhchiang, igor

Reviewed By: igor

Subscribers: dhruba

Differential Revision: https://reviews.facebook.net/D37527
Author: Aashish Pant 2015-04-24 14:12:58 -07:00
Parent: 98a44559d5
Commit: 794ccfde89
4 changed files with 97 additions and 13 deletions


@ -2,6 +2,9 @@
## Unreleased
### New Features
* Added a new API Cache::SetCapacity(size_t capacity) to dynamically change the maximum configured capacity of the cache. If the new capacity is less than the existing cache usage, the implementation will try to lower the usage by evicting the necessary number of elements following a strict LRU policy.
* Added an experimental API for handling flashcache devices (blacklists background threads from caching their reads) -- NewFlashcacheAwareEnv
* If universal compaction is used and options.num_levels > 1, compacted files are stored in levels other than L0 as smaller files, sized according to options.target_file_size_base. Using more levels greatly mitigates the DB size limitation of universal compaction. You can set num_levels = 1 to make universal compaction behave as before. If you set num_levels > 1 and want to roll back to a previous version, you need to compact all files into one big file in level 0 (by setting target_file_size_base to a large value and calling CompactRange(<cf_handle>, nullptr, nullptr, true, 0)), then reopen the DB with the same version to rewrite the manifest; after that you can open it using previous releases.


@ -92,6 +92,12 @@ class Cache {
// its cache keys.
virtual uint64_t NewId() = 0;
// Sets the maximum configured capacity of the cache. When the new
// capacity is less than the old capacity and the existing usage is
// greater than the new capacity, the implementation will do its best to
// purge released entries from the cache in order to lower the usage.
virtual void SetCapacity(size_t capacity) = 0;
// Returns the maximum configured capacity of the cache.
virtual size_t GetCapacity() const = 0;
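For reference, a typical call sequence against this interface (capacity values are illustrative; NewLRUCache() is the existing factory for the built-in LRU implementation):

std::shared_ptr<Cache> cache = NewLRUCache(1024 * 1024);  // 1 MB cache
cache->SetCapacity(2 * 1024 * 1024);  // grow: capacity is simply updated
cache->SetCapacity(512 * 1024);       // shrink: unpinned entries may be evicted
size_t capacity = cache->GetCapacity();  // now returns 512 * 1024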


@ -192,7 +192,9 @@ class LRUCache {
~LRUCache();
// Separate from constructor so caller can easily make an array of LRUCache
void SetCapacity(size_t capacity) { capacity_ = capacity; }
// If the current usage is more than the new capacity, this function will
// attempt to free the needed space.
void SetCapacity(size_t capacity);
// Like Cache methods, but with an extra "hash" parameter.
Cache::Handle* Insert(const Slice& key, uint32_t hash,
@ -219,6 +221,13 @@ class LRUCache {
// Return true if last reference
bool Unref(LRUHandle* e);
// Free some space following strict LRU policy until enough space
// to hold (usage_ + charge) is freed or the lru list is empty.
// This function is not thread safe; it needs to be executed while
// holding the mutex_.
void EvictFromLRU(size_t charge,
autovector<LRUHandle*>* deleted);
// Initialized before use.
size_t capacity_;
@ -284,6 +293,35 @@ void LRUCache::LRU_Append(LRUHandle* e) {
e->next->prev = e;
}
void LRUCache::EvictFromLRU(size_t charge,
autovector<LRUHandle*>* deleted) {
while (usage_ + charge > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
assert(old->in_cache);
assert(old->refs == 1); // LRU list contains elements which may be evicted
LRU_Remove(old);
table_.Remove(old->key(), old->hash);
old->in_cache = false;
Unref(old);
usage_ -= old->charge;
deleted->push_back(old);
}
}
void LRUCache::SetCapacity(size_t capacity) {
autovector<LRUHandle*> last_reference_list;
{
MutexLock l(&mutex_);
capacity_ = capacity;
EvictFromLRU(0, &last_reference_list);
}
// We free the entries here outside of the mutex for
// performance reasons.
for (auto entry : last_reference_list) {
entry->Free();
}
}
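Releasing the mutex before calling Free() is deliberate: the critical section covers only the list and table bookkeeping, while the potentially expensive deleter callbacks run unlocked. A minimal standalone sketch of this collect-then-free pattern (Node and CollectEvictable() are hypothetical, not part of this diff):

autovector<Node*> victims;
{
  MutexLock l(&mutex_);        // hold the lock only while unlinking entries
  CollectEvictable(&victims);  // hypothetical helper: unlinks entries, frees nothing
}
for (Node* victim : victims) {
  victim->Free();              // slow work runs without holding the lock
}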
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
LRUHandle* e = table_.Lookup(key, hash);
@ -357,18 +395,7 @@ Cache::Handle* LRUCache::Insert(
// Free the space following strict LRU policy until enough space
// is freed or the lru list is empty
while (usage_ + charge > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
assert(old->in_cache);
assert(old->refs ==
1); // LRU list contains elements which may be evicted
LRU_Remove(old);
table_.Remove(old->key(), old->hash);
old->in_cache = false;
Unref(old);
usage_ -= old->charge;
last_reference_list.push_back(old);
}
EvictFromLRU(charge, &last_reference_list);
// insert into the cache
// note that the cache might get larger than its capacity if not enough
@ -427,6 +454,7 @@ class ShardedLRUCache : public Cache {
private:
LRUCache* shards_;
port::Mutex id_mutex_;
port::Mutex capacity_mutex_;
uint64_t last_id_;
int num_shard_bits_;
size_t capacity_;
@ -453,6 +481,15 @@ class ShardedLRUCache : public Cache {
virtual ~ShardedLRUCache() {
delete[] shards_;
}
virtual void SetCapacity(size_t capacity) override {
int num_shards = 1 << num_shard_bits_;
const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
MutexLock l(&capacity_mutex_);
for (int s = 0; s < num_shards; s++) {
shards_[s].SetCapacity(per_shard);
}
capacity_ = capacity;
}
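Note that per_shard rounds up, so the sum of the shard capacities can slightly exceed the requested total. A worked example (values illustrative):

int num_shards = 1 << 2;                // num_shard_bits_ = 2, so 4 shards
size_t per_shard = (10 + (4 - 1)) / 4;  // ceil(10 / 4) = 3
// Effective total = 4 shards * 3 = 12, slightly above the requested 10.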
virtual Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key,
void* value)) override {


@ -347,6 +347,44 @@ void deleter(const Slice& key, void* value) {
}
} // namespace
TEST_F(CacheTest, SetCapacity) {
// test1: increase capacity
// let's create a cache with capacity 5,
// then insert 5 elements, then increase capacity
// to 10; returned capacity should be 10, usage = 5
std::shared_ptr<Cache> cache = NewLRUCache(5, 0);
std::vector<Cache::Handle*> handles(10);
// Insert 5 entries, but not releasing.
for (size_t i = 0; i < 5; i++) {
std::string key = ToString(i + 1);
handles[i] = cache->Insert(key, new Value(i + 1), 1, &deleter);
}
ASSERT_EQ(5U, cache->GetCapacity());
ASSERT_EQ(5U, cache->GetUsage());
cache->SetCapacity(10);
ASSERT_EQ(10U, cache->GetCapacity());
ASSERT_EQ(5U, cache->GetUsage());
// test2: decrease capacity
// insert 5 more elements to the cache, then release 5,
// then decrease capacity to 7; final capacity should be 7
// and usage should be 7
for (size_t i = 5; i < 10; i++) {
std::string key = ToString(i + 1);
handles[i] = cache->Insert(key, new Value(i + 1), 1, &deleter);
}
ASSERT_EQ(10U, cache->GetCapacity());
ASSERT_EQ(10U, cache->GetUsage());
for (size_t i = 0; i < 5; i++) {
cache->Release(handles[i]);
}
ASSERT_EQ(10U, cache->GetCapacity());
ASSERT_EQ(10U, cache->GetUsage());
cache->SetCapacity(7);
ASSERT_EQ(7U, cache->GetCapacity());
ASSERT_EQ(7U, cache->GetUsage());
}
TEST_F(CacheTest, OverCapacity) {
size_t n = 10;