72f8cc703c
Summary: Add mid-point insertion functionality to LRU cache. A caller of `Cache::Insert()` can set an additional parameter to give a cache entry higher priority. The LRU cache will reserve at most `capacity * high_pri_pool_pct` bytes for high-priority entries. If `high_pri_pool_pct` is zero, the cache degenerates to a normal LRU cache.

Context: if we put index and filter blocks into the RocksDB block cache, they can be swapped out too early. We want to add an option to RocksDB that reserves some block cache capacity just for index/filter blocks, to mitigate the issue. In later diffs I'll update the block-based table reader to use this interface to cache index/filter blocks at high priority, expose the option through `DBOptions`, and make it dynamically changeable.

Test Plan: unit test.

Reviewers: IslamAbdelRahman, sdong, lightmark

Reviewed By: lightmark

Subscribers: andrewkr, dhruba, march, leveldb

Differential Revision: https://reviews.facebook.net/D61977
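To illustrate the interface described above, here is a minimal usage sketch (not part of this diff). It assumes a `NewLRUCache()` overload that exposes the high-priority pool fraction (the option plumbing lands in the later diffs mentioned in the summary) and `LOW`/`HIGH` values on `Cache::Priority`; the key name and the `DeleteBlock` helper are made up for the example.

// Hypothetical usage sketch, not part of this diff. Assumes a NewLRUCache()
// overload that accepts the high-priority pool fraction; key name and
// DeleteBlock helper are illustrative only.
#include <memory>

#include "rocksdb/cache.h"

namespace {

// Deleter invoked by the cache when the entry is evicted or erased.
void DeleteBlock(const rocksdb::Slice& /*key*/, void* value) {
  delete[] static_cast<char*>(value);
}

}  // namespace

int main() {
  // Reserve up to 20% of an 8 MiB cache for high-priority entries
  // (capacity * high_pri_pool_pct = 8 MiB * 0.2).
  std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(
      8 << 20 /*capacity*/, 4 /*num_shard_bits*/,
      false /*strict_capacity_limit*/, 0.2 /*high_pri_pool_pct, assumed*/);

  char* index_block = new char[4096];
  // Index/filter blocks would be inserted at high priority so they compete
  // for the reserved pool; data blocks would use Cache::Priority::LOW.
  cache->Insert("index_block_key", index_block, 4096 /*charge*/, &DeleteBlock,
                nullptr /*handle*/, rocksdb::Cache::Priority::HIGH);
  return 0;
}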
118 lines
3.6 KiB
C++
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "util/sharded_cache.h"

#include "util/mutexlock.h"

namespace rocksdb {

ShardedCache::ShardedCache(size_t capacity, int num_shard_bits,
                           bool strict_capacity_limit)
    : num_shard_bits_(num_shard_bits),
      capacity_(capacity),
      strict_capacity_limit_(strict_capacity_limit),
      last_id_(1) {}

void ShardedCache::SetCapacity(size_t capacity) {
  int num_shards = 1 << num_shard_bits_;
  const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
  MutexLock l(&capacity_mutex_);
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->SetCapacity(per_shard);
  }
  capacity_ = capacity;
}

void ShardedCache::SetStrictCapacityLimit(bool strict_capacity_limit) {
  int num_shards = 1 << num_shard_bits_;
  MutexLock l(&capacity_mutex_);
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->SetStrictCapacityLimit(strict_capacity_limit);
  }
  strict_capacity_limit_ = strict_capacity_limit;
}

Status ShardedCache::Insert(const Slice& key, void* value, size_t charge,
                            void (*deleter)(const Slice& key, void* value),
                            Handle** handle, Priority priority) {
  uint32_t hash = HashSlice(key);
  return GetShard(Shard(hash))
      ->Insert(key, hash, value, charge, deleter, handle, priority);
}

Cache::Handle* ShardedCache::Lookup(const Slice& key) {
  uint32_t hash = HashSlice(key);
  return GetShard(Shard(hash))->Lookup(key, hash);
}

void ShardedCache::Release(Handle* handle) {
  uint32_t hash = GetHash(handle);
  GetShard(Shard(hash))->Release(handle);
}

void ShardedCache::Erase(const Slice& key) {
  uint32_t hash = HashSlice(key);
  GetShard(Shard(hash))->Erase(key, hash);
}

uint64_t ShardedCache::NewId() {
  return last_id_.fetch_add(1, std::memory_order_relaxed);
}

size_t ShardedCache::GetCapacity() const {
  MutexLock l(&capacity_mutex_);
  return capacity_;
}

bool ShardedCache::HasStrictCapacityLimit() const {
  MutexLock l(&capacity_mutex_);
  return strict_capacity_limit_;
}

size_t ShardedCache::GetUsage() const {
  // We will not lock the cache when getting the usage from shards.
  int num_shards = 1 << num_shard_bits_;
  size_t usage = 0;
  for (int s = 0; s < num_shards; s++) {
    usage += GetShard(s)->GetUsage();
  }
  return usage;
}

size_t ShardedCache::GetUsage(Handle* handle) const {
  return GetCharge(handle);
}

size_t ShardedCache::GetPinnedUsage() const {
  // We will not lock the cache when getting the usage from shards.
  int num_shards = 1 << num_shard_bits_;
  size_t usage = 0;
  for (int s = 0; s < num_shards; s++) {
    usage += GetShard(s)->GetPinnedUsage();
  }
  return usage;
}

void ShardedCache::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                          bool thread_safe) {
  int num_shards = 1 << num_shard_bits_;
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->ApplyToAllCacheEntries(callback, thread_safe);
  }
}

void ShardedCache::EraseUnRefEntries() {
  int num_shards = 1 << num_shard_bits_;
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->EraseUnRefEntries();
  }
}

}  // namespace rocksdb