Address comments

anand76 2021-03-31 12:13:52 -07:00
parent b2c302597d
commit 8015fc9871
5 changed files with 85 additions and 80 deletions

cache/lru_cache.cc vendored

@@ -98,7 +98,7 @@ LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
                              double high_pri_pool_ratio,
                              bool use_adaptive_mutex,
                              CacheMetadataChargePolicy metadata_charge_policy,
-                             const std::shared_ptr<NvmCache>& nvm_cache)
+                             const std::shared_ptr<TieredCache>& tiered_cache)
     : capacity_(0),
       high_pri_pool_usage_(0),
       strict_capacity_limit_(strict_capacity_limit),

@@ -107,7 +107,7 @@ LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
       usage_(0),
       lru_usage_(0),
       mutex_(use_adaptive_mutex),
-      nvm_cache_(nvm_cache) {
+      tiered_cache_(tiered_cache) {
   set_metadata_charge_policy(metadata_charge_policy);
   // Make empty circular linked list
   lru_.next = &lru_;

@@ -261,8 +261,9 @@ void LRUCacheShard::SetCapacity(size_t capacity) {
   // Try to insert the evicted entries into NVM cache
   // Free the entries outside of mutex for performance reasons
   for (auto entry : last_reference_list) {
-    if (nvm_cache_ && entry->IsNvmCompatible() && !entry->IsPromoted()) {
-      nvm_cache_->Insert(entry->key(), entry->value, entry->info_.helper_cb)
+    if (tiered_cache_ && entry->IsTieredCacheCompatible() &&
+        !entry->IsPromoted()) {
+      tiered_cache_->Insert(entry->key(), entry->value, entry->info_.helper_cb)
           .PermitUncheckedError();
     }
     entry->Free();

@@ -329,8 +330,9 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle) {
   // Try to insert the evicted entries into NVM cache
   // Free the entries here outside of mutex for performance reasons
   for (auto entry : last_reference_list) {
-    if (nvm_cache_ && entry->IsNvmCompatible() && !entry->IsPromoted()) {
-      nvm_cache_->Insert(entry->key(), entry->value, entry->info_.helper_cb)
+    if (tiered_cache_ && entry->IsTieredCacheCompatible() &&
+        !entry->IsPromoted()) {
+      tiered_cache_->Insert(entry->key(), entry->value, entry->info_.helper_cb)
           .PermitUncheckedError();
     }
     entry->Free();

@@ -363,19 +365,19 @@ Cache::Handle* LRUCacheShard::Lookup(
   // mutex if we're going to lookup in the NVM cache
   // Only support synchronous for now
   // TODO: Support asynchronous lookup in NVM cache
-  if (!e && nvm_cache_ && helper_cb && wait) {
+  if (!e && tiered_cache_ && helper_cb && wait) {
     assert(create_cb);
-    std::unique_ptr<NvmCacheHandle> nvm_handle =
-        nvm_cache_->Lookup(key, create_cb, wait);
-    if (nvm_handle != nullptr) {
+    std::unique_ptr<TieredCacheHandle> tiered_handle =
+        tiered_cache_->Lookup(key, create_cb, wait);
+    if (tiered_handle != nullptr) {
       e = reinterpret_cast<LRUHandle*>(
           new char[sizeof(LRUHandle) - 1 + key.size()]);
       e->flags = 0;
       e->SetPromoted(true);
-      e->SetNvmCompatible(true);
+      e->SetTieredCacheCompatible(true);
       e->info_.helper_cb = helper_cb;
-      e->charge = nvm_handle->Size();
+      e->charge = tiered_handle->Size();
       e->key_length = key.size();
       e->hash = hash;
       e->refs = 0;

@@ -384,8 +386,8 @@ Cache::Handle* LRUCacheShard::Lookup(
       e->SetPriority(priority);
       memcpy(e->key_data, key.data(), key.size());
-      e->value = nvm_handle->Value();
-      e->charge = nvm_handle->Size();
+      e->value = tiered_handle->Value();
+      e->charge = tiered_handle->Size();

       // This call could nullify e if the cache is over capacity and
       // strict_capacity_limit_ is true. In such a case, the caller will try

@@ -465,7 +467,7 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
   e->value = value;
   e->flags = 0;
   if (helper_cb) {
-    e->SetNvmCompatible(true);
+    e->SetTieredCacheCompatible(true);
     e->info_.helper_cb = helper_cb;
   } else {
     e->info_.deleter = deleter;

@@ -536,7 +538,7 @@ LRUCache::LRUCache(size_t capacity, int num_shard_bits,
                    std::shared_ptr<MemoryAllocator> allocator,
                    bool use_adaptive_mutex,
                    CacheMetadataChargePolicy metadata_charge_policy,
-                   const std::shared_ptr<NvmCache>& nvm_cache)
+                   const std::shared_ptr<TieredCache>& tiered_cache)
     : ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
                    std::move(allocator)) {
   num_shards_ = 1 << num_shard_bits;

@@ -546,7 +548,7 @@ LRUCache::LRUCache(size_t capacity, int num_shard_bits,
   for (int i = 0; i < num_shards_; i++) {
     new (&shards_[i])
         LRUCacheShard(per_shard, strict_capacity_limit, high_pri_pool_ratio,
-                      use_adaptive_mutex, metadata_charge_policy, nvm_cache);
+                      use_adaptive_mutex, metadata_charge_policy, tiered_cache);
   }
 }

@@ -616,7 +618,7 @@ std::shared_ptr<Cache> NewLRUCache(
     double high_pri_pool_ratio,
     std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
     CacheMetadataChargePolicy metadata_charge_policy,
-    const std::shared_ptr<NvmCache>& nvm_cache) {
+    const std::shared_ptr<TieredCache>& tiered_cache) {
   if (num_shard_bits >= 20) {
     return nullptr;  // the cache cannot be sharded into too many fine pieces
   }

@@ -630,15 +632,15 @@
   return std::make_shared<LRUCache>(
       capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
       std::move(memory_allocator), use_adaptive_mutex, metadata_charge_policy,
-      nvm_cache);
+      tiered_cache);
 }

 std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
-  return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
-                     cache_opts.strict_capacity_limit,
-                     cache_opts.high_pri_pool_ratio,
-                     cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
-                     cache_opts.metadata_charge_policy, cache_opts.nvm_cache);
+  return NewLRUCache(
+      cache_opts.capacity, cache_opts.num_shard_bits,
+      cache_opts.strict_capacity_limit, cache_opts.high_pri_pool_ratio,
+      cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
+      cache_opts.metadata_charge_policy, cache_opts.tiered_cache);
 }

 std::shared_ptr<Cache> NewLRUCache(

cache/lru_cache.h vendored

@@ -13,7 +13,7 @@
 #include "cache/sharded_cache.h"
 #include "port/malloc.h"
 #include "port/port.h"
-#include "rocksdb/nvm_cache.h"
+#include "rocksdb/tiered_cache.h"
 #include "util/autovector.h"

 namespace ROCKSDB_NAMESPACE {

@@ -55,7 +55,7 @@ struct LRUHandle {
     void (*deleter)(const Slice&, void* value);
     ShardedCache::CacheItemHelperCallback helper_cb;
     // This needs to be explicitly constructed and destructed
-    std::unique_ptr<NvmCacheHandle> nvm_handle;
+    std::unique_ptr<TieredCacheHandle> tiered_handle;
   } info_;
   LRUHandle* next_hash;
   LRUHandle* next;

@@ -76,11 +76,11 @@ struct LRUHandle {
     IN_HIGH_PRI_POOL = (1 << 2),
     // Whether this entry has had any lookups (hits).
     HAS_HIT = (1 << 3),
-    // Can this be inserted into the NVM cache
-    IS_NVM_COMPATIBLE = (1 << 4),
-    // Is the handle still being read from NVM
-    IS_INCOMPLETE = (1 << 5),
-    // Has the item been promoted from NVM
+    // Can this be inserted into the tiered cache
+    IS_TIERED_CACHE_COMPATIBLE = (1 << 4),
+    // Is the handle still being read from a lower tier
+    IS_PENDING = (1 << 5),
+    // Has the item been promoted from a lower tier
     IS_PROMOTED = (1 << 6),
   };

@@ -108,8 +108,10 @@ struct LRUHandle {
   bool IsHighPri() const { return flags & IS_HIGH_PRI; }
   bool InHighPriPool() const { return flags & IN_HIGH_PRI_POOL; }
   bool HasHit() const { return flags & HAS_HIT; }
-  bool IsNvmCompatible() const { return flags & IS_NVM_COMPATIBLE; }
-  bool IsIncomplete() const { return flags & IS_INCOMPLETE; }
+  bool IsTieredCacheCompatible() const {
+    return flags & IS_TIERED_CACHE_COMPATIBLE;
+  }
+  bool IsPending() const { return flags & IS_PENDING; }
   bool IsPromoted() const { return flags & IS_PROMOTED; }

   void SetInCache(bool in_cache) {

@@ -138,19 +140,19 @@ struct LRUHandle {
   void SetHit() { flags |= HAS_HIT; }

-  void SetNvmCompatible(bool nvm) {
-    if (nvm) {
-      flags |= IS_NVM_COMPATIBLE;
+  void SetTieredCacheCompatible(bool tiered) {
+    if (tiered) {
+      flags |= IS_TIERED_CACHE_COMPATIBLE;
     } else {
-      flags &= ~IS_NVM_COMPATIBLE;
+      flags &= ~IS_TIERED_CACHE_COMPATIBLE;
     }
   }

   void SetIncomplete(bool incomp) {
     if (incomp) {
-      flags |= IS_INCOMPLETE;
+      flags |= IS_PENDING;
     } else {
-      flags &= ~IS_INCOMPLETE;
+      flags &= ~IS_PENDING;
     }
   }

@@ -164,9 +166,9 @@ struct LRUHandle {
   void Free() {
     assert(refs == 0);
-    if (!IsNvmCompatible() && info_.deleter) {
+    if (!IsTieredCacheCompatible() && info_.deleter) {
       (*info_.deleter)(key(), value);
-    } else if (IsNvmCompatible()) {
+    } else if (IsTieredCacheCompatible()) {
       ShardedCache::DeletionCallback del_cb;
       (*info_.helper_cb)(nullptr, nullptr, &del_cb);
       (*del_cb)(key(), value);

@@ -238,7 +240,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   LRUCacheShard(size_t capacity, bool strict_capacity_limit,
                 double high_pri_pool_ratio, bool use_adaptive_mutex,
                 CacheMetadataChargePolicy metadata_charge_policy,
-                const std::shared_ptr<NvmCache>& nvm_cache);
+                const std::shared_ptr<TieredCache>& tiered_cache);
   virtual ~LRUCacheShard() override = default;

   // Separate from constructor so caller can easily make an array of LRUCache

@@ -378,7 +380,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   // don't mind mutex_ invoking the non-const actions.
   mutable port::Mutex mutex_;

-  std::shared_ptr<NvmCache> nvm_cache_;
+  std::shared_ptr<TieredCache> tiered_cache_;
 };

 class LRUCache

@@ -393,7 +395,7 @@ class LRUCache
            bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
            CacheMetadataChargePolicy metadata_charge_policy =
                kDontChargeCacheMetadata,
-           const std::shared_ptr<NvmCache>& nvm_cache = nullptr);
+           const std::shared_ptr<TieredCache>& tiered_cache = nullptr);
   virtual ~LRUCache();
   virtual const char* Name() const override { return "LRUCache"; }
   virtual CacheShard* GetShard(int shard) override;
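
The LRUHandle flag machinery in this header packs several booleans into a single integer, with each state owning one bit. As a quick illustration, here is a self-contained toy version of the same pattern; the surrounding ToyHandle struct is invented for this sketch and is not part of the commit:

#include <cassert>
#include <cstdint>

// Toy version of the LRUHandle bit-flag technique: each state gets its own
// power-of-two bit, so set/clear/test are single bitwise operations.
struct ToyHandle {
  enum Flags : uint8_t {
    IS_TIERED_CACHE_COMPATIBLE = (1 << 4),
    IS_PENDING = (1 << 5),
    IS_PROMOTED = (1 << 6),
  };
  uint8_t flags = 0;

  bool IsPending() const { return flags & IS_PENDING; }
  void SetPending(bool pending) {
    if (pending) {
      flags |= IS_PENDING;   // turn the bit on
    } else {
      flags &= ~IS_PENDING;  // turn the bit off, leaving other bits intact
    }
  }
};

int main() {
  ToyHandle h;
  h.SetPending(true);
  assert(h.IsPending());
  h.SetPending(false);
  assert(!h.IsPending());
  return 0;
}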

cache/lru_cache_test.cc vendored

@@ -34,9 +34,9 @@ class LRUCacheTest : public testing::Test {
     DeleteCache();
     cache_ = reinterpret_cast<LRUCacheShard*>(
         port::cacheline_aligned_alloc(sizeof(LRUCacheShard)));
-    new (cache_) LRUCacheShard(capacity, false /*strict_capacity_limit*/,
-                               high_pri_pool_ratio, use_adaptive_mutex,
-                               kDontChargeCacheMetadata, nullptr /*nvm_cache*/);
+    new (cache_) LRUCacheShard(
+        capacity, false /*strict_capacity_limit*/, high_pri_pool_ratio,
+        use_adaptive_mutex, kDontChargeCacheMetadata, nullptr /*tiered_cache*/);
   }

   void Insert(const std::string& key,

@@ -195,15 +195,15 @@ TEST_F(LRUCacheTest, EntriesWithPriority) {
   ValidateLRUList({"e", "f", "g", "Z", "d"}, 2);
 }

-class TestNvmCache : public NvmCache {
+class TestTieredCache : public TieredCache {
  public:
-  TestNvmCache(size_t capacity) : num_inserts_(0), num_lookups_(0) {
+  TestTieredCache(size_t capacity) : num_inserts_(0), num_lookups_(0) {
     cache_ = NewLRUCache(capacity, 0, false, 0.5, nullptr,
                          kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
   }
-  ~TestNvmCache() { cache_.reset(); }
+  ~TestTieredCache() { cache_.reset(); }

-  std::string Name() override { return "TestNvmCache"; }
+  std::string Name() override { return "TestTieredCache"; }

   Status Insert(const Slice& key, void* value,
                 Cache::CacheItemHelperCallback helper_cb) override {

@@ -226,10 +226,10 @@ class TestNvmCache : public NvmCache {
     });
   }

-  std::unique_ptr<NvmCacheHandle> Lookup(const Slice& key,
-                                         const Cache::CreateCallback& create_cb,
-                                         bool /*wait*/) override {
-    std::unique_ptr<NvmCacheHandle> nvm_handle;
+  std::unique_ptr<TieredCacheHandle> Lookup(
+      const Slice& key, const Cache::CreateCallback& create_cb,
+      bool /*wait*/) override {
+    std::unique_ptr<TieredCacheHandle> tiered_handle;
     Cache::Handle* handle = cache_->Lookup(key);
     num_lookups_++;
     if (handle) {

@@ -240,15 +240,15 @@ class TestNvmCache : public NvmCache {
       ptr += sizeof(uint64_t);
       Status s = create_cb(ptr, size, &value, &charge);
       EXPECT_OK(s);
-      nvm_handle.reset(
-          new TestNvmCacheHandle(cache_.get(), handle, value, charge));
+      tiered_handle.reset(
+          new TestTieredCacheHandle(cache_.get(), handle, value, charge));
     }
-    return nvm_handle;
+    return tiered_handle;
   }

   void Erase(const Slice& /*key*/) override {}

-  void WaitAll(std::vector<NvmCacheHandle*> /*handles*/) override {}
+  void WaitAll(std::vector<TieredCacheHandle*> /*handles*/) override {}

   std::string GetPrintableOptions() const override { return ""; }

@@ -257,12 +257,12 @@ class TestNvmCache : public NvmCache {
   uint32_t num_lookups() { return num_lookups_; }

  private:
-  class TestNvmCacheHandle : public NvmCacheHandle {
+  class TestTieredCacheHandle : public TieredCacheHandle {
    public:
-    TestNvmCacheHandle(Cache* cache, Cache::Handle* handle, void* value,
-                       size_t size)
+    TestTieredCacheHandle(Cache* cache, Cache::Handle* handle, void* value,
+                          size_t size)
         : cache_(cache), handle_(handle), value_(value), size_(size) {}
-    ~TestNvmCacheHandle() { cache_->Release(handle_); }
+    ~TestTieredCacheHandle() { cache_->Release(handle_); }

     bool isReady() override { return true; }

@@ -284,11 +284,11 @@ class TestNvmCache : public NvmCache {
   uint32_t num_lookups_;
 };

-TEST_F(LRUCacheTest, TestNvmCache) {
+TEST_F(LRUCacheTest, TestTieredCache) {
   LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                        kDontChargeCacheMetadata);
-  std::shared_ptr<TestNvmCache> nvm_cache(new TestNvmCache(2048));
-  opts.nvm_cache = nvm_cache;
+  std::shared_ptr<TestTieredCache> tiered_cache(new TestTieredCache(2048));
+  opts.tiered_cache = tiered_cache;
   std::shared_ptr<Cache> cache = NewLRUCache(opts);

   class TestItem {

@@ -361,8 +361,8 @@ TEST_F(LRUCacheTest, TestNvmCache) {
                          Cache::Priority::LOW, true);
   ASSERT_NE(handle, nullptr);
   cache->Release(handle);
-  ASSERT_EQ(nvm_cache->num_inserts(), 2u);
-  ASSERT_EQ(nvm_cache->num_lookups(), 1u);
+  ASSERT_EQ(tiered_cache->num_inserts(), 2u);
+  ASSERT_EQ(tiered_cache->num_lookups(), 1u);
 }

 }  // namespace ROCKSDB_NAMESPACE

include/rocksdb/cache.h vendored

@@ -37,7 +37,7 @@ namespace ROCKSDB_NAMESPACE {

 class Cache;
 struct ConfigOptions;
-class NvmCache;
+class TieredCache;

 extern const bool kDefaultToAdaptiveMutex;

@@ -91,8 +91,8 @@ struct LRUCacheOptions {
   CacheMetadataChargePolicy metadata_charge_policy =
       kDefaultCacheMetadataChargePolicy;

-  // An NvmCache instance to use as the non-volatile tier
-  std::shared_ptr<NvmCache> nvm_cache;
+  // A TieredCache instance to use as the non-volatile tier
+  std::shared_ptr<TieredCache> tiered_cache;

   LRUCacheOptions() {}
   LRUCacheOptions(size_t _capacity, int _num_shard_bits,
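
For context, a minimal sketch of how a caller would wire a lower tier into the block cache through this new option. It mirrors the seven-argument LRUCacheOptions constructor used by the test above; MakeTieredBlockCache is an invented helper name, not an API from this commit:

#include <memory>

#include "rocksdb/cache.h"
#include "rocksdb/tiered_cache.h"

using namespace ROCKSDB_NAMESPACE;

// Sketch: build an LRU block cache backed by a TieredCache implementation.
std::shared_ptr<Cache> MakeTieredBlockCache(
    const std::shared_ptr<TieredCache>& tier) {
  LRUCacheOptions opts(1024 /*capacity*/, 0 /*num_shard_bits*/,
                       false /*strict_capacity_limit*/,
                       0.5 /*high_pri_pool_ratio*/, nullptr /*allocator*/,
                       kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
  opts.tiered_cache = tier;  // field introduced by this commit
  // NewLRUCache(const LRUCacheOptions&) forwards opts.tiered_cache to the
  // shards, as shown in the lru_cache.cc hunks above.
  return NewLRUCache(opts);
}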

include/rocksdb/tiered_cache.h vendored

@@ -21,9 +21,9 @@ namespace ROCKSDB_NAMESPACE {
 // ready, and call Wait() in order to block until it becomes ready.
 // The caller must call value() after it becomes ready to determine if the
 // handle successfully read the item.
-class NvmCacheHandle {
+class TieredCacheHandle {
  public:
-  virtual ~NvmCacheHandle() {}
+  virtual ~TieredCacheHandle() {}

   // Returns whether the handle is ready or not
   virtual bool isReady() = 0;

@@ -38,19 +38,20 @@ class NvmCacheHandle {
   virtual size_t Size() = 0;
 };

-// NvmCache
+// TieredCache
 //
-// NVM cache interface for caching blocks on a persistent medium.
-class NvmCache {
+// Cache interface for caching blocks on stackable tiers (which can include
+// non-volatile media)
+class TieredCache {
  public:
-  virtual ~NvmCache() {}
+  virtual ~TieredCache() {}

   virtual std::string Name() = 0;

   // Insert the given value into the NVM cache. The value is not written
   // directly. Rather, the SaveToCallback provided by helper_cb will be
   // used to extract the persistable data in value, which will be written
-  // to NVM. The implementation may or may not write it to NVM depending
+  // to this tier. The implementation may or may not write it to NVM depending
   // on the admission control policy, even if the return status is success.
   virtual Status Insert(const Slice& key, void* value,
                         Cache::CacheItemHelperCallback helper_cb) = 0;

@@ -59,7 +60,7 @@ class NvmCache {
   // will be used to create the object. The handle returned may not be
   // ready yet, unless wait=true, in which case Lookup() will block until
   // the handle is ready
-  virtual std::unique_ptr<NvmCacheHandle> Lookup(
+  virtual std::unique_ptr<TieredCacheHandle> Lookup(
       const Slice& key, const Cache::CreateCallback& create_cb, bool wait) = 0;

   // At the discretion of the implementation, erase the data associated

@@ -67,7 +68,7 @@ class NvmCache {
   virtual void Erase(const Slice& key) = 0;

   // Wait for a collection of handles to become ready
-  virtual void WaitAll(std::vector<NvmCacheHandle*> handles) = 0;
+  virtual void WaitAll(std::vector<TieredCacheHandle*> handles) = 0;

   virtual std::string GetPrintableOptions() const = 0;
 };
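
Pulling the pieces together, the handle contract described above (isReady(), then Wait() if needed, then Value()/Size()) implies a caller-side flow roughly like the following. This is a sketch against the interface as it stands in this commit, assuming the Wait() method the class comment describes; LookupBlocking is an invented name and error handling is elided:

#include <memory>

#include "rocksdb/tiered_cache.h"

using namespace ROCKSDB_NAMESPACE;

// Sketch: look a key up in one tier and block until the result is usable.
void* LookupBlocking(TieredCache& tier, const Slice& key,
                     const Cache::CreateCallback& create_cb) {
  // wait=false permits the tier to return a not-yet-ready handle; the
  // lru_cache.cc hunks above currently take only the synchronous path
  // (wait=true).
  std::unique_ptr<TieredCacheHandle> handle =
      tier.Lookup(key, create_cb, false /*wait*/);
  if (!handle) {
    return nullptr;  // miss in this tier
  }
  if (!handle->isReady()) {
    handle->Wait();  // block until the lower-tier read completes
  }
  return handle->Value();  // object materialized by create_cb
}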