Address comments

anand76 2021-03-31 12:13:52 -07:00
parent b2c302597d
commit 8015fc9871
5 changed files with 85 additions and 80 deletions

cache/lru_cache.cc (50 changed lines)

@@ -98,7 +98,7 @@ LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
                              double high_pri_pool_ratio,
                              bool use_adaptive_mutex,
                              CacheMetadataChargePolicy metadata_charge_policy,
-                             const std::shared_ptr<NvmCache>& nvm_cache)
+                             const std::shared_ptr<TieredCache>& tiered_cache)
     : capacity_(0),
       high_pri_pool_usage_(0),
       strict_capacity_limit_(strict_capacity_limit),
@@ -107,7 +107,7 @@ LRUCacheShard::LRUCacheShard(size_t capacity, bool strict_capacity_limit,
       usage_(0),
       lru_usage_(0),
       mutex_(use_adaptive_mutex),
-      nvm_cache_(nvm_cache) {
+      tiered_cache_(tiered_cache) {
   set_metadata_charge_policy(metadata_charge_policy);
   // Make empty circular linked list
   lru_.next = &lru_;
@@ -261,8 +261,9 @@ void LRUCacheShard::SetCapacity(size_t capacity) {
   // Try to insert the evicted entries into NVM cache
   // Free the entries outside of mutex for performance reasons
   for (auto entry : last_reference_list) {
-    if (nvm_cache_ && entry->IsNvmCompatible() && !entry->IsPromoted()) {
-      nvm_cache_->Insert(entry->key(), entry->value, entry->info_.helper_cb)
+    if (tiered_cache_ && entry->IsTieredCacheCompatible() &&
+        !entry->IsPromoted()) {
+      tiered_cache_->Insert(entry->key(), entry->value, entry->info_.helper_cb)
           .PermitUncheckedError();
     }
     entry->Free();
@@ -329,8 +330,9 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle) {
   // Try to insert the evicted entries into NVM cache
   // Free the entries here outside of mutex for performance reasons
   for (auto entry : last_reference_list) {
-    if (nvm_cache_ && entry->IsNvmCompatible() && !entry->IsPromoted()) {
-      nvm_cache_->Insert(entry->key(), entry->value, entry->info_.helper_cb)
+    if (tiered_cache_ && entry->IsTieredCacheCompatible() &&
+        !entry->IsPromoted()) {
+      tiered_cache_->Insert(entry->key(), entry->value, entry->info_.helper_cb)
          .PermitUncheckedError();
    }
    entry->Free();
@@ -363,19 +365,19 @@ Cache::Handle* LRUCacheShard::Lookup(
   // mutex if we're going to lookup in the NVM cache
   // Only support synchronous for now
   // TODO: Support asynchronous lookup in NVM cache
-  if (!e && nvm_cache_ && helper_cb && wait) {
+  if (!e && tiered_cache_ && helper_cb && wait) {
     assert(create_cb);
-    std::unique_ptr<NvmCacheHandle> nvm_handle =
-        nvm_cache_->Lookup(key, create_cb, wait);
-    if (nvm_handle != nullptr) {
+    std::unique_ptr<TieredCacheHandle> tiered_handle =
+        tiered_cache_->Lookup(key, create_cb, wait);
+    if (tiered_handle != nullptr) {
       e = reinterpret_cast<LRUHandle*>(
           new char[sizeof(LRUHandle) - 1 + key.size()]);
       e->flags = 0;
       e->SetPromoted(true);
-      e->SetNvmCompatible(true);
+      e->SetTieredCacheCompatible(true);
       e->info_.helper_cb = helper_cb;
-      e->charge = nvm_handle->Size();
+      e->charge = tiered_handle->Size();
       e->key_length = key.size();
       e->hash = hash;
       e->refs = 0;
@@ -384,8 +386,8 @@ Cache::Handle* LRUCacheShard::Lookup(
       e->SetPriority(priority);
       memcpy(e->key_data, key.data(), key.size());
-      e->value = nvm_handle->Value();
-      e->charge = nvm_handle->Size();
+      e->value = tiered_handle->Value();
+      e->charge = tiered_handle->Size();
 
       // This call could nullify e if the cache is over capacity and
       // strict_capacity_limit_ is true. In such a case, the caller will try
@@ -465,7 +467,7 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
   e->value = value;
   e->flags = 0;
   if (helper_cb) {
-    e->SetNvmCompatible(true);
+    e->SetTieredCacheCompatible(true);
     e->info_.helper_cb = helper_cb;
   } else {
     e->info_.deleter = deleter;
@@ -536,7 +538,7 @@ LRUCache::LRUCache(size_t capacity, int num_shard_bits,
                    std::shared_ptr<MemoryAllocator> allocator,
                    bool use_adaptive_mutex,
                    CacheMetadataChargePolicy metadata_charge_policy,
-                   const std::shared_ptr<NvmCache>& nvm_cache)
+                   const std::shared_ptr<TieredCache>& tiered_cache)
     : ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
                    std::move(allocator)) {
   num_shards_ = 1 << num_shard_bits;
@@ -546,7 +548,7 @@ LRUCache::LRUCache(size_t capacity, int num_shard_bits,
   for (int i = 0; i < num_shards_; i++) {
     new (&shards_[i])
         LRUCacheShard(per_shard, strict_capacity_limit, high_pri_pool_ratio,
-                      use_adaptive_mutex, metadata_charge_policy, nvm_cache);
+                      use_adaptive_mutex, metadata_charge_policy, tiered_cache);
   }
 }
@@ -616,7 +618,7 @@ std::shared_ptr<Cache> NewLRUCache(
     double high_pri_pool_ratio,
     std::shared_ptr<MemoryAllocator> memory_allocator, bool use_adaptive_mutex,
     CacheMetadataChargePolicy metadata_charge_policy,
-    const std::shared_ptr<NvmCache>& nvm_cache) {
+    const std::shared_ptr<TieredCache>& tiered_cache) {
   if (num_shard_bits >= 20) {
     return nullptr;  // the cache cannot be sharded into too many fine pieces
   }
@@ -630,15 +632,15 @@ std::shared_ptr<Cache> NewLRUCache(
   return std::make_shared<LRUCache>(
       capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
       std::move(memory_allocator), use_adaptive_mutex, metadata_charge_policy,
-      nvm_cache);
+      tiered_cache);
 }
 
 std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
-  return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
-                     cache_opts.strict_capacity_limit,
-                     cache_opts.high_pri_pool_ratio,
-                     cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
-                     cache_opts.metadata_charge_policy, cache_opts.nvm_cache);
+  return NewLRUCache(
+      cache_opts.capacity, cache_opts.num_shard_bits,
+      cache_opts.strict_capacity_limit, cache_opts.high_pri_pool_ratio,
+      cache_opts.memory_allocator, cache_opts.use_adaptive_mutex,
+      cache_opts.metadata_charge_policy, cache_opts.tiered_cache);
 }
 
 std::shared_ptr<Cache> NewLRUCache(
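
Aside: the hunks above wire the lower tier into two paths, demotion on eviction and promotion on lookup. A minimal standalone sketch of just the eviction-time gate, using hypothetical stand-in types (Entry, LowerTier) rather than the real LRUHandle/TieredCache classes:

#include <memory>
#include <string>
#include <vector>

// Hypothetical stand-ins, only to illustrate the gating logic added above.
struct Entry {
  std::string key;
  bool tiered_compatible = false;  // models LRUHandle::IsTieredCacheCompatible()
  bool promoted = false;           // models LRUHandle::IsPromoted()
};

struct LowerTier {
  std::vector<std::string> inserted;
  void Insert(const std::string& key) { inserted.push_back(key); }
};

// Mirrors the loop in SetCapacity()/InsertItem(): demote an evicted entry
// only if a lower tier is configured, the entry can be persisted through
// its helper callback, and it was not itself just promoted from that tier.
void DemoteEvicted(const std::vector<Entry>& evicted, LowerTier* tier) {
  for (const Entry& e : evicted) {
    if (tier != nullptr && e.tiered_compatible && !e.promoted) {
      tier->Insert(e.key);
    }
  }
}

The !promoted check keeps an entry that was just read back from the lower tier from being written straight back down when it is next evicted.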

cache/lru_cache.h (42 changed lines)

@@ -13,7 +13,7 @@
 #include "cache/sharded_cache.h"
 #include "port/malloc.h"
 #include "port/port.h"
-#include "rocksdb/nvm_cache.h"
+#include "rocksdb/tiered_cache.h"
 #include "util/autovector.h"
 
 namespace ROCKSDB_NAMESPACE {
@@ -55,7 +55,7 @@ struct LRUHandle {
     void (*deleter)(const Slice&, void* value);
     ShardedCache::CacheItemHelperCallback helper_cb;
     // This needs to be explicitly constructed and destructed
-    std::unique_ptr<NvmCacheHandle> nvm_handle;
+    std::unique_ptr<TieredCacheHandle> tiered_handle;
   } info_;
   LRUHandle* next_hash;
   LRUHandle* next;
@@ -76,11 +76,11 @@ struct LRUHandle {
     IN_HIGH_PRI_POOL = (1 << 2),
     // Whether this entry has had any lookups (hits).
     HAS_HIT = (1 << 3),
-    // Can this be inserted into the NVM cache
-    IS_NVM_COMPATIBLE = (1 << 4),
-    // Is the handle still being read from NVM
-    IS_INCOMPLETE = (1 << 5),
-    // Has the item been promoted from NVM
+    // Can this be inserted into the tiered cache
+    IS_TIERED_CACHE_COMPATIBLE = (1 << 4),
+    // Is the handle still being read from a lower tier
+    IS_PENDING = (1 << 5),
+    // Has the item been promoted from a lower tier
     IS_PROMOTED = (1 << 6),
   };
@@ -108,8 +108,10 @@ struct LRUHandle {
   bool IsHighPri() const { return flags & IS_HIGH_PRI; }
   bool InHighPriPool() const { return flags & IN_HIGH_PRI_POOL; }
   bool HasHit() const { return flags & HAS_HIT; }
-  bool IsNvmCompatible() const { return flags & IS_NVM_COMPATIBLE; }
-  bool IsIncomplete() const { return flags & IS_INCOMPLETE; }
+  bool IsTieredCacheCompatible() const {
+    return flags & IS_TIERED_CACHE_COMPATIBLE;
+  }
+  bool IsPending() const { return flags & IS_PENDING; }
   bool IsPromoted() const { return flags & IS_PROMOTED; }
 
   void SetInCache(bool in_cache) {
@@ -138,19 +140,19 @@ struct LRUHandle {
   void SetHit() { flags |= HAS_HIT; }
 
-  void SetNvmCompatible(bool nvm) {
-    if (nvm) {
-      flags |= IS_NVM_COMPATIBLE;
+  void SetTieredCacheCompatible(bool tiered) {
+    if (tiered) {
+      flags |= IS_TIERED_CACHE_COMPATIBLE;
     } else {
-      flags &= ~IS_NVM_COMPATIBLE;
+      flags &= ~IS_TIERED_CACHE_COMPATIBLE;
     }
   }
 
   void SetIncomplete(bool incomp) {
     if (incomp) {
-      flags |= IS_INCOMPLETE;
+      flags |= IS_PENDING;
     } else {
-      flags &= ~IS_INCOMPLETE;
+      flags &= ~IS_PENDING;
     }
   }
@@ -164,9 +166,9 @@ struct LRUHandle {
   void Free() {
     assert(refs == 0);
-    if (!IsNvmCompatible() && info_.deleter) {
+    if (!IsTieredCacheCompatible() && info_.deleter) {
       (*info_.deleter)(key(), value);
-    } else if (IsNvmCompatible()) {
+    } else if (IsTieredCacheCompatible()) {
       ShardedCache::DeletionCallback del_cb;
       (*info_.helper_cb)(nullptr, nullptr, &del_cb);
       (*del_cb)(key(), value);
@@ -238,7 +240,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   LRUCacheShard(size_t capacity, bool strict_capacity_limit,
                 double high_pri_pool_ratio, bool use_adaptive_mutex,
                 CacheMetadataChargePolicy metadata_charge_policy,
-                const std::shared_ptr<NvmCache>& nvm_cache);
+                const std::shared_ptr<TieredCache>& tiered_cache);
   virtual ~LRUCacheShard() override = default;
 
   // Separate from constructor so caller can easily make an array of LRUCache
@@ -378,7 +380,7 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard final : public CacheShard {
   // don't mind mutex_ invoking the non-const actions.
   mutable port::Mutex mutex_;
 
-  std::shared_ptr<NvmCache> nvm_cache_;
+  std::shared_ptr<TieredCache> tiered_cache_;
 };
 
 class LRUCache
@@ -393,7 +395,7 @@ class LRUCache
            bool use_adaptive_mutex = kDefaultToAdaptiveMutex,
            CacheMetadataChargePolicy metadata_charge_policy =
                kDontChargeCacheMetadata,
-           const std::shared_ptr<NvmCache>& nvm_cache = nullptr);
+           const std::shared_ptr<TieredCache>& tiered_cache = nullptr);
   virtual ~LRUCache();
   virtual const char* Name() const override { return "LRUCache"; }
   virtual CacheShard* GetShard(int shard) override;

cache/lru_cache_test.cc

@@ -34,9 +34,9 @@ class LRUCacheTest : public testing::Test {
     DeleteCache();
     cache_ = reinterpret_cast<LRUCacheShard*>(
        port::cacheline_aligned_alloc(sizeof(LRUCacheShard)));
-    new (cache_) LRUCacheShard(capacity, false /*strict_capcity_limit*/,
-                               high_pri_pool_ratio, use_adaptive_mutex,
-                               kDontChargeCacheMetadata, nullptr /*nvm_cache*/);
+    new (cache_) LRUCacheShard(
+        capacity, false /*strict_capcity_limit*/, high_pri_pool_ratio,
+        use_adaptive_mutex, kDontChargeCacheMetadata, nullptr /*tiered_cache*/);
   }
 
   void Insert(const std::string& key,
void Insert(const std::string& key, void Insert(const std::string& key,
@@ -195,15 +195,15 @@ TEST_F(LRUCacheTest, EntriesWithPriority) {
   ValidateLRUList({"e", "f", "g", "Z", "d"}, 2);
 }
 
-class TestNvmCache : public NvmCache {
+class TestTieredCache : public TieredCache {
  public:
-  TestNvmCache(size_t capacity) : num_inserts_(0), num_lookups_(0) {
+  TestTieredCache(size_t capacity) : num_inserts_(0), num_lookups_(0) {
     cache_ = NewLRUCache(capacity, 0, false, 0.5, nullptr,
                          kDefaultToAdaptiveMutex, kDontChargeCacheMetadata);
   }
-  ~TestNvmCache() { cache_.reset(); }
+  ~TestTieredCache() { cache_.reset(); }
 
-  std::string Name() override { return "TestNvmCache"; }
+  std::string Name() override { return "TestTieredCache"; }
 
   Status Insert(const Slice& key, void* value,
                 Cache::CacheItemHelperCallback helper_cb) override {
@@ -226,10 +226,10 @@ class TestNvmCache : public NvmCache {
     });
   }
 
-  std::unique_ptr<NvmCacheHandle> Lookup(const Slice& key,
-                                         const Cache::CreateCallback& create_cb,
-                                         bool /*wait*/) override {
-    std::unique_ptr<NvmCacheHandle> nvm_handle;
+  std::unique_ptr<TieredCacheHandle> Lookup(
+      const Slice& key, const Cache::CreateCallback& create_cb,
+      bool /*wait*/) override {
+    std::unique_ptr<TieredCacheHandle> tiered_handle;
     Cache::Handle* handle = cache_->Lookup(key);
     num_lookups_++;
     if (handle) {
@@ -240,15 +240,15 @@ class TestNvmCache : public NvmCache {
       ptr += sizeof(uint64_t);
       Status s = create_cb(ptr, size, &value, &charge);
       EXPECT_OK(s);
-      nvm_handle.reset(
-          new TestNvmCacheHandle(cache_.get(), handle, value, charge));
+      tiered_handle.reset(
+          new TestTieredCacheHandle(cache_.get(), handle, value, charge));
     }
-    return nvm_handle;
+    return tiered_handle;
   }
 
   void Erase(const Slice& /*key*/) override {}
 
-  void WaitAll(std::vector<NvmCacheHandle*> /*handles*/) override {}
+  void WaitAll(std::vector<TieredCacheHandle*> /*handles*/) override {}
 
   std::string GetPrintableOptions() const override { return ""; }
@@ -257,12 +257,12 @@ class TestNvmCache : public NvmCache {
   uint32_t num_lookups() { return num_lookups_; }
 
  private:
-  class TestNvmCacheHandle : public NvmCacheHandle {
+  class TestTieredCacheHandle : public TieredCacheHandle {
    public:
-    TestNvmCacheHandle(Cache* cache, Cache::Handle* handle, void* value,
-                       size_t size)
+    TestTieredCacheHandle(Cache* cache, Cache::Handle* handle, void* value,
+                          size_t size)
         : cache_(cache), handle_(handle), value_(value), size_(size) {}
-    ~TestNvmCacheHandle() { cache_->Release(handle_); }
+    ~TestTieredCacheHandle() { cache_->Release(handle_); }
 
     bool isReady() override { return true; }
@@ -284,11 +284,11 @@ class TestNvmCache : public NvmCache {
   uint32_t num_lookups_;
 };
 
-TEST_F(LRUCacheTest, TestNvmCache) {
+TEST_F(LRUCacheTest, TestTieredCache) {
   LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                        kDontChargeCacheMetadata);
-  std::shared_ptr<TestNvmCache> nvm_cache(new TestNvmCache(2048));
-  opts.nvm_cache = nvm_cache;
+  std::shared_ptr<TestTieredCache> tiered_cache(new TestTieredCache(2048));
+  opts.tiered_cache = tiered_cache;
   std::shared_ptr<Cache> cache = NewLRUCache(opts);
 
   class TestItem {
@@ -361,8 +361,8 @@ TEST_F(LRUCacheTest, TestNvmCache) {
                        Cache::Priority::LOW, true);
   ASSERT_NE(handle, nullptr);
   cache->Release(handle);
-  ASSERT_EQ(nvm_cache->num_inserts(), 2u);
-  ASSERT_EQ(nvm_cache->num_lookups(), 1u);
+  ASSERT_EQ(tiered_cache->num_inserts(), 2u);
+  ASSERT_EQ(tiered_cache->num_lookups(), 1u);
 }
 
 }  // namespace ROCKSDB_NAMESPACE

include/rocksdb/cache.h

@@ -37,7 +37,7 @@ namespace ROCKSDB_NAMESPACE {
 class Cache;
 struct ConfigOptions;
-class NvmCache;
+class TieredCache;
 
 extern const bool kDefaultToAdaptiveMutex;
@@ -91,8 +91,8 @@ struct LRUCacheOptions {
   CacheMetadataChargePolicy metadata_charge_policy =
       kDefaultCacheMetadataChargePolicy;
 
-  // An NvmCache instance to use a the non-volatile tier
-  std::shared_ptr<NvmCache> nvm_cache;
+  // A TieredCache instance to use a the non-volatile tier
+  std::shared_ptr<TieredCache> tiered_cache;
 
   LRUCacheOptions() {}
   LRUCacheOptions(size_t _capacity, int _num_shard_bits,
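
Aside: a minimal usage sketch of the renamed field, assuming the post-rename API above and the seven-argument LRUCacheOptions constructor exercised in lru_cache_test.cc; MakeTieredLRUCache is a hypothetical helper, not part of the commit:

#include <memory>
#include <utility>
#include "rocksdb/cache.h"
#include "rocksdb/tiered_cache.h"

using namespace ROCKSDB_NAMESPACE;

std::shared_ptr<Cache> MakeTieredLRUCache(
    std::shared_ptr<TieredCache> lower_tier) {
  // Same option values as the TestTieredCache test above:
  // (capacity, num_shard_bits, strict_capacity_limit, high_pri_pool_ratio,
  //  memory_allocator, use_adaptive_mutex, metadata_charge_policy)
  LRUCacheOptions opts(1024, 0, false, 0.5, nullptr, kDefaultToAdaptiveMutex,
                       kDontChargeCacheMetadata);
  opts.tiered_cache = std::move(lower_tier);  // the field renamed in this hunk
  return NewLRUCache(opts);
}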

include/rocksdb/nvm_cache.h → include/rocksdb/tiered_cache.h

@@ -21,9 +21,9 @@ namespace ROCKSDB_NAMESPACE {
 // ready, and call Wait() in order to block until it becomes ready.
 // The caller must call value() after it becomes ready to determine if the
 // handle successfullly read the item.
-class NvmCacheHandle {
+class TieredCacheHandle {
  public:
-  virtual ~NvmCacheHandle() {}
+  virtual ~TieredCacheHandle() {}
 
   // Returns whether the handle is ready or not
   virtual bool isReady() = 0;
@@ -38,19 +38,20 @@ class NvmCacheHandle {
   virtual size_t Size() = 0;
 };
 
-// NvmCache
+// TieredCache
 //
-// NVM cache interface for caching blocks on a persistent medium.
-class NvmCache {
+// Cache interface for caching blocks on a stackable tiers (which can include
+// non-volatile mediums)
+class TieredCache {
  public:
-  virtual ~NvmCache() {}
+  virtual ~TieredCache() {}
 
   virtual std::string Name() = 0;
 
   // Insert the given value into the NVM cache. The value is not written
   // directly. Rather, the SaveToCallback provided by helper_cb will be
   // used to extract the persistable data in value, which will be written
-  // to NVM. The implementation may or may not write it to NVM depending
+  // to this tier. The implementation may or may not write it to NVM depending
   // on the admission control policy, even if the return status is success.
   virtual Status Insert(const Slice& key, void* value,
                         Cache::CacheItemHelperCallback helper_cb) = 0;
@@ -59,7 +60,7 @@ class NvmCache {
   // will be used to create the object. The handle returned may not be
   // ready yet, unless wait=true, in which case Lookup() will block until
   // the handle is ready
-  virtual std::unique_ptr<NvmCacheHandle> Lookup(
+  virtual std::unique_ptr<TieredCacheHandle> Lookup(
       const Slice& key, const Cache::CreateCallback& create_cb, bool wait) = 0;
 
   // At the discretion of the implementation, erase the data associated
@@ -67,7 +68,7 @@ class NvmCache {
   virtual void Erase(const Slice& key) = 0;
 
   // Wait for a collection of handles to become ready
-  virtual void WaitAll(std::vector<NvmCacheHandle*> handles) = 0;
+  virtual void WaitAll(std::vector<TieredCacheHandle*> handles) = 0;
 
   virtual std::string GetPrintableOptions() const = 0;
 };
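
Aside: to make the contract concrete, here is a minimal no-op implementation sketch against the interface exactly as declared above. It admits nothing on Insert, which the comment explicitly permits ("may or may not write it"), and always misses on Lookup. NoopTieredCache is an illustration, not part of the commit:

#include <memory>
#include <string>
#include <vector>
#include "rocksdb/tiered_cache.h"

namespace ROCKSDB_NAMESPACE {

// A do-nothing lower tier: every Insert is dropped by "admission control"
// and every Lookup misses. Useful as a starting skeleton or a test stub.
class NoopTieredCache : public TieredCache {
 public:
  std::string Name() override { return "NoopTieredCache"; }

  Status Insert(const Slice& /*key*/, void* /*value*/,
                Cache::CacheItemHelperCallback /*helper_cb*/) override {
    return Status::OK();  // accepted, but intentionally not persisted
  }

  std::unique_ptr<TieredCacheHandle> Lookup(
      const Slice& /*key*/, const Cache::CreateCallback& /*create_cb*/,
      bool /*wait*/) override {
    return nullptr;  // always a miss; the caller falls back to the source
  }

  void Erase(const Slice& /*key*/) override {}

  void WaitAll(std::vector<TieredCacheHandle*> /*handles*/) override {}

  std::string GetPrintableOptions() const override { return ""; }
};

}  // namespace ROCKSDB_NAMESPACE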