// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#include "rocksdb/utilities/sim_cache.h"

#include <atomic>

#include "env/composite_env_wrapper.h"
#include "file/writable_file_writer.h"
#include "monitoring/statistics.h"
#include "port/port.h"
#include "rocksdb/env.h"
#include "util/mutexlock.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

namespace {
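
// CacheActivityLogger records block cache activity ("LOOKUP - <KEY>" and
// "ADD - <KEY> - <KEY-SIZE>" lines) to a user-supplied log file so the access
// stream can be analyzed or replayed offline. Logging stops automatically
// once max_logging_size is reached or a write error occurs.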
class CacheActivityLogger {
 public:
  CacheActivityLogger()
      : activity_logging_enabled_(false), max_logging_size_(0) {}

  ~CacheActivityLogger() {
    MutexLock l(&mutex_);

    StopLoggingInternal();
  }

  Status StartLogging(const std::string& activity_log_file, Env* env,
                      uint64_t max_logging_size = 0) {
    assert(activity_log_file != "");
    assert(env != nullptr);

    Status status;
    EnvOptions env_opts;
    std::unique_ptr<WritableFile> log_file;

    MutexLock l(&mutex_);

    // Stop existing logging if any
    StopLoggingInternal();

    // Open log file
    status = env->NewWritableFile(activity_log_file, &log_file, env_opts);
    if (!status.ok()) {
      return status;
    }
    file_writer_.reset(new WritableFileWriter(
        NewLegacyWritableFileWrapper(std::move(log_file)), activity_log_file,
        env_opts));

    max_logging_size_ = max_logging_size;
    activity_logging_enabled_.store(true);

    return status;
  }

  void StopLogging() {
    MutexLock l(&mutex_);

    StopLoggingInternal();
  }

  void ReportLookup(const Slice& key) {
    if (activity_logging_enabled_.load() == false) {
      return;
    }

    std::string log_line = "LOOKUP - " + key.ToString(true) + "\n";

    // line format: "LOOKUP - <KEY>"
    MutexLock l(&mutex_);
    Status s = file_writer_->Append(log_line);
    if (!s.ok() && bg_status_.ok()) {
      bg_status_ = s;
    }
    if (MaxLoggingSizeReached() || !bg_status_.ok()) {
      // Stop logging if we have reached the max file size or
      // encountered an error
      StopLoggingInternal();
    }
  }

  void ReportAdd(const Slice& key, size_t size) {
    if (activity_logging_enabled_.load() == false) {
      return;
    }

    std::string log_line = "ADD - ";
    log_line += key.ToString(true);
    log_line += " - ";
    AppendNumberTo(&log_line, size);
    log_line += "\n";

    // line format: "ADD - <KEY> - <KEY-SIZE>"
    MutexLock l(&mutex_);
    Status s = file_writer_->Append(log_line);
    if (!s.ok() && bg_status_.ok()) {
      bg_status_ = s;
    }

    if (MaxLoggingSizeReached() || !bg_status_.ok()) {
      // Stop logging if we have reached the max file size or
      // encountered an error
      StopLoggingInternal();
    }
  }
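
  // Returns the first error encountered while writing or closing the
  // activity log file (OK if logging has not failed).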
  Status& bg_status() {
    MutexLock l(&mutex_);
    return bg_status_;
  }

 private:
  bool MaxLoggingSizeReached() {
    mutex_.AssertHeld();

    return (max_logging_size_ > 0 &&
            file_writer_->GetFileSize() >= max_logging_size_);
  }

  void StopLoggingInternal() {
    mutex_.AssertHeld();

    if (!activity_logging_enabled_) {
      return;
    }

    activity_logging_enabled_.store(false);
    Status s = file_writer_->Close();
    if (!s.ok() && bg_status_.ok()) {
      bg_status_ = s;
    }
  }

  // Mutex to sync writes to file_writer, and all following
  // class data members
  port::Mutex mutex_;
  // Indicates if logging is currently enabled
  // atomic to allow reads without mutex
  std::atomic<bool> activity_logging_enabled_;
  // When reached, we will stop logging and close the file
  // Value of 0 means unlimited
  uint64_t max_logging_size_;
  std::unique_ptr<WritableFileWriter> file_writer_;
  Status bg_status_;
};
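
// SimCacheImpl layers a "shadow" key-only cache on top of the real block
// cache: every insert and lookup is mirrored (keys only, no values) into the
// key-only cache, so its hit/miss counters estimate how the workload would
// behave at the simulated cache capacity.
//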
// SimCacheImpl definition
class SimCacheImpl : public SimCache {
 public:
  // cache: the real cache to be instrumented
  // sim_cache: the key-only cache used to simulate a different cache capacity
  SimCacheImpl(std::shared_ptr<Cache> sim_cache, std::shared_ptr<Cache> cache)
      : cache_(cache),
        key_only_cache_(sim_cache),
        miss_times_(0),
        hit_times_(0),
        stats_(nullptr) {}

  ~SimCacheImpl() override {}
  void SetCapacity(size_t capacity) override { cache_->SetCapacity(capacity); }

  void SetStrictCapacityLimit(bool strict_capacity_limit) override {
    cache_->SetStrictCapacityLimit(strict_capacity_limit);
  }

  Status Insert(const Slice& key, void* value, size_t charge,
                void (*deleter)(const Slice& key, void* value), Handle** handle,
                Priority priority) override {
    // The handle and value passed in are for the real cache, so we pass
    // nullptr to key_only_cache_ for both instead. Also, the deleter function
    // pointer will be called by the user to perform some external operation
    // which should be applied only once. Thus key_only_cache_ accepts an empty
    // deleter (a capture-less lambda, which can be assigned to a function
    // pointer).
    Handle* h = key_only_cache_->Lookup(key);
    if (h == nullptr) {
      key_only_cache_->Insert(key, nullptr, charge,
                              [](const Slice& /*k*/, void* /*v*/) {}, nullptr,
                              priority);
    } else {
      key_only_cache_->Release(h);
    }

    cache_activity_logger_.ReportAdd(key, charge);
    if (!cache_) {
      return Status::OK();
    }
    return cache_->Insert(key, value, charge, deleter, handle, priority);
  }
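
  // Mirror the lookup in the key-only cache to record a simulated hit or
  // miss, then forward to the real cache (which may be nullptr when only the
  // simulation results are of interest).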
  Handle* Lookup(const Slice& key, Statistics* stats) override {
    Handle* h = key_only_cache_->Lookup(key);
    if (h != nullptr) {
      key_only_cache_->Release(h);
      inc_hit_counter();
      RecordTick(stats, SIM_BLOCK_CACHE_HIT);
    } else {
      inc_miss_counter();
      RecordTick(stats, SIM_BLOCK_CACHE_MISS);
    }

    cache_activity_logger_.ReportLookup(key);
    if (!cache_) {
      return nullptr;
    }
    return cache_->Lookup(key, stats);
  }

  bool Ref(Handle* handle) override { return cache_->Ref(handle); }

  bool Release(Handle* handle, bool force_erase = false) override {
    return cache_->Release(handle, force_erase);
  }

  void Erase(const Slice& key) override {
    cache_->Erase(key);
    key_only_cache_->Erase(key);
  }

  void* Value(Handle* handle) override { return cache_->Value(handle); }

  uint64_t NewId() override { return cache_->NewId(); }

  size_t GetCapacity() const override { return cache_->GetCapacity(); }

  bool HasStrictCapacityLimit() const override {
    return cache_->HasStrictCapacityLimit();
  }

  size_t GetUsage() const override { return cache_->GetUsage(); }

  size_t GetUsage(Handle* handle) const override {
    return cache_->GetUsage(handle);
  }

  size_t GetCharge(Handle* handle) const override {
    return cache_->GetCharge(handle);
  }

  size_t GetPinnedUsage() const override { return cache_->GetPinnedUsage(); }

  void DisownData() override {
    cache_->DisownData();
    key_only_cache_->DisownData();
  }

  void ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                              bool thread_safe) override {
    // only apply to cache_ since key_only_cache_ doesn't hold values
    cache_->ApplyToAllCacheEntries(callback, thread_safe);
  }

  void EraseUnRefEntries() override {
    cache_->EraseUnRefEntries();
    key_only_cache_->EraseUnRefEntries();
  }

  size_t GetSimCapacity() const override {
    return key_only_cache_->GetCapacity();
  }
  size_t GetSimUsage() const override { return key_only_cache_->GetUsage(); }
  void SetSimCapacity(size_t capacity) override {
    key_only_cache_->SetCapacity(capacity);
  }

  uint64_t get_miss_counter() const override {
    return miss_times_.load(std::memory_order_relaxed);
  }

  uint64_t get_hit_counter() const override {
    return hit_times_.load(std::memory_order_relaxed);
  }

  void reset_counter() override {
    miss_times_.store(0, std::memory_order_relaxed);
    hit_times_.store(0, std::memory_order_relaxed);
    SetTickerCount(stats_, SIM_BLOCK_CACHE_HIT, 0);
    SetTickerCount(stats_, SIM_BLOCK_CACHE_MISS, 0);
  }

  std::string ToString() const override {
    std::string res;
    res.append("SimCache MISSes: " + std::to_string(get_miss_counter()) + "\n");
    res.append("SimCache HITs: " + std::to_string(get_hit_counter()) + "\n");
    char buff[350];
    auto lookups = get_miss_counter() + get_hit_counter();
    snprintf(buff, sizeof(buff), "SimCache HITRATE: %.2f%%\n",
             (lookups == 0 ? 0 : get_hit_counter() * 100.0f / lookups));
    res.append(buff);
    return res;
  }

  std::string GetPrintableOptions() const override {
    std::string ret;
    ret.reserve(20000);
    ret.append(" cache_options:\n");
    ret.append(cache_->GetPrintableOptions());
    ret.append(" sim_cache_options:\n");
    ret.append(key_only_cache_->GetPrintableOptions());
    return ret;
  }

  Status StartActivityLogging(const std::string& activity_log_file, Env* env,
                              uint64_t max_logging_size = 0) override {
    return cache_activity_logger_.StartLogging(activity_log_file, env,
                                               max_logging_size);
  }

  void StopActivityLogging() override { cache_activity_logger_.StopLogging(); }

  Status GetActivityLoggingStatus() override {
    return cache_activity_logger_.bg_status();
  }

 private:
  std::shared_ptr<Cache> cache_;
  std::shared_ptr<Cache> key_only_cache_;
  std::atomic<uint64_t> miss_times_;
  std::atomic<uint64_t> hit_times_;
  Statistics* stats_;
  CacheActivityLogger cache_activity_logger_;

  void inc_miss_counter() {
    miss_times_.fetch_add(1, std::memory_order_relaxed);
  }
  void inc_hit_counter() { hit_times_.fetch_add(1, std::memory_order_relaxed); }
};

}  // end anonymous namespace

// For instrumentation purposes, use NewSimCache instead of the cache itself.
std::shared_ptr<SimCache> NewSimCache(std::shared_ptr<Cache> cache,
                                      size_t sim_capacity, int num_shard_bits) {
  LRUCacheOptions co;
  co.capacity = sim_capacity;
  co.num_shard_bits = num_shard_bits;
  co.metadata_charge_policy = kDontChargeCacheMetadata;
  return NewSimCache(NewLRUCache(co), cache, num_shard_bits);
}

std::shared_ptr<SimCache> NewSimCache(std::shared_ptr<Cache> sim_cache,
                                      std::shared_ptr<Cache> cache,
                                      int num_shard_bits) {
  if (num_shard_bits >= 20) {
    return nullptr;  // the cache cannot be sharded into too many fine pieces
  }
  return std::make_shared<SimCacheImpl>(sim_cache, cache);
}
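
// Illustrative usage sketch (assumption: a block-based table with an LRU
// block cache; everything below other than NewSimCache and the SimCache
// counters is the standard RocksDB options API):
//
//   std::shared_ptr<Cache> real_cache = NewLRUCache(size_t{1} << 30);
//   std::shared_ptr<SimCache> sim_cache =
//       NewSimCache(real_cache, /*sim_capacity=*/size_t{4} << 30,
//                   /*num_shard_bits=*/6);
//   BlockBasedTableOptions table_options;
//   table_options.block_cache = sim_cache;
//   Options options;
//   options.table_factory.reset(NewBlockBasedTableFactory(table_options));
//   // ... run a workload, then inspect the simulated hit rate via
//   // sim_cache->get_hit_counter(), sim_cache->get_miss_counter(), or
//   // sim_cache->ToString().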

}  // namespace ROCKSDB_NAMESPACE