2019-06-06 20:21:11 +02:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
|
|
|
|
|
|
|
#include "trace_replay/block_cache_tracer.h"
|
|
|
|
|
|
|
|
#include "db/db_impl/db_impl.h"
|
|
|
|
#include "rocksdb/slice.h"
|
|
|
|
#include "util/coding.h"
|
|
|
|
#include "util/hash.h"
|
|
|
|
#include "util/string_util.h"
|
|
|
|
|
|
|
|
namespace rocksdb {
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
// Width in bytes of the single-byte payload fields (caller and the Boolean
// flags), used when consuming them with Slice::remove_prefix.
const unsigned int kCharSize = 1;
|
|
|
|
|
Block cache tracing: Fix minor bugs with downsampling and some benchmark results. (#5473)
Summary:
As the code changes for block cache tracing are almost complete, I did a benchmark to compare the performance when block cache tracing is enabled/disabled.
With 1% downsampling ratio, the performance overhead of block cache tracing is negligible. When we trace all block accesses, the throughput drops by 6 folds with 16 threads issuing random reads and all reads are served in block cache.
Setup:
RocksDB: version 6.2
Date: Mon Jun 17 17:11:13 2019
CPU: 24 * Intel Core Processor (Skylake)
CPUCache: 16384 KB
Keys: 20 bytes each
Values: 100 bytes each (100 bytes after compression)
Entries: 10000000
Prefix: 20 bytes
Keys per prefix: 0
RawSize: 1144.4 MB (estimated)
FileSize: 1144.4 MB (estimated)
Write rate: 0 bytes/second
Read rate: 0 ops/second
Compression: NoCompression
Compression sampling rate: 0
Memtablerep: skip_list
Perf Level: 1
I ran the readrandom workload for 1 minute. Detailed throughput results: (ops/second)
Sample rate 0: no block cache tracing.
Sample rate 1: trace all block accesses.
Sample rate 100: trace accesses 1% blocks.
1 thread | | | -- | -- | -- | --
Sample rate | 0 | 1 | 100
1 MB block cache size | 13,094 | 13,166 | 13,341
10 GB block cache size | 202,243 | 188,677 | 229,182
16 threads | | | -- | -- | -- | --
Sample rate | 0 | 1 | 100
1 MB block cache size | 208,761 | 178,700 | 201,872
10 GB block cache size | 2,645,996 | 426,295 | 2,587,605
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5473
Differential Revision: D15869479
Pulled By: HaoyuHuang
fbshipit-source-id: 7ae802abe84811281a6af8649f489887cd7c4618
2019-06-18 02:56:09 +02:00
|
|
|
bool ShouldTrace(const Slice& block_key, const TraceOptions& trace_options) {
|
2019-06-14 00:39:52 +02:00
|
|
|
if (trace_options.sampling_frequency == 0 ||
|
|
|
|
trace_options.sampling_frequency == 1) {
|
2019-06-06 20:21:11 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
// We use spatial downsampling so that we have a complete access history for a
|
|
|
|
// block.
|
Block cache tracing: Fix minor bugs with downsampling and some benchmark results. (#5473)
Summary:
As the code changes for block cache tracing are almost complete, I did a benchmark to compare the performance when block cache tracing is enabled/disabled.
With 1% downsampling ratio, the performance overhead of block cache tracing is negligible. When we trace all block accesses, the throughput drops by 6 folds with 16 threads issuing random reads and all reads are served in block cache.
Setup:
RocksDB: version 6.2
Date: Mon Jun 17 17:11:13 2019
CPU: 24 * Intel Core Processor (Skylake)
CPUCache: 16384 KB
Keys: 20 bytes each
Values: 100 bytes each (100 bytes after compression)
Entries: 10000000
Prefix: 20 bytes
Keys per prefix: 0
RawSize: 1144.4 MB (estimated)
FileSize: 1144.4 MB (estimated)
Write rate: 0 bytes/second
Read rate: 0 ops/second
Compression: NoCompression
Compression sampling rate: 0
Memtablerep: skip_list
Perf Level: 1
I ran the readrandom workload for 1 minute. Detailed throughput results: (ops/second)
Sample rate 0: no block cache tracing.
Sample rate 1: trace all block accesses.
Sample rate 100: trace accesses 1% blocks.
1 thread | | | -- | -- | -- | --
Sample rate | 0 | 1 | 100
1 MB block cache size | 13,094 | 13,166 | 13,341
10 GB block cache size | 202,243 | 188,677 | 229,182
16 threads | | | -- | -- | -- | --
Sample rate | 0 | 1 | 100
1 MB block cache size | 208,761 | 178,700 | 201,872
10 GB block cache size | 2,645,996 | 426,295 | 2,587,605
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5473
Differential Revision: D15869479
Pulled By: HaoyuHuang
fbshipit-source-id: 7ae802abe84811281a6af8649f489887cd7c4618
2019-06-18 02:56:09 +02:00
|
|
|
const uint64_t hash = GetSliceNPHash64(block_key);
|
2019-06-14 00:39:52 +02:00
|
|
|
return hash % trace_options.sampling_frequency == 0;
|
2019-06-06 20:21:11 +02:00
|
|
|
}
|
2019-06-15 02:37:24 +02:00
|
|
|
} // namespace
|
|
|
|
|
|
|
|
// Placeholder column family name recorded when the real name is not
// available at the time of the block access.
const std::string BlockCacheTraceHelper::kUnknownColumnFamilyName =
    "UnknownColumnFamily";
|
|
|
|
|
|
|
|
// The referenced (user) key is only meaningful for data-block accesses made
// on behalf of a user point lookup, i.e. Get or MultiGet.
bool BlockCacheTraceHelper::ShouldTraceReferencedKey(
    TraceType block_type, BlockCacheLookupCaller caller) {
  if (block_type != TraceType::kBlockTraceDataBlock) {
    return false;
  }
  return caller == BlockCacheLookupCaller::kUserGet ||
         caller == BlockCacheLookupCaller::kUserMGet;
}
|
2019-06-06 20:21:11 +02:00
|
|
|
|
2019-06-14 00:39:52 +02:00
|
|
|
// Constructs a writer that serializes block accesses into |trace_writer|,
// taking ownership of it. |env| supplies the clock for the trace header;
// |trace_options| is copied and controls the maximum trace file size.
BlockCacheTraceWriter::BlockCacheTraceWriter(
    Env* env, const TraceOptions& trace_options,
    std::unique_ptr<TraceWriter>&& trace_writer)
    : env_(env),
      trace_options_(trace_options),
      trace_writer_(std::move(trace_writer)) {}
|
|
|
|
|
2019-06-06 20:21:11 +02:00
|
|
|
// Serializes |record| (plus the externally supplied key/name slices) into a
// Trace and appends it to the trace file.
// NOTE: the field encoding order here is the wire format and must stay in
// sync with BlockCacheTraceReader::ReadAccess, which decodes in this order.
Status BlockCacheTraceWriter::WriteBlockAccess(
    const BlockCacheTraceRecord& record, const Slice& block_key,
    const Slice& cf_name, const Slice& referenced_key) {
  uint64_t trace_file_size = trace_writer_->GetFileSize();
  // Once the trace file exceeds the configured cap, drop further records
  // silently -- callers are not treated as having failed.
  if (trace_file_size > trace_options_.max_trace_file_size) {
    return Status::OK();
  }
  Trace trace;
  trace.ts = record.access_timestamp;
  trace.type = record.block_type;
  PutLengthPrefixedSlice(&trace.payload, block_key);
  PutFixed64(&trace.payload, record.block_size);
  PutFixed64(&trace.payload, record.cf_id);
  PutLengthPrefixedSlice(&trace.payload, cf_name);
  PutFixed32(&trace.payload, record.level);
  PutFixed64(&trace.payload, record.sst_fd_number);
  // These three fields are written as single raw bytes.
  trace.payload.push_back(record.caller);
  trace.payload.push_back(record.is_cache_hit);
  trace.payload.push_back(record.no_insert);
  // The referenced key and its stats are only recorded for data-block
  // accesses issued by user Get/MultiGet (see ShouldTraceReferencedKey);
  // the reader applies the same predicate when decoding.
  if (BlockCacheTraceHelper::ShouldTraceReferencedKey(record.block_type,
                                                      record.caller)) {
    PutLengthPrefixedSlice(&trace.payload, referenced_key);
    PutFixed64(&trace.payload, record.referenced_data_size);
    PutFixed64(&trace.payload, record.num_keys_in_block);
    trace.payload.push_back(record.referenced_key_exist_in_block);
  }
  std::string encoded_trace;
  TracerHelper::EncodeTrace(trace, &encoded_trace);
  return trace_writer_->Write(encoded_trace);
}
|
|
|
|
|
|
|
|
// Writes the trace file header: a kTraceBegin record carrying the magic
// string and the trace format's major/minor version numbers. Must be the
// first record in the file; ReadHeader validates exactly these fields.
Status BlockCacheTraceWriter::WriteHeader() {
  Trace trace;
  trace.ts = env_->NowMicros();
  trace.type = TraceType::kTraceBegin;
  PutLengthPrefixedSlice(&trace.payload, kTraceMagic);
  PutFixed32(&trace.payload, kMajorVersion);
  PutFixed32(&trace.payload, kMinorVersion);
  std::string encoded_trace;
  TracerHelper::EncodeTrace(trace, &encoded_trace);
  return trace_writer_->Write(encoded_trace);
}
|
|
|
|
|
|
|
|
// Takes ownership of |reader|, the source of encoded trace records.
BlockCacheTraceReader::BlockCacheTraceReader(
    std::unique_ptr<TraceReader>&& reader)
    : trace_reader_(std::move(reader)) {}
|
|
|
|
|
|
|
|
// Reads and validates the trace file header, populating |header| with the
// trace start timestamp and the RocksDB version that produced the trace.
// Returns the underlying read/decode error if one occurs, and
// Status::Corruption if any header field is missing, the magic number does
// not match, or trailing bytes remain after all expected fields.
Status BlockCacheTraceReader::ReadHeader(BlockCacheTraceHeader* header) {
  assert(header != nullptr);
  std::string encoded_trace;
  Status s = trace_reader_->Read(&encoded_trace);
  if (!s.ok()) {
    return s;
  }
  Trace trace;
  s = TracerHelper::DecodeTrace(encoded_trace, &trace);
  if (!s.ok()) {
    return s;
  }
  header->start_time = trace.ts;
  Slice enc_slice = Slice(trace.payload);
  // Fixed "magnic_number" -> "magic_number" (local-name typo only; no
  // behavior change).
  Slice magic_number;
  if (!GetLengthPrefixedSlice(&enc_slice, &magic_number)) {
    return Status::Corruption(
        "Corrupted header in the trace file: Failed to read the magic number.");
  }
  if (magic_number.ToString() != kTraceMagic) {
    return Status::Corruption(
        "Corrupted header in the trace file: Magic number does not match.");
  }
  if (!GetFixed32(&enc_slice, &header->rocksdb_major_version)) {
    return Status::Corruption(
        "Corrupted header in the trace file: Failed to read rocksdb major "
        "version number.");
  }
  if (!GetFixed32(&enc_slice, &header->rocksdb_minor_version)) {
    return Status::Corruption(
        "Corrupted header in the trace file: Failed to read rocksdb minor "
        "version number.");
  }
  // We should have retrieved all information in the header.
  if (!enc_slice.empty()) {
    return Status::Corruption(
        "Corrupted header in the trace file: The length of header is too "
        "long.");
  }
  return Status::OK();
}
|
|
|
|
|
|
|
|
// Decodes the next access record from the trace file into |record|.
// The decode order must mirror BlockCacheTraceWriter::WriteBlockAccess.
// Returns the underlying read/decode error if one occurs, and
// Status::Incomplete when the payload ends before all expected fields are
// read.
Status BlockCacheTraceReader::ReadAccess(BlockCacheTraceRecord* record) {
  assert(record);
  std::string encoded_trace;
  Status s = trace_reader_->Read(&encoded_trace);
  if (!s.ok()) {
    return s;
  }
  Trace trace;
  s = TracerHelper::DecodeTrace(encoded_trace, &trace);
  if (!s.ok()) {
    return s;
  }
  record->access_timestamp = trace.ts;
  record->block_type = trace.type;
  Slice enc_slice = Slice(trace.payload);
  Slice block_key;
  if (!GetLengthPrefixedSlice(&enc_slice, &block_key)) {
    return Status::Incomplete(
        "Incomplete access record: Failed to read block key.");
  }
  record->block_key = block_key.ToString();
  if (!GetFixed64(&enc_slice, &record->block_size)) {
    return Status::Incomplete(
        "Incomplete access record: Failed to read block size.");
  }
  if (!GetFixed64(&enc_slice, &record->cf_id)) {
    return Status::Incomplete(
        "Incomplete access record: Failed to read column family ID.");
  }
  Slice cf_name;
  if (!GetLengthPrefixedSlice(&enc_slice, &cf_name)) {
    return Status::Incomplete(
        "Incomplete access record: Failed to read column family name.");
  }
  record->cf_name = cf_name.ToString();
  if (!GetFixed32(&enc_slice, &record->level)) {
    return Status::Incomplete(
        "Incomplete access record: Failed to read level.");
  }
  if (!GetFixed64(&enc_slice, &record->sst_fd_number)) {
    return Status::Incomplete(
        "Incomplete access record: Failed to read SST file number.");
  }
  // The next three fields were written as single raw bytes: check for
  // emptiness before each read and consume the byte with remove_prefix.
  if (enc_slice.empty()) {
    return Status::Incomplete(
        "Incomplete access record: Failed to read caller.");
  }
  record->caller = static_cast<BlockCacheLookupCaller>(enc_slice[0]);
  enc_slice.remove_prefix(kCharSize);
  if (enc_slice.empty()) {
    return Status::Incomplete(
        "Incomplete access record: Failed to read is_cache_hit.");
  }
  record->is_cache_hit = static_cast<Boolean>(enc_slice[0]);
  enc_slice.remove_prefix(kCharSize);
  if (enc_slice.empty()) {
    return Status::Incomplete(
        "Incomplete access record: Failed to read no_insert.");
  }
  record->no_insert = static_cast<Boolean>(enc_slice[0]);
  enc_slice.remove_prefix(kCharSize);

  // Referenced-key fields are only present for data-block accesses from
  // user Get/MultiGet; the writer applies the same predicate when encoding.
  if (BlockCacheTraceHelper::ShouldTraceReferencedKey(record->block_type,
                                                      record->caller)) {
    Slice referenced_key;
    if (!GetLengthPrefixedSlice(&enc_slice, &referenced_key)) {
      return Status::Incomplete(
          "Incomplete access record: Failed to read the referenced key.");
    }
    record->referenced_key = referenced_key.ToString();
    if (!GetFixed64(&enc_slice, &record->referenced_data_size)) {
      return Status::Incomplete(
          "Incomplete access record: Failed to read the referenced data size.");
    }
    if (!GetFixed64(&enc_slice, &record->num_keys_in_block)) {
      return Status::Incomplete(
          "Incomplete access record: Failed to read the number of keys in the "
          "block.");
    }
    if (enc_slice.empty()) {
      return Status::Incomplete(
          "Incomplete access record: Failed to read "
          "referenced_key_exist_in_block.");
    }
    record->referenced_key_exist_in_block = static_cast<Boolean>(enc_slice[0]);
  }
  return Status::OK();
}
|
|
|
|
|
2019-06-14 00:39:52 +02:00
|
|
|
// Tracing is disabled until StartTrace() installs a writer.
BlockCacheTracer::BlockCacheTracer() { writer_.store(nullptr); }
|
|
|
|
|
|
|
|
// Stops any in-progress trace and releases the writer.
BlockCacheTracer::~BlockCacheTracer() { EndTrace(); }
|
|
|
|
|
|
|
|
// Begins tracing block cache accesses into |trace_writer| and writes the
// trace header. If a trace is already in progress, returns OK without
// replacing the existing writer -- NOTE(review): the caller's options are
// silently ignored in that case; callers cannot distinguish "started" from
// "already running".
Status BlockCacheTracer::StartTrace(
    Env* env, const TraceOptions& trace_options,
    std::unique_ptr<TraceWriter>&& trace_writer) {
  InstrumentedMutexLock lock_guard(&trace_writer_mutex_);
  if (writer_.load()) {
    return Status::OK();
  }
  trace_options_ = trace_options;
  writer_.store(
      new BlockCacheTraceWriter(env, trace_options, std::move(trace_writer)));
  return writer_.load()->WriteHeader();
}
|
|
|
|
|
|
|
|
void BlockCacheTracer::EndTrace() {
|
|
|
|
InstrumentedMutexLock lock_guard(&trace_writer_mutex_);
|
|
|
|
if (!writer_.load()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
delete writer_.load();
|
|
|
|
writer_.store(nullptr);
|
|
|
|
}
|
|
|
|
|
2019-06-15 02:37:24 +02:00
|
|
|
// Records one block cache access. Kept cheap when tracing is disabled or the
// access is sampled out: the first writer_ check and the sampling decision
// happen without taking the mutex.
Status BlockCacheTracer::WriteBlockAccess(const BlockCacheTraceRecord& record,
                                          const Slice& block_key,
                                          const Slice& cf_name,
                                          const Slice& referenced_key) {
  if (!writer_.load() || !ShouldTrace(block_key, trace_options_)) {
    return Status::OK();
  }
  InstrumentedMutexLock lock_guard(&trace_writer_mutex_);
  // Re-check under the lock: EndTrace() may have destroyed the writer after
  // the unlocked check above.
  if (!writer_.load()) {
    return Status::OK();
  }
  return writer_.load()->WriteBlockAccess(record, block_key, cf_name,
                                          referenced_key);
}
|
|
|
|
|
2019-06-06 20:21:11 +02:00
|
|
|
} // namespace rocksdb
|