Block access tracing: Trace referenced key for Get on non-data blocks. (#5548)

Summary:
This PR traces the referenced key of a Get request for all block types, not just data blocks. This is useful when evaluating hybrid row-block caches.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5548

Test Plan: make clean && USE_CLANG=1 make check -j32

Differential Revision: D16157979

Pulled By: HaoyuHuang

fbshipit-source-id: f6327411c9deb74e35e22a35f66cdbae09ab9d87
Author: haoyuhuang
Committed: 2019-07-17 13:02:00 -07:00 by Facebook Github Bot
Parent: 22ce462450
Commit: 8a008d4170
9 changed files with 386 additions and 77 deletions
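For orientation before the per-file diffs: once the referenced key and get_from_user_specified_snapshot are recorded for index and filter blocks as well, a trace consumer can derive a row key for every block access of a Get, which is what a hybrid row-block cache simulation needs. The sketch below is hypothetical consumer code, not part of this PR; RocksDB-internal include paths are approximate, and only types and helpers that appear in the diffs below are used.

// Hypothetical trace consumer (not RocksDB source); include paths approximate.
#include <iostream>
#include <memory>
#include <utility>

#include "db/dbformat.h"                      // ExtractUserKey, GetInternalKeySeqno
#include "trace_replay/block_cache_tracer.h"  // BlockCacheTraceReader and friends

using namespace rocksdb;

// Walks a block cache trace and prints one row key per traced Get access.
// Because the referenced key is now recorded for index and filter blocks too,
// every access of a Get maps to a row key, not only the data-block accesses.
Status PrintRowKeys(std::unique_ptr<TraceReader> trace_reader) {
  BlockCacheTraceReader reader(std::move(trace_reader));
  BlockCacheTraceHeader header;
  Status s = reader.ReadHeader(&header);
  if (!s.ok()) {
    return s;
  }
  while (true) {
    BlockCacheTraceRecord access;
    if (!reader.ReadAccess(&access).ok()) {
      break;  // end of trace
    }
    if (access.caller != TableReaderCaller::kUserGet ||
        access.get_id == BlockCacheTraceHelper::kReservedGetId) {
      continue;  // mirror the simulator: only Get is handled for now
    }
    // Same row key scheme as HybridRowBlockCacheSimulator::ComputeRowKey():
    // <sst_fd_number>_<user_key>_<sequence>, where sequence is 0 unless the
    // Get ran under a user-specified snapshot.
    const uint64_t seq =
        access.get_from_user_specified_snapshot == Boolean::kFalse
            ? 0
            : 1 + GetInternalKeySeqno(access.referenced_key);
    std::cout << "get " << access.get_id << " -> " << access.sst_fd_number
              << "_" << ExtractUserKey(access.referenced_key).ToString() << "_"
              << seq << "\n";
  }
  return Status::OK();
}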

File 1 of 9

@@ -1983,10 +1983,12 @@ CachableEntry<UncompressionDict> BlockBasedTable::GetUncompressionDict(
/*block_size=*/usage, rep_->cf_id_for_tracing(),
/*cf_name=*/"", rep_->level_for_tracing(),
rep_->sst_number_for_tracing(), lookup_context->caller, is_cache_hit,
/*no_insert=*/no_io, lookup_context->get_id);
/*no_insert=*/no_io, lookup_context->get_id,
lookup_context->get_from_user_specified_snapshot,
/*referenced_key=*/"");
block_cache_tracer_->WriteBlockAccess(access_record, cache_key,
rep_->cf_name_for_tracing(),
/*referenced_key=*/nullptr);
lookup_context->referenced_key);
}
return {dict, cache_handle ? rep_->table_options.block_cache.get() : nullptr,
cache_handle, false /* own_value */};
@@ -2237,7 +2239,6 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
Slice key /* key to the block cache */;
Slice ckey /* key to the compressed block cache */;
bool is_cache_hit = false;
bool no_insert = true;
if (block_cache != nullptr || block_cache_compressed != nullptr) {
// create key for block cache
if (block_cache != nullptr) {
@@ -2265,7 +2266,6 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
// Can't find the block from the cache. If I/O is allowed, read from the
// file.
if (block_entry->GetValue() == nullptr && !no_io && ro.fill_cache) {
no_insert = false;
Statistics* statistics = rep_->ioptions.statistics;
const bool maybe_compressed =
block_type != BlockType::kFilter && rep_->blocks_maybe_compressed;
@@ -2332,11 +2332,11 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
assert(false);
break;
}
if (BlockCacheTraceHelper::ShouldTraceReferencedKey(
bool no_insert = no_io || !ro.fill_cache;
if (BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(
trace_block_type, lookup_context->caller)) {
// Defer logging the access to Get() and MultiGet() to trace additional
// information, e.g., the referenced key,
// referenced_key_exist_in_block.
// information, e.g., referenced_key_exist_in_block.
// Make a copy of the block key here since it will be logged later.
lookup_context->FillLookupContext(
@@ -2351,10 +2351,12 @@ Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
/*block_size=*/usage, rep_->cf_id_for_tracing(),
/*cf_name=*/"", rep_->level_for_tracing(),
rep_->sst_number_for_tracing(), lookup_context->caller, is_cache_hit,
no_insert, lookup_context->get_id);
no_insert, lookup_context->get_id,
lookup_context->get_from_user_specified_snapshot,
/*referenced_key=*/"");
block_cache_tracer_->WriteBlockAccess(access_record, key,
rep_->cf_name_for_tracing(),
/*referenced_key=*/nullptr);
lookup_context->referenced_key);
}
}
@@ -3288,12 +3290,18 @@ Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
// First check the full filter
// If full filter not useful, Then go into each block
uint64_t tracing_get_id = get_context->get_tracing_get_id();
BlockCacheLookupContext lookup_context{TableReaderCaller::kUserGet,
tracing_get_id};
BlockCacheLookupContext lookup_context{
TableReaderCaller::kUserGet, tracing_get_id,
/*get_from_user_specified_snapshot=*/read_options.snapshot != nullptr};
if (block_cache_tracer_ && block_cache_tracer_->is_tracing_enabled()) {
// Trace the key since it contains both user key and sequence number.
lookup_context.referenced_key = key.ToString();
lookup_context.get_from_user_specified_snapshot =
read_options.snapshot != nullptr;
}
const bool may_match =
FullFilterKeyMayMatch(read_options, filter, key, no_io, prefix_extractor,
get_context, &lookup_context);
if (!may_match) {
RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
@@ -3347,7 +3355,9 @@ Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
}
BlockCacheLookupContext lookup_data_block_context{
TableReaderCaller::kUserGet, tracing_get_id};
TableReaderCaller::kUserGet, tracing_get_id,
/*get_from_user_specified_snapshot=*/read_options.snapshot !=
nullptr};
bool does_referenced_key_exist = false;
DataBlockIter biter;
uint64_t referenced_data_size = 0;
@@ -3406,7 +3416,7 @@ Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
if (does_referenced_key_exist) {
referenced_key = biter.key();
} else {
referenced_key = ExtractUserKey(key);
referenced_key = key;
}
BlockCacheTraceRecord access_record(
rep_->ioptions.env->NowMicros(),
@@ -3417,6 +3427,7 @@ Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
lookup_data_block_context.is_cache_hit,
lookup_data_block_context.no_insert,
lookup_data_block_context.get_id,
lookup_data_block_context.get_from_user_specified_snapshot,
/*referenced_key=*/"", referenced_data_size,
lookup_data_block_context.num_keys_in_block,
does_referenced_key_exist);
@@ -3460,8 +3471,9 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
if (!sst_file_range.empty() && sst_file_range.begin()->get_context) {
tracing_mget_id = sst_file_range.begin()->get_context->get_tracing_get_id();
}
BlockCacheLookupContext lookup_context{TableReaderCaller::kUserMultiGet,
tracing_mget_id};
BlockCacheLookupContext lookup_context{
TableReaderCaller::kUserMultiGet, tracing_mget_id,
/*get_from_user_specified_snapshot=*/read_options.snapshot != nullptr};
FullFilterKeysMayMatch(read_options, filter, &sst_file_range, no_io,
prefix_extractor, &lookup_context);
@@ -3492,11 +3504,8 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
{
MultiGetRange data_block_range(sst_file_range, sst_file_range.begin(),
sst_file_range.end());
BlockCacheLookupContext lookup_compression_dict_context(
TableReaderCaller::kUserMultiGet);
auto uncompression_dict_storage = GetUncompressionDict(nullptr, no_io,
sst_file_range.begin()->get_context,
&lookup_compression_dict_context);
auto uncompression_dict_storage = GetUncompressionDict(
nullptr, no_io, sst_file_range.begin()->get_context, &lookup_context);
const UncompressionDict& uncompression_dict =
uncompression_dict_storage.GetValue() == nullptr
? UncompressionDict::GetEmptyDict()
@@ -3591,7 +3600,9 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
uint64_t referenced_data_size = 0;
bool does_referenced_key_exist = false;
BlockCacheLookupContext lookup_data_block_context(
TableReaderCaller::kUserMultiGet, tracing_mget_id);
TableReaderCaller::kUserMultiGet, tracing_mget_id,
/*get_from_user_specified_snapshot=*/read_options.snapshot !=
nullptr);
if (first_block) {
if (!block_handles[idx_in_batch].IsNull() ||
!results[idx_in_batch].IsEmpty()) {
@@ -3685,7 +3696,7 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
if (does_referenced_key_exist) {
referenced_key = biter->key();
} else {
referenced_key = ExtractUserKey(key);
referenced_key = key;
}
BlockCacheTraceRecord access_record(
rep_->ioptions.env->NowMicros(),
@@ -3696,6 +3707,7 @@ void BlockBasedTable::MultiGet(const ReadOptions& read_options,
lookup_data_block_context.is_cache_hit,
lookup_data_block_context.no_insert,
lookup_data_block_context.get_id,
lookup_data_block_context.get_from_user_specified_snapshot,
/*referenced_key=*/"", referenced_data_size,
lookup_data_block_context.num_keys_in_block,
does_referenced_key_exist);
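Taken together, the table-reader changes above follow one pattern, condensed in the hypothetical helper below (identifiers mirror the diff, but the helper itself is not RocksDB source, and includes are omitted): data-block accesses made by Get/MultiGet defer logging so the per-key details can be filled in later, while every other access is logged immediately, now with the referenced key that was stored in the lookup context.

// Condensed paraphrase of the tracing flow in BlockBasedTable (hypothetical).
// (RocksDB-internal includes omitted; see trace_replay/block_cache_tracer.h.)
void TraceAccess(BlockCacheTracer* tracer, const BlockCacheTraceRecord& rec,
                 const Slice& block_cache_key, const std::string& cf_name,
                 BlockCacheLookupContext* ctx) {
  if (BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(rec.block_type,
                                                        rec.caller)) {
    // Data block read on behalf of Get/MultiGet: logging is deferred (via
    // FillLookupContext) so Get()/MultiGet() can later report whether the key
    // was found in the block, the number of keys in the block, and the
    // referenced data size.
    return;
  }
  // Index, filter, or uncompression-dictionary block, or a non-Get caller:
  // log immediately. For Get/MultiGet callers, the referenced key kept in the
  // lookup context (plus get_id and get_from_user_specified_snapshot) is now
  // written for these non-data blocks as well.
  tracer->WriteBlockAccess(rec, block_cache_key, cf_name, ctx->referenced_key);
}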

File 2 of 9

@@ -63,6 +63,8 @@ extern const uint64_t kPlainTableMagicNumber;
namespace {
const std::string kDummyValue(10000, 'o');
// DummyPropertiesCollector used to test BlockBasedTableProperties
class DummyPropertiesCollector : public TablePropertiesCollector {
public:
@@ -312,7 +314,9 @@ class TableConstructor: public Constructor {
: Constructor(cmp),
largest_seqno_(largest_seqno),
convert_to_internal_key_(convert_to_internal_key),
level_(level) {}
level_(level) {
env_ = rocksdb::Env::Default();
}
~TableConstructor() override { Reset(); }
Status FinishImpl(const Options& options, const ImmutableCFOptions& ioptions,
@@ -371,7 +375,7 @@ class TableConstructor: public Constructor {
return ioptions.table_factory->NewTableReader(
TableReaderOptions(ioptions, moptions.prefix_extractor.get(), soptions,
internal_comparator, !kSkipFilters, !kImmortal,
level_, largest_seqno_, nullptr),
level_, largest_seqno_, &block_cache_tracer_),
std::move(file_reader_), TEST_GetSink()->contents().size(),
&table_reader_);
}
@@ -425,6 +429,8 @@ class TableConstructor: public Constructor {
return static_cast<test::StringSink*>(file_writer_->writable_file());
}
BlockCacheTracer block_cache_tracer_;
private:
void Reset() {
uniq_id_ = 0;
@@ -445,6 +451,7 @@ class TableConstructor: public Constructor {
static uint64_t cur_uniq_id_;
EnvOptions soptions;
Env* env_;
};
uint64_t TableConstructor::cur_uniq_id_ = 1;
@@ -1063,7 +1070,9 @@ class BlockBasedTableTest
: public TableTest,
virtual public ::testing::WithParamInterface<uint32_t> {
public:
BlockBasedTableTest() : format_(GetParam()) {}
BlockBasedTableTest() : format_(GetParam()) {
env_ = rocksdb::Env::Default();
}
BlockBasedTableOptions GetBlockBasedTableOptions() {
BlockBasedTableOptions options;
@@ -1071,11 +1080,91 @@ class BlockBasedTableTest
return options;
}
void SetupTracingTest(TableConstructor* c) {
test_path_ = test::PerThreadDBPath("block_based_table_tracing_test");
EXPECT_OK(env_->CreateDir(test_path_));
trace_file_path_ = test_path_ + "/block_cache_trace_file";
TraceOptions trace_opt;
std::unique_ptr<TraceWriter> trace_writer;
EXPECT_OK(NewFileTraceWriter(env_, EnvOptions(), trace_file_path_,
&trace_writer));
c->block_cache_tracer_.StartTrace(env_, trace_opt, std::move(trace_writer));
{
std::string user_key = "k01";
InternalKey internal_key(user_key, 0, kTypeValue);
std::string encoded_key = internal_key.Encode().ToString();
c->Add(encoded_key, kDummyValue);
}
{
std::string user_key = "k02";
InternalKey internal_key(user_key, 0, kTypeValue);
std::string encoded_key = internal_key.Encode().ToString();
c->Add(encoded_key, kDummyValue);
}
}
void VerifyBlockAccessTrace(
TableConstructor* c,
const std::vector<BlockCacheTraceRecord>& expected_records) {
c->block_cache_tracer_.EndTrace();
std::unique_ptr<TraceReader> trace_reader;
Status s =
NewFileTraceReader(env_, EnvOptions(), trace_file_path_, &trace_reader);
EXPECT_OK(s);
BlockCacheTraceReader reader(std::move(trace_reader));
BlockCacheTraceHeader header;
EXPECT_OK(reader.ReadHeader(&header));
uint32_t index = 0;
while (s.ok()) {
BlockCacheTraceRecord access;
s = reader.ReadAccess(&access);
if (!s.ok()) {
break;
}
ASSERT_LT(index, expected_records.size());
EXPECT_NE("", access.block_key);
EXPECT_EQ(access.block_type, expected_records[index].block_type);
EXPECT_GT(access.block_size, 0);
EXPECT_EQ(access.caller, expected_records[index].caller);
EXPECT_EQ(access.no_insert, expected_records[index].no_insert);
EXPECT_EQ(access.is_cache_hit, expected_records[index].is_cache_hit);
// Get
if (access.caller == TableReaderCaller::kUserGet) {
EXPECT_EQ(access.referenced_key,
expected_records[index].referenced_key);
EXPECT_EQ(access.get_id, expected_records[index].get_id);
EXPECT_EQ(access.get_from_user_specified_snapshot,
expected_records[index].get_from_user_specified_snapshot);
if (access.block_type == TraceType::kBlockTraceDataBlock) {
EXPECT_GT(access.referenced_data_size, 0);
EXPECT_GT(access.num_keys_in_block, 0);
EXPECT_EQ(access.referenced_key_exist_in_block,
expected_records[index].referenced_key_exist_in_block);
}
} else {
EXPECT_EQ(access.referenced_key, "");
EXPECT_EQ(access.get_id, 0);
EXPECT_TRUE(access.get_from_user_specified_snapshot == Boolean::kFalse);
EXPECT_EQ(access.referenced_data_size, 0);
EXPECT_EQ(access.num_keys_in_block, 0);
EXPECT_TRUE(access.referenced_key_exist_in_block == Boolean::kFalse);
}
index++;
}
EXPECT_EQ(index, expected_records.size());
EXPECT_OK(env_->DeleteFile(trace_file_path_));
EXPECT_OK(env_->DeleteDir(test_path_));
}
protected:
uint64_t IndexUncompressedHelper(bool indexCompress);
private:
uint32_t format_;
Env* env_;
std::string trace_file_path_;
std::string test_path_;
};
class PlainTableTest : public TableTest {};
class TablePropertyTest : public testing::Test {};
@@ -2211,6 +2300,187 @@ TEST_P(BlockBasedTableTest, NumBlockStat) {
c.ResetTableReader();
}
TEST_P(BlockBasedTableTest, TracingGetTest) {
TableConstructor c(BytewiseComparator());
Options options;
BlockBasedTableOptions table_options = GetBlockBasedTableOptions();
options.create_if_missing = true;
table_options.block_cache = NewLRUCache(1024 * 1024, 0);
table_options.cache_index_and_filter_blocks = true;
table_options.filter_policy.reset(NewBloomFilterPolicy(10, true));
options.table_factory.reset(new BlockBasedTableFactory(table_options));
SetupTracingTest(&c);
std::vector<std::string> keys;
stl_wrappers::KVMap kvmap;
ImmutableCFOptions ioptions(options);
MutableCFOptions moptions(options);
c.Finish(options, ioptions, moptions, table_options,
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
std::string user_key = "k01";
InternalKey internal_key(user_key, 0, kTypeValue);
std::string encoded_key = internal_key.Encode().ToString();
for (uint32_t i = 1; i <= 2; i++) {
PinnableSlice value;
GetContext get_context(options.comparator, nullptr, nullptr, nullptr,
GetContext::kNotFound, user_key, &value, nullptr,
nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
nullptr, /*get_id=*/i);
get_perf_context()->Reset();
ASSERT_OK(c.GetTableReader()->Get(ReadOptions(), encoded_key, &get_context,
moptions.prefix_extractor.get()));
ASSERT_EQ(get_context.State(), GetContext::kFound);
ASSERT_EQ(value.ToString(), kDummyValue);
}
// Verify traces.
std::vector<BlockCacheTraceRecord> expected_records;
// The first two records should be prefetching index and filter blocks.
BlockCacheTraceRecord record;
record.block_type = TraceType::kBlockTraceIndexBlock;
record.caller = TableReaderCaller::kPrefetch;
record.is_cache_hit = Boolean::kFalse;
record.no_insert = Boolean::kFalse;
expected_records.push_back(record);
record.block_type = TraceType::kBlockTraceFilterBlock;
expected_records.push_back(record);
// Then we should have three records for one index, one filter, and one data
// block access.
record.get_id = 1;
record.block_type = TraceType::kBlockTraceIndexBlock;
record.caller = TableReaderCaller::kUserGet;
record.get_from_user_specified_snapshot = Boolean::kFalse;
record.referenced_key = encoded_key;
record.referenced_key_exist_in_block = Boolean::kTrue;
record.is_cache_hit = Boolean::kTrue;
expected_records.push_back(record);
record.block_type = TraceType::kBlockTraceFilterBlock;
expected_records.push_back(record);
record.is_cache_hit = Boolean::kFalse;
record.block_type = TraceType::kBlockTraceDataBlock;
expected_records.push_back(record);
// The second get should all observe cache hits.
record.is_cache_hit = Boolean::kTrue;
record.get_id = 2;
record.block_type = TraceType::kBlockTraceIndexBlock;
record.caller = TableReaderCaller::kUserGet;
record.get_from_user_specified_snapshot = Boolean::kFalse;
record.referenced_key = encoded_key;
expected_records.push_back(record);
record.block_type = TraceType::kBlockTraceFilterBlock;
expected_records.push_back(record);
record.block_type = TraceType::kBlockTraceDataBlock;
expected_records.push_back(record);
VerifyBlockAccessTrace(&c, expected_records);
c.ResetTableReader();
}
TEST_P(BlockBasedTableTest, TracingApproximateOffsetOfTest) {
TableConstructor c(BytewiseComparator());
Options options;
BlockBasedTableOptions table_options = GetBlockBasedTableOptions();
options.create_if_missing = true;
table_options.block_cache = NewLRUCache(1024 * 1024, 0);
table_options.cache_index_and_filter_blocks = true;
table_options.filter_policy.reset(NewBloomFilterPolicy(10, true));
options.table_factory.reset(new BlockBasedTableFactory(table_options));
SetupTracingTest(&c);
std::vector<std::string> keys;
stl_wrappers::KVMap kvmap;
ImmutableCFOptions ioptions(options);
MutableCFOptions moptions(options);
c.Finish(options, ioptions, moptions, table_options,
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
for (uint32_t i = 1; i <= 2; i++) {
std::string user_key = "k01";
InternalKey internal_key(user_key, 0, kTypeValue);
std::string encoded_key = internal_key.Encode().ToString();
c.GetTableReader()->ApproximateOffsetOf(
encoded_key, TableReaderCaller::kUserApproximateSize);
}
// Verify traces.
std::vector<BlockCacheTraceRecord> expected_records;
// The first two records should be prefetching index and filter blocks.
BlockCacheTraceRecord record;
record.block_type = TraceType::kBlockTraceIndexBlock;
record.caller = TableReaderCaller::kPrefetch;
record.is_cache_hit = Boolean::kFalse;
record.no_insert = Boolean::kFalse;
expected_records.push_back(record);
record.block_type = TraceType::kBlockTraceFilterBlock;
expected_records.push_back(record);
// Then we should have two records for only index blocks.
record.block_type = TraceType::kBlockTraceIndexBlock;
record.caller = TableReaderCaller::kUserApproximateSize;
record.is_cache_hit = Boolean::kTrue;
expected_records.push_back(record);
expected_records.push_back(record);
VerifyBlockAccessTrace(&c, expected_records);
c.ResetTableReader();
}
TEST_P(BlockBasedTableTest, TracingIterator) {
TableConstructor c(BytewiseComparator());
Options options;
BlockBasedTableOptions table_options = GetBlockBasedTableOptions();
options.create_if_missing = true;
table_options.block_cache = NewLRUCache(1024 * 1024, 0);
table_options.cache_index_and_filter_blocks = true;
table_options.filter_policy.reset(NewBloomFilterPolicy(10, true));
options.table_factory.reset(new BlockBasedTableFactory(table_options));
SetupTracingTest(&c);
std::vector<std::string> keys;
stl_wrappers::KVMap kvmap;
ImmutableCFOptions ioptions(options);
MutableCFOptions moptions(options);
c.Finish(options, ioptions, moptions, table_options,
GetPlainInternalComparator(options.comparator), &keys, &kvmap);
for (uint32_t i = 1; i <= 2; i++) {
std::unique_ptr<InternalIterator> iter(c.GetTableReader()->NewIterator(
ReadOptions(), moptions.prefix_extractor.get(), /*arena=*/nullptr,
/*skip_filters=*/false, TableReaderCaller::kUserIterator));
iter->SeekToFirst();
while (iter->Valid()) {
iter->key();
iter->value();
iter->Next();
}
ASSERT_OK(iter->status());
iter.reset();
}
// Verify traces.
std::vector<BlockCacheTraceRecord> expected_records;
// The first two records should be prefetching index and filter blocks.
BlockCacheTraceRecord record;
record.block_type = TraceType::kBlockTraceIndexBlock;
record.caller = TableReaderCaller::kPrefetch;
record.is_cache_hit = Boolean::kFalse;
record.no_insert = Boolean::kFalse;
expected_records.push_back(record);
record.block_type = TraceType::kBlockTraceFilterBlock;
expected_records.push_back(record);
// Then we should have three records for index and two data block access.
record.block_type = TraceType::kBlockTraceIndexBlock;
record.caller = TableReaderCaller::kUserIterator;
record.is_cache_hit = Boolean::kTrue;
expected_records.push_back(record);
record.block_type = TraceType::kBlockTraceDataBlock;
record.is_cache_hit = Boolean::kFalse;
expected_records.push_back(record);
expected_records.push_back(record);
// When we iterate this file for the second time, we should observe all cache
// hits.
record.block_type = TraceType::kBlockTraceIndexBlock;
record.is_cache_hit = Boolean::kTrue;
expected_records.push_back(record);
record.block_type = TraceType::kBlockTraceDataBlock;
expected_records.push_back(record);
expected_records.push_back(record);
VerifyBlockAccessTrace(&c, expected_records);
c.ResetTableReader();
}
// A simple tool that takes the snapshot of block cache statistics.
class BlockCachePropertiesSnapshot {
public:
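The tests above drive tracing through TableConstructor's BlockCacheTracer, wired into the table reader via TableReaderOptions. Outside the test harness the same start/stop pattern applies; a minimal sketch, assuming that wiring (hypothetical helpers, RocksDB-internal includes omitted):

// Hypothetical helpers, not RocksDB source.
Status StartBlockCacheTracing(Env* env, const std::string& trace_file,
                              BlockCacheTracer* tracer) {
  TraceOptions trace_opt;  // default trace options
  std::unique_ptr<TraceWriter> trace_writer;
  // NewFileTraceWriter is the same helper used by SetupTracingTest() above.
  Status s = NewFileTraceWriter(env, EnvOptions(), trace_file, &trace_writer);
  if (!s.ok()) {
    return s;
  }
  // From here on, block accesses made by table readers built with this tracer
  // are appended to trace_file.
  return tracer->StartTrace(env, trace_opt, std::move(trace_writer));
}

void StopBlockCacheTracing(BlockCacheTracer* tracer) {
  tracer->EndTrace();  // stops tracing and releases the trace writer
}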

File 3 of 9

@@ -57,8 +57,8 @@ struct BlockAccessInfo {
const uint64_t timestamp_in_seconds =
access.access_timestamp / kMicrosInSecond;
caller_num_accesses_timeline[access.caller][timestamp_in_seconds] += 1;
if (BlockCacheTraceHelper::ShouldTraceReferencedKey(access.block_type,
access.caller)) {
if (BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(access.block_type,
access.caller)) {
num_keys = access.num_keys_in_block;
if (access.referenced_key_exist_in_block == Boolean::kTrue) {
if (key_num_access_map.find(access.referenced_key) ==

File 4 of 9

@@ -35,14 +35,13 @@ const std::string BlockCacheTraceHelper::kUnknownColumnFamilyName =
"UnknownColumnFamily";
const uint64_t BlockCacheTraceHelper::kReservedGetId = 0;
bool BlockCacheTraceHelper::ShouldTraceReferencedKey(TraceType block_type,
TableReaderCaller caller) {
bool BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(
TraceType block_type, TableReaderCaller caller) {
return (block_type == TraceType::kBlockTraceDataBlock) &&
(caller == TableReaderCaller::kUserGet ||
caller == TableReaderCaller::kUserMultiGet);
IsGetOrMultiGet(caller);
}
bool BlockCacheTraceHelper::ShouldTraceGetId(TableReaderCaller caller) {
bool BlockCacheTraceHelper::IsGetOrMultiGet(TableReaderCaller caller) {
return caller == TableReaderCaller::kUserGet ||
caller == TableReaderCaller::kUserMultiGet;
}
@@ -81,12 +80,13 @@ Status BlockCacheTraceWriter::WriteBlockAccess(
trace.payload.push_back(record.caller);
trace.payload.push_back(record.is_cache_hit);
trace.payload.push_back(record.no_insert);
if (BlockCacheTraceHelper::ShouldTraceGetId(record.caller)) {
if (BlockCacheTraceHelper::IsGetOrMultiGet(record.caller)) {
PutFixed64(&trace.payload, record.get_id);
}
if (BlockCacheTraceHelper::ShouldTraceReferencedKey(record.block_type,
record.caller)) {
trace.payload.push_back(record.get_from_user_specified_snapshot);
PutLengthPrefixedSlice(&trace.payload, referenced_key);
}
if (BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(record.block_type,
record.caller)) {
PutFixed64(&trace.payload, record.referenced_data_size);
PutFixed64(&trace.payload, record.num_keys_in_block);
trace.payload.push_back(record.referenced_key_exist_in_block);
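The writer above appends the new optional fields in a fixed order, and the reader hunk below consumes them symmetrically. A standalone mirror of that layout (hypothetical code; it reuses the PutFixed64/PutLengthPrefixedSlice helpers from RocksDB's util/coding.h):

// Hypothetical standalone mirror of the optional-field layout written by
// BlockCacheTraceWriter::WriteBlockAccess after this change.
#include <string>
#include "trace_replay/block_cache_tracer.h"  // path approximate
#include "util/coding.h"                      // PutFixed64, PutLengthPrefixedSlice

using namespace rocksdb;

std::string EncodeOptionalFields(const BlockCacheTraceRecord& r,
                                 const Slice& referenced_key) {
  std::string payload;
  if (BlockCacheTraceHelper::IsGetOrMultiGet(r.caller)) {
    // Written for Get/MultiGet on *any* block type.
    PutFixed64(&payload, r.get_id);
    payload.push_back(static_cast<char>(r.get_from_user_specified_snapshot));
    PutLengthPrefixedSlice(&payload, referenced_key);
  }
  if (BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(r.block_type,
                                                        r.caller)) {
    // Written only for data blocks read by Get/MultiGet.
    PutFixed64(&payload, r.referenced_data_size);
    PutFixed64(&payload, r.num_keys_in_block);
    payload.push_back(static_cast<char>(r.referenced_key_exist_in_block));
  }
  return payload;
}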
@@ -216,20 +216,28 @@ Status BlockCacheTraceReader::ReadAccess(BlockCacheTraceRecord* record) {
}
record->no_insert = static_cast<Boolean>(enc_slice[0]);
enc_slice.remove_prefix(kCharSize);
if (BlockCacheTraceHelper::ShouldTraceGetId(record->caller)) {
if (BlockCacheTraceHelper::IsGetOrMultiGet(record->caller)) {
if (!GetFixed64(&enc_slice, &record->get_id)) {
return Status::Incomplete(
"Incomplete access record: Failed to read the get id.");
}
}
if (BlockCacheTraceHelper::ShouldTraceReferencedKey(record->block_type,
record->caller)) {
if (enc_slice.empty()) {
return Status::Incomplete(
"Incomplete access record: Failed to read "
"get_from_user_specified_snapshot.");
}
record->get_from_user_specified_snapshot =
static_cast<Boolean>(enc_slice[0]);
enc_slice.remove_prefix(kCharSize);
Slice referenced_key;
if (!GetLengthPrefixedSlice(&enc_slice, &referenced_key)) {
return Status::Incomplete(
"Incomplete access record: Failed to read the referenced key.");
}
record->referenced_key = referenced_key.ToString();
}
if (BlockCacheTraceHelper::IsGetOrMultiGetOnDataBlock(record->block_type,
record->caller)) {
if (!GetFixed64(&enc_slice, &record->referenced_data_size)) {
return Status::Incomplete(
"Incomplete access record: Failed to read the referenced data size.");

File 5 of 9

@@ -23,9 +23,9 @@ extern const uint64_t kSecondInHour;
class BlockCacheTraceHelper {
public:
static bool ShouldTraceReferencedKey(TraceType block_type,
TableReaderCaller caller);
static bool ShouldTraceGetId(TableReaderCaller caller);
static bool IsGetOrMultiGetOnDataBlock(TraceType block_type,
TableReaderCaller caller);
static bool IsGetOrMultiGet(TableReaderCaller caller);
static bool IsUserAccess(TableReaderCaller caller);
static const std::string kUnknownColumnFamilyName;
@@ -53,8 +53,11 @@ class BlockCacheTraceHelper {
// kUserApproximateSize).
struct BlockCacheLookupContext {
BlockCacheLookupContext(const TableReaderCaller& _caller) : caller(_caller) {}
BlockCacheLookupContext(const TableReaderCaller& _caller, uint64_t _get_id)
: caller(_caller), get_id(_get_id) {}
BlockCacheLookupContext(const TableReaderCaller& _caller, uint64_t _get_id,
bool _get_from_user_specified_snapshot)
: caller(_caller),
get_id(_get_id),
get_from_user_specified_snapshot(_get_from_user_specified_snapshot) {}
const TableReaderCaller caller;
// These are populated when we perform lookup/insert on block cache. The block
// cache tracer uses these inforation when logging the block access at
@@ -69,6 +72,8 @@ struct BlockCacheLookupContext {
// how many blocks a Get/MultiGet request accesses. We can also measure the
// impact of row cache vs block cache.
uint64_t get_id = 0;
std::string referenced_key;
bool get_from_user_specified_snapshot = false;
void FillLookupContext(bool _is_cache_hit, bool _no_insert,
TraceType _block_type, uint64_t _block_size,
@@ -100,23 +105,25 @@ struct BlockCacheTraceRecord {
Boolean no_insert = Boolean::kFalse;
// Required field for Get and MultiGet
uint64_t get_id = BlockCacheTraceHelper::kReservedGetId;
// Required fields for data block and user Get/Multi-Get only.
Boolean get_from_user_specified_snapshot = Boolean::kFalse;
std::string referenced_key;
// Required fields for data block and user Get/Multi-Get only.
uint64_t referenced_data_size = 0;
uint64_t num_keys_in_block = 0;
Boolean referenced_key_exist_in_block = Boolean::kFalse;
BlockCacheTraceRecord() {}
BlockCacheTraceRecord(uint64_t _access_timestamp, std::string _block_key,
TraceType _block_type, uint64_t _block_size,
uint64_t _cf_id, std::string _cf_name, uint32_t _level,
uint64_t _sst_fd_number, TableReaderCaller _caller,
bool _is_cache_hit, bool _no_insert, uint64_t _get_id,
std::string _referenced_key = "",
uint64_t _referenced_data_size = 0,
uint64_t _num_keys_in_block = 0,
bool _referenced_key_exist_in_block = false)
BlockCacheTraceRecord(
uint64_t _access_timestamp, std::string _block_key, TraceType _block_type,
uint64_t _block_size, uint64_t _cf_id, std::string _cf_name,
uint32_t _level, uint64_t _sst_fd_number, TableReaderCaller _caller,
bool _is_cache_hit, bool _no_insert,
uint64_t _get_id = BlockCacheTraceHelper::kReservedGetId,
bool _get_from_user_specified_snapshot = false,
std::string _referenced_key = "", uint64_t _referenced_data_size = 0,
uint64_t _num_keys_in_block = 0,
bool _referenced_key_exist_in_block = false)
: access_timestamp(_access_timestamp),
block_key(_block_key),
block_type(_block_type),
@@ -129,6 +136,9 @@ struct BlockCacheTraceRecord {
is_cache_hit(_is_cache_hit ? Boolean::kTrue : Boolean::kFalse),
no_insert(_no_insert ? Boolean::kTrue : Boolean::kFalse),
get_id(_get_id),
get_from_user_specified_snapshot(_get_from_user_specified_snapshot
? Boolean::kTrue
: Boolean::kFalse),
referenced_key(_referenced_key),
referenced_data_size(_referenced_data_size),
num_keys_in_block(_num_keys_in_block),
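A hypothetical use of the widened constructors above (illustrative values only, not RocksDB source, includes omitted): a lookup context for a user Get issued under a user-specified snapshot, and the corresponding record for an index-block access made by that Get. The data-block-only fields keep their defaults.

// Hypothetical example; the referenced key in real traces is the internal key
// (user key + sequence number + type) of the Get.
BlockCacheTraceRecord MakeIndexAccessForGet() {
  BlockCacheLookupContext ctx{TableReaderCaller::kUserGet, /*get_id=*/7,
                              /*get_from_user_specified_snapshot=*/true};
  // referenced_data_size, num_keys_in_block, and referenced_key_exist_in_block
  // keep their defaults because this is not a data-block access.
  return BlockCacheTraceRecord(
      /*access_timestamp=*/1563397320000000ULL, /*block_key=*/"sst42-index",
      TraceType::kBlockTraceIndexBlock, /*block_size=*/4096,
      /*cf_id=*/0, /*cf_name=*/"default", /*level=*/1, /*sst_fd_number=*/42,
      TableReaderCaller::kUserGet, /*is_cache_hit=*/true, /*no_insert=*/false,
      ctx.get_id, ctx.get_from_user_specified_snapshot,
      /*referenced_key=*/"k01<internal-key-suffix>");
}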

File 6 of 9

@@ -74,6 +74,7 @@ class BlockCacheTracerTest : public testing::Test {
// Provide get_id for all callers. The writer should only write get_id
// when the caller is either GET or MGET.
record.get_id = key_id + 1;
record.get_from_user_specified_snapshot = Boolean::kTrue;
// Provide these fields for all block types.
// The writer should only write these fields for data blocks and the
// caller is either GET or MGET.
@@ -126,20 +127,22 @@ class BlockCacheTracerTest : public testing::Test {
if (record.caller == TableReaderCaller::kUserGet ||
record.caller == TableReaderCaller::kUserMultiGet) {
ASSERT_EQ(key_id + 1, record.get_id);
ASSERT_EQ(Boolean::kTrue, record.get_from_user_specified_snapshot);
ASSERT_EQ(kRefKeyPrefix + std::to_string(key_id),
record.referenced_key);
} else {
ASSERT_EQ(BlockCacheTraceHelper::kReservedGetId, record.get_id);
ASSERT_EQ(Boolean::kFalse, record.get_from_user_specified_snapshot);
ASSERT_EQ("", record.referenced_key);
}
if (block_type == TraceType::kBlockTraceDataBlock &&
(record.caller == TableReaderCaller::kUserGet ||
record.caller == TableReaderCaller::kUserMultiGet)) {
ASSERT_EQ(kRefKeyPrefix + std::to_string(key_id),
record.referenced_key);
ASSERT_EQ(Boolean::kTrue, record.referenced_key_exist_in_block);
ASSERT_EQ(kNumKeysInBlock, record.num_keys_in_block);
ASSERT_EQ(kReferencedDataSize + key_id, record.referenced_data_size);
continue;
}
ASSERT_EQ("", record.referenced_key);
ASSERT_EQ(Boolean::kFalse, record.referenced_key_exist_in_block);
ASSERT_EQ(0, record.num_keys_in_block);
ASSERT_EQ(0, record.referenced_data_size);

File 7 of 9

@@ -110,19 +110,22 @@ void PrioritizedCacheSimulator::Access(const BlockCacheTraceRecord& access) {
std::string HybridRowBlockCacheSimulator::ComputeRowKey(
const BlockCacheTraceRecord& access) {
assert(access.get_id != BlockCacheTraceHelper::kReservedGetId);
Slice key;
if (access.referenced_key_exist_in_block == Boolean::kTrue) {
key = ExtractUserKey(access.referenced_key);
} else {
key = access.referenced_key;
}
return std::to_string(access.sst_fd_number) + "_" + key.ToString();
Slice key = ExtractUserKey(access.referenced_key);
uint64_t seq_no = access.get_from_user_specified_snapshot == Boolean::kFalse
? 0
: 1 + GetInternalKeySeqno(access.referenced_key);
return std::to_string(access.sst_fd_number) + "_" + key.ToString() + "_" +
std::to_string(seq_no);
}
void HybridRowBlockCacheSimulator::Access(const BlockCacheTraceRecord& access) {
bool is_cache_miss = true;
bool admitted = true;
if (access.get_id != BlockCacheTraceHelper::kReservedGetId) {
// TODO (haoyu): We only support Get for now. We need to extend the tracing
// for MultiGet, i.e., non-data block accesses must log all keys in a
// MultiGet.
if (access.caller == TableReaderCaller::kUserGet &&
access.get_id != BlockCacheTraceHelper::kReservedGetId) {
// This is a Get/MultiGet request.
const std::string& row_key = ComputeRowKey(access);
if (getid_getkeys_map_[access.get_id].find(row_key) ==
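ComputeRowKey() above now folds the sequence number into the row key, so reads of the same user key under different snapshots map to different rows; the +1 presumably keeps a snapshot read distinct from the 0 used when no user snapshot was set. A self-contained illustration with concrete values (assumes RocksDB's db/dbformat.h for InternalKey, ExtractUserKey, and GetInternalKeySeqno):

// Worked example of the new row key format: <sst_fd_number>_<user_key>_<seq>.
#include <cassert>
#include <cstdint>
#include <string>

#include "db/dbformat.h"

using namespace rocksdb;

int main() {
  // Internal key for user key "k01" written at sequence number 5.
  InternalKey ikey("k01", /*sequence=*/5, kTypeValue);
  const std::string referenced_key = ikey.Encode().ToString();
  const uint64_t fd = 42;

  // Get under a user-specified snapshot: the sequence number is preserved.
  const std::string with_snapshot =
      std::to_string(fd) + "_" + ExtractUserKey(referenced_key).ToString() +
      "_" + std::to_string(1 + GetInternalKeySeqno(referenced_key));
  assert(with_snapshot == "42_k01_6");

  // Get without a user-specified snapshot: the sequence component is 0.
  const std::string without_snapshot =
      std::to_string(fd) + "_" + ExtractUserKey(referenced_key).ToString() +
      "_0";
  assert(without_snapshot == "42_k01_0");
  return 0;
}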

File 8 of 9

@@ -137,7 +137,6 @@ class HybridRowBlockCacheSimulator : public PrioritizedCacheSimulator {
private:
// Row key is a concatenation of the access's fd_number and the referenced
// user key.
// TODO(haoyu): the row key should contain sequence number.
std::string ComputeRowKey(const BlockCacheTraceRecord& access);
enum InsertResult : char {

File 9 of 9

@@ -174,10 +174,11 @@ TEST_F(CacheSimulatorTest, GhostPrioritizedCacheSimulator) {
TEST_F(CacheSimulatorTest, HybridRowBlockCacheSimulator) {
uint64_t block_id = 100;
BlockCacheTraceRecord first_get = GenerateGetRecord(kGetId);
first_get.get_from_user_specified_snapshot = Boolean::kTrue;
BlockCacheTraceRecord second_get = GenerateGetRecord(kGetId + 1);
second_get.referenced_data_size = 0;
second_get.referenced_key_exist_in_block = Boolean::kFalse;
second_get.referenced_key = kRefKeyPrefix + std::to_string(kGetId);
second_get.get_from_user_specified_snapshot = Boolean::kTrue;
BlockCacheTraceRecord third_get = GenerateGetRecord(kGetId + 2);
third_get.referenced_data_size = 0;
third_get.referenced_key_exist_in_block = Boolean::kFalse;
@@ -203,9 +204,10 @@ TEST_F(CacheSimulatorTest, HybridRowBlockCacheSimulator) {
ASSERT_EQ(100, cache_simulator->miss_ratio());
ASSERT_EQ(10, cache_simulator->user_accesses());
ASSERT_EQ(100, cache_simulator->user_miss_ratio());
auto handle =
sim_cache->Lookup(ExtractUserKey(std::to_string(first_get.sst_fd_number) +
"_" + first_get.referenced_key));
auto handle = sim_cache->Lookup(
std::to_string(first_get.sst_fd_number) + "_" +
ExtractUserKey(first_get.referenced_key).ToString() + "_" +
std::to_string(1 + GetInternalKeySeqno(first_get.referenced_key)));
ASSERT_NE(nullptr, handle);
sim_cache->Release(handle);
for (uint32_t i = 100; i < block_id; i++) {
@@ -227,8 +229,10 @@ TEST_F(CacheSimulatorTest, HybridRowBlockCacheSimulator) {
ASSERT_EQ(66, static_cast<uint64_t>(cache_simulator->miss_ratio()));
ASSERT_EQ(15, cache_simulator->user_accesses());
ASSERT_EQ(66, static_cast<uint64_t>(cache_simulator->user_miss_ratio()));
handle = sim_cache->Lookup(std::to_string(second_get.sst_fd_number) + "_" +
second_get.referenced_key);
handle = sim_cache->Lookup(
std::to_string(second_get.sst_fd_number) + "_" +
ExtractUserKey(second_get.referenced_key).ToString() + "_" +
std::to_string(1 + GetInternalKeySeqno(second_get.referenced_key)));
ASSERT_NE(nullptr, handle);
sim_cache->Release(handle);
for (uint32_t i = 100; i < block_id; i++) {
@@ -283,9 +287,9 @@ TEST_F(CacheSimulatorTest, HybridRowBlockNoInsertCacheSimulator) {
cache_simulator->Access(first_get);
block_id++;
}
auto handle =
sim_cache->Lookup(ExtractUserKey(std::to_string(first_get.sst_fd_number) +
"_" + first_get.referenced_key));
auto handle = sim_cache->Lookup(
std::to_string(first_get.sst_fd_number) + "_" +
ExtractUserKey(first_get.referenced_key).ToString() + "_0");
ASSERT_NE(nullptr, handle);
sim_cache->Release(handle);
// All blocks are missing from the cache since insert_blocks_row_kvpair_misses