// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/table.h"

#include "db/dbformat.h"

#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

#include "table/block.h"
#include "table/filter_block.h"
#include "table/format.h"
#include "table/two_level_iterator.h"

#include "util/coding.h"
#include "util/perf_context_imp.h"
#include "util/stop_watch.h"

namespace rocksdb {

// The longest the prefix of the cache key used to identify blocks can be.
// We are using the fact that we know for Posix files the unique ID is three
// varints.
const size_t kMaxCacheKeyPrefixSize = kMaxVarint64Length*3+1;
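// Informal sketch of how the prefix is used: SetupCacheKeyPrefix() fills
// cache_key_prefix with either the file's unique ID or a varint-encoded
// Cache::NewId(), and BlockReader() appends varint64(handle.offset()) to it,
// so a block's cache key looks roughly like
//   [file-unique prefix][varint64 offset of the block within the file]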

struct Table::Rep {
  ~Rep() {
    delete filter;
    delete [] filter_data;
    delete index_block;
  }

  Rep(const EnvOptions& storage_options) :
      soptions(storage_options) {
  }

  Options options;
  const EnvOptions& soptions;
  Status status;
  unique_ptr<RandomAccessFile> file;
  char cache_key_prefix[kMaxCacheKeyPrefixSize];
  size_t cache_key_prefix_size;
  FilterBlockReader* filter;
  const char* filter_data;

  BlockHandle metaindex_handle;  // Handle to metaindex_block: saved from footer
  Block* index_block;
  TableStats table_stats;
};

// Helper function to setup the cache key's prefix for the Table.
void Table::SetupCacheKeyPrefix(Rep* rep) {
  assert(kMaxCacheKeyPrefixSize >= 10);
  rep->cache_key_prefix_size = 0;
  if (rep->options.block_cache) {
    rep->cache_key_prefix_size = rep->file->GetUniqueId(rep->cache_key_prefix,
                                                        kMaxCacheKeyPrefixSize);

    if (rep->cache_key_prefix_size == 0) {
      // If the prefix wasn't generated or was too long, we create one from the
      // cache.
      char* end = EncodeVarint64(rep->cache_key_prefix,
                                 rep->options.block_cache->NewId());
      rep->cache_key_prefix_size =
          static_cast<size_t>(end - rep->cache_key_prefix);
    }
  }
}

namespace {  // anonymous namespace, not visible externally

// Read the block identified by "handle" from "file".
// The only relevant option is options.verify_checksums for now.
// Set *didIO to true if didIO is not null.
// On failure return non-OK.
// On success fill *result and return OK - caller owns *result.
Status ReadBlock(RandomAccessFile* file,
                 const ReadOptions& options,
                 const BlockHandle& handle,
                 Block** result,
                 Env* env,
                 bool* didIO = nullptr) {
  BlockContents contents;
  Status s = ReadBlockContents(file, options, handle, &contents, env);
  if (s.ok()) {
    *result = new Block(contents);
  }

  if (didIO) {
    *didIO = true;
  }
  return s;
}

}  // end of anonymous namespace
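
// Example usage (an illustrative sketch; "file", "file_size" and "table" are
// hypothetical caller-side names, and the file is assumed to have been opened
// through options.env):
//
//   unique_ptr<Table> table;
//   Status s = Table::Open(options, soptions, std::move(file), file_size,
//                          &table);
//   if (s.ok()) {
//     Iterator* iter = table->NewIterator(ReadOptions());
//     ...
//     delete iter;
//   }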
Status Table::Open(const Options& options,
                   const EnvOptions& soptions,
                   unique_ptr<RandomAccessFile>&& file,
                   uint64_t size,
                   unique_ptr<Table>* table) {
  table->reset();
  if (size < Footer::kEncodedLength) {
    return Status::InvalidArgument("file is too short to be an sstable");
  }

  char footer_space[Footer::kEncodedLength];
  Slice footer_input;
  Status s = file->Read(size - Footer::kEncodedLength, Footer::kEncodedLength,
                        &footer_input, footer_space);
  if (!s.ok()) return s;

  // Check that we actually read the whole footer from the file. It may be
  // that size isn't correct.
  if (footer_input.size() != Footer::kEncodedLength) {
    return Status::InvalidArgument("file is too short to be an sstable");
  }

  Footer footer;
  s = footer.DecodeFrom(&footer_input);
  if (!s.ok()) return s;

  Block* index_block = nullptr;
  // TODO: we never really verify checksum for the index block
  s = ReadBlock(file.get(), ReadOptions(), footer.index_handle(), &index_block,
                options.env);

  if (s.ok()) {
    // We've successfully read the footer and the index block: we're
    // ready to serve requests.
    Rep* rep = new Table::Rep(soptions);
    rep->options = options;
    rep->file = std::move(file);
    rep->metaindex_handle = footer.metaindex_handle();
    rep->index_block = index_block;
    SetupCacheKeyPrefix(rep);
    rep->filter_data = nullptr;
    rep->filter = nullptr;
    table->reset(new Table(rep));
    (*table)->ReadMeta(footer);
  } else {
    if (index_block) delete index_block;
  }

  return s;
}

void Table::SetupForCompaction() {
  switch (rep_->options.access_hint_on_compaction_start) {
    case Options::NONE:
      break;
    case Options::NORMAL:
      rep_->file->Hint(RandomAccessFile::NORMAL);
      break;
    case Options::SEQUENTIAL:
      rep_->file->Hint(RandomAccessFile::SEQUENTIAL);
      break;
    case Options::WILLNEED:
      rep_->file->Hint(RandomAccessFile::WILLNEED);
      break;
    default:
      assert(false);
  }
  compaction_optimized_ = true;
}

const TableStats& Table::GetTableStats() const {
  return rep_->table_stats;
}
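
// Reads the metaindex block and, from it, the filter block (if the options
// specify a filter policy) and the stats block. The metaindex keys consulted
// below are kFilterBlockPrefix + filter_policy->Name() and kStatsBlock
// ("rocksdb.stats"); read failures are not propagated, though a warning is
// logged when the stats block cannot be decoded.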
void Table::ReadMeta(const Footer& footer) {
  // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
  // it is an empty block.
  // TODO: we never really verify checksum for the meta index block
  Block* meta = nullptr;
  if (!ReadBlock(rep_->file.get(), ReadOptions(), footer.metaindex_handle(),
                 &meta, rep_->options.env).ok()) {
    // Do not propagate errors since meta info is not needed for operation
    return;
  }

  Iterator* iter = meta->NewIterator(BytewiseComparator());
  // read filter
  if (rep_->options.filter_policy) {
    std::string key = kFilterBlockPrefix;
    key.append(rep_->options.filter_policy->Name());
    iter->Seek(key);

    if (iter->Valid() && iter->key() == Slice(key)) {
      ReadFilter(iter->value());
    }
  }

  // read stats
  iter->Seek(kStatsBlock);
  if (iter->Valid() && iter->key() == Slice(kStatsBlock)) {
    auto s = iter->status();
    if (s.ok()) {
      s = ReadStats(iter->value(), rep_);
    }

    if (!s.ok()) {
      auto err_msg =
        "[Warning] Encountered error while reading data from stats block " +
        s.ToString();
      Log(rep_->options.info_log, err_msg.c_str());
    }
  }

  delete iter;
  delete meta;
}

void Table::ReadFilter(const Slice& filter_handle_value) {
  Slice v = filter_handle_value;
  BlockHandle filter_handle;
  if (!filter_handle.DecodeFrom(&v).ok()) {
    return;
  }

  // TODO: We might want to unify with ReadBlock() if we start
  // requiring checksum verification in Table::Open.
  ReadOptions opt;
  BlockContents block;
  if (!ReadBlockContents(rep_->file.get(), opt, filter_handle, &block,
                         rep_->options.env).ok()) {
    return;
  }
  if (block.heap_allocated) {
    rep_->filter_data = block.data.data();  // Will need to delete later
  }
  rep_->filter = new FilterBlockReader(rep_->options, block.data);
}
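
// Decodes the stats meta-block referenced by handle_value into
// rep->table_stats. Entries in the stats block are sorted by key with no
// duplicates; values for the predefined TableStatsNames keys are
// varint64-encoded, the filter policy name is stored as a plain string, and
// any other entry is kept verbatim in user_collected_stats.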
Status Table::ReadStats(const Slice& handle_value, Rep* rep) {
  Slice v = handle_value;
  BlockHandle handle;
  if (!handle.DecodeFrom(&v).ok()) {
    return Status::InvalidArgument("Failed to decode stats block handle");
  }

  BlockContents block_contents;
  Status s = ReadBlockContents(
      rep->file.get(),
      ReadOptions(),
      handle,
      &block_contents,
      rep->options.env
  );

  if (!s.ok()) {
    return s;
  }

  Block stats_block(block_contents);
  std::unique_ptr<Iterator> iter(
      stats_block.NewIterator(BytewiseComparator())
  );

  auto& table_stats = rep->table_stats;
  // All pre-defined stats of type uint64_t
  std::unordered_map<std::string, uint64_t*> predefined_uint64_stats = {
    { TableStatsNames::kDataSize,      &table_stats.data_size },
    { TableStatsNames::kIndexSize,     &table_stats.index_size },
    { TableStatsNames::kRawKeySize,    &table_stats.raw_key_size },
    { TableStatsNames::kRawValueSize,  &table_stats.raw_value_size },
    { TableStatsNames::kNumDataBlocks, &table_stats.num_data_blocks },
    { TableStatsNames::kNumEntries,    &table_stats.num_entries },
  };

  std::string last_key;
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    s = iter->status();
    if (!s.ok()) {
      break;
    }

    auto key = iter->key().ToString();
    // stats block is strictly sorted with no duplicate key.
    assert(
        last_key.empty() ||
        BytewiseComparator()->Compare(key, last_key) > 0
    );
    last_key = key;

    auto raw_val = iter->value();
    auto pos = predefined_uint64_stats.find(key);

    if (pos != predefined_uint64_stats.end()) {
      // handle predefined rocksdb stats
      uint64_t val;
      if (!GetVarint64(&raw_val, &val)) {
        // skip malformed value
        auto error_msg =
          "[Warning] detected malformed value in stats meta-block:"
          "\tkey: " + key + "\tval: " + raw_val.ToString();
        Log(rep->options.info_log, error_msg.c_str());
        continue;
      }
      *(pos->second) = val;
    } else if (key == TableStatsNames::kFilterPolicy) {
      table_stats.filter_policy_name = raw_val.ToString();
    } else {
      // handle user-collected stats
      table_stats.user_collected_stats.insert(
          std::make_pair(iter->key().ToString(), raw_val.ToString())
      );
    }
  }

  return s;
}

Table::~Table() {
  delete rep_;
}
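
// Cleanup callbacks used by BlockReader() below: DeleteBlock frees a block
// owned directly by an iterator, DeleteCachedBlock is the deleter registered
// with the block cache when a block is inserted, and ReleaseBlock drops the
// cache handle that stays pinned while an iterator reads a cached block.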
static void DeleteBlock(void* arg, void* ignored) {
  delete reinterpret_cast<Block*>(arg);
}

static void DeleteCachedBlock(const Slice& key, void* value) {
  Block* block = reinterpret_cast<Block*>(value);
  delete block;
}

static void ReleaseBlock(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle);
}

// Convert an index iterator value (i.e., an encoded BlockHandle)
// into an iterator over the contents of the corresponding block.
Iterator* Table::BlockReader(void* arg,
                             const ReadOptions& options,
                             const Slice& index_value,
                             bool* didIO,
                             bool for_compaction) {
  const bool no_io = (options.read_tier == kBlockCacheTier);
  Table* table = reinterpret_cast<Table*>(arg);
  Cache* block_cache = table->rep_->options.block_cache.get();
  std::shared_ptr<Statistics> statistics = table->rep_->options.statistics;
  Block* block = nullptr;
  Cache::Handle* cache_handle = nullptr;

  BlockHandle handle;
  Slice input = index_value;
  Status s = handle.DecodeFrom(&input);
  // We intentionally allow extra stuff in index_value so that we
  // can add more features in the future.

  if (s.ok()) {
    if (block_cache != nullptr) {
      char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
      const size_t cache_key_prefix_size = table->rep_->cache_key_prefix_size;
      assert(cache_key_prefix_size != 0);
      assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize);
      memcpy(cache_key, table->rep_->cache_key_prefix,
             cache_key_prefix_size);
      char* end = EncodeVarint64(cache_key + cache_key_prefix_size,
                                 handle.offset());
      Slice key(cache_key, static_cast<size_t>(end - cache_key));
      cache_handle = block_cache->Lookup(key);
      if (cache_handle != nullptr) {
        block = reinterpret_cast<Block*>(block_cache->Value(cache_handle));

        BumpPerfCount(&perf_context.block_cache_hit_count);
        RecordTick(statistics, BLOCK_CACHE_HIT);
      } else if (no_io) {
        // Did not find in block_cache and can't do IO
        return NewErrorIterator(Status::Incomplete("no blocking io"));
      } else {
        Histograms histogram = for_compaction ?
            READ_BLOCK_COMPACTION_MICROS : READ_BLOCK_GET_MICROS;
        {  // scope for stop watch
          StopWatch sw(table->rep_->options.env, statistics, histogram);
          s = ReadBlock(
              table->rep_->file.get(),
              options,
              handle,
              &block,
              table->rep_->options.env,
              didIO
          );
        }
        if (s.ok()) {
          if (block->isCachable() && options.fill_cache) {
            cache_handle = block_cache->Insert(
                key, block, block->size(), &DeleteCachedBlock);
          }
        }

        RecordTick(statistics, BLOCK_CACHE_MISS);
      }
    } else if (no_io) {
      // Could not read from block_cache and can't do IO
      return NewErrorIterator(Status::Incomplete("no blocking io"));
    } else {
      s = ReadBlock(table->rep_->file.get(), options, handle, &block,
                    table->rep_->options.env, didIO);
    }
  }

  Iterator* iter;
  if (block != nullptr) {
    iter = block->NewIterator(table->rep_->options.comparator);
    if (cache_handle == nullptr) {
      iter->RegisterCleanup(&DeleteBlock, block, nullptr);
    } else {
      iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle);
    }
  } else {
    iter = NewErrorIterator(s);
  }
  return iter;
}

Iterator* Table::BlockReader(void* arg,
                             const ReadOptions& options,
                             const EnvOptions& soptions,
                             const Slice& index_value,
                             bool for_compaction) {
  return BlockReader(arg, options, index_value, nullptr, for_compaction);
}

// This will be broken if the user specifies an unusual implementation
// of Options.comparator, or if the user specifies an unusual
// definition of prefixes in Options.filter_policy. In particular, we
// require the following three properties:
//
// 1) key.starts_with(prefix(key))
// 2) Compare(prefix(key), key) <= 0
// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
//
// TODO(tylerharter): right now, this won't cause I/O since blooms are
// in memory. When blooms may need to be paged in, we should refactor so that
// this is only ever called lazily. In particular, this shouldn't be called
// while the DB lock is held like it is now.
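//
// For example (an illustrative sketch, not an interface defined in this file):
// with the default BytewiseComparator, a fixed-length prefix such as
//   prefix(key) = key.substr(0, std::min(key.size(), size_t(8)))
// satisfies all three properties: every key starts with its own prefix, a
// prefix never compares greater than the full key, and taking prefixes
// preserves the bytewise ordering of keys.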
bool Table::PrefixMayMatch(const Slice& internal_prefix) const {
  FilterBlockReader* filter = rep_->filter;
  bool may_match = true;
  Status s;

  if (filter == nullptr) {
    return true;
  }

  std::unique_ptr<Iterator> iiter(rep_->index_block->NewIterator(
      rep_->options.comparator));
  iiter->Seek(internal_prefix);
  if (!iiter->Valid()) {
    // we're past end of file
    may_match = false;
  } else if (ExtractUserKey(iiter->key()).starts_with(
                 ExtractUserKey(internal_prefix))) {
    // we need to check for this subtle case because our only
    // guarantee is that "the key is a string >= last key in that data
    // block" according to the doc/table_format.txt spec.
    //
    // Suppose iiter->key() starts with the desired prefix; it is not
    // necessarily the case that the corresponding data block will
    // contain the prefix, since iiter->key() need not be in the
    // block. However, the next data block may contain the prefix, so
    // we return true to play it safe.
    may_match = true;
  } else {
    // iiter->key() does NOT start with the desired prefix. Because
    // Seek() finds the first key that is >= the seek target, this
    // means that iiter->key() > prefix. Thus, any data blocks coming
    // after the data block corresponding to iiter->key() cannot
    // possibly contain the key. Thus, the corresponding data block
    // is the only one which could potentially contain the prefix.
    Slice handle_value = iiter->value();
    BlockHandle handle;
    s = handle.DecodeFrom(&handle_value);
    assert(s.ok());
    may_match = filter->PrefixMayMatch(handle.offset(), internal_prefix);
  }

  RecordTick(rep_->options.statistics, BLOOM_FILTER_PREFIX_CHECKED);
  if (!may_match) {
    RecordTick(rep_->options.statistics, BLOOM_FILTER_PREFIX_USEFUL);
  }

  return may_match;
}

Iterator* Table::NewIterator(const ReadOptions& options) const {
  if (options.prefix) {
    InternalKey internal_prefix(*options.prefix, 0, kTypeValue);
    if (!PrefixMayMatch(internal_prefix.Encode())) {
      // nothing in this file can match the prefix, so we should not
      // bother doing I/O to this file when iterating.
      return NewEmptyIterator();
    }
  }

  return NewTwoLevelIterator(
      rep_->index_block->NewIterator(rep_->options.comparator),
      &Table::BlockReader, const_cast<Table*>(this), options, rep_->soptions);
}

Status Table::InternalGet(const ReadOptions& options, const Slice& k,
                          void* arg,
                          bool (*saver)(void*, const Slice&, const Slice&,
                                        bool),
                          void (*mark_key_may_exist)(void*)) {
  Status s;
  Iterator* iiter = rep_->index_block->NewIterator(rep_->options.comparator);
  bool done = false;
  for (iiter->Seek(k); iiter->Valid() && !done; iiter->Next()) {
    Slice handle_value = iiter->value();
    FilterBlockReader* filter = rep_->filter;
    BlockHandle handle;
    if (filter != nullptr &&
        handle.DecodeFrom(&handle_value).ok() &&
        !filter->KeyMayMatch(handle.offset(), k)) {
      // Not found
      // TODO: think about interaction with Merge. If a user key cannot
      // cross one data block, we should be fine.
      RecordTick(rep_->options.statistics, BLOOM_FILTER_USEFUL);
      break;
    } else {
      bool didIO = false;
      std::unique_ptr<Iterator> block_iter(
          BlockReader(this, options, iiter->value(), &didIO));

      if (options.read_tier && block_iter->status().IsIncomplete()) {
        // couldn't get block from block_cache
        // Update Saver.state to Found because we are only looking for whether
        // we can guarantee the key is not there when "no_io" is set
        (*mark_key_may_exist)(arg);
        break;
      }

      // Call the *saver function on each entry/block until it returns false
      for (block_iter->Seek(k); block_iter->Valid(); block_iter->Next()) {
        if (!(*saver)(arg, block_iter->key(), block_iter->value(), didIO)) {
          done = true;
          break;
        }
      }
      s = block_iter->status();
    }
  }
  if (s.ok()) {
    s = iiter->status();
  }
  delete iiter;
  return s;
}
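
// SaveDidIO is a minimal saver callback for InternalGet(): it stores the didIO
// flag into *arg and returns false so scanning stops at the first entry
// examined. TEST_KeyInCache() uses it to report whether the key's block was
// served without a disk read.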
bool SaveDidIO(void* arg, const Slice& key, const Slice& value, bool didIO) {
  *reinterpret_cast<bool*>(arg) = didIO;
  return false;
}

bool Table::TEST_KeyInCache(const ReadOptions& options, const Slice& key) {
  // We use InternalGet() as it has logic that checks whether we read the
  // block from the disk or not.
  bool didIO = false;
  Status s = InternalGet(options, key, &didIO, SaveDidIO);
  assert(s.ok());
  return !didIO;
}

uint64_t Table::ApproximateOffsetOf(const Slice& key) const {
  Iterator* index_iter =
      rep_->index_block->NewIterator(rep_->options.comparator);
  index_iter->Seek(key);
  uint64_t result;
  if (index_iter->Valid()) {
    BlockHandle handle;
    Slice input = index_iter->value();
    Status s = handle.DecodeFrom(&input);
    if (s.ok()) {
      result = handle.offset();
    } else {
      // Strange: we can't decode the block handle in the index block.
      // We'll just return the offset of the metaindex block, which is
      // close to the whole file size for this case.
      result = rep_->metaindex_handle.offset();
    }
  } else {
    // key is past the last key in the file. Approximate the offset
    // by returning the offset of the metaindex block (which is
    // right near the end of the file).
    result = rep_->metaindex_handle.offset();
  }
  delete index_iter;
  return result;
}

const std::string Table::kFilterBlockPrefix = "filter.";
const std::string Table::kStatsBlock = "rocksdb.stats";

const std::string TableStatsNames::kDataSize      = "rocksdb.data.size";
const std::string TableStatsNames::kIndexSize     = "rocksdb.index.size";
const std::string TableStatsNames::kRawKeySize    = "rocksdb.raw.key.size";
const std::string TableStatsNames::kRawValueSize  = "rocksdb.raw.value.size";
const std::string TableStatsNames::kNumDataBlocks = "rocksdb.num.data.blocks";
const std::string TableStatsNames::kNumEntries    = "rocksdb.num.entries";
const std::string TableStatsNames::kFilterPolicy  = "rocksdb.filter.policy";

}  // namespace rocksdb