2013-10-16 23:59:46 +02:00
|
|
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under the BSD-style license found in the
|
|
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
|
|
//
|
2011-03-18 23:37:00 +01:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
2013-10-30 18:52:33 +01:00
|
|
|
#include "table/block_based_table_reader.h"
|
2013-06-14 02:25:09 +02:00
|
|
|
|
2013-08-13 23:04:56 +02:00
|
|
|
#include "db/dbformat.h"
|
|
|
|
|
2013-08-23 17:38:13 +02:00
|
|
|
#include "rocksdb/comparator.h"
|
|
|
|
#include "rocksdb/env.h"
|
|
|
|
#include "rocksdb/filter_policy.h"
|
|
|
|
#include "rocksdb/options.h"
|
|
|
|
#include "rocksdb/statistics.h"
|
2013-10-29 01:54:09 +01:00
|
|
|
#include "rocksdb/table.h"
|
2013-06-14 02:25:09 +02:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "table/block.h"
|
2012-04-17 17:36:46 +02:00
|
|
|
#include "table/filter_block.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "table/format.h"
|
2013-12-06 01:51:26 +01:00
|
|
|
#include "table/meta_blocks.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "table/two_level_iterator.h"
|
2013-06-14 02:25:09 +02:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "util/coding.h"
|
[RocksDB] Added nano second stopwatch and new perf counters to track block read cost
Summary: The pupose of this diff is to expose per user-call level precise timing of block read, so that we can answer questions like: a Get() costs me 100ms, is that somehow related to loading blocks from file system, or sth else? We will answer that with EXACTLY how many blocks have been read, how much time was spent on transfering the bytes from os, how much time was spent on checksum verification and how much time was spent on block decompression, just for that one Get. A nano second stopwatch was introduced to track time with higher precision. The cost/precision of the stopwatch is also measured in unit-test. On my dev box, retrieving one time instance costs about 30ns, on average. The deviation of timing results is good enough to track 100ns-1us level events. And the overhead could be safely ignored for 100us level events (10000 instances/s), for example, a viewstate thrift call.
Test Plan: perf_context_test, also testing with viewstate shadow traffic.
Reviewers: dhruba
Reviewed By: dhruba
CC: leveldb, xjin
Differential Revision: https://reviews.facebook.net/D12351
2013-06-04 08:09:15 +02:00
|
|
|
#include "util/perf_context_imp.h"
|
2013-06-14 02:25:09 +02:00
|
|
|
#include "util/stop_watch.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
namespace rocksdb {
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-12-05 01:35:48 +01:00
|
|
|
extern uint64_t kBlockBasedTableMagicNumber;
|
2013-12-05 00:09:41 +01:00
|
|
|
|
2013-02-01 00:20:24 +01:00
|
|
|
// The longest the prefix of the cache key used to identify blocks can be.
|
|
|
|
// We are using the fact that we know for Posix files the unique ID is three
|
|
|
|
// varints.
|
|
|
|
const size_t kMaxCacheKeyPrefixSize = kMaxVarint64Length*3+1;
|
2013-11-13 07:46:51 +01:00
|
|
|
using std::unique_ptr;
|
2013-02-01 00:20:24 +01:00
|
|
|
|
2013-10-29 01:54:09 +01:00
|
|
|
// Rep ("representation") holds all per-table state for a BlockBasedTable.
// It is heap-allocated by Open() and owned (and deleted) by the table.
struct BlockBasedTable::Rep {
  Rep(const EnvOptions& storage_options,
      const InternalKeyComparator& internal_comparator)
      : soptions(storage_options), internal_comparator_(internal_comparator) {}

  Options options;
  // NOTE(review): both references below must outlive this Rep — they are
  // stored by reference, not copied.
  const EnvOptions& soptions;
  const InternalKeyComparator& internal_comparator_;
  Status status;
  // The table file; owned by this Rep.
  unique_ptr<RandomAccessFile> file;
  // Prefixes prepended to block offsets to form block-cache keys
  // (see SetupCacheKeyPrefix / GetCacheKey).
  char cache_key_prefix[kMaxCacheKeyPrefixSize];
  size_t cache_key_prefix_size = 0;
  char compressed_cache_key_prefix[kMaxCacheKeyPrefixSize];
  size_t compressed_cache_key_prefix_size = 0;

  // Handle to metaindex_block: saved from footer
  BlockHandle metaindex_handle;
  // Handle to index: saved from footer
  BlockHandle index_handle;
  // index_block will be populated and used only when options.block_cache is
  // NULL; otherwise we will get the index block via the block cache.
  unique_ptr<Block> index_block;
  // Pre-loaded filter reader; populated only in the no-block-cache path of
  // Open() (otherwise filters are fetched through the cache via GetFilter()).
  unique_ptr<FilterBlockReader> filter;

  std::shared_ptr<const TableProperties> table_properties;
};
|
|
|
|
|
2013-10-29 01:54:09 +01:00
|
|
|
BlockBasedTable::~BlockBasedTable() {
  // rep_ is allocated in Open() and exclusively owned by this table.
  delete rep_;
}
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
// CachableEntry represents the entries that *may* be fetched from block cache.
// field `value` is the item we want to get.
// field `cache_handle` is the cache handle to the block cache. If the value
// was not read from cache, `cache_handle` will be nullptr.
template <class TValue>
struct BlockBasedTable::CachableEntry {
  CachableEntry(TValue* value, Cache::Handle* cache_handle)
    : value(value)
    , cache_handle(cache_handle) {
  }
  CachableEntry(): CachableEntry(nullptr, nullptr) { }
  // Drops the pin on the cache entry, if any. Note that when cache_handle is
  // nullptr (the value did not come from the cache) this is a no-op and
  // `value` is NOT deleted — the caller/cache retains ownership in that case.
  void Release(Cache* cache) {
    if (cache_handle) {
      cache->Release(cache_handle);
      value = nullptr;
      cache_handle = nullptr;
    }
  }

  TValue* value = nullptr;
  // if the entry is from the cache, cache_handle will be populated.
  Cache::Handle* cache_handle = nullptr;
};
|
|
|
|
|
2013-02-01 00:20:24 +01:00
|
|
|
// Helper function to setup the cache key's prefix for the Table.
|
2013-10-29 01:54:09 +01:00
|
|
|
void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep) {
|
2013-02-01 00:20:24 +01:00
|
|
|
assert(kMaxCacheKeyPrefixSize >= 10);
|
|
|
|
rep->cache_key_prefix_size = 0;
|
2013-09-02 08:23:40 +02:00
|
|
|
rep->compressed_cache_key_prefix_size = 0;
|
|
|
|
if (rep->options.block_cache != nullptr) {
|
2013-12-03 20:17:58 +01:00
|
|
|
GenerateCachePrefix(rep->options.block_cache.get(), rep->file.get(),
|
2013-09-02 08:23:40 +02:00
|
|
|
&rep->cache_key_prefix[0],
|
|
|
|
&rep->cache_key_prefix_size);
|
|
|
|
}
|
|
|
|
if (rep->options.block_cache_compressed != nullptr) {
|
2013-12-03 20:17:58 +01:00
|
|
|
GenerateCachePrefix(rep->options.block_cache_compressed.get(),
|
|
|
|
rep->file.get(), &rep->compressed_cache_key_prefix[0],
|
2013-09-02 08:23:40 +02:00
|
|
|
&rep->compressed_cache_key_prefix_size);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-03 20:17:58 +01:00
|
|
|
void BlockBasedTable::GenerateCachePrefix(Cache* cc,
|
2013-09-02 08:23:40 +02:00
|
|
|
RandomAccessFile* file, char* buffer, size_t* size) {
|
|
|
|
|
|
|
|
// generate an id from the file
|
|
|
|
*size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);
|
|
|
|
|
|
|
|
// If the prefix wasn't generated or was too long,
|
|
|
|
// create one from the cache.
|
|
|
|
if (*size == 0) {
|
|
|
|
char* end = EncodeVarint64(buffer, cc->NewId());
|
|
|
|
*size = static_cast<size_t>(end - buffer);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-12-03 20:17:58 +01:00
|
|
|
void BlockBasedTable::GenerateCachePrefix(Cache* cc,
|
2013-09-02 08:23:40 +02:00
|
|
|
WritableFile* file, char* buffer, size_t* size) {
|
|
|
|
|
|
|
|
// generate an id from the file
|
|
|
|
*size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);
|
|
|
|
|
|
|
|
// If the prefix wasn't generated or was too long,
|
|
|
|
// create one from the cache.
|
|
|
|
if (*size == 0) {
|
|
|
|
char* end = EncodeVarint64(buffer, cc->NewId());
|
|
|
|
*size = static_cast<size_t>(end - buffer);
|
2013-02-01 00:20:24 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-23 08:47:56 +02:00
|
|
|
namespace { // anonymous namespace, not visible externally
|
|
|
|
|
|
|
|
// Read the block identified by "handle" from "file".
|
|
|
|
// The only relevant option is options.verify_checksums for now.
|
|
|
|
// Set *didIO to true if didIO is not null.
|
|
|
|
// On failure return non-OK.
|
|
|
|
// On success fill *result and return OK - caller owns *result
|
2013-11-13 07:46:51 +01:00
|
|
|
Status ReadBlockFromFile(
|
|
|
|
RandomAccessFile* file,
|
|
|
|
const ReadOptions& options,
|
|
|
|
const BlockHandle& handle,
|
|
|
|
Block** result,
|
|
|
|
Env* env,
|
|
|
|
bool* didIO = nullptr,
|
|
|
|
bool do_uncompress = true) {
|
2013-04-23 08:47:56 +02:00
|
|
|
BlockContents contents;
|
2013-09-02 08:23:40 +02:00
|
|
|
Status s = ReadBlockContents(file, options, handle, &contents,
|
|
|
|
env, do_uncompress);
|
2013-04-23 08:47:56 +02:00
|
|
|
if (s.ok()) {
|
|
|
|
*result = new Block(contents);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (didIO) {
|
|
|
|
*didIO = true;
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
void DeleteBlock(void* arg, void* ignored) {
|
|
|
|
delete reinterpret_cast<Block*>(arg);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Deleter registered with the block cache for Block entries.
void DeleteCachedBlock(const Slice& key, void* value) {
  delete reinterpret_cast<Block*>(value);
}
|
|
|
|
|
|
|
|
void DeleteCachedFilter(const Slice& key, void* value) {
|
|
|
|
auto filter = reinterpret_cast<FilterBlockReader*>(value);
|
|
|
|
delete filter;
|
|
|
|
}
|
|
|
|
|
|
|
|
void ReleaseBlock(void* arg, void* h) {
|
|
|
|
Cache* cache = reinterpret_cast<Cache*>(arg);
|
|
|
|
Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
|
|
|
|
cache->Release(handle);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Builds a block-cache key into caller-provided storage `cache_key`:
// the per-table prefix followed by the varint-encoded block offset.
// The returned Slice aliases `cache_key`, which must outlive it.
Slice GetCacheKey(const char* cache_key_prefix,
                  size_t cache_key_prefix_size,
                  const BlockHandle& handle,
                  char* cache_key) {
  assert(cache_key != nullptr);
  assert(cache_key_prefix_size != 0);
  assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize);

  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
  char* const key_end =
      EncodeVarint64(cache_key + cache_key_prefix_size, handle.offset());
  return Slice(cache_key, static_cast<size_t>(key_end - cache_key));
}
|
|
|
|
|
|
|
|
// Looks `key` up in `block_cache`, recording both the overall and the
// block-type-specific hit/miss tickers. Returns the cache handle
// (nullptr on miss); on a hit the caller owns the handle and must
// eventually Release() it.
Cache::Handle* GetFromBlockCache(
    Cache* block_cache,
    const Slice& key,
    Tickers block_cache_miss_ticker,
    Tickers block_cache_hit_ticker,
    Statistics* statistics) {
  Cache::Handle* const cache_handle = block_cache->Lookup(key);

  if (cache_handle == nullptr) {
    // overall cache miss
    RecordTick(statistics, BLOCK_CACHE_MISS);
    // block-type specific cache miss
    RecordTick(statistics, block_cache_miss_ticker);
    return cache_handle;
  }

  BumpPerfCount(&perf_context.block_cache_hit_count);
  // overall cache hit
  RecordTick(statistics, BLOCK_CACHE_HIT);
  // block-type specific cache hit
  RecordTick(statistics, block_cache_hit_ticker);
  return cache_handle;
}
|
|
|
|
|
2013-04-23 08:47:56 +02:00
|
|
|
} // end of anonymous namespace
|
|
|
|
|
2014-01-24 19:57:15 +01:00
|
|
|
// Opens a block-based table file: reads the footer, sets up the Rep,
// loads table properties from the metaindex, and either warms the block
// cache with index/filter blocks or pre-loads them into the Rep.
// On success, *table_reader owns the new table.
//
// BUGFIX: ReadMetaBlock() leaves `meta_iter` null on failure, but the
// original code dereferenced it unconditionally (properties seek, and
// later the filter seek) — undefined behavior on a corrupt or unreadable
// metaindex block. Both uses are now guarded; a failed metaindex read
// simply skips properties/filter loading (ReadMetaBlock already logs).
Status BlockBasedTable::Open(const Options& options, const EnvOptions& soptions,
                             const BlockBasedTableOptions& table_options,
                             const InternalKeyComparator& internal_comparator,
                             unique_ptr<RandomAccessFile>&& file,
                             uint64_t file_size,
                             unique_ptr<TableReader>* table_reader) {
  table_reader->reset();

  Footer footer(kBlockBasedTableMagicNumber);
  auto s = ReadFooterFromFile(file.get(), file_size, &footer);
  if (!s.ok()) return s;

  // We've successfully read the footer: we're ready to serve requests.
  Rep* rep = new BlockBasedTable::Rep(soptions, internal_comparator);
  rep->options = options;
  rep->file = std::move(file);
  rep->metaindex_handle = footer.metaindex_handle();
  rep->index_handle = footer.index_handle();
  SetupCacheKeyPrefix(rep);
  unique_ptr<BlockBasedTable> new_table(new BlockBasedTable(rep));

  // Read meta index
  std::unique_ptr<Block> meta;
  std::unique_ptr<Iterator> meta_iter;
  s = ReadMetaBlock(rep, &meta, &meta_iter);

  if (s.ok()) {
    // Read the properties
    meta_iter->Seek(kPropertiesBlock);
    if (meta_iter->Valid() && meta_iter->key() == kPropertiesBlock) {
      s = meta_iter->status();
      TableProperties* table_properties = nullptr;
      if (s.ok()) {
        s = ReadProperties(meta_iter->value(), rep->file.get(),
                           rep->options.env, rep->options.info_log.get(),
                           &table_properties);
      }

      if (!s.ok()) {
        // Properties are advisory; log and continue without them.
        auto err_msg =
            "[Warning] Encountered error while reading data from properties "
            "block " + s.ToString();
        Log(rep->options.info_log, "%s", err_msg.c_str());
      } else {
        rep->table_properties.reset(table_properties);
      }
    }
  }

  // Will use block cache for index/filter blocks access?
  if (options.block_cache && table_options.cache_index_and_filter_blocks) {
    // Call IndexBlockReader() to implicitly add index to the block_cache
    unique_ptr<Iterator> iter(new_table->IndexBlockReader(ReadOptions()));
    s = iter->status();

    if (s.ok()) {
      // Call GetFilter() to implicitly add filter to the block_cache
      auto filter_entry = new_table->GetFilter();
      filter_entry.Release(options.block_cache.get());
    }
  } else {
    // If we don't use block cache for index/filter blocks access, we'll
    // pre-load these blocks, which will kept in member variables in Rep
    // and with a same life-time as this table object.
    Block* index_block = nullptr;
    // TODO: we never really verify check sum for index block
    s = ReadBlockFromFile(
        rep->file.get(),
        ReadOptions(),
        footer.index_handle(),
        &index_block,
        options.env
    );

    if (s.ok()) {
      assert(index_block->compressionType() == kNoCompression);
      rep->index_block.reset(index_block);

      // Set filter block. meta_iter is null when the metaindex block
      // could not be read; in that case no filter can be located.
      if (rep->options.filter_policy && meta_iter) {
        std::string key = kFilterBlockPrefix;
        key.append(rep->options.filter_policy->Name());
        meta_iter->Seek(key);

        if (meta_iter->Valid() && meta_iter->key() == Slice(key)) {
          rep->filter.reset(ReadFilter(meta_iter->value(), rep));
        }
      }
    } else {
      delete index_block;
    }
  }

  if (s.ok()) {
    *table_reader = std::move(new_table);
  }

  return s;
}
|
|
|
|
|
2013-10-29 01:54:09 +01:00
|
|
|
// Applies the user's compaction-time access hint to the underlying file
// and marks the table as compaction-optimized.
void BlockBasedTable::SetupForCompaction() {
  switch (rep_->options.access_hint_on_compaction_start) {
    case Options::NONE:
      break;
    case Options::NORMAL:
      rep_->file->Hint(RandomAccessFile::NORMAL);
      break;
    case Options::SEQUENTIAL:
      rep_->file->Hint(RandomAccessFile::SEQUENTIAL);
      break;
    case Options::WILLNEED:
      rep_->file->Hint(RandomAccessFile::WILLNEED);
      break;
    default:
      // All enum values are handled above; anything else is a programmer bug.
      assert(false);
  }
  compaction_optimized_ = true;
}
|
|
|
|
|
2014-02-08 04:26:49 +01:00
|
|
|
// Returns the properties loaded from the table's properties block during
// Open(); may hold nullptr if the properties block was missing/unreadable.
std::shared_ptr<const TableProperties> BlockBasedTable::GetTableProperties()
    const {
  return rep_->table_properties;
}
|
2012-04-17 17:36:46 +02:00
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
// Load the meta-block from the file. On success, return the loaded meta block
|
|
|
|
// and its iterator.
|
|
|
|
Status BlockBasedTable::ReadMetaBlock(
|
|
|
|
Rep* rep,
|
|
|
|
std::unique_ptr<Block>* meta_block,
|
|
|
|
std::unique_ptr<Iterator>* iter) {
|
2012-04-17 17:36:46 +02:00
|
|
|
// TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
|
|
|
|
// it is an empty block.
|
2013-04-23 08:47:56 +02:00
|
|
|
// TODO: we never really verify check sum for meta index block
|
|
|
|
Block* meta = nullptr;
|
2013-11-13 07:46:51 +01:00
|
|
|
Status s = ReadBlockFromFile(
|
|
|
|
rep->file.get(),
|
|
|
|
ReadOptions(),
|
|
|
|
rep->metaindex_handle,
|
|
|
|
&meta,
|
|
|
|
rep->options.env);
|
2013-10-10 20:43:24 +02:00
|
|
|
|
|
|
|
if (!s.ok()) {
|
|
|
|
auto err_msg =
|
2013-11-20 01:29:42 +01:00
|
|
|
"[Warning] Encountered error while reading data from properties"
|
|
|
|
"block " + s.ToString();
|
2013-11-13 07:46:51 +01:00
|
|
|
Log(rep->options.info_log, "%s", err_msg.c_str());
|
2013-10-10 20:43:24 +02:00
|
|
|
}
|
2013-11-13 07:46:51 +01:00
|
|
|
if (!s.ok()) {
|
|
|
|
delete meta;
|
|
|
|
return s;
|
2012-04-17 17:36:46 +02:00
|
|
|
}
|
2013-10-10 20:43:24 +02:00
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
meta_block->reset(meta);
|
|
|
|
// meta block uses bytewise comparator.
|
|
|
|
iter->reset(meta->NewIterator(BytewiseComparator()));
|
|
|
|
return Status::OK();
|
2012-04-17 17:36:46 +02:00
|
|
|
}
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
// Reads the filter block identified by `filter_handle_value` (an encoded
// BlockHandle) and wraps it in a heap-allocated FilterBlockReader owned by
// the caller. Returns nullptr on any decode/read failure. When
// `filter_size` is non-null it receives the raw filter data size.
FilterBlockReader* BlockBasedTable::ReadFilter (
    const Slice& filter_handle_value,
    BlockBasedTable::Rep* rep,
    size_t* filter_size) {
  // Decode the handle that locates the filter block in the file.
  Slice handle_input = filter_handle_value;
  BlockHandle filter_handle;
  if (!filter_handle.DecodeFrom(&handle_input).ok()) {
    return nullptr;
  }

  // TODO: We might want to unify with ReadBlockFromFile() if we start
  // requiring checksum verification in Table::Open.
  ReadOptions opt;
  BlockContents block;
  const Status read_status =
      ReadBlockContents(rep->file.get(), opt, filter_handle, &block,
                        rep->options.env, false);
  if (!read_status.ok()) {
    return nullptr;
  }

  if (filter_size != nullptr) {
    *filter_size = block.data.size();
  }

  return new FilterBlockReader(rep->options, block.data, block.heap_allocated);
}
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
// Fetches the block at `handle`, consulting the block cache first.
// On success fills `entry`: entry->value is the block; entry->cache_handle
// is non-null iff the block is pinned in the cache (caller must Release it
// via CachableEntry::Release; otherwise caller owns the block).
// Returns Status::Incomplete when read_tier forbids IO and the block is
// not cached.
Status BlockBasedTable::GetBlock(
    const BlockBasedTable* table,
    const BlockHandle& handle,
    const ReadOptions& options,
    const bool for_compaction,
    const Tickers block_cache_miss_ticker,
    const Tickers block_cache_hit_ticker,
    bool* didIO,
    CachableEntry<Block>* entry) {
  bool no_io = options.read_tier == kBlockCacheTier;
  Cache* block_cache = table->rep_->options.block_cache.get();
  Statistics* statistics = table->rep_->options.statistics.get();
  Status s;

  if (block_cache != nullptr) {
    // Key = per-table prefix + varint-encoded block offset.
    char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
    auto key = GetCacheKey(
        table->rep_->cache_key_prefix,
        table->rep_->cache_key_prefix_size,
        handle,
        cache_key
    );

    entry->cache_handle = GetFromBlockCache(
        block_cache,
        key,
        block_cache_miss_ticker,
        block_cache_hit_ticker,
        statistics
    );

    if (entry->cache_handle != nullptr) {
      entry->value =
          reinterpret_cast<Block*>(block_cache->Value(entry->cache_handle));
    } else if (no_io) {
      // Did not find in block_cache and can't do IO
      return Status::Incomplete("no blocking io");
    } else {
      // Cache miss: read from the file, timing the read per call site.
      Histograms histogram = for_compaction ?
          READ_BLOCK_COMPACTION_MICROS : READ_BLOCK_GET_MICROS;
      {
        // block for stop watch
        StopWatch sw(table->rep_->options.env, statistics, histogram);
        s = ReadBlockFromFile(
            table->rep_->file.get(),
            options,
            handle,
            &entry->value,
            table->rep_->options.env,
            didIO
        );
      }
      if (s.ok()) {
        // Populate the cache for subsequent reads.
        if (options.fill_cache && entry->value->isCachable()) {
          entry->cache_handle = block_cache->Insert(
              key, entry->value, entry->value->size(), &DeleteCachedBlock);
          RecordTick(statistics, BLOCK_CACHE_ADD);
        }
      }
    }
  } else if (no_io) {
    // Could not read from block_cache and can't do IO
    return Status::Incomplete("no blocking io");
  } else {
    // No cache configured: plain read, caller owns the block.
    s = ReadBlockFromFile(
        table->rep_->file.get(),
        options,
        handle,
        &entry->value,
        table->rep_->options.env,
        didIO
    );
  }

  return s;
}
|
|
|
|
|
|
|
|
// Convert an index iterator value (i.e., an encoded BlockHandle)
// into an iterator over the contents of the corresponding block.
//
// Lookup order: uncompressed block cache -> compressed block cache
// (uncompressing and promoting on a hit) -> file read. The returned
// iterator's cleanup either releases the cache pin or deletes the block,
// depending on where the block came from.
Iterator* BlockBasedTable::BlockReader(void* arg,
                                       const ReadOptions& options,
                                       const Slice& index_value,
                                       bool* didIO,
                                       bool for_compaction) {
  const bool no_io = (options.read_tier == kBlockCacheTier);
  BlockBasedTable* table = reinterpret_cast<BlockBasedTable*>(arg);
  Cache* block_cache = table->rep_->options.block_cache.get();
  Cache* block_cache_compressed = table->rep_->options.
      block_cache_compressed.get();
  Statistics* statistics = table->rep_->options.statistics.get();
  Block* block = nullptr;        // final, uncompressed block
  Block* cblock = nullptr;       // compressed intermediate (may be promoted)
  Cache::Handle* cache_handle = nullptr;
  Cache::Handle* compressed_cache_handle = nullptr;

  BlockHandle handle;
  Slice input = index_value;
  Status s = handle.DecodeFrom(&input);
  // We intentionally allow extra stuff in index_value so that we
  // can add more features in the future.

  if (!s.ok()) {
    return NewErrorIterator(s);
  }

  if (block_cache != nullptr || block_cache_compressed != nullptr) {
    char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
    char compressed_cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
    Slice key, /* key to the block cache */
        ckey /* key to the compressed block cache */ ;

    // create key for block cache
    if (block_cache != nullptr) {
      key = GetCacheKey(
          table->rep_->cache_key_prefix,
          table->rep_->cache_key_prefix_size,
          handle,
          cache_key
      );
    }

    if (block_cache_compressed != nullptr) {
      ckey = GetCacheKey(
          table->rep_->compressed_cache_key_prefix,
          table->rep_->compressed_cache_key_prefix_size,
          handle,
          compressed_cache_key
      );
    }

    // Lookup uncompressed cache first
    if (block_cache != nullptr) {
      assert(!key.empty());
      cache_handle = block_cache->Lookup(key);
      if (cache_handle != nullptr) {
        block = reinterpret_cast<Block*>(block_cache->Value(cache_handle));
        RecordTick(statistics, BLOCK_CACHE_HIT);
        RecordTick(statistics, BLOCK_CACHE_DATA_HIT);
      } else {
        RecordTick(statistics, BLOCK_CACHE_MISS);
        RecordTick(statistics, BLOCK_CACHE_DATA_MISS);
      }
    }

    // If not found in uncompressed cache, lookup compressed cache
    if (block == nullptr && block_cache_compressed != nullptr) {
      assert(!ckey.empty());
      compressed_cache_handle = block_cache_compressed->Lookup(ckey);

      // if we found in the compressed cache, then uncompress and
      // insert into uncompressed cache
      if (compressed_cache_handle != nullptr) {
        // found compressed block
        cblock = reinterpret_cast<Block*>(block_cache_compressed->
            Value(compressed_cache_handle));
        assert(cblock->compressionType() != kNoCompression);

        // Retrieve the uncompressed contents into a new buffer
        BlockContents contents;
        s = UncompressBlockContents(cblock->data(), cblock->size(),
                                    &contents);

        // Insert uncompressed block into block cache
        if (s.ok()) {
          block = new Block(contents); // uncompressed block
          assert(block->compressionType() == kNoCompression);
          if (block_cache != nullptr && block->isCachable() &&
              options.fill_cache) {
            cache_handle = block_cache->Insert(key, block, block->size(),
                                               &DeleteCachedBlock);
            assert(reinterpret_cast<Block*>(block_cache->Value(cache_handle))
                   == block);
          }
        }
        // Release hold on compressed cache entry
        block_cache_compressed->Release(compressed_cache_handle);
        RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT);
      }
    }

    if (block != nullptr) {
      BumpPerfCount(&perf_context.block_cache_hit_count);
    } else if (no_io) {
      // Did not find in block_cache and can't do IO
      return NewErrorIterator(Status::Incomplete("no blocking io"));
    } else {
      // Both caches missed: read from the file. Keep the block compressed
      // (skip uncompress in ReadBlockFromFile) only when a compressed cache
      // exists to store it.
      Histograms histogram = for_compaction ?
          READ_BLOCK_COMPACTION_MICROS : READ_BLOCK_GET_MICROS;
      { // block for stop watch
        StopWatch sw(table->rep_->options.env, statistics, histogram);
        s = ReadBlockFromFile(
            table->rep_->file.get(),
            options,
            handle,
            &cblock,
            table->rep_->options.env,
            didIO,
            block_cache_compressed == nullptr
        );
      }
      if (s.ok()) {
        assert(cblock->compressionType() == kNoCompression ||
               block_cache_compressed != nullptr);

        // Retrieve the uncompressed contents into a new buffer
        BlockContents contents;
        if (cblock->compressionType() != kNoCompression) {
          s = UncompressBlockContents(cblock->data(), cblock->size(),
                                      &contents);
        }
        if (s.ok()) {
          if (cblock->compressionType() != kNoCompression) {
            block = new Block(contents); // uncompressed block
          } else {
            // Already uncompressed: hand the read block over directly.
            block = cblock;
            cblock = nullptr;
          }
          if (block->isCachable() && options.fill_cache) {
            // Insert compressed block into compressed block cache.
            // Release the hold on the compressed cache entry immediately.
            if (block_cache_compressed != nullptr && cblock != nullptr) {
              compressed_cache_handle = block_cache_compressed->Insert(
                  ckey, cblock, cblock->size(), &DeleteCachedBlock);
              block_cache_compressed->Release(compressed_cache_handle);
              RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS);
              // cblock is now owned by the compressed cache.
              cblock = nullptr;
            }
            // insert into uncompressed block cache
            assert((block->compressionType() == kNoCompression));
            if (block_cache != nullptr) {
              cache_handle = block_cache->Insert(
                  key, block, block->size(), &DeleteCachedBlock);
              RecordTick(statistics, BLOCK_CACHE_ADD);
              assert(reinterpret_cast<Block*>(block_cache->Value(
                  cache_handle))== block);
            }
          }
        }
      }
      // Any compressed block not handed to a cache is freed here.
      if (cblock != nullptr) {
        delete cblock;
      }
    }
  } else if (no_io) {
    // Could not read from block_cache and can't do IO
    return NewErrorIterator(Status::Incomplete("no blocking io"));
  } else {
    // No caches configured at all: plain file read.
    s = ReadBlockFromFile(
        table->rep_->file.get(),
        options,
        handle,
        &block,
        table->rep_->options.env,
        didIO
    );
  }

  Iterator* iter;
  if (block != nullptr) {
    iter = block->NewIterator(&(table->rep_->internal_comparator_));
    if (cache_handle != nullptr) {
      // Block is pinned in the cache: unpin when the iterator dies.
      iter->RegisterCleanup(&ReleaseBlock, block_cache, cache_handle);
    } else {
      // Iterator exclusively owns the block.
      iter->RegisterCleanup(&DeleteBlock, block, nullptr);
    }
  } else {
    iter = NewErrorIterator(s);
  }
  return iter;
}
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
// Returns the table's filter reader, wrapped in a CachableEntry.
// Resolution order: the pre-loaded filter in Rep (no cache handle), then
// the block cache, then (unless no_io) reading the filter from the file
// and inserting it into the cache. A null-value entry means no filter is
// available. Note: the cache key uses the metaindex handle's offset as a
// stand-in for the filter block's identity.
BlockBasedTable::CachableEntry<FilterBlockReader>
BlockBasedTable::GetFilter(bool no_io) const {
  // filter pre-populated
  if (rep_->filter != nullptr) {
    return {rep_->filter.get(), nullptr /* cache handle */};
  }

  if (rep_->options.filter_policy == nullptr /* do not use filter at all */ ||
      rep_->options.block_cache == nullptr /* no block cache at all */) {
    return {nullptr /* filter */, nullptr /* cache handle */};
  }

  // Fetching from the cache
  Cache* block_cache = rep_->options.block_cache.get();
  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  auto key = GetCacheKey(
      rep_->cache_key_prefix,
      rep_->cache_key_prefix_size,
      rep_->metaindex_handle,
      cache_key
  );

  Statistics* statistics = rep_->options.statistics.get();
  auto cache_handle = GetFromBlockCache(
      block_cache,
      key,
      BLOCK_CACHE_FILTER_MISS,
      BLOCK_CACHE_FILTER_HIT,
      statistics
  );

  FilterBlockReader* filter = nullptr;
  if (cache_handle != nullptr) {
    filter = reinterpret_cast<FilterBlockReader*>(
        block_cache->Value(cache_handle));
  } else if (no_io) {
    // Do not invoke any io.
    return CachableEntry<FilterBlockReader>();
  } else {
    // Cache miss: locate the filter via the metaindex and read it.
    size_t filter_size = 0;
    std::unique_ptr<Block> meta;
    std::unique_ptr<Iterator> iter;
    auto s = ReadMetaBlock(rep_, &meta, &iter);

    if (s.ok()) {
      std::string filter_block_key = kFilterBlockPrefix;
      filter_block_key.append(rep_->options.filter_policy->Name());
      iter->Seek(filter_block_key);

      if (iter->Valid() && iter->key() == Slice(filter_block_key)) {
        filter = ReadFilter(iter->value(), rep_, &filter_size);
        assert(filter);
        assert(filter_size > 0);

        // Hand ownership of the new filter to the cache; the returned
        // handle pins it for the caller.
        cache_handle = block_cache->Insert(
            key, filter, filter_size, &DeleteCachedFilter);
        RecordTick(statistics, BLOCK_CACHE_ADD);
      }
    }
  }

  return { filter, cache_handle };
}
|
|
|
|
|
|
|
|
// Get the iterator from the index block.
|
|
|
|
Iterator* BlockBasedTable::IndexBlockReader(const ReadOptions& options) const {
|
|
|
|
if (rep_->index_block) {
|
2014-01-27 22:53:22 +01:00
|
|
|
return rep_->index_block->NewIterator(&(rep_->internal_comparator_));
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// get index block from cache
|
|
|
|
assert (rep_->options.block_cache);
|
|
|
|
bool didIO = false;
|
|
|
|
CachableEntry<Block> entry;
|
|
|
|
|
|
|
|
auto s = GetBlock(
|
|
|
|
this,
|
|
|
|
rep_->index_handle,
|
|
|
|
options,
|
|
|
|
false, /* for compaction */
|
|
|
|
BLOCK_CACHE_INDEX_MISS,
|
|
|
|
BLOCK_CACHE_INDEX_HIT,
|
|
|
|
&didIO,
|
|
|
|
&entry
|
|
|
|
);
|
|
|
|
|
|
|
|
Iterator* iter;
|
|
|
|
if (entry.value != nullptr) {
|
2014-01-27 22:53:22 +01:00
|
|
|
iter = entry.value->NewIterator(&(rep_->internal_comparator_));
|
2013-11-13 07:46:51 +01:00
|
|
|
if (entry.cache_handle) {
|
|
|
|
iter->RegisterCleanup(
|
|
|
|
&ReleaseBlock, rep_->options.block_cache.get(), entry.cache_handle
|
|
|
|
);
|
|
|
|
} else {
|
|
|
|
iter->RegisterCleanup(&DeleteBlock, entry.value, nullptr);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
iter = NewErrorIterator(s);
|
|
|
|
}
|
|
|
|
return iter;
|
|
|
|
}
|
|
|
|
|
2014-01-27 22:53:22 +01:00
|
|
|
// Adapter overload matching the block-function signature expected by
// NewTwoLevelIterator. The EnvOptions and comparator parameters are part of
// that callback signature but are not needed here, so this simply forwards
// to the main BlockReader with no didIO tracking (nullptr).
Iterator* BlockBasedTable::BlockReader(void* arg, const ReadOptions& options,
                                       const EnvOptions& soptions,
                                       const InternalKeyComparator& icomparator,
                                       const Slice& index_value,
                                       bool for_compaction) {
  return BlockReader(arg, options, index_value, nullptr, for_compaction);
}
|
|
|
|
|
2013-08-13 23:04:56 +02:00
|
|
|
// This will be broken if the user specifies an unusual implementation
|
|
|
|
// of Options.comparator, or if the user specifies an unusual
|
|
|
|
// definition of prefixes in Options.filter_policy. In particular, we
|
|
|
|
// require the following three properties:
|
|
|
|
//
|
|
|
|
// 1) key.starts_with(prefix(key))
|
|
|
|
// 2) Compare(prefix(key), key) <= 0.
|
|
|
|
// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
|
2013-08-23 23:49:57 +02:00
|
|
|
//
|
2013-11-13 07:46:51 +01:00
|
|
|
// Otherwise, this method guarantees no I/O will be incurred.
|
|
|
|
//
|
|
|
|
// REQUIRES: this method shouldn't be called while the DB lock is held.
|
2013-10-29 01:54:09 +01:00
|
|
|
bool BlockBasedTable::PrefixMayMatch(const Slice& internal_prefix) {
|
2013-08-13 23:04:56 +02:00
|
|
|
bool may_match = true;
|
|
|
|
Status s;
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
if (!rep_->options.filter_policy) {
|
2013-08-13 23:04:56 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
// To prevent any io operation in this method, we set `read_tier` to make
|
|
|
|
// sure we always read index or filter only when they have already been
|
|
|
|
// loaded to memory.
|
|
|
|
ReadOptions no_io_read_options;
|
|
|
|
no_io_read_options.read_tier = kBlockCacheTier;
|
|
|
|
unique_ptr<Iterator> iiter(
|
|
|
|
IndexBlockReader(no_io_read_options)
|
|
|
|
);
|
2013-08-13 23:04:56 +02:00
|
|
|
iiter->Seek(internal_prefix);
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2013-08-23 23:49:57 +02:00
|
|
|
if (!iiter->Valid()) {
|
2013-08-13 23:04:56 +02:00
|
|
|
// we're past end of file
|
2013-11-14 23:05:15 +01:00
|
|
|
// if it's incomplete, it means that we avoided I/O
|
|
|
|
// and we're not really sure that we're past the end
|
|
|
|
// of the file
|
|
|
|
may_match = iiter->status().IsIncomplete();
|
2013-08-23 23:49:57 +02:00
|
|
|
} else if (ExtractUserKey(iiter->key()).starts_with(
|
2013-11-13 07:46:51 +01:00
|
|
|
ExtractUserKey(internal_prefix))) {
|
2013-08-13 23:04:56 +02:00
|
|
|
// we need to check for this subtle case because our only
|
|
|
|
// guarantee is that "the key is a string >= last key in that data
|
|
|
|
// block" according to the doc/table_format.txt spec.
|
|
|
|
//
|
|
|
|
// Suppose iiter->key() starts with the desired prefix; it is not
|
|
|
|
// necessarily the case that the corresponding data block will
|
|
|
|
// contain the prefix, since iiter->key() need not be in the
|
|
|
|
// block. However, the next data block may contain the prefix, so
|
|
|
|
// we return true to play it safe.
|
|
|
|
may_match = true;
|
|
|
|
} else {
|
|
|
|
// iiter->key() does NOT start with the desired prefix. Because
|
|
|
|
// Seek() finds the first key that is >= the seek target, this
|
|
|
|
// means that iiter->key() > prefix. Thus, any data blocks coming
|
|
|
|
// after the data block corresponding to iiter->key() cannot
|
|
|
|
// possibly contain the key. Thus, the corresponding data block
|
|
|
|
// is the only one which could potentially contain the prefix.
|
|
|
|
Slice handle_value = iiter->value();
|
|
|
|
BlockHandle handle;
|
|
|
|
s = handle.DecodeFrom(&handle_value);
|
|
|
|
assert(s.ok());
|
2013-11-13 07:46:51 +01:00
|
|
|
auto filter_entry = GetFilter(true /* no io */);
|
|
|
|
may_match =
|
2013-11-14 23:05:15 +01:00
|
|
|
filter_entry.value == nullptr ||
|
2013-11-13 07:46:51 +01:00
|
|
|
filter_entry.value->PrefixMayMatch(handle.offset(), internal_prefix);
|
|
|
|
filter_entry.Release(rep_->options.block_cache.get());
|
2013-08-13 23:04:56 +02:00
|
|
|
}
|
2013-08-23 23:49:57 +02:00
|
|
|
|
2013-11-22 23:14:05 +01:00
|
|
|
Statistics* statistics = rep_->options.statistics.get();
|
|
|
|
RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
|
2013-08-23 23:49:57 +02:00
|
|
|
if (!may_match) {
|
2013-11-22 23:14:05 +01:00
|
|
|
RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
|
2013-08-23 23:49:57 +02:00
|
|
|
}
|
|
|
|
|
2013-08-13 23:04:56 +02:00
|
|
|
return may_match;
|
|
|
|
}
|
|
|
|
|
2013-10-29 01:54:09 +01:00
|
|
|
// Creates a two-level iterator over the table, first filtering by prefix
// (when the ReadOptions carry one) so files that provably cannot contain
// the prefix incur no I/O at all.
Iterator* BlockBasedTable::NewIterator(const ReadOptions& options) {
  if (options.prefix != nullptr) {
    // Wrap the user prefix in an internal key so it can be compared against
    // the internal keys stored in the index block.
    InternalKey prefix_as_internal_key(*options.prefix, 0, kTypeValue);
    const bool file_may_contain_prefix =
        PrefixMayMatch(prefix_as_internal_key.Encode());
    if (!file_may_contain_prefix) {
      // nothing in this file can match the prefix, so we should not
      // bother doing I/O to this file when iterating.
      return NewEmptyIterator();
    }
  }

  return NewTwoLevelIterator(IndexBlockReader(options),
                             &BlockBasedTable::BlockReader,
                             const_cast<BlockBasedTable*>(this), options,
                             rep_->soptions, rep_->internal_comparator_);
}
|
|
|
|
|
2013-10-30 18:52:33 +01:00
|
|
|
// Point lookup driven by caller-supplied callbacks: scans index entries at
// or after `key`, skips data blocks the bloom filter rules out, and invokes
// `result_handler` for each entry in the remaining block(s) until the
// handler returns false. `mark_key_may_exist_handler` is invoked when the
// read tier forbids I/O and the block is not cached, i.e. existence cannot
// be ruled out.
Status BlockBasedTable::Get(
    const ReadOptions& readOptions, const Slice& key, void* handle_context,
    bool (*result_handler)(void* handle_context, const ParsedInternalKey& k,
                           const Slice& v, bool didIO),
    void (*mark_key_may_exist_handler)(void* handle_context)) {
  Status s;
  Iterator* iiter = IndexBlockReader(readOptions);
  // Only a cached filter may be used when the read tier forbids I/O.
  auto filter_entry = GetFilter(readOptions.read_tier == kBlockCacheTier);
  FilterBlockReader* filter = filter_entry.value;
  bool done = false;
  for (iiter->Seek(key); iiter->Valid() && !done; iiter->Next()) {
    Slice handle_value = iiter->value();

    BlockHandle handle;
    // Only trust the filter's "not present" answer when the index entry
    // decodes to a valid block handle.
    bool may_not_exist_in_filter =
        filter != nullptr &&
        handle.DecodeFrom(&handle_value).ok() &&
        !filter->KeyMayMatch(handle.offset(), key);

    if (may_not_exist_in_filter) {
      // Not found
      // TODO: think about interaction with Merge. If a user key cannot
      // cross one data block, we should be fine.
      RecordTick(rep_->options.statistics.get(), BLOOM_FILTER_USEFUL);
      break;
    } else {
      bool didIO = false;
      unique_ptr<Iterator> block_iter(
          BlockReader(this, readOptions, iiter->value(), &didIO));

      if (readOptions.read_tier && block_iter->status().IsIncomplete()) {
        // couldn't get block from block_cache
        // Update Saver.state to Found because we are only looking for whether
        // we can guarantee the key is not there when "no_io" is set
        (*mark_key_may_exist_handler)(handle_context);
        break;
      }

      // Call the *saver function on each entry/block until it returns false
      for (block_iter->Seek(key); block_iter->Valid(); block_iter->Next()) {
        ParsedInternalKey parsed_key;
        if (!ParseInternalKey(block_iter->key(), &parsed_key)) {
          // NOTE(review): parsed_key is still passed to result_handler below
          // even when parsing failed (it is then unmodified/indeterminate),
          // and this Corruption status is overwritten by
          // `s = block_iter->status()` after the loop — confirm intended.
          s = Status::Corruption(Slice());
        }

        if (!(*result_handler)(handle_context, parsed_key, block_iter->value(),
                               didIO)) {
          // Handler is satisfied; stop scanning further blocks too.
          done = true;
          break;
        }
      }
      s = block_iter->status();
    }
  }

  // Unpin the filter's cache handle (no-op when the filter was pre-loaded
  // or absent).
  filter_entry.Release(rep_->options.block_cache.get());
  if (s.ok()) {
    s = iiter->status();
  }
  delete iiter;
  return s;
}
|
|
|
|
|
2014-01-27 22:53:22 +01:00
|
|
|
// Saver callback for TEST_KeyInCache: records whether the lookup touched
// disk, then stops the scan after the first entry.
bool SaveDidIO(void* arg, const ParsedInternalKey& key, const Slice& value,
               bool didIO) {
  bool* did_io_out = reinterpret_cast<bool*>(arg);
  *did_io_out = didIO;
  // One entry is enough to answer the question; abort further scanning.
  return false;
}
|
2013-10-29 01:54:09 +01:00
|
|
|
bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
|
|
|
|
const Slice& key) {
|
|
|
|
// We use Get() as it has logic that checks whether we read the
|
2013-02-01 00:20:24 +01:00
|
|
|
// block from the disk or not.
|
|
|
|
bool didIO = false;
|
2013-10-29 01:54:09 +01:00
|
|
|
Status s = Get(options, key, &didIO, SaveDidIO);
|
2013-02-01 00:20:24 +01:00
|
|
|
assert(s.ok());
|
|
|
|
return !didIO;
|
|
|
|
}
|
2012-04-17 17:36:46 +02:00
|
|
|
|
2013-10-29 01:54:09 +01:00
|
|
|
uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key) {
|
2013-11-13 07:46:51 +01:00
|
|
|
Iterator* index_iter = IndexBlockReader(ReadOptions());
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
index_iter->Seek(key);
|
|
|
|
uint64_t result;
|
|
|
|
if (index_iter->Valid()) {
|
|
|
|
BlockHandle handle;
|
|
|
|
Slice input = index_iter->value();
|
|
|
|
Status s = handle.DecodeFrom(&input);
|
|
|
|
if (s.ok()) {
|
|
|
|
result = handle.offset();
|
|
|
|
} else {
|
|
|
|
// Strange: we can't decode the block handle in the index block.
|
|
|
|
// We'll just return the offset of the metaindex block, which is
|
|
|
|
// close to the whole file size for this case.
|
|
|
|
result = rep_->metaindex_handle.offset();
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// key is past the last key in the file. Approximate the offset
|
|
|
|
// by returning the offset of the metaindex block (which is
|
|
|
|
// right near the end of the file).
|
|
|
|
result = rep_->metaindex_handle.offset();
|
|
|
|
}
|
|
|
|
delete index_iter;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2014-02-20 00:38:57 +01:00
|
|
|
bool BlockBasedTable::TEST_filter_block_preloaded() const {
|
|
|
|
return rep_->filter != nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool BlockBasedTable::TEST_index_block_preloaded() const {
|
|
|
|
return rep_->index_block != nullptr;
|
|
|
|
}
|
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
} // namespace rocksdb
|