// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/block_based_table_reader.h"
|
2013-06-14 02:25:09 +02:00
|
|
|
|
2016-08-13 01:34:11 +02:00
|
|
|
#include <algorithm>
|
|
|
|
#include <limits>
|
2014-03-01 03:19:07 +01:00
|
|
|
#include <string>
|
|
|
|
#include <utility>
|
2016-08-01 23:50:19 +02:00
|
|
|
#include <vector>
|
2014-03-01 03:19:07 +01:00
|
|
|
|
2013-08-13 23:04:56 +02:00
|
|
|
#include "db/dbformat.h"
|
Introduce FullMergeV2 (eliminate memcpy from merge operators)
Summary:
This diff update the code to pin the merge operator operands while the merge operation is done, so that we can eliminate the memcpy cost, to do that we need a new public API for FullMerge that replace the std::deque<std::string> with std::vector<Slice>
This diff is stacked on top of D56493 and D56511
In this diff we
- Update FullMergeV2 arguments to be encapsulated in MergeOperationInput and MergeOperationOutput which will make it easier to add new arguments in the future
- Replace std::deque<std::string> with std::vector<Slice> to pass operands
- Replace MergeContext std::deque with std::vector (based on a simple benchmark I ran https://gist.github.com/IslamAbdelRahman/78fc86c9ab9f52b1df791e58943fb187)
- Allow FullMergeV2 output to be an existing operand
```
[Everything in Memtable | 10K operands | 10 KB each | 1 operand per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="mergerandom,readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --merge_keys=10000 --num=10000 --disable_auto_compactions --value_size=10240 --write_buffer_size=1000000000
[FullMergeV2]
readseq : 0.607 micros/op 1648235 ops/sec; 16121.2 MB/s
readseq : 0.478 micros/op 2091546 ops/sec; 20457.2 MB/s
readseq : 0.252 micros/op 3972081 ops/sec; 38850.5 MB/s
readseq : 0.237 micros/op 4218328 ops/sec; 41259.0 MB/s
readseq : 0.247 micros/op 4043927 ops/sec; 39553.2 MB/s
[master]
readseq : 3.935 micros/op 254140 ops/sec; 2485.7 MB/s
readseq : 3.722 micros/op 268657 ops/sec; 2627.7 MB/s
readseq : 3.149 micros/op 317605 ops/sec; 3106.5 MB/s
readseq : 3.125 micros/op 320024 ops/sec; 3130.1 MB/s
readseq : 4.075 micros/op 245374 ops/sec; 2400.0 MB/s
```
```
[Everything in Memtable | 10K operands | 10 KB each | 10 operand per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="mergerandom,readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --merge_keys=1000 --num=10000 --disable_auto_compactions --value_size=10240 --write_buffer_size=1000000000
[FullMergeV2]
readseq : 3.472 micros/op 288018 ops/sec; 2817.1 MB/s
readseq : 2.304 micros/op 434027 ops/sec; 4245.2 MB/s
readseq : 1.163 micros/op 859845 ops/sec; 8410.0 MB/s
readseq : 1.192 micros/op 838926 ops/sec; 8205.4 MB/s
readseq : 1.250 micros/op 800000 ops/sec; 7824.7 MB/s
[master]
readseq : 24.025 micros/op 41623 ops/sec; 407.1 MB/s
readseq : 18.489 micros/op 54086 ops/sec; 529.0 MB/s
readseq : 18.693 micros/op 53495 ops/sec; 523.2 MB/s
readseq : 23.621 micros/op 42335 ops/sec; 414.1 MB/s
readseq : 18.775 micros/op 53262 ops/sec; 521.0 MB/s
```
```
[Everything in Block cache | 10K operands | 10 KB each | 1 operand per key]
[FullMergeV2]
$ DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --num=100000 --db="/dev/shm/merge-random-10K-10KB" --cache_size=1000000000 --use_existing_db --disable_auto_compactions
readseq : 14.741 micros/op 67837 ops/sec; 663.5 MB/s
readseq : 1.029 micros/op 971446 ops/sec; 9501.6 MB/s
readseq : 0.974 micros/op 1026229 ops/sec; 10037.4 MB/s
readseq : 0.965 micros/op 1036080 ops/sec; 10133.8 MB/s
readseq : 0.943 micros/op 1060657 ops/sec; 10374.2 MB/s
[master]
readseq : 16.735 micros/op 59755 ops/sec; 584.5 MB/s
readseq : 3.029 micros/op 330151 ops/sec; 3229.2 MB/s
readseq : 3.136 micros/op 318883 ops/sec; 3119.0 MB/s
readseq : 3.065 micros/op 326245 ops/sec; 3191.0 MB/s
readseq : 3.014 micros/op 331813 ops/sec; 3245.4 MB/s
```
```
[Everything in Block cache | 10K operands | 10 KB each | 10 operand per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --num=100000 --db="/dev/shm/merge-random-10-operands-10K-10KB" --cache_size=1000000000 --use_existing_db --disable_auto_compactions
[FullMergeV2]
readseq : 24.325 micros/op 41109 ops/sec; 402.1 MB/s
readseq : 1.470 micros/op 680272 ops/sec; 6653.7 MB/s
readseq : 1.231 micros/op 812347 ops/sec; 7945.5 MB/s
readseq : 1.091 micros/op 916590 ops/sec; 8965.1 MB/s
readseq : 1.109 micros/op 901713 ops/sec; 8819.6 MB/s
[master]
readseq : 27.257 micros/op 36687 ops/sec; 358.8 MB/s
readseq : 4.443 micros/op 225073 ops/sec; 2201.4 MB/s
readseq : 5.830 micros/op 171526 ops/sec; 1677.7 MB/s
readseq : 4.173 micros/op 239635 ops/sec; 2343.8 MB/s
readseq : 4.150 micros/op 240963 ops/sec; 2356.8 MB/s
```
Test Plan: COMPILE_WITH_ASAN=1 make check -j64
Reviewers: yhchiang, andrewkr, sdong
Reviewed By: sdong
Subscribers: lovro, andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D57075
2016-07-20 18:49:03 +02:00
|
|
|
#include "db/pinned_iterators_manager.h"
|
2013-08-13 23:04:56 +02:00
|
|
|
|
2014-03-01 03:19:07 +01:00
|
|
|
#include "rocksdb/cache.h"
|
2013-08-23 17:38:13 +02:00
|
|
|
#include "rocksdb/comparator.h"
|
|
|
|
#include "rocksdb/env.h"
|
|
|
|
#include "rocksdb/filter_policy.h"
|
2014-03-01 03:19:07 +01:00
|
|
|
#include "rocksdb/iterator.h"
|
2013-08-23 17:38:13 +02:00
|
|
|
#include "rocksdb/options.h"
|
|
|
|
#include "rocksdb/statistics.h"
|
2013-10-29 01:54:09 +01:00
|
|
|
#include "rocksdb/table.h"
|
2014-04-22 02:49:47 +02:00
|
|
|
#include "rocksdb/table_properties.h"
|
2013-06-14 02:25:09 +02:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "table/block.h"
|
2014-09-08 19:37:05 +02:00
|
|
|
#include "table/block_based_filter_block.h"
|
2015-02-05 02:03:57 +01:00
|
|
|
#include "table/block_based_table_factory.h"
|
2014-06-13 04:03:22 +02:00
|
|
|
#include "table/block_prefix_index.h"
|
2015-12-16 03:20:10 +01:00
|
|
|
#include "table/filter_block.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "table/format.h"
|
2015-12-16 03:20:10 +01:00
|
|
|
#include "table/full_filter_block.h"
|
|
|
|
#include "table/get_context.h"
|
2015-10-13 00:06:38 +02:00
|
|
|
#include "table/internal_iterator.h"
|
2013-12-06 01:51:26 +01:00
|
|
|
#include "table/meta_blocks.h"
|
2015-12-16 03:20:10 +01:00
|
|
|
#include "table/persistent_cache_helper.h"
|
2016-10-19 01:59:37 +02:00
|
|
|
#include "table/sst_file_writer_collectors.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "table/two_level_iterator.h"
|
2013-06-14 02:25:09 +02:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "util/coding.h"
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
#include "util/file_reader_writer.h"
|
[RocksDB] Added nano second stopwatch and new perf counters to track block read cost
Summary: The pupose of this diff is to expose per user-call level precise timing of block read, so that we can answer questions like: a Get() costs me 100ms, is that somehow related to loading blocks from file system, or sth else? We will answer that with EXACTLY how many blocks have been read, how much time was spent on transfering the bytes from os, how much time was spent on checksum verification and how much time was spent on block decompression, just for that one Get. A nano second stopwatch was introduced to track time with higher precision. The cost/precision of the stopwatch is also measured in unit-test. On my dev box, retrieving one time instance costs about 30ns, on average. The deviation of timing results is good enough to track 100ns-1us level events. And the overhead could be safely ignored for 100us level events (10000 instances/s), for example, a viewstate thrift call.
Test Plan: perf_context_test, also testing with viewstate shadow traffic.
Reviewers: dhruba
Reviewed By: dhruba
CC: leveldb, xjin
Differential Revision: https://reviews.facebook.net/D12351
2013-06-04 08:09:15 +02:00
|
|
|
#include "util/perf_context_imp.h"
|
2013-06-14 02:25:09 +02:00
|
|
|
#include "util/stop_watch.h"
|
2014-11-25 05:44:49 +01:00
|
|
|
#include "util/string_util.h"
|
2016-08-24 03:20:41 +02:00
|
|
|
#include "util/sync_point.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
namespace rocksdb {

extern const uint64_t kBlockBasedTableMagicNumber;
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;
using std::unique_ptr;

typedef BlockBasedTable::IndexReader IndexReader;

namespace {

// Read the block identified by "handle" from "file".
// The only relevant option is options.verify_checksums for now.
// On failure return non-OK.
// On success fill *result and return OK - caller owns *result.
// @param compression_dict Data for presetting the compression library's
//    dictionary.
Status ReadBlockFromFile(RandomAccessFileReader* file, const Footer& footer,
                         const ReadOptions& options, const BlockHandle& handle,
                         std::unique_ptr<Block>* result,
                         const ImmutableCFOptions& ioptions, bool do_uncompress,
                         const Slice& compression_dict,
                         const PersistentCacheOptions& cache_options,
                         SequenceNumber global_seqno,
                         size_t read_amp_bytes_per_bit) {
  BlockContents contents;
  Status s = ReadBlockContents(file, footer, options, handle, &contents,
                               ioptions, do_uncompress, compression_dict,
                               cache_options);
  if (s.ok()) {
    result->reset(new Block(std::move(contents), global_seqno,
                            read_amp_bytes_per_bit, ioptions.statistics));
  }

  return s;
}

// Delete the resource that is held by the iterator.
template <class ResourceType>
void DeleteHeldResource(void* arg, void* ignored) {
  delete reinterpret_cast<ResourceType*>(arg);
}

// Delete the entry that resides in the cache.
template <class Entry>
void DeleteCachedEntry(const Slice& key, void* value) {
  auto entry = reinterpret_cast<Entry*>(value);
  delete entry;
}

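// Deleter callbacks for filter and index entries placed in the block cache;
// declared here, defined later in this translation unit.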
void DeleteCachedFilterEntry(const Slice& key, void* value);
void DeleteCachedIndexEntry(const Slice& key, void* value);

// Release the cached entry and decrement its ref count.
void ReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle);
}

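// Compose a cache key by copying `cache_key_prefix` into `cache_key` and
// appending `offset` as a varint64. Returns a Slice over the caller-provided
// `cache_key` buffer.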
Slice GetCacheKeyFromOffset(const char* cache_key_prefix,
                            size_t cache_key_prefix_size, uint64_t offset,
                            char* cache_key) {
  assert(cache_key != nullptr);
  assert(cache_key_prefix_size != 0);
  assert(cache_key_prefix_size <= BlockBasedTable::kMaxCacheKeyPrefixSize);
  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
  char* end = EncodeVarint64(cache_key + cache_key_prefix_size, offset);
  return Slice(cache_key, static_cast<size_t>(end - cache_key));
}

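// Look up `key` in the block cache. Updates the overall and block-type
// specific hit/miss tickers, the bytes-read statistics and the perf context,
// and returns the cache handle (nullptr on a miss).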
Cache::Handle* GetEntryFromCache(Cache* block_cache, const Slice& key,
                                 Tickers block_cache_miss_ticker,
                                 Tickers block_cache_hit_ticker,
                                 Statistics* statistics) {
  auto cache_handle = block_cache->Lookup(key, statistics);
  if (cache_handle != nullptr) {
    PERF_COUNTER_ADD(block_cache_hit_count, 1);
    // overall cache hit
    RecordTick(statistics, BLOCK_CACHE_HIT);
    // total bytes read from cache
    RecordTick(statistics, BLOCK_CACHE_BYTES_READ,
               block_cache->GetUsage(cache_handle));
    // block-type specific cache hit
    RecordTick(statistics, block_cache_hit_ticker);
  } else {
    // overall cache miss
    RecordTick(statistics, BLOCK_CACHE_MISS);
    // block-type specific cache miss
    RecordTick(statistics, block_cache_miss_ticker);
  }

  return cache_handle;
}

}  // namespace

// -- IndexReader and its subclasses
// IndexReader is the interface that provides the functionality for index
// access.
class BlockBasedTable::IndexReader {
 public:
  explicit IndexReader(const Comparator* comparator, Statistics* stats)
      : comparator_(comparator), statistics_(stats) {}

  virtual ~IndexReader() {}

  // Create an iterator for index access.
  // If `iter` is not null, update it in place and return it;
  // if it is null, create and return a new iterator.
  virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
                                        bool total_order_seek = true) = 0;

  // The size of the index.
  virtual size_t size() const = 0;
  // Memory usage of the index block
  virtual size_t usable_size() const = 0;

  // return the statistics pointer
  virtual Statistics* statistics() const { return statistics_; }
  // Report an approximation of how much memory has been used other than memory
  // that was allocated in block cache.
  virtual size_t ApproximateMemoryUsage() const = 0;

 protected:
  const Comparator* comparator_;

 private:
  Statistics* statistics_;
};

// Index that allows binary search lookup for the first key of each block.
// This class can be viewed as a thin wrapper for `Block` class which already
// supports binary search.
class BinarySearchIndexReader : public IndexReader {
 public:
  // Read index from the file and create an instance for
  // `BinarySearchIndexReader`.
  // On success, index_reader will be populated; otherwise it will remain
  // unmodified.
  static Status Create(RandomAccessFileReader* file, const Footer& footer,
                       const BlockHandle& index_handle,
                       const ImmutableCFOptions& ioptions,
                       const Comparator* comparator, IndexReader** index_reader,
                       const PersistentCacheOptions& cache_options) {
    std::unique_ptr<Block> index_block;
    auto s = ReadBlockFromFile(
        file, footer, ReadOptions(), index_handle, &index_block, ioptions,
        true /* decompress */, Slice() /*compression dict*/, cache_options,
        kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */);

    if (s.ok()) {
      *index_reader = new BinarySearchIndexReader(
          comparator, std::move(index_block), ioptions.statistics);
    }

    return s;
  }

  virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
                                        bool dont_care = true) override {
    return index_block_->NewIterator(comparator_, iter, true);
  }

  virtual size_t size() const override { return index_block_->size(); }
  virtual size_t usable_size() const override {
    return index_block_->usable_size();
  }

  virtual size_t ApproximateMemoryUsage() const override {
    assert(index_block_);
    return index_block_->ApproximateMemoryUsage();
  }

 private:
  BinarySearchIndexReader(const Comparator* comparator,
                          std::unique_ptr<Block>&& index_block,
                          Statistics* stats)
      : IndexReader(comparator, stats), index_block_(std::move(index_block)) {
    assert(index_block_ != nullptr);
  }
  std::unique_ptr<Block> index_block_;
};

// Index that leverages an internal hash table to quicken the lookup for a
// given key.
class HashIndexReader : public IndexReader {
 public:
  static Status Create(const SliceTransform* hash_key_extractor,
                       const Footer& footer, RandomAccessFileReader* file,
                       const ImmutableCFOptions& ioptions,
                       const Comparator* comparator,
                       const BlockHandle& index_handle,
                       InternalIterator* meta_index_iter,
                       IndexReader** index_reader,
                       bool hash_index_allow_collision,
                       const PersistentCacheOptions& cache_options) {
    std::unique_ptr<Block> index_block;
    auto s = ReadBlockFromFile(
        file, footer, ReadOptions(), index_handle, &index_block, ioptions,
        true /* decompress */, Slice() /*compression dict*/, cache_options,
        kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */);

    if (!s.ok()) {
      return s;
    }

    // Note, failure to create prefix hash index does not need to be a
    // hard error. We can still fall back to the original binary search index.
    // So, Create will succeed regardless, from this point on.

    auto new_index_reader =
        new HashIndexReader(comparator, std::move(index_block),
                            ioptions.statistics);
    *index_reader = new_index_reader;

    // Get prefixes block
    BlockHandle prefixes_handle;
    s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesBlock,
                      &prefixes_handle);
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    // Get index metadata block
    BlockHandle prefixes_meta_handle;
    s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesMetadataBlock,
                      &prefixes_meta_handle);
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    // Read contents for the blocks
    BlockContents prefixes_contents;
    s = ReadBlockContents(file, footer, ReadOptions(), prefixes_handle,
                          &prefixes_contents, ioptions, true /* decompress */,
                          Slice() /*compression dict*/, cache_options);
    if (!s.ok()) {
      return s;
    }
    BlockContents prefixes_meta_contents;
    s = ReadBlockContents(file, footer, ReadOptions(), prefixes_meta_handle,
                          &prefixes_meta_contents, ioptions,
                          true /* decompress */,
                          Slice() /*compression dict*/, cache_options);
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    BlockPrefixIndex* prefix_index = nullptr;
    s = BlockPrefixIndex::Create(hash_key_extractor, prefixes_contents.data,
                                 prefixes_meta_contents.data, &prefix_index);
    // TODO: log error
    if (s.ok()) {
      new_index_reader->index_block_->SetBlockPrefixIndex(prefix_index);
    }

    return Status::OK();
  }

  virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
                                        bool total_order_seek = true) override {
    return index_block_->NewIterator(comparator_, iter, total_order_seek);
  }

  virtual size_t size() const override { return index_block_->size(); }
  virtual size_t usable_size() const override {
    return index_block_->usable_size();
  }

  virtual size_t ApproximateMemoryUsage() const override {
    assert(index_block_);
    return index_block_->ApproximateMemoryUsage() +
           prefixes_contents_.data.size();
  }

 private:
  HashIndexReader(const Comparator* comparator,
                  std::unique_ptr<Block>&& index_block, Statistics* stats)
      : IndexReader(comparator, stats), index_block_(std::move(index_block)) {
    assert(index_block_ != nullptr);
  }

  ~HashIndexReader() {}

  std::unique_ptr<Block> index_block_;
  BlockContents prefixes_contents_;
};

// CachableEntry represents the entries that *may* be fetched from block cache.
// field `value` is the item we want to get.
// field `cache_handle` is the cache handle to the block cache. If the value
// was not read from cache, `cache_handle` will be nullptr.
template <class TValue>
struct BlockBasedTable::CachableEntry {
  CachableEntry(TValue* _value, Cache::Handle* _cache_handle)
      : value(_value), cache_handle(_cache_handle) {}
  CachableEntry() : CachableEntry(nullptr, nullptr) {}
  void Release(Cache* cache) {
    if (cache_handle) {
      cache->Release(cache_handle);
      value = nullptr;
      cache_handle = nullptr;
    }
  }
  bool IsSet() const { return cache_handle != nullptr; }

  TValue* value = nullptr;
  // if the entry is from the cache, cache_handle will be populated.
  Cache::Handle* cache_handle = nullptr;
};

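// Rep holds the per-table state shared by every read of this table: the file
// reader, footer, cache key prefixes, index/filter readers, table properties
// and the global sequence number, if any.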
struct BlockBasedTable::Rep {
  Rep(const ImmutableCFOptions& _ioptions, const EnvOptions& _env_options,
      const BlockBasedTableOptions& _table_opt,
      const InternalKeyComparator& _internal_comparator, bool skip_filters)
      : ioptions(_ioptions),
        env_options(_env_options),
        table_options(_table_opt),
        filter_policy(skip_filters ? nullptr : _table_opt.filter_policy.get()),
        internal_comparator(_internal_comparator),
        filter_type(FilterType::kNoFilter),
        whole_key_filtering(_table_opt.whole_key_filtering),
        prefix_filtering(true),
        range_del_block(nullptr),
        global_seqno(kDisableGlobalSequenceNumber) {}

  const ImmutableCFOptions& ioptions;
  const EnvOptions& env_options;
  const BlockBasedTableOptions& table_options;
  const FilterPolicy* const filter_policy;
  const InternalKeyComparator& internal_comparator;
  Status status;
  unique_ptr<RandomAccessFileReader> file;
  char cache_key_prefix[kMaxCacheKeyPrefixSize];
  size_t cache_key_prefix_size = 0;
  char persistent_cache_key_prefix[kMaxCacheKeyPrefixSize];
  size_t persistent_cache_key_prefix_size = 0;
  char compressed_cache_key_prefix[kMaxCacheKeyPrefixSize];
  size_t compressed_cache_key_prefix_size = 0;
  uint64_t dummy_index_reader_offset =
      0;  // ID that is unique for the block cache.
  PersistentCacheOptions persistent_cache_options;

  // Footer contains the fixed table information
  Footer footer;
  // index_reader and filter will be populated and used only when
  // options.block_cache is nullptr; otherwise we will get the index block via
  // the block cache.
  unique_ptr<IndexReader> index_reader;
  unique_ptr<FilterBlockReader> filter;

  enum class FilterType {
    kNoFilter,
    kFullFilter,
    kBlockFilter,
  };
  FilterType filter_type;
  BlockHandle filter_handle;

  std::shared_ptr<const TableProperties> table_properties;
  // Block containing the data for the compression dictionary. We take
  // ownership for the entire block struct, even though we only use its Slice
  // member. This is easier because the Slice member depends on the continued
  // existence of another member ("allocation").
  std::unique_ptr<const BlockContents> compression_dict_block;
  BlockBasedTableOptions::IndexType index_type;
  bool hash_index_allow_collision;
  bool whole_key_filtering;
  bool prefix_filtering;
  // TODO(kailiu) It is very ugly to use internal key in table, since table
  // module should not be relying on db module. However to make things easier
  // and compatible with existing code, we introduce a wrapper that allows
  // block to extract prefix without knowing if a key is internal or not.
  unique_ptr<SliceTransform> internal_prefix_transform;

  // Only used in level 0 files:
  // when pin_l0_filter_and_index_blocks_in_cache is true, we still use the
  // LRU cache, but we always keep the filter & index block's handle checked
  // out here (i.e. we don't call Release()), plus the parsed-out objects.
  // The LRU cache will therefore never push them out, hence they're pinned.
  CachableEntry<FilterBlockReader> filter_entry;
  CachableEntry<IndexReader> index_entry;
  unique_ptr<Block> range_del_block;

  // If global_seqno is used, all keys in this file will have the same
  // seqno with value `global_seqno`.
  //
  // A value of kDisableGlobalSequenceNumber means that this feature is
  // disabled and every key has its own seqno.
  SequenceNumber global_seqno;
};

BlockBasedTable::~BlockBasedTable() {
  Close();
  delete rep_;
}

// Helper function to setup the cache key's prefix for the Table.
void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep, uint64_t file_size) {
  assert(kMaxCacheKeyPrefixSize >= 10);
  rep->cache_key_prefix_size = 0;
  rep->compressed_cache_key_prefix_size = 0;
  if (rep->table_options.block_cache != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache.get(), rep->file->file(),
                        &rep->cache_key_prefix[0], &rep->cache_key_prefix_size);
    // Create dummy offset of index reader which is beyond the file size.
    rep->dummy_index_reader_offset =
        file_size + rep->table_options.block_cache->NewId();
  }
  if (rep->table_options.persistent_cache != nullptr) {
    GenerateCachePrefix(/*cache=*/nullptr, rep->file->file(),
                        &rep->persistent_cache_key_prefix[0],
                        &rep->persistent_cache_key_prefix_size);
  }
  if (rep->table_options.block_cache_compressed != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache_compressed.get(),
                        rep->file->file(), &rep->compressed_cache_key_prefix[0],
                        &rep->compressed_cache_key_prefix_size);
  }
}

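// Derive a cache key prefix for `file`: prefer the file's own unique ID and,
// if the file cannot provide one, fall back to a fresh ID obtained from the
// cache (when a cache is supplied).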
void BlockBasedTable::GenerateCachePrefix(Cache* cc,
    RandomAccessFile* file, char* buffer, size_t* size) {

  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (cc && *size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}

void BlockBasedTable::GenerateCachePrefix(Cache* cc,
    WritableFile* file, char* buffer, size_t* size) {

  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (*size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}

namespace {
// Return true if `user_prop_name` in table_properties has a `true` value,
// or if the property is not present (for backward compatibility).
bool IsFeatureSupported(const TableProperties& table_properties,
                        const std::string& user_prop_name, Logger* info_log) {
  auto& props = table_properties.user_collected_properties;
  auto pos = props.find(user_prop_name);
  // Older version doesn't have this value set. Skip this check.
  if (pos != props.end()) {
    if (pos->second == kPropFalse) {
      return false;
    } else if (pos->second != kPropTrue) {
      Log(InfoLogLevel::WARN_LEVEL, info_log,
          "Property %s has invalidate value %s", user_prop_name.c_str(),
          pos->second.c_str());
    }
  }
  return true;
}

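// Extract the global sequence number from the user-collected table properties.
// Returns kDisableGlobalSequenceNumber for non-external files and for external
// files with version < 2; otherwise decodes and returns the stored seqno,
// logging an error if it exceeds kMaxSequenceNumber.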
SequenceNumber GetGlobalSequenceNumber(const TableProperties& table_properties,
                                       Logger* info_log) {
  auto& props = table_properties.user_collected_properties;

  auto version_pos = props.find(ExternalSstFilePropertyNames::kVersion);
  auto seqno_pos = props.find(ExternalSstFilePropertyNames::kGlobalSeqno);

  if (version_pos == props.end()) {
    if (seqno_pos != props.end()) {
      // This is not an external sst file, global_seqno is not supported.
      assert(false);
      Log(InfoLogLevel::ERROR_LEVEL, info_log,
          "A non-external sst file have global seqno property with value %s",
          seqno_pos->second.c_str());
    }
    return kDisableGlobalSequenceNumber;
  }

  uint32_t version = DecodeFixed32(version_pos->second.c_str());
  if (version < 2) {
    if (seqno_pos != props.end() || version != 1) {
      // This is a v1 external sst file, global_seqno is not supported.
      assert(false);
      Log(InfoLogLevel::ERROR_LEVEL, info_log,
          "An external sst file with version %u have global seqno property "
          "with value %s",
          version, seqno_pos->second.c_str());
    }
    return kDisableGlobalSequenceNumber;
  }

  SequenceNumber global_seqno = DecodeFixed64(seqno_pos->second.c_str());

  if (global_seqno > kMaxSequenceNumber) {
    assert(false);
    Log(InfoLogLevel::ERROR_LEVEL, info_log,
        "An external sst file with version %u have global seqno property "
        "with value %llu, which is greater than kMaxSequenceNumber",
        version, global_seqno);
  }

  return global_seqno;
}
}  // namespace

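// Build the block cache key for a block: the table's cache key prefix followed
// by the block handle's offset encoded as a varint64.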
Slice BlockBasedTable::GetCacheKey(const char* cache_key_prefix,
                                   size_t cache_key_prefix_size,
                                   const BlockHandle& handle, char* cache_key) {
  assert(cache_key != nullptr);
  assert(cache_key_prefix_size != 0);
  assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize);
  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
  char* end =
      EncodeVarint64(cache_key + cache_key_prefix_size, handle.offset());
  return Slice(cache_key, static_cast<size_t>(end - cache_key));
}

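// Open the table referenced by `file`: read and validate the footer, then
// build the Rep that all subsequent reads of this table will share.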
Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
                             const EnvOptions& env_options,
                             const BlockBasedTableOptions& table_options,
                             const InternalKeyComparator& internal_comparator,
                             unique_ptr<RandomAccessFileReader>&& file,
                             uint64_t file_size,
                             unique_ptr<TableReader>* table_reader,
                             const bool prefetch_index_and_filter_in_cache,
                             const bool skip_filters, const int level) {
  table_reader->reset();

  Footer footer;
  auto s = ReadFooterFromFile(file.get(), file_size, &footer,
                              kBlockBasedTableMagicNumber);
  if (!s.ok()) {
    return s;
  }
  if (!BlockBasedTableSupportedVersion(footer.version())) {
    return Status::Corruption(
        "Unknown Footer version. Maybe this file was created with newer "
        "version of RocksDB?");
  }

  // We've successfully read the footer; we can now set up the rest of the
  // table reader.
  // Better not mutate rep_ after the creation. eg. internal_prefix_transform
  // raw pointer will be used to create HashIndexReader, whose reset may
  // access a dangling pointer.
  Rep* rep = new BlockBasedTable::Rep(ioptions, env_options, table_options,
                                      internal_comparator, skip_filters);
  rep->file = std::move(file);
  rep->footer = footer;
  rep->index_type = table_options.index_type;
  rep->hash_index_allow_collision = table_options.hash_index_allow_collision;
  // We need to wrap data with internal_prefix_transform to make sure it can
  // handle prefix correctly.
  rep->internal_prefix_transform.reset(
      new InternalKeySliceTransform(rep->ioptions.prefix_extractor));
  SetupCacheKeyPrefix(rep, file_size);
  unique_ptr<BlockBasedTable> new_table(new BlockBasedTable(rep));

  // page cache options
  rep->persistent_cache_options =
      PersistentCacheOptions(rep->table_options.persistent_cache,
                             std::string(rep->persistent_cache_key_prefix,
                                         rep->persistent_cache_key_prefix_size),
                             rep->ioptions.statistics);

  // Read meta index
  std::unique_ptr<Block> meta;
  std::unique_ptr<InternalIterator> meta_iter;
  s = ReadMetaBlock(rep, &meta, &meta_iter);
  if (!s.ok()) {
    return s;
  }

  // Find filter handle and filter type
  if (rep->filter_policy) {
    for (auto prefix : {kFullFilterBlockPrefix, kFilterBlockPrefix}) {
      std::string filter_block_key = prefix;
      filter_block_key.append(rep->filter_policy->Name());
      if (FindMetaBlock(meta_iter.get(), filter_block_key, &rep->filter_handle)
              .ok()) {
        rep->filter_type = (prefix == kFullFilterBlockPrefix)
                               ? Rep::FilterType::kFullFilter
                               : Rep::FilterType::kBlockFilter;
        break;
      }
    }
  }

  // Read the properties
  bool found_properties_block = true;
  s = SeekToPropertiesBlock(meta_iter.get(), &found_properties_block);

  if (!s.ok()) {
    Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
        "Error when seeking to properties block from file: %s",
        s.ToString().c_str());
  } else if (found_properties_block) {
    s = meta_iter->status();
    TableProperties* table_properties = nullptr;
    if (s.ok()) {
      s = ReadProperties(meta_iter->value(), rep->file.get(), rep->footer,
                         rep->ioptions, &table_properties);
    }

    if (!s.ok()) {
      Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
          "Encountered error while reading data from properties "
          "block %s", s.ToString().c_str());
    } else {
      rep->table_properties.reset(table_properties);
    }
  } else {
    Log(InfoLogLevel::ERROR_LEVEL, rep->ioptions.info_log,
        "Cannot find Properties block from file.");
  }

  // Read the compression dictionary meta block
  bool found_compression_dict;
  s = SeekToCompressionDictBlock(meta_iter.get(), &found_compression_dict);
  if (!s.ok()) {
    Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
        "Error when seeking to compression dictionary block from file: %s",
        s.ToString().c_str());
  } else if (found_compression_dict) {
    // TODO(andrewkr): Add to block cache if cache_index_and_filter_blocks is
    // true.
    unique_ptr<BlockContents> compression_dict_block{new BlockContents()};
    // TODO(andrewkr): ReadMetaBlock repeats SeekToCompressionDictBlock().
    // maybe decode a handle from meta_iter
    // and do ReadBlockContents(handle) instead
    s = rocksdb::ReadMetaBlock(rep->file.get(), file_size,
                               kBlockBasedTableMagicNumber, rep->ioptions,
                               rocksdb::kCompressionDictBlock,
                               compression_dict_block.get());
    if (!s.ok()) {
      Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
          "Encountered error while reading data from compression dictionary "
          "block %s",
          s.ToString().c_str());
    } else {
      rep->compression_dict_block = std::move(compression_dict_block);
    }
  }

  // Read the range del meta block
  // TODO(wanning&andrewkr): cache range delete tombstone block
  bool found_range_del_block;
  BlockHandle range_del_handle;
  s = SeekToRangeDelBlock(meta_iter.get(), &found_range_del_block,
                          &range_del_handle);
  if (!s.ok()) {
    Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
        "Error when seeking to range delete tombstones block from file: %s",
        s.ToString().c_str());
  } else {
    if (found_range_del_block && !range_del_handle.IsNull()) {
      BlockContents range_del_block_contents;
      ReadOptions read_options;
      s = ReadBlockContents(rep->file.get(), rep->footer, read_options,
                            range_del_handle, &range_del_block_contents,
                            rep->ioptions, false /* decompress */);
      if (!s.ok()) {
        Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
            "Encountered error while reading data from range del block %s",
            s.ToString().c_str());
      } else {
        rep->range_del_block.reset(new Block(
            std::move(range_del_block_contents), kDisableGlobalSequenceNumber));
      }
    }
  }

  // Determine whether whole key filtering is supported.
  if (rep->table_properties) {
    rep->whole_key_filtering &=
        IsFeatureSupported(*(rep->table_properties),
                           BlockBasedTablePropertyNames::kWholeKeyFiltering,
                           rep->ioptions.info_log);
    rep->prefix_filtering &= IsFeatureSupported(
        *(rep->table_properties),
        BlockBasedTablePropertyNames::kPrefixFiltering, rep->ioptions.info_log);

    rep->global_seqno = GetGlobalSequenceNumber(*(rep->table_properties),
                                                rep->ioptions.info_log);
  }
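
  // Illustrative configuration that exercises the prefetch-and-pin path
  // below (a sketch, not taken from this file; `options` stands for the
  // user's rocksdb::Options):
  //   BlockBasedTableOptions bbto;
  //   bbto.cache_index_and_filter_blocks = true;
  //   bbto.pin_l0_filter_and_index_blocks_in_cache = true;
  //   options.table_factory.reset(NewBlockBasedTableFactory(bbto));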
  // pre-fetching of blocks is turned on
  // Will use block cache for index/filter blocks access
  // Always prefetch index and filter for level 0
  if (table_options.cache_index_and_filter_blocks) {
    if (prefetch_index_and_filter_in_cache || level == 0) {
      assert(table_options.block_cache != nullptr);
      // Hack: Call NewIndexIterator() to implicitly add index to the
      // block_cache

      // if pin_l0_filter_and_index_blocks_in_cache is true and this is
      // a level0 file, then we will pass in this pointer to rep->index
      // to NewIndexIterator(), which will save the index block in there
      // else it's a nullptr and nothing special happens
      CachableEntry<IndexReader>* index_entry = nullptr;
      if (rep->table_options.pin_l0_filter_and_index_blocks_in_cache &&
          level == 0) {
        index_entry = &rep->index_entry;
      }
      unique_ptr<InternalIterator> iter(
          new_table->NewIndexIterator(ReadOptions(), nullptr, index_entry));
      s = iter->status();

      if (s.ok()) {
        // Hack: Call GetFilter() to implicitly add filter to the block_cache
        auto filter_entry = new_table->GetFilter();
        // if pin_l0_filter_and_index_blocks_in_cache is true, and this is
        // a level0 file, then save it in rep_->filter_entry; it will be
        // released in the destructor only, hence it will be pinned in the
        // cache while this reader is alive
        if (rep->table_options.pin_l0_filter_and_index_blocks_in_cache &&
            level == 0) {
          rep->filter_entry = filter_entry;
        } else {
          filter_entry.Release(table_options.block_cache.get());
        }
      }
    }
  } else {
    // If we don't use the block cache for index/filter block accesses, we
    // pre-load these blocks; they are kept in member variables of Rep and
    // share the lifetime of this table object.
    IndexReader* index_reader = nullptr;
    s = new_table->CreateIndexReader(&index_reader, meta_iter.get());

    if (s.ok()) {
      rep->index_reader.reset(index_reader);

      // Set filter block
      if (rep->filter_policy) {
        rep->filter.reset(ReadFilter(rep));
      }
    } else {
      delete index_reader;
    }
  }

  if (s.ok()) {
    *table_reader = std::move(new_table);
  }

  return s;
}
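
// Forward the configured access_hint_on_compaction_start to the underlying
// file as a read-access hint, so the file system readahead behavior can be
// adjusted before a compaction scans this table.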
void BlockBasedTable::SetupForCompaction() {
  switch (rep_->ioptions.access_hint_on_compaction_start) {
    case Options::NONE:
      break;
    case Options::NORMAL:
      rep_->file->file()->Hint(RandomAccessFile::NORMAL);
      break;
    case Options::SEQUENTIAL:
      rep_->file->file()->Hint(RandomAccessFile::SEQUENTIAL);
      break;
    case Options::WILLNEED:
      rep_->file->file()->Hint(RandomAccessFile::WILLNEED);
      break;
    default:
      assert(false);
  }
  compaction_optimized_ = true;
}

std::shared_ptr<const TableProperties> BlockBasedTable::GetTableProperties()
    const {
  return rep_->table_properties;
}
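
// This only accounts for the filter and index readers the table reader owns
// directly (the preloaded ones); blocks that live in the block cache are
// charged to the cache rather than to this table object.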
size_t BlockBasedTable::ApproximateMemoryUsage() const {
  size_t usage = 0;
  if (rep_->filter) {
    usage += rep_->filter->ApproximateMemoryUsage();
  }
  if (rep_->index_reader) {
    usage += rep_->index_reader->ApproximateMemoryUsage();
  }
  return usage;
}

// Load the meta-block from the file. On success, return the loaded meta block
// and its iterator.
Status BlockBasedTable::ReadMetaBlock(Rep* rep,
                                      std::unique_ptr<Block>* meta_block,
                                      std::unique_ptr<InternalIterator>* iter) {
  // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
  // it is an empty block.
  // TODO: we never really verify check sum for meta index block
  std::unique_ptr<Block> meta;
  Status s = ReadBlockFromFile(
      rep->file.get(), rep->footer, ReadOptions(),
      rep->footer.metaindex_handle(), &meta, rep->ioptions,
      true /* decompress */, Slice() /*compression dict*/,
      rep->persistent_cache_options, kDisableGlobalSequenceNumber,
      0 /* read_amp_bytes_per_bit */);

  if (!s.ok()) {
    Log(InfoLogLevel::ERROR_LEVEL, rep->ioptions.info_log,
        "Encountered error while reading data from the meta index"
        " block %s", s.ToString().c_str());
    return s;
  }

  *meta_block = std::move(meta);
  // meta block uses bytewise comparator.
  iter->reset(meta_block->get()->NewIterator(BytewiseComparator()));
  return Status::OK();
}
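
// Look a data block up in the uncompressed block cache first; on a miss, fall
// back to the compressed block cache, uncompress the block there, and (when
// fill_cache allows it) promote the result into the uncompressed cache.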
Status BlockBasedTable::GetDataBlockFromCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    const ImmutableCFOptions& ioptions, const ReadOptions& read_options,
    BlockBasedTable::CachableEntry<Block>* block, uint32_t format_version,
    const Slice& compression_dict, size_t read_amp_bytes_per_bit) {
  Status s;
  Block* compressed_block = nullptr;
  Cache::Handle* block_cache_compressed_handle = nullptr;
  Statistics* statistics = ioptions.statistics;

  // Lookup uncompressed cache first
  if (block_cache != nullptr) {
    block->cache_handle =
        GetEntryFromCache(block_cache, block_cache_key, BLOCK_CACHE_DATA_MISS,
                          BLOCK_CACHE_DATA_HIT, statistics);
    if (block->cache_handle != nullptr) {
      block->value =
          reinterpret_cast<Block*>(block_cache->Value(block->cache_handle));
      return s;
    }
  }

  // If not found, search from the compressed block cache.
  assert(block->cache_handle == nullptr && block->value == nullptr);

  if (block_cache_compressed == nullptr) {
    return s;
  }

  assert(!compressed_block_cache_key.empty());
  block_cache_compressed_handle =
      block_cache_compressed->Lookup(compressed_block_cache_key);
  // If we find the block in the compressed cache, uncompress it and insert
  // the result into the uncompressed cache.
  if (block_cache_compressed_handle == nullptr) {
    RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS);
    return s;
  }

  // found compressed block
  RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT);
  compressed_block = reinterpret_cast<Block*>(
      block_cache_compressed->Value(block_cache_compressed_handle));
  assert(compressed_block->compression_type() != kNoCompression);

  // Retrieve the uncompressed contents into a new buffer
  BlockContents contents;
  s = UncompressBlockContents(compressed_block->data(),
                              compressed_block->size(), &contents,
                              format_version, compression_dict,
                              ioptions);

  // Insert uncompressed block into block cache
  if (s.ok()) {
    block->value =
        new Block(std::move(contents), compressed_block->global_seqno(),
                  read_amp_bytes_per_bit,
                  statistics);  // uncompressed block
    assert(block->value->compression_type() == kNoCompression);
    if (block_cache != nullptr && block->value->cachable() &&
        read_options.fill_cache) {
      s = block_cache->Insert(
          block_cache_key, block->value, block->value->usable_size(),
          &DeleteCachedEntry<Block>, &(block->cache_handle));
      if (s.ok()) {
        RecordTick(statistics, BLOCK_CACHE_ADD);
        RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
        RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT,
                   block->value->usable_size());
        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE,
                   block->value->usable_size());
      } else {
        RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
        delete block->value;
        block->value = nullptr;
      }
    }
  }

  // Release hold on compressed cache entry
  block_cache_compressed->Release(block_cache_compressed_handle);
  return s;
}
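
// Insert a freshly read block into the caches: the raw (possibly still
// compressed) block goes into the compressed block cache, and an uncompressed
// copy goes into the regular block cache. Takes ownership of raw_block.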
Status BlockBasedTable::PutDataBlockToCache(
    const Slice& block_cache_key, const Slice& compressed_block_cache_key,
    Cache* block_cache, Cache* block_cache_compressed,
    const ReadOptions& read_options, const ImmutableCFOptions& ioptions,
    CachableEntry<Block>* block, Block* raw_block, uint32_t format_version,
    const Slice& compression_dict, size_t read_amp_bytes_per_bit) {
  assert(raw_block->compression_type() == kNoCompression ||
         block_cache_compressed != nullptr);

  Status s;
  // Retrieve the uncompressed contents into a new buffer
  BlockContents contents;
  Statistics* statistics = ioptions.statistics;
  if (raw_block->compression_type() != kNoCompression) {
    s = UncompressBlockContents(raw_block->data(), raw_block->size(), &contents,
                                format_version, compression_dict, ioptions);
  }
  if (!s.ok()) {
    delete raw_block;
    return s;
  }

  if (raw_block->compression_type() != kNoCompression) {
    block->value = new Block(std::move(contents), raw_block->global_seqno(),
                             read_amp_bytes_per_bit,
                             statistics);  // uncompressed block
  } else {
    block->value = raw_block;
    raw_block = nullptr;
  }

  // Insert compressed block into compressed block cache.
  // Release the hold on the compressed cache entry immediately.
  if (block_cache_compressed != nullptr && raw_block != nullptr &&
      raw_block->cachable()) {
    s = block_cache_compressed->Insert(compressed_block_cache_key, raw_block,
                                       raw_block->usable_size(),
                                       &DeleteCachedEntry<Block>);
    if (s.ok()) {
      // The compressed cache now owns raw_block; clear the pointer so the
      // delete below does not free it.
      raw_block = nullptr;
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD);
    } else {
      RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
    }
  }
  delete raw_block;

  // insert into uncompressed block cache
  assert((block->value->compression_type() == kNoCompression));
  if (block_cache != nullptr && block->value->cachable()) {
    s = block_cache->Insert(block_cache_key, block->value,
                            block->value->usable_size(),
                            &DeleteCachedEntry<Block>, &(block->cache_handle));
    if (s.ok()) {
      assert(block->cache_handle != nullptr);
      RecordTick(statistics, BLOCK_CACHE_ADD);
      RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
      RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT,
                 block->value->usable_size());
      RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE,
                 block->value->usable_size());
      assert(reinterpret_cast<Block*>(
                 block_cache->Value(block->cache_handle)) == block->value);
    } else {
      RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
      delete block->value;
      block->value = nullptr;
    }
  }

  return s;
}
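
// Read the filter block identified by rep->filter_handle from the file and
// wrap it in the matching reader: BlockBasedFilterBlockReader for per-block
// filters, FullFilterBlockReader for whole-file filters. Returns nullptr when
// no filter is present or the block cannot be read.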
FilterBlockReader* BlockBasedTable::ReadFilter(Rep* rep) {
  // TODO: We might want to unify with ReadBlockFromFile() if we start
  // requiring checksum verification in Table::Open.
  if (rep->filter_type == Rep::FilterType::kNoFilter) {
    return nullptr;
  }
  BlockContents block;
  if (!ReadBlockContents(rep->file.get(), rep->footer, ReadOptions(),
                         rep->filter_handle, &block, rep->ioptions,
                         false /* decompress */, Slice() /*compression dict*/,
                         rep->persistent_cache_options)
           .ok()) {
    // Error reading the block
    return nullptr;
  }

  assert(rep->filter_policy);

  if (rep->filter_type == Rep::FilterType::kBlockFilter) {
    return new BlockBasedFilterBlockReader(
        rep->prefix_filtering ? rep->ioptions.prefix_extractor : nullptr,
        rep->table_options, rep->whole_key_filtering, std::move(block),
        rep->ioptions.statistics);
  } else if (rep->filter_type == Rep::FilterType::kFullFilter) {
    auto filter_bits_reader =
        rep->filter_policy->GetFilterBitsReader(block.data);
    if (filter_bits_reader != nullptr) {
      return new FullFilterBlockReader(
          rep->prefix_filtering ? rep->ioptions.prefix_extractor : nullptr,
          rep->whole_key_filtering, std::move(block), filter_bits_reader,
          rep->ioptions.statistics);
    }
  }

  // filter_type is either kNoFilter (handled by the early return above),
  // kBlockFilter or kFullFilter; execution should never reach this point.
  assert(false);
  return nullptr;
}
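
// Return the filter reader for this table: the preloaded one, the pinned
// cache entry, or one fetched from (and, on a miss, inserted into) the block
// cache. The returned entry carries the cache handle, possibly nullptr, that
// the caller is responsible for releasing.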
BlockBasedTable::CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
    bool no_io) const {
  // If cache_index_and_filter_blocks is false, filter should be pre-populated.
  // We will return rep_->filter anyway. rep_->filter can be nullptr if filter
  // read fails at Open() time. We don't want to reload again since it will
  // most probably fail again.
  if (!rep_->table_options.cache_index_and_filter_blocks) {
    return {rep_->filter.get(), nullptr /* cache handle */};
  }

  Cache* block_cache = rep_->table_options.block_cache.get();
  if (rep_->filter_policy == nullptr /* do not use filter */ ||
      block_cache == nullptr /* no block cache at all */) {
    return {nullptr /* filter */, nullptr /* cache handle */};
  }

  // we have a pinned filter block
  if (rep_->filter_entry.IsSet()) {
    return rep_->filter_entry;
  }

  PERF_TIMER_GUARD(read_filter_block_nanos);

  // Fetching from the cache
  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
                         rep_->footer.metaindex_handle(),
                         cache_key);

  Statistics* statistics = rep_->ioptions.statistics;
  auto cache_handle =
      GetEntryFromCache(block_cache, key, BLOCK_CACHE_FILTER_MISS,
                        BLOCK_CACHE_FILTER_HIT, statistics);

  FilterBlockReader* filter = nullptr;
  if (cache_handle != nullptr) {
    filter = reinterpret_cast<FilterBlockReader*>(
        block_cache->Value(cache_handle));
  } else if (no_io) {
    // Do not invoke any io.
    return CachableEntry<FilterBlockReader>();
  } else {
    filter = ReadFilter(rep_);
    if (filter != nullptr) {
      assert(filter->size() > 0);
      Status s = block_cache->Insert(
          key, filter, filter->size(), &DeleteCachedFilterEntry, &cache_handle,
          rep_->table_options.cache_index_and_filter_blocks_with_high_priority
              ? Cache::Priority::HIGH
              : Cache::Priority::LOW);
      if (s.ok()) {
        RecordTick(statistics, BLOCK_CACHE_ADD);
        RecordTick(statistics, BLOCK_CACHE_FILTER_ADD);
        RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, filter->size());
        RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, filter->size());
      } else {
        RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
        delete filter;
        return CachableEntry<FilterBlockReader>();
      }
    }
  }

  return { filter, cache_handle };
}
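
// Return an iterator over the index block. The index reader is found in this
// order: a preloaded reader in rep_, a pinned cache entry, and finally the
// block cache (creating and inserting a new reader on a miss). When
// index_entry is non-null the caller takes ownership of the cache handle
// instead of having it released through RegisterCleanup().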
InternalIterator* BlockBasedTable::NewIndexIterator(
    const ReadOptions& read_options, BlockIter* input_iter,
    CachableEntry<IndexReader>* index_entry) {
  // index reader has already been pre-populated.
  if (rep_->index_reader) {
    return rep_->index_reader->NewIterator(
        input_iter, read_options.total_order_seek);
  }
  // we have a pinned index block
  if (rep_->index_entry.IsSet()) {
    return rep_->index_entry.value->NewIterator(input_iter,
                                                read_options.total_order_seek);
  }

  PERF_TIMER_GUARD(read_index_block_nanos);

  bool no_io = read_options.read_tier == kBlockCacheTier;
  Cache* block_cache = rep_->table_options.block_cache.get();
  char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
  auto key =
      GetCacheKeyFromOffset(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
                            rep_->dummy_index_reader_offset, cache_key);
  Statistics* statistics = rep_->ioptions.statistics;
  auto cache_handle =
      GetEntryFromCache(block_cache, key, BLOCK_CACHE_INDEX_MISS,
                        BLOCK_CACHE_INDEX_HIT, statistics);

  if (cache_handle == nullptr && no_io) {
    if (input_iter != nullptr) {
      input_iter->SetStatus(Status::Incomplete("no blocking io"));
      return input_iter;
    } else {
      return NewErrorInternalIterator(Status::Incomplete("no blocking io"));
    }
  }

  IndexReader* index_reader = nullptr;
  if (cache_handle != nullptr) {
    index_reader =
        reinterpret_cast<IndexReader*>(block_cache->Value(cache_handle));
  } else {
    // Create index reader and put it in the cache.
    Status s;
    TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:2");
    s = CreateIndexReader(&index_reader);
    TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:1");
    TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:3");
    TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:4");
    if (s.ok()) {
      assert(index_reader != nullptr);
      s = block_cache->Insert(
          key, index_reader, index_reader->usable_size(),
          &DeleteCachedIndexEntry, &cache_handle,
          rep_->table_options.cache_index_and_filter_blocks_with_high_priority
              ? Cache::Priority::HIGH
              : Cache::Priority::LOW);
    }

    if (s.ok()) {
      size_t usable_size = index_reader->usable_size();
      RecordTick(statistics, BLOCK_CACHE_ADD);
      RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
      RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usable_size);
      RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usable_size);
    } else {
      if (index_reader != nullptr) {
        delete index_reader;
      }
      RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
      // make sure if something goes wrong, index_reader shall remain intact.
      if (input_iter != nullptr) {
        input_iter->SetStatus(s);
        return input_iter;
      } else {
        return NewErrorInternalIterator(s);
      }
    }
  }

  assert(cache_handle);
  auto* iter = index_reader->NewIterator(
      input_iter, read_options.total_order_seek);

  // the caller would like to take ownership of the index block
  // don't call RegisterCleanup() in this case, the caller will take care of it
  if (index_entry != nullptr) {
    *index_entry = {index_reader, cache_handle};
  } else {
    iter->RegisterCleanup(&ReleaseCachedEntry, block_cache, cache_handle);
  }

  return iter;
}
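
// Data blocks are materialized lazily through NewDataBlockIterator(): the
// index iterator's value is decoded into a BlockHandle, the block is looked
// up in (or added to) the block caches via GetDataBlockFromCache() and
// PutDataBlockToCache(), and only then is a block iterator constructed.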
// Convert an index iterator value (i.e., an encoded BlockHandle)
|
|
|
|
// into an iterator over the contents of the corresponding block.
|
2014-07-31 01:34:35 +02:00
|
|
|
// If input_iter is null, new a iterator
|
|
|
|
// If input_iter is not null, update this iter and return it
|
2015-10-13 00:06:38 +02:00
|
|
|
InternalIterator* BlockBasedTable::NewDataBlockIterator(
|
|
|
|
Rep* rep, const ReadOptions& ro, const Slice& index_value,
|
2014-07-31 01:34:35 +02:00
|
|
|
BlockIter* input_iter) {
|
2015-07-09 01:34:48 +02:00
|
|
|
PERF_TIMER_GUARD(new_table_block_iter_nanos);
|
|
|
|
|
2014-04-25 21:22:23 +02:00
|
|
|
const bool no_io = (ro.read_tier == kBlockCacheTier);
|
2014-08-25 23:22:05 +02:00
|
|
|
Cache* block_cache = rep->table_options.block_cache.get();
|
|
|
|
Cache* block_cache_compressed =
|
|
|
|
rep->table_options.block_cache_compressed.get();
|
2014-04-25 21:22:23 +02:00
|
|
|
CachableEntry<Block> block;
|
|
|
|
|
|
|
|
BlockHandle handle;
|
|
|
|
Slice input = index_value;
|
|
|
|
// We intentionally allow extra stuff in index_value so that we
|
|
|
|
// can add more features in the future.
|
|
|
|
Status s = handle.DecodeFrom(&input);
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
2014-07-31 01:34:35 +02:00
|
|
|
if (input_iter != nullptr) {
|
|
|
|
input_iter->SetStatus(s);
|
|
|
|
return input_iter;
|
|
|
|
} else {
|
2015-10-13 00:06:38 +02:00
|
|
|
return NewErrorInternalIterator(s);
|
2014-07-31 01:34:35 +02:00
|
|
|
}
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
|
|
|
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, computes the dictionary by randomly sampling the first
output file in each subcompaction. It pre-computes the intervals to sample
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, gets the dictionary from the metablock, if it exists. Then,
loads that dictionary into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
|
|
|
Slice compression_dict;
|
|
|
|
if (rep->compression_dict_block) {
|
|
|
|
compression_dict = rep->compression_dict_block->data;
|
|
|
|
}
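  // Note: `compression_dict` (possibly empty) is forwarded to
  // GetDataBlockFromCache(), ReadBlockFromFile() and PutDataBlockToCache()
  // below so that data blocks compressed against the shared dictionary can be
  // decompressed correctly.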
|
2014-04-25 21:22:23 +02:00
|
|
|
// If either block cache is enabled, we'll try to read from it.
|
|
|
|
if (block_cache != nullptr || block_cache_compressed != nullptr) {
|
2014-09-05 01:18:36 +02:00
|
|
|
Statistics* statistics = rep->ioptions.statistics;
|
2014-04-25 21:22:23 +02:00
|
|
|
char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
|
|
|
char compressed_cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
|
|
|
Slice key, /* key to the block cache */
|
|
|
|
ckey /* key to the compressed block cache */;
|
|
|
|
|
|
|
|
// create key for block cache
|
|
|
|
if (block_cache != nullptr) {
|
2014-09-08 19:37:05 +02:00
|
|
|
key = GetCacheKey(rep->cache_key_prefix, rep->cache_key_prefix_size,
|
|
|
|
handle, cache_key);
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (block_cache_compressed != nullptr) {
|
|
|
|
ckey = GetCacheKey(rep->compressed_cache_key_prefix,
|
|
|
|
rep->compressed_cache_key_prefix_size, handle,
|
|
|
|
compressed_cache_key);
|
|
|
|
}
|
|
|
|
|
Shared dictionary compression using reference block
2016-04-28 02:36:03 +02:00
|
|
|
s = GetDataBlockFromCache(
|
2016-08-27 03:55:58 +02:00
|
|
|
key, ckey, block_cache, block_cache_compressed, rep->ioptions, ro,
|
|
|
|
&block, rep->table_options.format_version, compression_dict,
|
|
|
|
rep->table_options.read_amp_bytes_per_bit);
|
2014-04-25 21:22:23 +02:00
|
|
|
|
|
|
|
if (block.value == nullptr && !no_io && ro.fill_cache) {
|
2015-02-19 23:07:38 +01:00
|
|
|
std::unique_ptr<Block> raw_block;
|
2014-04-25 21:22:23 +02:00
|
|
|
{
|
2014-09-05 01:18:36 +02:00
|
|
|
StopWatch sw(rep->ioptions.env, statistics, READ_BLOCK_GET_MICROS);
|
2016-10-19 01:59:37 +02:00
|
|
|
s = ReadBlockFromFile(
|
|
|
|
rep->file.get(), rep->footer, ro, handle, &raw_block, rep->ioptions,
|
|
|
|
block_cache_compressed == nullptr, compression_dict,
|
|
|
|
rep->persistent_cache_options, rep->global_seqno,
|
|
|
|
rep->table_options.read_amp_bytes_per_bit);
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (s.ok()) {
|
2016-08-27 03:55:58 +02:00
|
|
|
s = PutDataBlockToCache(
|
|
|
|
key, ckey, block_cache, block_cache_compressed, ro, rep->ioptions,
|
|
|
|
&block, raw_block.release(), rep->table_options.format_version,
|
|
|
|
compression_dict, rep->table_options.read_amp_bytes_per_bit);
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Didn't get any data from block caches.
|
2016-03-11 02:35:19 +01:00
|
|
|
if (s.ok() && block.value == nullptr) {
|
2014-04-25 21:22:23 +02:00
|
|
|
if (no_io) {
|
|
|
|
// Could not read from block_cache and can't do IO
|
2014-07-31 01:34:35 +02:00
|
|
|
if (input_iter != nullptr) {
|
|
|
|
input_iter->SetStatus(Status::Incomplete("no blocking io"));
|
|
|
|
return input_iter;
|
|
|
|
} else {
|
2015-10-13 00:06:38 +02:00
|
|
|
return NewErrorInternalIterator(Status::Incomplete("no blocking io"));
|
2014-07-31 01:34:35 +02:00
|
|
|
}
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
2015-02-19 23:07:38 +01:00
|
|
|
std::unique_ptr<Block> block_value;
|
2016-10-19 01:59:37 +02:00
|
|
|
s = ReadBlockFromFile(
|
|
|
|
rep->file.get(), rep->footer, ro, handle, &block_value, rep->ioptions,
|
|
|
|
true /* compress */, compression_dict, rep->persistent_cache_options,
|
|
|
|
rep->global_seqno, rep->table_options.read_amp_bytes_per_bit);
|
2015-02-19 23:07:38 +01:00
|
|
|
if (s.ok()) {
|
|
|
|
block.value = block_value.release();
|
|
|
|
}
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
InternalIterator* iter;
|
2016-07-09 02:50:51 +02:00
|
|
|
if (s.ok()) {
|
|
|
|
assert(block.value != nullptr);
|
2016-08-27 03:55:58 +02:00
|
|
|
iter = block.value->NewIterator(&rep->internal_comparator, input_iter, true,
|
|
|
|
rep->ioptions.statistics);
|
2014-04-25 21:22:23 +02:00
|
|
|
if (block.cache_handle != nullptr) {
|
|
|
|
iter->RegisterCleanup(&ReleaseCachedEntry, block_cache,
|
2014-07-31 01:34:35 +02:00
|
|
|
block.cache_handle);
|
2014-04-25 21:22:23 +02:00
|
|
|
} else {
|
|
|
|
iter->RegisterCleanup(&DeleteHeldResource<Block>, block.value, nullptr);
|
|
|
|
}
|
|
|
|
} else {
|
2016-07-09 02:50:51 +02:00
|
|
|
assert(block.value == nullptr);
|
2014-07-31 01:34:35 +02:00
|
|
|
if (input_iter != nullptr) {
|
|
|
|
input_iter->SetStatus(s);
|
|
|
|
iter = input_iter;
|
|
|
|
} else {
|
2015-10-13 00:06:38 +02:00
|
|
|
iter = NewErrorInternalIterator(s);
|
2014-07-31 01:34:35 +02:00
|
|
|
}
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
|
|
|
return iter;
|
2012-09-27 10:05:38 +02:00
|
|
|
}
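// Minimal usage sketch for NewDataBlockIterator() (illustrative only; it
// assumes `rep`, `read_options`, a target internal key `target`, and an index
// iterator `iiter` positioned on a valid index entry; the caller owns the
// returned iterator whenever no `input_iter` was supplied):
//
//   InternalIterator* data_iter =
//       NewDataBlockIterator(rep, read_options, iiter->value());
//   for (data_iter->Seek(target); data_iter->Valid(); data_iter->Next()) {
//     // data_iter->key() / data_iter->value() come from the data block.
//   }
//   delete data_iter;  // only because no input_iter was passed above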
|
|
|
|
|
2014-04-25 21:22:23 +02:00
|
|
|
class BlockBasedTable::BlockEntryIteratorState : public TwoLevelIteratorState {
|
|
|
|
public:
|
|
|
|
BlockEntryIteratorState(BlockBasedTable* table,
|
Skip bottom-level filter block caching when hit-optimized
Summary:
When Get() or NewIterator() trigger file loads, skip caching the filter block if
(1) optimize_filters_for_hits is set and (2) the file is on the bottommost
level. Also skip checking filters under the same conditions, which means that
for a preloaded file or a file that was trivially-moved to the bottom level, its
filter block will eventually expire from the cache.
- added parameters/instance variables in various places in order to propagate the config ("skip_filters") from version_set to block_based_table_reader
- in BlockBasedTable::Rep, this optimization prevents filter from being loaded when the file is opened simply by setting filter_policy = nullptr
- in BlockBasedTable::Get/BlockBasedTable::NewIterator, this optimization prevents filter from being used (even if it was loaded already) by setting filter = nullptr
Test Plan:
updated unit test:
$ ./db_test --gtest_filter=DBTest.OptimizeFiltersForHits
will also run 'make check'
Reviewers: sdong, igor, paultuckfield, anthony, rven, kradhakrishnan, IslamAbdelRahman, yhchiang
Reviewed By: yhchiang
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D51633
2015-12-23 19:15:07 +01:00
|
|
|
const ReadOptions& read_options, bool skip_filters)
|
|
|
|
: TwoLevelIteratorState(table->rep_->ioptions.prefix_extractor !=
|
|
|
|
nullptr),
|
2014-06-20 10:23:02 +02:00
|
|
|
table_(table),
|
Skip bottom-level filter block caching when hit-optimized
2015-12-23 19:15:07 +01:00
|
|
|
read_options_(read_options),
|
|
|
|
skip_filters_(skip_filters) {}
|
2014-04-25 21:22:23 +02:00
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
InternalIterator* NewSecondaryIterator(const Slice& index_value) override {
|
2014-06-20 10:23:02 +02:00
|
|
|
return NewDataBlockIterator(table_->rep_, read_options_, index_value);
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
bool PrefixMayMatch(const Slice& internal_key) override {
|
Skip bottom-level filter block caching when hit-optimized
2015-12-23 19:15:07 +01:00
|
|
|
if (read_options_.total_order_seek || skip_filters_) {
|
2014-08-26 01:14:30 +02:00
|
|
|
return true;
|
|
|
|
}
|
2014-04-25 21:22:23 +02:00
|
|
|
return table_->PrefixMayMatch(internal_key);
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
|
|
|
// Don't own table_
|
|
|
|
BlockBasedTable* table_;
|
|
|
|
const ReadOptions read_options_;
|
Skip bottom-level filter block caching when hit-optimized
2015-12-23 19:15:07 +01:00
|
|
|
bool skip_filters_;
|
2014-04-25 21:22:23 +02:00
|
|
|
};
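// The state above is consumed by NewTwoLevelIterator() (see NewIterator()
// below): the first level walks the index block, and the second level is
// built by NewSecondaryIterator(), which decodes each index value into a
// data-block iterator via NewDataBlockIterator().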
|
|
|
|
|
2013-08-13 23:04:56 +02:00
|
|
|
// This will be broken if the user specifies an unusual implementation
|
|
|
|
// of Options.comparator, or if the user specifies an unusual
|
2014-08-25 23:22:05 +02:00
|
|
|
// definition of prefixes in BlockBasedTableOptions.filter_policy.
|
|
|
|
// In particular, we require the following three properties:
|
2013-08-13 23:04:56 +02:00
|
|
|
//
|
|
|
|
// 1) key.starts_with(prefix(key))
|
|
|
|
// 2) Compare(prefix(key), key) <= 0.
|
|
|
|
// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
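// For example, under the default bytewise comparator, a fixed-length prefix
// extractor with prefix(key) = first 4 bytes of key satisfies all three
// properties for keys of at least 4 bytes: a key starts with its own prefix,
// a byte prefix compares <= the full key, and taking fixed-length prefixes
// preserves ordering.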
|
2013-08-23 23:49:57 +02:00
|
|
|
//
|
2013-11-13 07:46:51 +01:00
|
|
|
// Otherwise, this method guarantees no I/O will be incurred.
|
|
|
|
//
|
|
|
|
// REQUIRES: this method shouldn't be called while the DB lock is held.
|
2014-04-25 21:22:23 +02:00
|
|
|
bool BlockBasedTable::PrefixMayMatch(const Slice& internal_key) {
|
2014-08-25 23:22:05 +02:00
|
|
|
if (!rep_->filter_policy) {
|
2014-06-10 18:36:59 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-09-05 01:18:36 +02:00
|
|
|
assert(rep_->ioptions.prefix_extractor != nullptr);
|
2016-01-26 23:47:42 +01:00
|
|
|
auto user_key = ExtractUserKey(internal_key);
|
2016-08-26 20:46:32 +02:00
|
|
|
if (!rep_->ioptions.prefix_extractor->InDomain(user_key) ||
|
|
|
|
rep_->table_properties->prefix_extractor_name.compare(
|
|
|
|
rep_->ioptions.prefix_extractor->Name()) != 0) {
|
2016-01-26 23:47:42 +01:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
auto prefix = rep_->ioptions.prefix_extractor->Transform(user_key);
|
2016-01-06 20:46:32 +01:00
|
|
|
InternalKey internal_key_prefix(prefix, kMaxSequenceNumber, kTypeValue);
|
2014-04-25 21:22:23 +02:00
|
|
|
auto internal_prefix = internal_key_prefix.Encode();
|
|
|
|
|
2013-08-13 23:04:56 +02:00
|
|
|
bool may_match = true;
|
|
|
|
Status s;
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
  // To prevent any I/O in this method, we set `read_tier` so that the index
|
|
|
|
  // or filter block is read only when it has already been loaded into
|
|
|
|
  // memory.
|
|
|
|
ReadOptions no_io_read_options;
|
|
|
|
no_io_read_options.read_tier = kBlockCacheTier;
|
2014-09-08 19:37:05 +02:00
|
|
|
|
|
|
|
  // First, try to check with the full filter
|
|
|
|
auto filter_entry = GetFilter(true /* no io */);
|
|
|
|
FilterBlockReader* filter = filter_entry.value;
|
2016-04-13 22:02:33 +02:00
|
|
|
if (filter != nullptr) {
|
|
|
|
if (!filter->IsBlockBased()) {
|
|
|
|
may_match = filter->PrefixMayMatch(prefix);
|
|
|
|
} else {
|
|
|
|
      // Then, try to find it within each block
|
|
|
|
unique_ptr<InternalIterator> iiter(NewIndexIterator(no_io_read_options));
|
|
|
|
iiter->Seek(internal_prefix);
|
|
|
|
|
|
|
|
if (!iiter->Valid()) {
|
|
|
|
        // we're past the end of the file;
|
|
|
|
        // if the status is Incomplete, it means that we avoided I/O
|
|
|
|
        // and we're not really sure that we're past the end
|
|
|
|
        // of the file
|
|
|
|
may_match = iiter->status().IsIncomplete();
|
|
|
|
} else if (ExtractUserKey(iiter->key())
|
|
|
|
.starts_with(ExtractUserKey(internal_prefix))) {
|
|
|
|
// we need to check for this subtle case because our only
|
|
|
|
// guarantee is that "the key is a string >= last key in that data
|
|
|
|
// block" according to the doc/table_format.txt spec.
|
|
|
|
//
|
|
|
|
// Suppose iiter->key() starts with the desired prefix; it is not
|
|
|
|
// necessarily the case that the corresponding data block will
|
|
|
|
// contain the prefix, since iiter->key() need not be in the
|
|
|
|
// block. However, the next data block may contain the prefix, so
|
|
|
|
// we return true to play it safe.
|
|
|
|
may_match = true;
|
|
|
|
} else if (filter->IsBlockBased()) {
|
|
|
|
// iiter->key() does NOT start with the desired prefix. Because
|
|
|
|
// Seek() finds the first key that is >= the seek target, this
|
|
|
|
// means that iiter->key() > prefix. Thus, any data blocks coming
|
|
|
|
// after the data block corresponding to iiter->key() cannot
|
|
|
|
// possibly contain the key. Thus, the corresponding data block
|
|
|
|
        // is the only one that could potentially contain the prefix.
|
|
|
|
Slice handle_value = iiter->value();
|
|
|
|
BlockHandle handle;
|
|
|
|
s = handle.DecodeFrom(&handle_value);
|
|
|
|
assert(s.ok());
|
|
|
|
may_match = filter->PrefixMayMatch(prefix, handle.offset());
|
|
|
|
}
|
2014-09-08 19:37:05 +02:00
|
|
|
}
|
2013-08-13 23:04:56 +02:00
|
|
|
}
|
2013-08-23 23:49:57 +02:00
|
|
|
|
2014-09-05 01:18:36 +02:00
|
|
|
Statistics* statistics = rep_->ioptions.statistics;
|
2013-11-22 23:14:05 +01:00
|
|
|
RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
|
2013-08-23 23:49:57 +02:00
|
|
|
if (!may_match) {
|
2013-11-22 23:14:05 +01:00
|
|
|
RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
|
2013-08-23 23:49:57 +02:00
|
|
|
}
|
|
|
|
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when a L0 block based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, ie. the LRU cache will never push them out. Meanwhile in the table reader, further accesses will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
|
|
|
  // If rep_->filter_entry is not set, we should call Release(); otherwise we
|
|
|
|
  // skip the call, because in that case rep_->filter_entry holds a local copy
|
|
|
|
  // that is pinned in the cache and will be released in the destructor.
|
|
|
|
if (!rep_->filter_entry.IsSet()) {
|
|
|
|
filter_entry.Release(rep_->table_options.block_cache.get());
|
|
|
|
}
|
|
|
|
|
2013-08-13 23:04:56 +02:00
|
|
|
return may_match;
|
|
|
|
}
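// Result semantics of PrefixMayMatch(): `false` means the filter guarantees
// that no key with this prefix exists in this table (and
// BLOOM_FILTER_PREFIX_USEFUL is bumped above); `true` only means the prefix
// *may* be present, e.g. when no filter is available, the table was built
// with a different prefix extractor, or the filter reports a possible match.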
|
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
InternalIterator* BlockBasedTable::NewIterator(const ReadOptions& read_options,
|
Skip bottom-level filter block caching when hit-optimized
2015-12-23 19:15:07 +01:00
|
|
|
Arena* arena,
|
|
|
|
bool skip_filters) {
|
|
|
|
return NewTwoLevelIterator(
|
|
|
|
new BlockEntryIteratorState(this, read_options, skip_filters),
|
|
|
|
NewIndexIterator(read_options), arena);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
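// Minimal usage sketch for NewIterator() (illustrative only; it assumes a
// `BlockBasedTable* table` that has already been opened, and passes a null
// `arena` so the caller owns and deletes the returned iterator):
//
//   ReadOptions ro;
//   InternalIterator* it =
//       table->NewIterator(ro, /*arena=*/nullptr, /*skip_filters=*/false);
//   for (it->SeekToFirst(); it->Valid(); it->Next()) {
//     // it->key() is an internal key; it->value() is the stored value.
//   }
//   delete it;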
|
|
|
|
|
2016-08-20 00:10:31 +02:00
|
|
|
InternalIterator* BlockBasedTable::NewRangeTombstoneIterator(
|
|
|
|
const ReadOptions& read_options) {
|
|
|
|
if (rep_->range_del_block.get() != nullptr) {
|
|
|
|
auto iter =
|
|
|
|
rep_->range_del_block->NewIterator(&(rep_->internal_comparator));
|
|
|
|
return iter;
|
|
|
|
}
|
|
|
|
return NewEmptyInternalIterator();
|
|
|
|
}
|
|
|
|
|
2016-05-06 02:20:22 +02:00
|
|
|
bool BlockBasedTable::FullFilterKeyMayMatch(const ReadOptions& read_options,
|
|
|
|
FilterBlockReader* filter,
|
2015-02-03 02:42:57 +01:00
|
|
|
const Slice& internal_key) const {
|
|
|
|
if (filter == nullptr || filter->IsBlockBased()) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
Slice user_key = ExtractUserKey(internal_key);
|
2016-06-10 00:48:45 +02:00
|
|
|
if (filter->whole_key_filtering()) {
|
|
|
|
return filter->KeyMayMatch(user_key);
|
2015-02-03 02:42:57 +01:00
|
|
|
}
|
2016-05-06 02:20:22 +02:00
|
|
|
if (!read_options.total_order_seek && rep_->ioptions.prefix_extractor &&
|
2016-08-26 20:46:32 +02:00
|
|
|
rep_->table_properties->prefix_extractor_name.compare(
|
|
|
|
rep_->ioptions.prefix_extractor->Name()) == 0 &&
|
2016-01-26 20:07:08 +01:00
|
|
|
rep_->ioptions.prefix_extractor->InDomain(user_key) &&
|
2015-02-03 02:42:57 +01:00
|
|
|
!filter->PrefixMayMatch(
|
|
|
|
rep_->ioptions.prefix_extractor->Transform(user_key))) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
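// Decision summary for FullFilterKeyMayMatch(): block-based filters are
// skipped here (they are consulted per data block later); otherwise the whole
// key is checked when whole_key_filtering() is enabled, and if it is not, the
// extracted prefix is checked when a compatible prefix extractor is
// configured. Returning false lets Get() below skip reading the index and
// data blocks.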
|
|
|
|
|
Skip bottom-level filter block caching when hit-optimized
2015-12-23 19:15:07 +01:00
|
|
|
Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
|
|
|
|
GetContext* get_context, bool skip_filters) {
|
2012-04-17 17:36:46 +02:00
|
|
|
Status s;
|
Skip bottom-level filter block caching when hit-optimized
2015-12-23 19:15:07 +01:00
|
|
|
CachableEntry<FilterBlockReader> filter_entry;
|
|
|
|
if (!skip_filters) {
|
|
|
|
filter_entry = GetFilter(read_options.read_tier == kBlockCacheTier);
|
|
|
|
}
|
2013-11-13 07:46:51 +01:00
|
|
|
FilterBlockReader* filter = filter_entry.value;
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 05:14:32 +02:00
|
|
|
|
2014-09-08 19:37:05 +02:00
|
|
|
  // First, check the full filter.
|
|
|
|
  // If the full filter is not useful, then go into each block.
|
2016-05-06 02:20:22 +02:00
|
|
|
if (!FullFilterKeyMayMatch(read_options, filter, key)) {
|
2014-09-08 19:37:05 +02:00
|
|
|
RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
|
|
|
|
} else {
|
|
|
|
BlockIter iiter;
|
|
|
|
NewIndexIterator(read_options, &iiter);
|
|
|
|
|
Introduce FullMergeV2 (eliminate memcpy from merge operators)
Summary:
This diff update the code to pin the merge operator operands while the merge operation is done, so that we can eliminate the memcpy cost, to do that we need a new public API for FullMerge that replace the std::deque<std::string> with std::vector<Slice>
This diff is stacked on top of D56493 and D56511
In this diff we
- Update FullMergeV2 arguments to be encapsulated in MergeOperationInput and MergeOperationOutput which will make it easier to add new arguments in the future
- Replace std::deque<std::string> with std::vector<Slice> to pass operands
- Replace MergeContext std::deque with std::vector (based on a simple benchmark I ran https://gist.github.com/IslamAbdelRahman/78fc86c9ab9f52b1df791e58943fb187)
- Allow FullMergeV2 output to be an existing operand
```
[Everything in Memtable | 10K operands | 10 KB each | 1 operand per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="mergerandom,readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --merge_keys=10000 --num=10000 --disable_auto_compactions --value_size=10240 --write_buffer_size=1000000000
[FullMergeV2]
readseq : 0.607 micros/op 1648235 ops/sec; 16121.2 MB/s
readseq : 0.478 micros/op 2091546 ops/sec; 20457.2 MB/s
readseq : 0.252 micros/op 3972081 ops/sec; 38850.5 MB/s
readseq : 0.237 micros/op 4218328 ops/sec; 41259.0 MB/s
readseq : 0.247 micros/op 4043927 ops/sec; 39553.2 MB/s
[master]
readseq : 3.935 micros/op 254140 ops/sec; 2485.7 MB/s
readseq : 3.722 micros/op 268657 ops/sec; 2627.7 MB/s
readseq : 3.149 micros/op 317605 ops/sec; 3106.5 MB/s
readseq : 3.125 micros/op 320024 ops/sec; 3130.1 MB/s
readseq : 4.075 micros/op 245374 ops/sec; 2400.0 MB/s
```
```
[Everything in Memtable | 10K operands | 10 KB each | 10 operand per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="mergerandom,readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --merge_keys=1000 --num=10000 --disable_auto_compactions --value_size=10240 --write_buffer_size=1000000000
[FullMergeV2]
readseq : 3.472 micros/op 288018 ops/sec; 2817.1 MB/s
readseq : 2.304 micros/op 434027 ops/sec; 4245.2 MB/s
readseq : 1.163 micros/op 859845 ops/sec; 8410.0 MB/s
readseq : 1.192 micros/op 838926 ops/sec; 8205.4 MB/s
readseq : 1.250 micros/op 800000 ops/sec; 7824.7 MB/s
[master]
readseq : 24.025 micros/op 41623 ops/sec; 407.1 MB/s
readseq : 18.489 micros/op 54086 ops/sec; 529.0 MB/s
readseq : 18.693 micros/op 53495 ops/sec; 523.2 MB/s
readseq : 23.621 micros/op 42335 ops/sec; 414.1 MB/s
readseq : 18.775 micros/op 53262 ops/sec; 521.0 MB/s
```
```
[Everything in Block cache | 10K operands | 10 KB each | 1 operand per key]
[FullMergeV2]
$ DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --num=100000 --db="/dev/shm/merge-random-10K-10KB" --cache_size=1000000000 --use_existing_db --disable_auto_compactions
readseq : 14.741 micros/op 67837 ops/sec; 663.5 MB/s
readseq : 1.029 micros/op 971446 ops/sec; 9501.6 MB/s
readseq : 0.974 micros/op 1026229 ops/sec; 10037.4 MB/s
readseq : 0.965 micros/op 1036080 ops/sec; 10133.8 MB/s
readseq : 0.943 micros/op 1060657 ops/sec; 10374.2 MB/s
[master]
readseq : 16.735 micros/op 59755 ops/sec; 584.5 MB/s
readseq : 3.029 micros/op 330151 ops/sec; 3229.2 MB/s
readseq : 3.136 micros/op 318883 ops/sec; 3119.0 MB/s
readseq : 3.065 micros/op 326245 ops/sec; 3191.0 MB/s
readseq : 3.014 micros/op 331813 ops/sec; 3245.4 MB/s
```
```
[Everything in Block cache | 10K operands | 10 KB each | 10 operand per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --num=100000 --db="/dev/shm/merge-random-10-operands-10K-10KB" --cache_size=1000000000 --use_existing_db --disable_auto_compactions
[FullMergeV2]
readseq : 24.325 micros/op 41109 ops/sec; 402.1 MB/s
readseq : 1.470 micros/op 680272 ops/sec; 6653.7 MB/s
readseq : 1.231 micros/op 812347 ops/sec; 7945.5 MB/s
readseq : 1.091 micros/op 916590 ops/sec; 8965.1 MB/s
readseq : 1.109 micros/op 901713 ops/sec; 8819.6 MB/s
[master]
readseq : 27.257 micros/op 36687 ops/sec; 358.8 MB/s
readseq : 4.443 micros/op 225073 ops/sec; 2201.4 MB/s
readseq : 5.830 micros/op 171526 ops/sec; 1677.7 MB/s
readseq : 4.173 micros/op 239635 ops/sec; 2343.8 MB/s
readseq : 4.150 micros/op 240963 ops/sec; 2356.8 MB/s
```
Test Plan: COMPILE_WITH_ASAN=1 make check -j64
Reviewers: yhchiang, andrewkr, sdong
Reviewed By: sdong
Subscribers: lovro, andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D57075
2016-07-20 18:49:03 +02:00
|
|
|
PinnedIteratorsManager* pinned_iters_mgr = get_context->pinned_iters_mgr();
|
|
|
|
bool pin_blocks = pinned_iters_mgr && pinned_iters_mgr->PinningEnabled();
|
|
|
|
BlockIter* biter = nullptr;
|
|
|
|
|
2014-09-08 19:37:05 +02:00
|
|
|
bool done = false;
|
|
|
|
for (iiter.Seek(key); iiter.Valid() && !done; iiter.Next()) {
|
|
|
|
Slice handle_value = iiter.value();
|
2014-01-27 22:53:22 +01:00
|
|
|
|
2014-09-08 19:37:05 +02:00
|
|
|
BlockHandle handle;
|
|
|
|
bool not_exist_in_filter =
|
|
|
|
filter != nullptr && filter->IsBlockBased() == true &&
|
|
|
|
handle.DecodeFrom(&handle_value).ok() &&
|
|
|
|
!filter->KeyMayMatch(ExtractUserKey(key), handle.offset());
|
|
|
|
|
|
|
|
if (not_exist_in_filter) {
|
|
|
|
// Not found
|
|
|
|
// TODO: think about interaction with Merge. If a user key cannot
|
|
|
|
// cross one data block, we should be fine.
|
|
|
|
RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
|
|
|
|
break;
|
|
|
|
} else {
|
Introduce FullMergeV2 (eliminate memcpy from merge operators)
2016-07-20 18:49:03 +02:00
|
|
|
BlockIter stack_biter;
|
|
|
|
if (pin_blocks) {
|
|
|
|
        // We need to create the BlockIter on the heap because we may need to
|
|
|
|
        // pin it if we encounter merge operands
|
|
|
|
biter = static_cast<BlockIter*>(
|
|
|
|
NewDataBlockIterator(rep_, read_options, iiter.value()));
|
|
|
|
} else {
|
|
|
|
biter = &stack_biter;
|
|
|
|
NewDataBlockIterator(rep_, read_options, iiter.value(), biter);
|
|
|
|
}
|
2014-09-08 19:37:05 +02:00
|
|
|
|
2016-02-09 20:20:22 +01:00
|
|
|
if (read_options.read_tier == kBlockCacheTier &&
|
Introduce FullMergeV2 (eliminate memcpy from merge operators)
2016-07-20 18:49:03 +02:00
|
|
|
biter->status().IsIncomplete()) {
|
2014-09-08 19:37:05 +02:00
|
|
|
// couldn't get block from block_cache
|
|
|
|
// Update Saver.state to Found because we are only looking for whether
|
|
|
|
// we can guarantee the key is not there when "no_io" is set
|
2014-09-29 20:09:09 +02:00
|
|
|
get_context->MarkKeyMayExist();
|
2013-03-21 23:59:47 +01:00
|
|
|
break;
|
|
|
|
}
|
Introduce FullMergeV2 (eliminate memcpy from merge operators)
2016-07-20 18:49:03 +02:00
|
|
|
if (!biter->status().ok()) {
|
|
|
|
s = biter->status();
|
2014-09-08 19:37:05 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Call the *saver function on each entry/block until it returns false
|
Introduce FullMergeV2 (eliminate memcpy from merge operators)
2016-07-20 18:49:03 +02:00
|
|
|
for (biter->Seek(key); biter->Valid(); biter->Next()) {
|
2014-09-08 19:37:05 +02:00
|
|
|
ParsedInternalKey parsed_key;
|
Introduce FullMergeV2 (eliminate memcpy from merge operators)
2016-07-20 18:49:03 +02:00
|
|
|
if (!ParseInternalKey(biter->key(), &parsed_key)) {
|
2014-09-08 19:37:05 +02:00
|
|
|
s = Status::Corruption(Slice());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!get_context->SaveValue(parsed_key, biter->value(), pin_blocks)) {
|
2014-09-08 19:37:05 +02:00
|
|
|
done = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s = biter->status();
|
|
|
|
|
|
|
|
if (pin_blocks) {
|
|
|
|
if (get_context->State() == GetContext::kMerge) {
|
|
|
|
// Pin blocks as long as we are merging
|
2016-08-11 20:54:17 +02:00
|
|
|
pinned_iters_mgr->PinIterator(biter);
|
|
|
|
} else {
|
|
|
|
delete biter;
|
|
|
|
}
|
|
|
|
biter = nullptr;
|
|
|
|
} else {
|
|
|
|
// biter is on stack, nothing to clean
|
|
|
|
}
|
2012-04-17 17:36:46 +02:00
|
|
|
}
|
2014-09-08 19:37:05 +02:00
|
|
|
}
|
|
|
|
if (pin_blocks && biter != nullptr) {
|
|
|
|
delete biter;
|
|
|
|
}
|
2014-09-08 19:37:05 +02:00
|
|
|
if (s.ok()) {
|
|
|
|
s = iiter.status();
|
2012-04-17 17:36:46 +02:00
|
|
|
}
|
|
|
|
}
|
2013-11-13 07:46:51 +01:00
|
|
|
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block-based table file is opened, if prefetch_index_and_filter is true, the index and filter blocks are prefetched and put into the block cache.
What this feature adds: when an L0 block-based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, i.e. the LRU cache will never push them out. Subsequent accesses in the table reader then bypass the block cache entirely, avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
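To make the pinning idea above concrete, here is a minimal, self-contained C++ sketch of the ownership hand-off it describes: on open, the reader keeps the cache handle for an L0 file's filter block instead of releasing it, so the per-read Release() (like the guarded call just below) becomes a no-op and the handle is only given back when the reader is closed. DummyCache, Handle, CachableEntry and the other names here are simplified stand-ins, not the real RocksDB types.
```cpp
// Hedged sketch of pin_l0_filter_and_index_blocks_in_cache; all types are
// simplified stand-ins for the real block cache and CachableEntry.
#include <cassert>
#include <cstdio>

struct Handle { int refs = 0; };      // stand-in for a block-cache handle

struct DummyCache {                   // stand-in for the block cache
  Handle handle;
  Handle* Lookup() { ++handle.refs; return &handle; }
  void Release(Handle* h) { assert(h->refs > 0); --h->refs; }
};

template <class T>
struct CachableEntry {                // simplified CachableEntry<T>
  T* value = nullptr;
  Handle* cache_handle = nullptr;
  bool IsSet() const { return cache_handle != nullptr; }
  void Release(DummyCache* cache) {
    if (cache_handle != nullptr) {
      cache->Release(cache_handle);
      cache_handle = nullptr;
      value = nullptr;
    }
  }
};

struct FilterBlock {};                // stand-in for the filter block

int main() {
  DummyCache cache;
  FilterBlock filter;

  // Open(): fetch the filter block from the cache.
  CachableEntry<FilterBlock> filter_entry;
  filter_entry.cache_handle = cache.Lookup();
  filter_entry.value = &filter;

  const bool pin_l0_filter_and_index_blocks_in_cache = true;
  CachableEntry<FilterBlock> pinned;  // what the reader would keep in rep_
  if (pin_l0_filter_and_index_blocks_in_cache) {
    pinned = filter_entry;            // reader keeps the handle (pinned in cache)
  } else {
    filter_entry.Release(&cache);     // not pinning: hand the block back now
  }

  // Per-read path, as in the guarded Release() below: skip when pinned.
  if (!pinned.IsSet()) {
    filter_entry.Release(&cache);
  }

  pinned.Release(&cache);  // Close()/destructor: drop the pinned reference
  std::printf("outstanding refs: %d\n", cache.handle.refs);  // prints 0
  return 0;
}
```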
|
|
|
// if rep_->filter_entry is not set, we should call Release(); otherwise
|
|
|
|
// don't call, in this case we have a local copy in rep_->filter_entry,
|
|
|
|
// it's pinned to the cache and will be released in the destructor
|
|
|
|
if (!rep_->filter_entry.IsSet()) {
|
|
|
|
filter_entry.Release(rep_->table_options.block_cache.get());
|
|
|
|
}
|
2012-04-17 17:36:46 +02:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2015-03-03 02:07:03 +01:00
|
|
|
Status BlockBasedTable::Prefetch(const Slice* const begin,
|
|
|
|
const Slice* const end) {
|
|
|
|
auto& comparator = rep_->internal_comparator;
|
|
|
|
// pre-condition
|
|
|
|
if (begin && end && comparator.Compare(*begin, *end) > 0) {
|
|
|
|
return Status::InvalidArgument(*begin, *end);
|
|
|
|
}
|
|
|
|
|
|
|
|
BlockIter iiter;
|
|
|
|
NewIndexIterator(ReadOptions(), &iiter);
|
|
|
|
|
|
|
|
if (!iiter.status().ok()) {
|
|
|
|
// error opening index iterator
|
|
|
|
return iiter.status();
|
|
|
|
}
|
|
|
|
|
|
|
|
// indicates if we are on the last page that needs to be pre-fetched
|
|
|
|
bool prefetching_boundary_page = false;
|
|
|
|
|
|
|
|
for (begin ? iiter.Seek(*begin) : iiter.SeekToFirst(); iiter.Valid();
|
|
|
|
iiter.Next()) {
|
|
|
|
Slice block_handle = iiter.value();
|
|
|
|
|
|
|
|
if (end && comparator.Compare(iiter.key(), *end) >= 0) {
|
|
|
|
if (prefetching_boundary_page) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The index entry represents the last key in the data block.
|
|
|
|
// We should load this page into memory as well, but no more
|
|
|
|
prefetching_boundary_page = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Load the block specified by the block_handle into the block cache
|
|
|
|
BlockIter biter;
|
|
|
|
NewDataBlockIterator(rep_, ReadOptions(), block_handle, &biter);
|
|
|
|
|
|
|
|
if (!biter.status().ok()) {
|
|
|
|
// there was an unexpected error while pre-fetching
|
|
|
|
return biter.status();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2013-10-29 01:54:09 +01:00
|
|
|
bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
|
|
|
|
const Slice& key) {
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<InternalIterator> iiter(NewIndexIterator(options));
|
2014-06-20 10:23:02 +02:00
|
|
|
iiter->Seek(key);
|
|
|
|
assert(iiter->Valid());
|
|
|
|
CachableEntry<Block> block;
|
|
|
|
|
|
|
|
BlockHandle handle;
|
|
|
|
Slice input = iiter->value();
|
|
|
|
Status s = handle.DecodeFrom(&input);
|
2013-02-01 00:20:24 +01:00
|
|
|
assert(s.ok());
|
2014-08-25 23:22:05 +02:00
|
|
|
Cache* block_cache = rep_->table_options.block_cache.get();
|
2014-06-20 10:23:02 +02:00
|
|
|
assert(block_cache != nullptr);
|
|
|
|
|
|
|
|
char cache_key_storage[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
|
|
|
Slice cache_key =
|
2014-09-08 19:37:05 +02:00
|
|
|
GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
|
|
|
|
handle, cache_key_storage);
|
2014-06-20 10:23:02 +02:00
|
|
|
Slice ckey;
|
|
|
|
|
2016-08-27 03:55:58 +02:00
|
|
|
s = GetDataBlockFromCache(
|
|
|
|
cache_key, ckey, block_cache, nullptr, rep_->ioptions, options, &block,
|
|
|
|
rep_->table_options.format_version,
|
|
|
|
rep_->compression_dict_block ? rep_->compression_dict_block->data
|
|
|
|
: Slice(),
|
|
|
|
0 /* read_amp_bytes_per_bit */);
|
2014-06-20 10:23:02 +02:00
|
|
|
assert(s.ok());
|
|
|
|
bool in_cache = block.value != nullptr;
|
|
|
|
if (in_cache) {
|
|
|
|
ReleaseCachedEntry(block_cache, block.cache_handle);
|
|
|
|
}
|
|
|
|
return in_cache;
|
2013-02-01 00:20:24 +01:00
|
|
|
}
|
2012-04-17 17:36:46 +02:00
|
|
|
|
2014-03-01 03:19:07 +01:00
|
|
|
// REQUIRES: The following fields of rep_ should have already been populated:
|
|
|
|
// 1. file
|
|
|
|
// 2. index_handle,
|
|
|
|
// 3. options
|
|
|
|
// 4. internal_comparator
|
|
|
|
// 5. index_type
|
2015-10-13 00:06:38 +02:00
|
|
|
Status BlockBasedTable::CreateIndexReader(
|
|
|
|
IndexReader** index_reader, InternalIterator* preloaded_meta_index_iter) {
|
2014-03-01 03:19:07 +01:00
|
|
|
// Some old version of block-based tables don't have index type present in
|
|
|
|
// table properties. If that's the case we can safely use the kBinarySearch.
|
Use a different approach to make sure BlockBasedTableReader can use hash index on older files
Summary:
A recent commit https://github.com/facebook/rocksdb/commit/e37dd216f9384bfdabc6760fa296e8ee28c79d30 made sure the hash index can be used when reading existing files. This patch achieves the same goal in a different way:
(1) For now, always write kBinarySearch to files, regardless of the BlockBasedTableOptions.IndexType setting.
(2) When reading a file, read the field back and verify it is kBinarySearch, while always using the index type configured by the user.
The reason is to reserve the kHashSearch property on disk for the future. If we wrote out a binary index but recorded either kHashSearch or kBinarySearch on disk now, we would have to introduce a new flag later for a real on-disk hash index, otherwise compatibility would break. We also want the real index type and the type shown in the properties block to be consistent.
Test Plan: make all check
Reviewers: haobo, kailiu
Reviewed By: kailiu
CC: igor, ljin, yhchiang, xjin, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D18009
2014-04-18 03:00:58 +02:00
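As a rough illustration of the read-side rule this commit describes, the following self-contained sketch decodes an index-type property and falls back to binary search when a hash index is requested without a prefix extractor, mirroring the logic in CreateIndexReader() below. The property key and helper names are simplified stand-ins rather than the real RocksDB API.
```cpp
// Hedged sketch of the index-type fallback; key and helper names are
// illustrative stand-ins, not the real RocksDB identifiers.
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

enum class IndexType : uint32_t { kBinarySearch = 0, kHashSearch = 1 };

// Simplified stand-in for the index-type entry in user-collected properties.
static const char* kIndexTypeKey = "index.type";

IndexType PickIndexType(const std::map<std::string, uint32_t>& user_props,
                        bool has_prefix_extractor) {
  IndexType index_type_on_file = IndexType::kBinarySearch;  // default for old files
  auto pos = user_props.find(kIndexTypeKey);
  if (pos != user_props.end()) {
    index_type_on_file = static_cast<IndexType>(pos->second);
  }
  if (index_type_on_file == IndexType::kHashSearch && !has_prefix_extractor) {
    // kHashSearch needs options.prefix_extractor; fall back to binary search,
    // as the warning in CreateIndexReader() below does.
    std::fprintf(stderr, "warning: hash index requested without prefix "
                         "extractor, falling back to binary search\n");
    index_type_on_file = IndexType::kBinarySearch;
  }
  return index_type_on_file;
}

int main() {
  std::map<std::string, uint32_t> props{
      {kIndexTypeKey, static_cast<uint32_t>(IndexType::kHashSearch)}};
  IndexType t = PickIndexType(props, /*has_prefix_extractor=*/false);
  std::printf("effective index type: %u\n",
              static_cast<uint32_t>(t));  // prints 0 (kBinarySearch)
  return 0;
}
```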
|
|
|
auto index_type_on_file = BlockBasedTableOptions::kBinarySearch;
|
|
|
|
if (rep_->table_properties) {
|
|
|
|
auto& props = rep_->table_properties->user_collected_properties;
|
|
|
|
auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
|
|
|
|
if (pos != props.end()) {
|
|
|
|
index_type_on_file = static_cast<BlockBasedTableOptions::IndexType>(
|
|
|
|
DecodeFixed32(pos->second.c_str()));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-04-10 23:19:43 +02:00
|
|
|
auto file = rep_->file.get();
|
|
|
|
auto comparator = &rep_->internal_comparator;
|
2014-05-01 20:09:32 +02:00
|
|
|
const Footer& footer = rep_->footer;
|
2014-06-20 00:32:31 +02:00
|
|
|
if (index_type_on_file == BlockBasedTableOptions::kHashSearch &&
|
2014-09-05 01:18:36 +02:00
|
|
|
rep_->ioptions.prefix_extractor == nullptr) {
|
2014-10-31 19:41:15 +01:00
|
|
|
Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log,
|
2014-06-20 00:32:31 +02:00
|
|
|
"BlockBasedTableOptions::kHashSearch requires "
|
|
|
|
"options.prefix_extractor to be set."
|
2015-04-25 11:14:27 +02:00
|
|
|
" Fall back to binary search index.");
|
2014-06-20 00:32:31 +02:00
|
|
|
index_type_on_file = BlockBasedTableOptions::kBinarySearch;
|
|
|
|
}
|
|
|
|
|
2014-05-15 23:09:03 +02:00
|
|
|
switch (index_type_on_file) {
|
2014-03-01 03:19:07 +01:00
|
|
|
case BlockBasedTableOptions::kBinarySearch: {
|
2014-05-01 20:09:32 +02:00
|
|
|
return BinarySearchIndexReader::Create(
|
2016-08-13 01:34:11 +02:00
|
|
|
file, footer, footer.index_handle(), rep_->ioptions, comparator,
|
2016-07-19 18:44:03 +02:00
|
|
|
index_reader, rep_->persistent_cache_options);
|
2014-04-10 23:19:43 +02:00
|
|
|
}
|
|
|
|
case BlockBasedTableOptions::kHashSearch: {
|
2014-05-15 23:09:03 +02:00
|
|
|
std::unique_ptr<Block> meta_guard;
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<InternalIterator> meta_iter_guard;
|
2014-05-15 23:09:03 +02:00
|
|
|
auto meta_index_iter = preloaded_meta_index_iter;
|
|
|
|
if (meta_index_iter == nullptr) {
|
|
|
|
auto s = ReadMetaBlock(rep_, &meta_guard, &meta_iter_guard);
|
|
|
|
if (!s.ok()) {
|
2014-06-20 00:32:31 +02:00
|
|
|
// we simply fall back to binary search in case there is any
|
|
|
|
// problem with prefix hash index loading.
|
2014-10-31 19:41:15 +01:00
|
|
|
Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log,
|
2014-06-20 00:32:31 +02:00
|
|
|
"Unable to read the metaindex block."
|
2015-04-25 11:14:27 +02:00
|
|
|
" Fall back to binary search index.");
|
2014-06-20 00:32:31 +02:00
|
|
|
return BinarySearchIndexReader::Create(
|
2016-07-19 18:44:03 +02:00
|
|
|
file, footer, footer.index_handle(), rep_->ioptions, comparator,
|
|
|
|
index_reader, rep_->persistent_cache_options);
|
2014-05-15 23:09:03 +02:00
|
|
|
}
|
|
|
|
meta_index_iter = meta_iter_guard.get();
|
|
|
|
}
|
|
|
|
|
2014-04-10 23:19:43 +02:00
|
|
|
return HashIndexReader::Create(
|
2016-07-19 18:44:03 +02:00
|
|
|
rep_->internal_prefix_transform.get(), footer, file, rep_->ioptions,
|
|
|
|
comparator, footer.index_handle(), meta_index_iter, index_reader,
|
|
|
|
rep_->hash_index_allow_collision, rep_->persistent_cache_options);
|
2014-03-01 03:19:07 +01:00
|
|
|
}
|
|
|
|
default: {
|
|
|
|
std::string error_message =
|
2014-11-25 05:44:49 +01:00
|
|
|
"Unrecognized index type: " + ToString(rep_->index_type);
|
2014-03-02 08:40:08 +01:00
|
|
|
return Status::InvalidArgument(error_message.c_str());
|
2014-03-01 03:19:07 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-29 01:54:09 +01:00
|
|
|
uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key) {
|
2015-10-13 00:06:38 +02:00
|
|
|
unique_ptr<InternalIterator> index_iter(NewIndexIterator(ReadOptions()));
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
index_iter->Seek(key);
|
|
|
|
uint64_t result;
|
|
|
|
if (index_iter->Valid()) {
|
|
|
|
BlockHandle handle;
|
|
|
|
Slice input = index_iter->value();
|
|
|
|
Status s = handle.DecodeFrom(&input);
|
|
|
|
if (s.ok()) {
|
|
|
|
result = handle.offset();
|
|
|
|
} else {
|
|
|
|
// Strange: we can't decode the block handle in the index block.
|
|
|
|
// We'll just return the offset of the metaindex block, which is
|
|
|
|
// close to the whole file size for this case.
|
2014-05-01 20:09:32 +02:00
|
|
|
result = rep_->footer.metaindex_handle().offset();
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
} else {
|
2014-03-01 05:37:32 +01:00
|
|
|
// key is past the last key in the file. If table_properties is not
|
|
|
|
// available, approximate the offset by returning the offset of the
|
|
|
|
// metaindex block (which is right near the end of the file).
|
RocksDB 2.8 to be able to read files generated by 2.6
Summary:
From 2.6 to 2.7, the property block name was renamed from rocksdb.stats to rocksdb.properties, so older property blocks could no longer be loaded. In 2.8, we seem to have added some logic that uses the property block without checking for null pointers, which causes segmentation faults.
In this patch, we fix it by:
(1) trying rocksdb.stats if rocksdb.properties is not found
(2) adding null checks before consuming rep->table_properties
Test Plan: verify that a file generated in 2.7 that previously couldn't be opened can now be opened.
Reviewers: haobo, igor, yhchiang
Reviewed By: igor
CC: ljin, xjin, dhruba, kailiu, leveldb
Differential Revision: https://reviews.facebook.net/D17961
2014-04-17 04:30:33 +02:00
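A minimal sketch of the fallback described above, assuming a simplified metaindex lookup: the reader tries the new properties block name first, falls back to the legacy name, and callers null-check the result before using it, as the guards on rep_->table_properties below do. FindPropertiesBlock, MetaIndex and TableProperties here are illustrative stand-ins, not the real implementation.
```cpp
// Hedged sketch of the rocksdb.properties -> rocksdb.stats fallback plus the
// null check; the metaindex lookup is reduced to a plain map.
#include <cstdint>
#include <cstdio>
#include <initializer_list>
#include <map>
#include <memory>
#include <string>

struct TableProperties { uint64_t data_size = 0; };

// Stand-in for searching the metaindex block: block name -> properties.
using MetaIndex = std::map<std::string, std::shared_ptr<TableProperties>>;

std::shared_ptr<TableProperties> FindPropertiesBlock(const MetaIndex& meta) {
  // New name written by 2.7+, legacy name written by <= 2.6.
  for (const char* name : {"rocksdb.properties", "rocksdb.stats"}) {
    auto it = meta.find(name);
    if (it != meta.end()) {
      return it->second;
    }
  }
  return nullptr;  // neither block exists; callers must tolerate this
}

int main() {
  MetaIndex meta{{"rocksdb.stats", std::make_shared<TableProperties>()}};
  std::shared_ptr<TableProperties> props = FindPropertiesBlock(meta);
  // Null check before consuming, mirroring the rep_->table_properties guards
  // in ApproximateOffsetOf() and CreateIndexReader() below.
  uint64_t data_size = props ? props->data_size : 0;
  std::printf("data size: %llu\n",
              static_cast<unsigned long long>(data_size));
  return 0;
}
```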
|
|
|
result = 0;
|
|
|
|
if (rep_->table_properties) {
|
|
|
|
result = rep_->table_properties->data_size;
|
|
|
|
}
|
2014-03-01 05:37:32 +01:00
|
|
|
// table_properties is not present in the table.
|
|
|
|
if (result == 0) {
|
2014-05-01 20:09:32 +02:00
|
|
|
result = rep_->footer.metaindex_handle().offset();
|
2014-03-01 05:37:32 +01:00
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2014-02-20 00:38:57 +01:00
|
|
|
bool BlockBasedTable::TEST_filter_block_preloaded() const {
|
|
|
|
return rep_->filter != nullptr;
|
|
|
|
}
|
|
|
|
|
2014-03-01 03:19:07 +01:00
|
|
|
bool BlockBasedTable::TEST_index_reader_preloaded() const {
|
|
|
|
return rep_->index_reader != nullptr;
|
2014-02-20 00:38:57 +01:00
|
|
|
}
|
|
|
|
|
2016-08-01 23:50:19 +02:00
|
|
|
Status BlockBasedTable::GetKVPairsFromDataBlocks(
|
|
|
|
std::vector<KVPairBlock>* kv_pair_blocks) {
|
|
|
|
std::unique_ptr<InternalIterator> blockhandles_iter(
|
|
|
|
NewIndexIterator(ReadOptions()));
|
|
|
|
|
|
|
|
Status s = blockhandles_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
// Cannot read Index Block
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
|
|
|
|
blockhandles_iter->Next()) {
|
|
|
|
s = blockhandles_iter->status();
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::unique_ptr<InternalIterator> datablock_iter;
|
|
|
|
datablock_iter.reset(
|
|
|
|
NewDataBlockIterator(rep_, ReadOptions(), blockhandles_iter->value()));
|
|
|
|
s = datablock_iter->status();
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
|
|
|
// Error reading the block - Skipped
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
KVPairBlock kv_pair_block;
|
|
|
|
for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
|
|
|
|
datablock_iter->Next()) {
|
|
|
|
s = datablock_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
// Error reading the block - Skipped
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
const Slice& key = datablock_iter->key();
|
|
|
|
const Slice& value = datablock_iter->value();
|
|
|
|
std::string key_copy = std::string(key.data(), key.size());
|
|
|
|
std::string value_copy = std::string(value.data(), value.size());
|
|
|
|
|
|
|
|
kv_pair_block.push_back(
|
|
|
|
std::make_pair(std::move(key_copy), std::move(value_copy)));
|
|
|
|
}
|
|
|
|
kv_pair_blocks->push_back(std::move(kv_pair_block));
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2014-12-23 22:24:07 +01:00
|
|
|
Status BlockBasedTable::DumpTable(WritableFile* out_file) {
|
|
|
|
// Output Footer
|
|
|
|
out_file->Append(
|
|
|
|
"Footer Details:\n"
|
|
|
|
"--------------------------------------\n"
|
|
|
|
" ");
|
|
|
|
out_file->Append(rep_->footer.ToString().c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
|
|
|
|
// Output MetaIndex
|
|
|
|
out_file->Append(
|
|
|
|
"Metaindex Details:\n"
|
|
|
|
"--------------------------------------\n");
|
|
|
|
std::unique_ptr<Block> meta;
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<InternalIterator> meta_iter;
|
2014-12-23 22:24:07 +01:00
|
|
|
Status s = ReadMetaBlock(rep_, &meta, &meta_iter);
|
|
|
|
if (s.ok()) {
|
|
|
|
for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) {
|
|
|
|
s = meta_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
if (meta_iter->key() == rocksdb::kPropertiesBlock) {
|
|
|
|
out_file->Append(" Properties block handle: ");
|
|
|
|
out_file->Append(meta_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, the dictionary is computed by randomly sampling the first
output file in each subcompaction. The sampling intervals are pre-computed by
assuming the output file will reach the maximum allowable length; if the file
turns out smaller, some of the pre-computed intervals fall beyond end-of-file,
in which case those samples are skipped and the dictionary ends up a bit
smaller. After the dictionary is generated from the first file of a
subcompaction, it is loaded into the compression library before writing each
block of each subsequent file in that subcompaction.
On the read path, the dictionary is fetched from the metablock, if it exists,
and loaded into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
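The sampling scheme described above can be sketched as follows, under the assumption of an illustrative maximum file size and sample length (none of the names or numbers below come from the real implementation): offsets are pre-computed against the maximum allowed output size, and any sample that would read past the actual end of the file is simply skipped, yielding a slightly smaller dictionary for short files.
```cpp
// Hedged sketch of pre-computed sampling intervals for a shared compression
// dictionary; sizes, seeds and function names are illustrative only.
#include <cstdint>
#include <cstdio>
#include <random>
#include <string>
#include <vector>

std::vector<uint64_t> PrecomputeSampleOffsets(uint64_t max_file_size,
                                              uint64_t sample_len,
                                              size_t num_samples,
                                              uint32_t seed) {
  std::mt19937 rng(seed);
  std::uniform_int_distribution<uint64_t> dist(0, max_file_size - sample_len);
  std::vector<uint64_t> offsets;
  offsets.reserve(num_samples);
  for (size_t i = 0; i < num_samples; ++i) {
    offsets.push_back(dist(rng));  // chosen before the file is written
  }
  return offsets;
}

std::string BuildDictionary(const std::string& file_contents,
                            const std::vector<uint64_t>& offsets,
                            uint64_t sample_len) {
  std::string dict;
  for (uint64_t off : offsets) {
    if (off + sample_len > file_contents.size()) {
      continue;  // sample falls beyond end-of-file: skip, dictionary shrinks
    }
    dict.append(file_contents, off, sample_len);
  }
  return dict;
}

int main() {
  const uint64_t kMaxFileSize = 1 << 20;   // assume a 1 MB maximum output file
  const uint64_t kSampleLen = 64;
  auto offsets = PrecomputeSampleOffsets(kMaxFileSize, kSampleLen,
                                         /*num_samples=*/16, /*seed=*/42);
  std::string actual_file(200 * 1024, 'x');  // actual file is much smaller
  std::string dict = BuildDictionary(actual_file, offsets, kSampleLen);
  std::printf("dictionary bytes: %zu\n", dict.size());
  return 0;
}
```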
|
|
|
} else if (meta_iter->key() == rocksdb::kCompressionDictBlock) {
|
|
|
|
out_file->Append(" Compression dictionary block handle: ");
|
|
|
|
out_file->Append(meta_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
2014-12-23 22:24:07 +01:00
|
|
|
} else if (strstr(meta_iter->key().ToString().c_str(),
|
|
|
|
"filter.rocksdb.") != nullptr) {
|
|
|
|
out_file->Append(" Filter block handle: ");
|
|
|
|
out_file->Append(meta_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
out_file->Append("\n");
|
|
|
|
} else {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Output TableProperties
|
|
|
|
const rocksdb::TableProperties* table_properties;
|
|
|
|
table_properties = rep_->table_properties.get();
|
|
|
|
|
|
|
|
if (table_properties != nullptr) {
|
|
|
|
out_file->Append(
|
|
|
|
"Table Properties:\n"
|
|
|
|
"--------------------------------------\n"
|
|
|
|
" ");
|
|
|
|
out_file->Append(table_properties->ToString("\n ", ": ").c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Output Filter blocks
|
|
|
|
if (!rep_->filter && !table_properties->filter_policy_name.empty()) {
|
|
|
|
// Support only BloomFilter as of now
|
|
|
|
rocksdb::BlockBasedTableOptions table_options;
|
|
|
|
table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(1));
|
|
|
|
if (table_properties->filter_policy_name.compare(
|
|
|
|
table_options.filter_policy->Name()) == 0) {
|
|
|
|
std::string filter_block_key = kFilterBlockPrefix;
|
|
|
|
filter_block_key.append(table_properties->filter_policy_name);
|
|
|
|
BlockHandle handle;
|
|
|
|
if (FindMetaBlock(meta_iter.get(), filter_block_key, &handle).ok()) {
|
|
|
|
BlockContents block;
|
2015-12-16 03:20:10 +01:00
|
|
|
if (ReadBlockContents(
|
|
|
|
rep_->file.get(), rep_->footer, ReadOptions(), handle, &block,
|
2016-07-19 18:44:03 +02:00
|
|
|
rep_->ioptions, false /*decompress*/,
|
2015-12-16 03:20:10 +01:00
|
|
|
Slice() /*compression dict*/, rep_->persistent_cache_options)
|
|
|
|
.ok()) {
|
2015-02-05 02:03:57 +01:00
|
|
|
rep_->filter.reset(new BlockBasedFilterBlockReader(
|
|
|
|
rep_->ioptions.prefix_extractor, table_options,
|
2016-06-03 19:47:47 +02:00
|
|
|
table_options.whole_key_filtering, std::move(block),
|
|
|
|
rep_->ioptions.statistics));
|
2014-12-23 22:24:07 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (rep_->filter) {
|
|
|
|
out_file->Append(
|
|
|
|
"Filter Details:\n"
|
|
|
|
"--------------------------------------\n"
|
|
|
|
" ");
|
|
|
|
out_file->Append(rep_->filter->ToString().c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Output Index block
|
|
|
|
s = DumpIndexBlock(out_file);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
// Output Data blocks
|
|
|
|
s = DumpDataBlocks(out_file);
|
|
|
|
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
void BlockBasedTable::Close() {
|
|
|
|
rep_->filter_entry.Release(rep_->table_options.block_cache.get());
|
|
|
|
rep_->index_entry.Release(rep_->table_options.block_cache.get());
|
2016-06-03 19:47:47 +02:00
|
|
|
// clean up index and filter blocks to avoid accessing dangling pointers
|
|
|
|
if (!rep_->table_options.no_block_cache) {
|
|
|
|
char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
|
|
|
// Get the filter block key
|
|
|
|
auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
|
|
|
|
rep_->footer.metaindex_handle(), cache_key);
|
|
|
|
rep_->table_options.block_cache.get()->Erase(key);
|
|
|
|
// Get the index block key
|
|
|
|
key = GetCacheKeyFromOffset(rep_->cache_key_prefix,
|
|
|
|
rep_->cache_key_prefix_size,
|
|
|
|
rep_->dummy_index_reader_offset, cache_key);
|
|
|
|
rep_->table_options.block_cache.get()->Erase(key);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-23 22:24:07 +01:00
|
|
|
Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
|
|
|
|
out_file->Append(
|
|
|
|
"Index Details:\n"
|
|
|
|
"--------------------------------------\n");
|
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<InternalIterator> blockhandles_iter(
|
|
|
|
NewIndexIterator(ReadOptions()));
|
2014-12-23 22:24:07 +01:00
|
|
|
Status s = blockhandles_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
out_file->Append("Can not read Index Block \n\n");
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
out_file->Append(" Block key hex dump: Data block handle\n");
|
|
|
|
out_file->Append(" Block key ascii\n\n");
|
|
|
|
for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
|
|
|
|
blockhandles_iter->Next()) {
|
|
|
|
s = blockhandles_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
Slice key = blockhandles_iter->key();
|
|
|
|
InternalKey ikey;
|
|
|
|
ikey.DecodeFrom(key);
|
|
|
|
|
|
|
|
out_file->Append(" HEX ");
|
|
|
|
out_file->Append(ikey.user_key().ToString(true).c_str());
|
|
|
|
out_file->Append(": ");
|
|
|
|
out_file->Append(blockhandles_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
|
|
|
|
std::string str_key = ikey.user_key().ToString();
|
|
|
|
std::string res_key("");
|
|
|
|
char cspace = ' ';
|
|
|
|
for (size_t i = 0; i < str_key.size(); i++) {
|
|
|
|
res_key.append(&str_key[i], 1);
|
|
|
|
res_key.append(1, cspace);
|
|
|
|
}
|
|
|
|
out_file->Append(" ASCII ");
|
|
|
|
out_file->Append(res_key.c_str());
|
|
|
|
out_file->Append("\n ------\n");
|
|
|
|
}
|
|
|
|
out_file->Append("\n");
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) {
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<InternalIterator> blockhandles_iter(
|
|
|
|
NewIndexIterator(ReadOptions()));
|
2014-12-23 22:24:07 +01:00
|
|
|
Status s = blockhandles_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
out_file->Append("Can not read Index Block \n\n");
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2016-08-13 01:34:11 +02:00
|
|
|
uint64_t datablock_size_min = std::numeric_limits<uint64_t>::max();
|
|
|
|
uint64_t datablock_size_max = 0;
|
|
|
|
uint64_t datablock_size_sum = 0;
|
|
|
|
|
2014-12-23 22:24:07 +01:00
|
|
|
size_t block_id = 1;
|
|
|
|
for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
|
|
|
|
block_id++, blockhandles_iter->Next()) {
|
|
|
|
s = blockhandles_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2016-08-13 01:34:11 +02:00
|
|
|
Slice bh_val = blockhandles_iter->value();
|
|
|
|
BlockHandle bh;
|
|
|
|
bh.DecodeFrom(&bh_val);
|
|
|
|
uint64_t datablock_size = bh.size();
|
|
|
|
datablock_size_min = std::min(datablock_size_min, datablock_size);
|
|
|
|
datablock_size_max = std::max(datablock_size_max, datablock_size);
|
|
|
|
datablock_size_sum += datablock_size;
|
|
|
|
|
2014-12-23 22:24:07 +01:00
|
|
|
out_file->Append("Data Block # ");
|
2015-04-24 04:17:57 +02:00
|
|
|
out_file->Append(rocksdb::ToString(block_id));
|
2014-12-23 22:24:07 +01:00
|
|
|
out_file->Append(" @ ");
|
|
|
|
out_file->Append(blockhandles_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
out_file->Append("--------------------------------------\n");
|
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<InternalIterator> datablock_iter;
|
2014-12-23 22:24:07 +01:00
|
|
|
datablock_iter.reset(
|
|
|
|
NewDataBlockIterator(rep_, ReadOptions(), blockhandles_iter->value()));
|
|
|
|
s = datablock_iter->status();
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
|
|
|
out_file->Append("Error reading the block - Skipped \n\n");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
|
|
|
|
datablock_iter->Next()) {
|
|
|
|
s = datablock_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
out_file->Append("Error reading the block - Skipped \n");
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
Slice key = datablock_iter->key();
|
|
|
|
Slice value = datablock_iter->value();
|
2016-08-22 19:27:50 +02:00
|
|
|
InternalKey ikey;
|
2014-12-23 22:24:07 +01:00
|
|
|
ikey.DecodeFrom(key);
|
|
|
|
|
|
|
|
out_file->Append(" HEX ");
|
|
|
|
out_file->Append(ikey.user_key().ToString(true).c_str());
|
|
|
|
out_file->Append(": ");
|
2016-08-22 19:27:50 +02:00
|
|
|
out_file->Append(value.ToString(true).c_str());
|
2014-12-23 22:24:07 +01:00
|
|
|
out_file->Append("\n");
|
|
|
|
|
|
|
|
std::string str_key = ikey.user_key().ToString();
|
2016-08-22 19:27:50 +02:00
|
|
|
std::string str_value = value.ToString();
|
2014-12-23 22:24:07 +01:00
|
|
|
std::string res_key(""), res_value("");
|
|
|
|
char cspace = ' ';
|
|
|
|
for (size_t i = 0; i < str_key.size(); i++) {
|
|
|
|
res_key.append(&str_key[i], 1);
|
|
|
|
res_key.append(1, cspace);
|
|
|
|
}
|
|
|
|
for (size_t i = 0; i < str_value.size(); i++) {
|
|
|
|
res_value.append(&str_value[i], 1);
|
|
|
|
res_value.append(1, cspace);
|
|
|
|
}
|
|
|
|
|
|
|
|
out_file->Append(" ASCII ");
|
|
|
|
out_file->Append(res_key.c_str());
|
|
|
|
out_file->Append(": ");
|
|
|
|
out_file->Append(res_value.c_str());
|
|
|
|
out_file->Append("\n ------\n");
|
|
|
|
}
|
|
|
|
out_file->Append("\n");
|
|
|
|
}
|
2016-08-13 01:34:11 +02:00
|
|
|
|
|
|
|
uint64_t num_datablocks = block_id - 1;
|
|
|
|
if (num_datablocks) {
|
|
|
|
double datablock_size_avg =
|
|
|
|
static_cast<double>(datablock_size_sum) / num_datablocks;
|
|
|
|
out_file->Append("Data Block Summary:\n");
|
|
|
|
out_file->Append("--------------------------------------");
|
|
|
|
out_file->Append("\n # data blocks: ");
|
|
|
|
out_file->Append(rocksdb::ToString(num_datablocks));
|
|
|
|
out_file->Append("\n min data block size: ");
|
|
|
|
out_file->Append(rocksdb::ToString(datablock_size_min));
|
|
|
|
out_file->Append("\n max data block size: ");
|
|
|
|
out_file->Append(rocksdb::ToString(datablock_size_max));
|
|
|
|
out_file->Append("\n avg data block size: ");
|
|
|
|
out_file->Append(rocksdb::ToString(datablock_size_avg));
|
|
|
|
out_file->Append("\n");
|
|
|
|
}
|
|
|
|
|
2014-12-23 22:24:07 +01:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2016-06-03 19:47:47 +02:00
|
|
|
namespace {
|
|
|
|
|
|
|
|
void DeleteCachedFilterEntry(const Slice& key, void* value) {
|
|
|
|
FilterBlockReader* filter = reinterpret_cast<FilterBlockReader*>(value);
|
|
|
|
if (filter->statistics() != nullptr) {
|
|
|
|
RecordTick(filter->statistics(), BLOCK_CACHE_FILTER_BYTES_EVICT,
|
|
|
|
filter->size());
|
|
|
|
}
|
|
|
|
delete filter;
|
|
|
|
}
|
|
|
|
|
|
|
|
void DeleteCachedIndexEntry(const Slice& key, void* value) {
|
|
|
|
IndexReader* index_reader = reinterpret_cast<IndexReader*>(value);
|
|
|
|
if (index_reader->statistics() != nullptr) {
|
|
|
|
RecordTick(index_reader->statistics(), BLOCK_CACHE_INDEX_BYTES_EVICT,
|
|
|
|
index_reader->usable_size());
|
|
|
|
}
|
|
|
|
delete index_reader;
|
|
|
|
}
|
|
|
|
|
|
|
|
} // anonymous namespace
|
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
} // namespace rocksdb
|