// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/block_based_table_reader.h"

#include <algorithm>
#include <array>
#include <limits>
#include <string>
#include <utility>
#include <vector>

#include "db/dbformat.h"
#include "db/pinned_iterators_manager.h"

#include "rocksdb/cache.h"
#include "rocksdb/comparator.h"
#include "rocksdb/env.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"
#include "rocksdb/table.h"
#include "rocksdb/table_properties.h"

#include "table/block.h"
#include "table/block_based_filter_block.h"
#include "table/block_based_table_factory.h"
#include "table/block_fetcher.h"
#include "table/block_prefix_index.h"
#include "table/filter_block.h"
#include "table/format.h"
#include "table/full_filter_block.h"
#include "table/get_context.h"
#include "table/internal_iterator.h"
#include "table/meta_blocks.h"
#include "table/multiget_context.h"
#include "table/partitioned_filter_block.h"
#include "table/persistent_cache_helper.h"
#include "table/sst_file_writer_collectors.h"
#include "table/two_level_iterator.h"

#include "monitoring/perf_context_imp.h"
#include "util/coding.h"
#include "util/crc32c.h"
#include "util/file_reader_writer.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/sync_point.h"
#include "util/xxhash.h"

namespace rocksdb {

extern const uint64_t kBlockBasedTableMagicNumber;
extern const std::string kHashIndexPrefixesBlock;
extern const std::string kHashIndexPrefixesMetadataBlock;

typedef BlockBasedTable::IndexReader IndexReader;

BlockBasedTable::~BlockBasedTable() {
  Close();
  delete rep_;
}

std::atomic<uint64_t> BlockBasedTable::next_cache_key_id_(0);

namespace {

// Read the block identified by "handle" from "file".
// The only relevant option is options.verify_checksums for now.
// On failure return non-OK.
// On success fill *result and return OK - caller owns *result
// @param uncompression_dict Data for presetting the compression library's
//    dictionary.
Status ReadBlockFromFile(
    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
    std::unique_ptr<Block>* result, const ImmutableCFOptions& ioptions,
    bool do_uncompress, bool maybe_compressed,
    const UncompressionDict& uncompression_dict,
    const PersistentCacheOptions& cache_options, SequenceNumber global_seqno,
    size_t read_amp_bytes_per_bit, MemoryAllocator* memory_allocator) {
  BlockContents contents;
  BlockFetcher block_fetcher(file, prefetch_buffer, footer, options, handle,
                             &contents, ioptions, do_uncompress,
                             maybe_compressed, uncompression_dict,
                             cache_options, memory_allocator);
  Status s = block_fetcher.ReadBlockContents();
  if (s.ok()) {
    result->reset(new Block(std::move(contents), global_seqno,
                            read_amp_bytes_per_bit, ioptions.statistics));
  }

  return s;
}
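
// Illustrative sketch (not part of the original source): a typical call, as
// the index readers below use it, reads one block and hands back a
// heap-allocated, already-decompressed Block. `footer`, `index_handle`,
// `ioptions` and `cache_options` are assumed to come from an already-opened
// table:
//
//   std::unique_ptr<Block> block;
//   Status s = ReadBlockFromFile(
//       file, /*prefetch_buffer=*/nullptr, footer, ReadOptions(), index_handle,
//       &block, ioptions, true /* decompress */, true /* maybe_compressed */,
//       UncompressionDict::GetEmptyDict(), cache_options,
//       kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */,
//       /*memory_allocator=*/nullptr);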

inline MemoryAllocator* GetMemoryAllocator(
    const BlockBasedTableOptions& table_options) {
  return table_options.block_cache.get()
             ? table_options.block_cache->memory_allocator()
             : nullptr;
}

inline MemoryAllocator* GetMemoryAllocatorForCompressedBlock(
    const BlockBasedTableOptions& table_options) {
  return table_options.block_cache_compressed.get()
             ? table_options.block_cache_compressed->memory_allocator()
             : nullptr;
}

// Delete the entry residing in the cache.
template <class Entry>
void DeleteCachedEntry(const Slice& /*key*/, void* value) {
  auto entry = reinterpret_cast<Entry*>(value);
  delete entry;
}
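
// Illustrative sketch (not part of the original source): this deleter is what
// gets registered with the block cache when a parsed entry is inserted, so the
// cache can free the heap object on eviction. Variable names are assumptions:
//
//   Block* raw_block = ...;  // heap-allocated; ownership passes to the cache
//   Cache::Handle* cache_handle = nullptr;
//   Status s = block_cache->Insert(key, raw_block,
//                                  raw_block->ApproximateMemoryUsage(),
//                                  &DeleteCachedEntry<Block>, &cache_handle);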

void DeleteCachedFilterEntry(const Slice& key, void* value);
void DeleteCachedIndexEntry(const Slice& key, void* value);
void DeleteCachedUncompressionDictEntry(const Slice& key, void* value);

// Release the cached entry and decrement its ref count.
void ReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle);
}

// Release the cached entry and decrement its ref count.
void ForceReleaseCachedEntry(void* arg, void* h) {
  Cache* cache = reinterpret_cast<Cache*>(arg);
  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
  cache->Release(handle, true /* force_erase */);
}

Slice GetCacheKeyFromOffset(const char* cache_key_prefix,
                            size_t cache_key_prefix_size, uint64_t offset,
                            char* cache_key) {
  assert(cache_key != nullptr);
  assert(cache_key_prefix_size != 0);
  assert(cache_key_prefix_size <= BlockBasedTable::kMaxCacheKeyPrefixSize);
  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
  char* end = EncodeVarint64(cache_key + cache_key_prefix_size, offset);
  return Slice(cache_key, static_cast<size_t>(end - cache_key));
}
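
// Illustrative sketch (not part of the original source): the cache key is the
// table-unique prefix followed by a varint64 of the offset. Given a table
// Rep* rep (assumed here), a 5-byte prefix and offset 8192 yield a 7-byte key,
// since 8192 encodes as a 2-byte varint:
//
//   char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize +
//                  kMaxVarint64Length];
//   Slice key = GetCacheKeyFromOffset(rep->cache_key_prefix,
//                                     rep->cache_key_prefix_size,
//                                     /*offset=*/8192, cache_key);
//   // key.size() == rep->cache_key_prefix_size + 2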

Cache::Handle* GetEntryFromCache(Cache* block_cache, const Slice& key,
                                 int level, Tickers block_cache_miss_ticker,
                                 Tickers block_cache_hit_ticker,
                                 uint64_t* block_cache_miss_stats,
                                 uint64_t* block_cache_hit_stats,
                                 Statistics* statistics,
                                 GetContext* get_context) {
  auto cache_handle = block_cache->Lookup(key, statistics);
  if (cache_handle != nullptr) {
    PERF_COUNTER_ADD(block_cache_hit_count, 1);
    PERF_COUNTER_BY_LEVEL_ADD(block_cache_hit_count, 1,
                              static_cast<uint32_t>(level));
    if (get_context != nullptr) {
      // overall cache hit
      get_context->get_context_stats_.num_cache_hit++;
      // total bytes read from cache
      get_context->get_context_stats_.num_cache_bytes_read +=
          block_cache->GetUsage(cache_handle);
      // block-type specific cache hit
      (*block_cache_hit_stats)++;
    } else {
      // overall cache hit
      RecordTick(statistics, BLOCK_CACHE_HIT);
      // total bytes read from cache
      RecordTick(statistics, BLOCK_CACHE_BYTES_READ,
                 block_cache->GetUsage(cache_handle));
      RecordTick(statistics, block_cache_hit_ticker);
    }
  } else {
    PERF_COUNTER_BY_LEVEL_ADD(block_cache_miss_count, 1,
                              static_cast<uint32_t>(level));
    if (get_context != nullptr) {
      // overall cache miss
      get_context->get_context_stats_.num_cache_miss++;
      // block-type specific cache miss
      (*block_cache_miss_stats)++;
    } else {
      RecordTick(statistics, BLOCK_CACHE_MISS);
      RecordTick(statistics, block_cache_miss_ticker);
    }
  }

  return cache_handle;
}
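
// Illustrative sketch (not part of the original source): a lookup for a data
// block would pass the data-block-specific tickers so statistics can be broken
// down per block type; the counter variables below are assumptions:
//
//   uint64_t data_miss_count = 0, data_hit_count = 0;
//   Cache::Handle* handle = GetEntryFromCache(
//       block_cache, key, level, BLOCK_CACHE_DATA_MISS, BLOCK_CACHE_DATA_HIT,
//       &data_miss_count, &data_hit_count, statistics, get_context);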

// For hash based index, return true if prefix_extractor and
// prefix_extractor_block mismatch, false otherwise. This flag will be used
// as total_order_seek via NewIndexIterator
bool PrefixExtractorChanged(const TableProperties* table_properties,
                            const SliceTransform* prefix_extractor) {
  // BlockBasedTableOptions::kHashSearch requires prefix_extractor to be set.
  // Turn off hash index if prefix_extractor is not set; if prefix_extractor
  // is set but prefix_extractor_block is not set, also disable hash index.
  if (prefix_extractor == nullptr || table_properties == nullptr ||
      table_properties->prefix_extractor_name.empty()) {
    return true;
  }

  // prefix_extractor and prefix_extractor_block are both non-empty
  if (table_properties->prefix_extractor_name.compare(
          prefix_extractor->Name()) != 0) {
    return true;
  } else {
    return false;
  }
}
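
// Illustrative example (not part of the original source), assuming the usual
// transform names:
//
//   // table built with options.prefix_extractor.reset(NewFixedPrefixTransform(8)),
//   // so prefix_extractor_name is "rocksdb.FixedPrefix.8"
//   // reader opened with the same transform      -> returns false (hash index usable)
//   // reader opened with NewCappedPrefixTransform(8) or no prefix extractor
//   //                                            -> returns true (total-order seek)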

}  // namespace

// Index that allows binary search lookup in a two-level index structure.
class PartitionIndexReader : public IndexReader {
 public:
  // Read the partition index from the file and create an instance for
  // `PartitionIndexReader`.
  // On success, index_reader will be populated; otherwise it will remain
  // unmodified.
  static Status Create(BlockBasedTable* table, RandomAccessFileReader* file,
                       FilePrefetchBuffer* prefetch_buffer,
                       const Footer& footer, const BlockHandle& index_handle,
                       const ImmutableCFOptions& ioptions,
                       const InternalKeyComparator* icomparator,
                       IndexReader** index_reader,
                       const PersistentCacheOptions& cache_options,
                       const int level, const bool index_key_includes_seq,
                       const bool index_value_is_full,
                       MemoryAllocator* memory_allocator) {
    std::unique_ptr<Block> index_block;
    auto s = ReadBlockFromFile(
        file, prefetch_buffer, footer, ReadOptions(), index_handle,
        &index_block, ioptions, true /* decompress */,
        true /*maybe_compressed*/, UncompressionDict::GetEmptyDict(),
        cache_options, kDisableGlobalSequenceNumber,
        0 /* read_amp_bytes_per_bit */, memory_allocator);

    if (s.ok()) {
      *index_reader = new PartitionIndexReader(
          table, icomparator, std::move(index_block), ioptions.statistics,
          level, index_key_includes_seq, index_value_is_full);
    }

    return s;
  }

  // return a two-level iterator: first level is on the partition index
  InternalIteratorBase<BlockHandle>* NewIterator(
      IndexBlockIter* /*iter*/ = nullptr, bool /*dont_care*/ = true,
      bool fill_cache = true) override {
    Statistics* kNullStats = nullptr;
    // Filters are already checked before seeking the index
    if (!partition_map_.empty()) {
      // We don't return pinned data from index blocks, so no need
      // to set `block_contents_pinned`.
      return NewTwoLevelIterator(
          new BlockBasedTable::PartitionedIndexIteratorState(
              table_, &partition_map_, index_key_includes_seq_,
              index_value_is_full_),
          index_block_->NewIterator<IndexBlockIter>(
              icomparator_, icomparator_->user_comparator(), nullptr,
              kNullStats, true, index_key_includes_seq_, index_value_is_full_));
    } else {
      auto ro = ReadOptions();
      ro.fill_cache = fill_cache;
      bool kIsIndex = true;
      // We don't return pinned data from index blocks, so no need
      // to set `block_contents_pinned`.
      return new BlockBasedTableIterator<IndexBlockIter, BlockHandle>(
          table_, ro, *icomparator_,
          index_block_->NewIterator<IndexBlockIter>(
              icomparator_, icomparator_->user_comparator(), nullptr,
              kNullStats, true, index_key_includes_seq_, index_value_is_full_),
          false, true, /* prefix_extractor */ nullptr, kIsIndex,
          index_key_includes_seq_, index_value_is_full_);
    }
    // TODO(myabandeh): Update TwoLevelIterator to be able to make use of
    // on-stack BlockIter while the state is on heap. Currently it assumes
    // the first level iter is always on heap and will attempt to delete it
    // in its destructor.
  }

  void CacheDependencies(bool pin) override {
    // Before reading the partitions, prefetch them to avoid lots of IOs
    auto rep = table_->rep_;
    IndexBlockIter biter;
    BlockHandle handle;
    Statistics* kNullStats = nullptr;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    index_block_->NewIterator<IndexBlockIter>(
        icomparator_, icomparator_->user_comparator(), &biter, kNullStats, true,
        index_key_includes_seq_, index_value_is_full_);
    // Index partitions are assumed to be consecutive. Prefetch them all.
    // Read the first block offset
    biter.SeekToFirst();
    if (!biter.Valid()) {
      // Empty index.
      return;
    }
    handle = biter.value();
    uint64_t prefetch_off = handle.offset();

    // Read the last block's offset
    biter.SeekToLast();
    if (!biter.Valid()) {
      // Empty index.
      return;
    }
    handle = biter.value();
    uint64_t last_off = handle.offset() + handle.size() + kBlockTrailerSize;
    uint64_t prefetch_len = last_off - prefetch_off;
    std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
    auto& file = table_->rep_->file;
    prefetch_buffer.reset(new FilePrefetchBuffer());
    Status s = prefetch_buffer->Prefetch(file.get(), prefetch_off,
                                         static_cast<size_t>(prefetch_len));

    // After prefetch, read the partitions one by one
    biter.SeekToFirst();
    auto ro = ReadOptions();
    for (; biter.Valid(); biter.Next()) {
      handle = biter.value();
      CachableEntry<Block> block;
      const bool is_index = true;
      // TODO: Support counter batch update for partitioned index and
      // filter blocks
      s = table_->MaybeReadBlockAndLoadToCache(
          prefetch_buffer.get(), rep, ro, handle,
          UncompressionDict::GetEmptyDict(), &block, is_index,
          nullptr /* get_context */);

      assert(s.ok() || block.GetValue() == nullptr);
      if (s.ok() && block.GetValue() != nullptr) {
        if (block.IsCached()) {
          if (pin) {
            partition_map_[handle.offset()] = std::move(block);
          }
        }
      }
    }
  }
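
  // Illustrative arithmetic (not part of the original source): with the first
  // partition at offset 4096 and the last partition at offset 61440 with size
  // 2048, the prefetch covers
  //   prefetch_off = 4096
  //   prefetch_len = (61440 + 2048 + kBlockTrailerSize) - 4096
  //                = 63488 + 5 - 4096 = 59397 bytes
  // (kBlockTrailerSize is 5: a 1-byte compression type plus a 4-byte checksum),
  // i.e. one contiguous read that spans every partition block.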

  size_t size() const override { return index_block_->size(); }
  size_t usable_size() const override { return index_block_->usable_size(); }

  size_t ApproximateMemoryUsage() const override {
    assert(index_block_);
    size_t usage = index_block_->ApproximateMemoryUsage();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size((void*)this);
#else
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    // TODO(myabandeh): more accurate estimate of partition_map_ mem usage
    return usage;
  }

 private:
  PartitionIndexReader(BlockBasedTable* table,
                       const InternalKeyComparator* icomparator,
                       std::unique_ptr<Block>&& index_block, Statistics* stats,
                       const int /*level*/, const bool index_key_includes_seq,
                       const bool index_value_is_full)
      : IndexReader(icomparator, stats),
        table_(table),
        index_block_(std::move(index_block)),
        index_key_includes_seq_(index_key_includes_seq),
        index_value_is_full_(index_value_is_full) {
    assert(index_block_ != nullptr);
  }
  BlockBasedTable* table_;
  std::unique_ptr<Block> index_block_;
  std::unordered_map<uint64_t, CachableEntry<Block>> partition_map_;
  const bool index_key_includes_seq_;
  const bool index_value_is_full_;
};
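
// Illustrative sketch (not part of the original source): conceptually, the
// two-level iterator returned by PartitionIndexReader::NewIterator() resolves
// a key in two steps; the helper names below are assumptions, not real code:
//
//   // 1. Binary-search the top-level index for the partition covering `key`.
//   BlockHandle partition_handle = SeekTopLevelIndex(key);
//   // 2. Load that sub-index block (from partition_map_ when pinned,
//   //    otherwise through the block cache) and binary-search it for the
//   //    handle of the data block that may contain `key`.
//   BlockHandle data_handle = SeekSubIndex(partition_handle, key);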

// Index that allows binary search lookup for the first key of each block.
// This class can be viewed as a thin wrapper for `Block` class which already
// supports binary search.
class BinarySearchIndexReader : public IndexReader {
 public:
  // Read index from the file and create an instance for
  // `BinarySearchIndexReader`.
  // On success, index_reader will be populated; otherwise it will remain
  // unmodified.
  static Status Create(RandomAccessFileReader* file,
                       FilePrefetchBuffer* prefetch_buffer,
                       const Footer& footer, const BlockHandle& index_handle,
                       const ImmutableCFOptions& ioptions,
                       const InternalKeyComparator* icomparator,
                       IndexReader** index_reader,
                       const PersistentCacheOptions& cache_options,
                       const bool index_key_includes_seq,
                       const bool index_value_is_full,
                       MemoryAllocator* memory_allocator) {
    std::unique_ptr<Block> index_block;
    auto s = ReadBlockFromFile(
        file, prefetch_buffer, footer, ReadOptions(), index_handle,
        &index_block, ioptions, true /* decompress */,
        true /*maybe_compressed*/, UncompressionDict::GetEmptyDict(),
        cache_options, kDisableGlobalSequenceNumber,
        0 /* read_amp_bytes_per_bit */, memory_allocator);

    if (s.ok()) {
      *index_reader = new BinarySearchIndexReader(
          icomparator, std::move(index_block), ioptions.statistics,
          index_key_includes_seq, index_value_is_full);
    }

    return s;
  }

  InternalIteratorBase<BlockHandle>* NewIterator(
      IndexBlockIter* iter = nullptr, bool /*dont_care*/ = true,
      bool /*dont_care*/ = true) override {
    Statistics* kNullStats = nullptr;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    return index_block_->NewIterator<IndexBlockIter>(
        icomparator_, icomparator_->user_comparator(), iter, kNullStats, true,
        index_key_includes_seq_, index_value_is_full_);
  }

  size_t size() const override { return index_block_->size(); }
  size_t usable_size() const override { return index_block_->usable_size(); }

  size_t ApproximateMemoryUsage() const override {
    assert(index_block_);
    size_t usage = index_block_->ApproximateMemoryUsage();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size((void*)this);
#else
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    return usage;
  }

 private:
  BinarySearchIndexReader(const InternalKeyComparator* icomparator,
                          std::unique_ptr<Block>&& index_block,
                          Statistics* stats, const bool index_key_includes_seq,
                          const bool index_value_is_full)
      : IndexReader(icomparator, stats),
        index_block_(std::move(index_block)),
        index_key_includes_seq_(index_key_includes_seq),
        index_value_is_full_(index_value_is_full) {
    assert(index_block_ != nullptr);
  }
  std::unique_ptr<Block> index_block_;
  const bool index_key_includes_seq_;
  const bool index_value_is_full_;
};

// Index that leverages an internal hash table to quicken the lookup for a given
// key.
class HashIndexReader : public IndexReader {
 public:
  static Status Create(
      const SliceTransform* hash_key_extractor, const Footer& footer,
      RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
      const ImmutableCFOptions& ioptions,
      const InternalKeyComparator* icomparator, const BlockHandle& index_handle,
      InternalIterator* meta_index_iter, IndexReader** index_reader,
      bool /*hash_index_allow_collision*/,
      const PersistentCacheOptions& cache_options,
      const bool index_key_includes_seq, const bool index_value_is_full,
      MemoryAllocator* memory_allocator) {
    std::unique_ptr<Block> index_block;
    auto s = ReadBlockFromFile(
        file, prefetch_buffer, footer, ReadOptions(), index_handle,
        &index_block, ioptions, true /* decompress */,
        true /*maybe_compressed*/, UncompressionDict::GetEmptyDict(),
        cache_options, kDisableGlobalSequenceNumber,
        0 /* read_amp_bytes_per_bit */, memory_allocator);

    if (!s.ok()) {
      return s;
    }

    // Note, failure to create prefix hash index does not need to be a
    // hard error. We can still fall back to the original binary search index.
    // So, Create will succeed regardless, from this point on.

    auto new_index_reader = new HashIndexReader(
        icomparator, std::move(index_block), ioptions.statistics,
        index_key_includes_seq, index_value_is_full);
    *index_reader = new_index_reader;

    // Get prefixes block
    BlockHandle prefixes_handle;
    s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesBlock,
                      &prefixes_handle);
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    // Get index metadata block
    BlockHandle prefixes_meta_handle;
    s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesMetadataBlock,
                      &prefixes_meta_handle);
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    // Read contents for the blocks
    BlockContents prefixes_contents;
    BlockFetcher prefixes_block_fetcher(
        file, prefetch_buffer, footer, ReadOptions(), prefixes_handle,
        &prefixes_contents, ioptions, true /*decompress*/,
        true /*maybe_compressed*/, UncompressionDict::GetEmptyDict(),
        cache_options, memory_allocator);
    s = prefixes_block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      return s;
    }
    BlockContents prefixes_meta_contents;
    BlockFetcher prefixes_meta_block_fetcher(
        file, prefetch_buffer, footer, ReadOptions(), prefixes_meta_handle,
        &prefixes_meta_contents, ioptions, true /*decompress*/,
        true /*maybe_compressed*/, UncompressionDict::GetEmptyDict(),
        cache_options, memory_allocator);
    s = prefixes_meta_block_fetcher.ReadBlockContents();
    if (!s.ok()) {
      // TODO: log error
      return Status::OK();
    }

    BlockPrefixIndex* prefix_index = nullptr;
    s = BlockPrefixIndex::Create(hash_key_extractor, prefixes_contents.data,
                                 prefixes_meta_contents.data, &prefix_index);
    // TODO: log error
    if (s.ok()) {
      new_index_reader->prefix_index_.reset(prefix_index);
    }

    return Status::OK();
  }

  InternalIteratorBase<BlockHandle>* NewIterator(
      IndexBlockIter* iter = nullptr, bool total_order_seek = true,
      bool /*dont_care*/ = true) override {
    Statistics* kNullStats = nullptr;
    // We don't return pinned data from index blocks, so no need
    // to set `block_contents_pinned`.
    return index_block_->NewIterator<IndexBlockIter>(
        icomparator_, icomparator_->user_comparator(), iter, kNullStats,
        total_order_seek, index_key_includes_seq_, index_value_is_full_,
        false /* block_contents_pinned */, prefix_index_.get());
  }

  size_t size() const override { return index_block_->size(); }
  size_t usable_size() const override { return index_block_->usable_size(); }

  size_t ApproximateMemoryUsage() const override {
    assert(index_block_);
    size_t usage = index_block_->ApproximateMemoryUsage();
    usage += prefixes_contents_.usable_size();
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
    usage += malloc_usable_size((void*)this);
#else
    if (prefix_index_) {
      usage += prefix_index_->ApproximateMemoryUsage();
    }
    usage += sizeof(*this);
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    return usage;
  }

 private:
  HashIndexReader(const InternalKeyComparator* icomparator,
                  std::unique_ptr<Block>&& index_block, Statistics* stats,
                  const bool index_key_includes_seq,
                  const bool index_value_is_full)
      : IndexReader(icomparator, stats),
        index_block_(std::move(index_block)),
        index_key_includes_seq_(index_key_includes_seq),
        index_value_is_full_(index_value_is_full) {
    assert(index_block_ != nullptr);
  }

  ~HashIndexReader() override {}

  std::unique_ptr<Block> index_block_;
  std::unique_ptr<BlockPrefixIndex> prefix_index_;
  BlockContents prefixes_contents_;
  const bool index_key_includes_seq_;
  const bool index_value_is_full_;
};
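
// Illustrative sketch (not part of the original source): the prefix index only
// helps prefix seeks (total_order_seek == false). Conceptually, a seek:
//
//   // hashes the target key's prefix to the small set of index entries that
//   // may contain it, binary-searches just that range of the index block,
//   // and falls back to a full binary search on a hash miss -- so collisions
//   // cost time, never correctness.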

// Helper function to set up the cache key's prefix for the Table.
void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep, uint64_t file_size) {
  assert(kMaxCacheKeyPrefixSize >= 10);
  rep->cache_key_prefix_size = 0;
  rep->compressed_cache_key_prefix_size = 0;
  if (rep->table_options.block_cache != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache.get(), rep->file->file(),
                        &rep->cache_key_prefix[0], &rep->cache_key_prefix_size);
    // Create a dummy offset for the index reader, which is beyond the file
    // size.
    rep->dummy_index_reader_offset =
        file_size + rep->table_options.block_cache->NewId();
  }
  if (rep->table_options.persistent_cache != nullptr) {
    GenerateCachePrefix(/*cache=*/nullptr, rep->file->file(),
                        &rep->persistent_cache_key_prefix[0],
                        &rep->persistent_cache_key_prefix_size);
  }
  if (rep->table_options.block_cache_compressed != nullptr) {
    GenerateCachePrefix(rep->table_options.block_cache_compressed.get(),
                        rep->file->file(), &rep->compressed_cache_key_prefix[0],
                        &rep->compressed_cache_key_prefix_size);
  }
}

void BlockBasedTable::GenerateCachePrefix(Cache* cc, RandomAccessFile* file,
                                          char* buffer, size_t* size) {
  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (cc && *size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}
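
// Illustrative note (not part of the original source): on filesystems where
// GetUniqueId() is unsupported it returns 0, so the prefix falls back to a
// varint-encoded Cache::NewId() value. For example, a NewId() of 300 encodes
// as two bytes (0xAC 0x02), giving *size == 2.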

void BlockBasedTable::GenerateCachePrefix(Cache* cc, WritableFile* file,
                                          char* buffer, size_t* size) {
  // generate an id from the file
  *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize);

  // If the prefix wasn't generated or was too long,
  // create one from the cache.
  if (*size == 0) {
    char* end = EncodeVarint64(buffer, cc->NewId());
    *size = static_cast<size_t>(end - buffer);
  }
}

namespace {
// Return True if table_properties has `user_prop_name` with a `true` value,
// or if it doesn't contain this property (for backward compatibility).
bool IsFeatureSupported(const TableProperties& table_properties,
                        const std::string& user_prop_name, Logger* info_log) {
  auto& props = table_properties.user_collected_properties;
  auto pos = props.find(user_prop_name);
  // Older version doesn't have this value set. Skip this check.
  if (pos != props.end()) {
    if (pos->second == kPropFalse) {
      return false;
    } else if (pos->second != kPropTrue) {
      ROCKS_LOG_WARN(info_log, "Property %s has invalid value %s",
                     user_prop_name.c_str(), pos->second.c_str());
    }
  }
  return true;
}
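
// Illustrative sketch (not part of the original source): a property written as
// kPropTrue/kPropFalse by a table-properties collector would be gated like
// this; the property name below is hypothetical:
//
//   bool feature_ok = IsFeatureSupported(
//       *rep->table_properties, "rocksdb.example.feature",
//       rep->ioptions.info_log);
//   // absent property  -> true (old files are assumed to support the feature)
//   // value kPropFalse -> false
//   // any other value  -> warn and treat as supported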

// Caller has to ensure seqno is not nullptr.
Status GetGlobalSequenceNumber(const TableProperties& table_properties,
                               SequenceNumber largest_seqno,
                               SequenceNumber* seqno) {
  const auto& props = table_properties.user_collected_properties;
  const auto version_pos = props.find(ExternalSstFilePropertyNames::kVersion);
  const auto seqno_pos = props.find(ExternalSstFilePropertyNames::kGlobalSeqno);

  *seqno = kDisableGlobalSequenceNumber;
  if (version_pos == props.end()) {
    if (seqno_pos != props.end()) {
      std::array<char, 200> msg_buf;
      // This is not an external sst file, global_seqno is not supported.
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "A non-external sst file has a global seqno property with value %s",
          seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  uint32_t version = DecodeFixed32(version_pos->second.c_str());
  if (version < 2) {
    if (seqno_pos != props.end() || version != 1) {
      std::array<char, 200> msg_buf;
      // This is a v1 external sst file, global_seqno is not supported.
      snprintf(msg_buf.data(), msg_buf.max_size(),
               "An external sst file with version %u has a global seqno "
               "property with value %s",
               version, seqno_pos->second.c_str());
      return Status::Corruption(msg_buf.data());
    }
    return Status::OK();
  }

  // Since we have a plan to deprecate global_seqno, we do not return failure
  // if seqno_pos == props.end(). We rely on version_pos to detect whether the
  // SST is external.
  SequenceNumber global_seqno(0);
  if (seqno_pos != props.end()) {
    global_seqno = DecodeFixed64(seqno_pos->second.c_str());
  }
  // SstTableReader opens the table reader with kMaxSequenceNumber as
  // largest_seqno to denote that it is unknown.
  if (largest_seqno < kMaxSequenceNumber) {
    if (global_seqno == 0) {
      global_seqno = largest_seqno;
    }
    if (global_seqno != largest_seqno) {
      std::array<char, 200> msg_buf;
      snprintf(
          msg_buf.data(), msg_buf.max_size(),
          "An external sst file with version %u has a global seqno property "
          "with value %s, while the largest seqno in the file is %llu",
          version, seqno_pos->second.c_str(),
          static_cast<unsigned long long>(largest_seqno));
      return Status::Corruption(msg_buf.data());
    }
  }
  *seqno = global_seqno;

  if (global_seqno > kMaxSequenceNumber) {
    std::array<char, 200> msg_buf;
    snprintf(msg_buf.data(), msg_buf.max_size(),
             "An external sst file with version %u has a global seqno "
             "property with value %llu, which is greater than "
             "kMaxSequenceNumber",
             version, static_cast<unsigned long long>(global_seqno));
    return Status::Corruption(msg_buf.data());
  }

  return Status::OK();
}
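
// Illustrative summary (not part of the original source) of the decision
// logic above:
//
//   no version property, no seqno property      -> *seqno = kDisableGlobalSequenceNumber, OK
//   no version property, seqno property present -> Corruption (not an external file)
//   version < 2                                 -> OK only if version == 1 and
//                                                  there is no seqno property
//   version >= 2                                -> *seqno = stored seqno, or
//                                                  largest_seqno when the stored
//                                                  value is 0/absent; a mismatch
//                                                  with a known largest_seqno, or
//                                                  a value > kMaxSequenceNumber,
//                                                  is Corruption.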
}  // namespace

Slice BlockBasedTable::GetCacheKey(const char* cache_key_prefix,
                                   size_t cache_key_prefix_size,
                                   const BlockHandle& handle, char* cache_key) {
  assert(cache_key != nullptr);
  assert(cache_key_prefix_size != 0);
  assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize);
  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
  char* end =
      EncodeVarint64(cache_key + cache_key_prefix_size, handle.offset());
  return Slice(cache_key, static_cast<size_t>(end - cache_key));
}
|
|
|
|
|
2014-09-05 01:18:36 +02:00
|
|
|
Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
|
|
|
|
const EnvOptions& env_options,
|
2014-01-24 19:57:15 +01:00
|
|
|
const BlockBasedTableOptions& table_options,
|
2014-01-27 22:53:22 +01:00
|
|
|
const InternalKeyComparator& internal_comparator,
|
2018-11-09 20:17:34 +01:00
|
|
|
std::unique_ptr<RandomAccessFileReader>&& file,
|
2014-01-24 19:57:15 +01:00
|
|
|
uint64_t file_size,
|
2018-11-09 20:17:34 +01:00
|
|
|
std::unique_ptr<TableReader>* table_reader,
|
2018-05-21 23:33:55 +02:00
|
|
|
const SliceTransform* prefix_extractor,
|
2016-07-20 20:23:31 +02:00
|
|
|
const bool prefetch_index_and_filter_in_cache,
|
2018-06-28 02:09:29 +02:00
|
|
|
const bool skip_filters, const int level,
|
2018-07-20 23:31:27 +02:00
|
|
|
const bool immortal_table,
|
2018-07-28 01:00:26 +02:00
|
|
|
const SequenceNumber largest_seqno,
|
2018-07-20 23:31:27 +02:00
|
|
|
TailPrefetchStats* tail_prefetch_stats) {
|
2013-10-30 18:52:33 +01:00
|
|
|
table_reader->reset();
|
2013-01-09 19:44:30 +01:00
|
|
|
|
2018-12-07 22:15:09 +01:00
|
|
|
Status s;
|
2015-01-13 23:33:04 +01:00
|
|
|
Footer footer;
|
2017-08-11 20:59:13 +02:00
|
|
|
std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
|
|
|
|
|
2018-07-20 01:07:53 +02:00
|
|
|
// prefetch both index and filters, down to all partitions
|
|
|
|
const bool prefetch_all = prefetch_index_and_filter_in_cache || level == 0;
|
|
|
|
const bool preload_all = !table_options.cache_index_and_filter_blocks;
|
2018-07-20 23:31:27 +02:00
|
|
|
|
2018-12-07 22:15:09 +01:00
|
|
|
s = PrefetchTail(file.get(), file_size, tail_prefetch_stats, prefetch_all,
|
|
|
|
preload_all, &prefetch_buffer);
|
|
|
|
|
|
|
|
// Read in the following order:
|
|
|
|
// 1. Footer
|
|
|
|
// 2. [metaindex block]
|
|
|
|
// 3. [meta block: properties]
|
|
|
|
// 4. [meta block: range deletion tombstone]
|
|
|
|
// 5. [meta block: compression dictionary]
|
|
|
|
// 6. [meta block: index]
|
|
|
|
// 7. [meta block: filter]
|
2017-08-11 20:59:13 +02:00
|
|
|
s = ReadFooterFromFile(file.get(), prefetch_buffer.get(), file_size, &footer,
|
|
|
|
kBlockBasedTableMagicNumber);
|
2014-10-31 19:41:15 +01:00
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
2015-01-15 01:24:24 +01:00
|
|
|
if (!BlockBasedTableSupportedVersion(footer.version())) {
|
2015-01-13 23:33:04 +01:00
|
|
|
return Status::Corruption(
|
2015-01-15 01:24:24 +01:00
|
|
|
"Unknown Footer version. Maybe this file was created with newer "
|
2015-01-13 23:33:04 +01:00
|
|
|
"version of RocksDB?");
|
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2017-04-15 03:43:32 +02:00
|
|
|
// We've successfully read the footer. We are ready to serve requests.
|
2016-08-24 03:20:41 +02:00
|
|
|
  // Better not to mutate rep_ after creation; e.g. the internal_prefix_transform
|
|
|
|
// raw pointer will be used to create HashIndexReader, whose reset may
|
|
|
|
// access a dangling pointer.
|
Skip bottom-level filter block caching when hit-optimized
Summary:
When Get() or NewIterator() trigger file loads, skip caching the filter block if
(1) optimize_filters_for_hits is set and (2) the file is on the bottommost
level. Also skip checking filters under the same conditions, which means that
for a preloaded file or a file that was trivially-moved to the bottom level, its
filter block will eventually expire from the cache.
- added parameters/instance variables in various places in order to propagate the config ("skip_filters") from version_set to block_based_table_reader
- in BlockBasedTable::Rep, this optimization prevents filter from being loaded when the file is opened simply by setting filter_policy = nullptr
- in BlockBasedTable::Get/BlockBasedTable::NewIterator, this optimization prevents filter from being used (even if it was loaded already) by setting filter = nullptr
Test Plan:
updated unit test:
$ ./db_test --gtest_filter=DBTest.OptimizeFiltersForHits
will also run 'make check'
Reviewers: sdong, igor, paultuckfield, anthony, rven, kradhakrishnan, IslamAbdelRahman, yhchiang
Reviewed By: yhchiang
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D51633
2015-12-23 19:15:07 +01:00
|
|
|
Rep* rep = new BlockBasedTable::Rep(ioptions, env_options, table_options,
|
2018-10-24 21:10:59 +02:00
|
|
|
internal_comparator, skip_filters, level,
|
2018-06-28 02:09:29 +02:00
|
|
|
immortal_table);
|
2013-11-13 07:46:51 +01:00
|
|
|
rep->file = std::move(file);
|
2014-05-01 20:09:32 +02:00
|
|
|
rep->footer = footer;
|
2014-03-01 03:19:07 +01:00
|
|
|
rep->index_type = table_options.index_type;
|
2014-06-13 04:03:22 +02:00
|
|
|
rep->hash_index_allow_collision = table_options.hash_index_allow_collision;
|
2016-08-24 03:20:41 +02:00
|
|
|
// We need to wrap data with internal_prefix_transform to make sure it can
|
|
|
|
  // handle prefixes correctly.
|
2016-09-08 23:45:32 +02:00
|
|
|
rep->internal_prefix_transform.reset(
|
2018-05-21 23:33:55 +02:00
|
|
|
new InternalKeySliceTransform(prefix_extractor));
|
Index Reader should not be reused after DB restart
Summary:
In block based table reader, wow we put index reader to block cache, which can be retrieved after DB restart. However, index reader may reference internal comparator, which can be destroyed after DB restarts, causing problems.
Fix it by making cache key identical per table reader.
Test Plan: Add a new test which failed with out the commit but now pass.
Reviewers: IslamAbdelRahman
Reviewed By: IslamAbdelRahman
Subscribers: maro, yhchiang, kradhakrishnan, leveldb, andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D55287
2016-03-11 00:16:11 +01:00
|
|
|
SetupCacheKeyPrefix(rep, file_size);
|
2018-11-09 20:17:34 +01:00
|
|
|
std::unique_ptr<BlockBasedTable> new_table(new BlockBasedTable(rep));
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2015-12-16 03:20:10 +01:00
|
|
|
// page cache options
|
|
|
|
rep->persistent_cache_options =
|
|
|
|
PersistentCacheOptions(rep->table_options.persistent_cache,
|
|
|
|
std::string(rep->persistent_cache_key_prefix,
|
|
|
|
rep->persistent_cache_key_prefix_size),
|
2018-12-07 22:15:09 +01:00
|
|
|
rep->ioptions.statistics);
|
2015-12-16 03:20:10 +01:00
|
|
|
|
2019-04-30 18:46:40 +02:00
|
|
|
// Meta-blocks are not dictionary compressed. Explicitly set the dictionary
|
|
|
|
// handle to null, otherwise it may be seen as uninitialized during the below
|
|
|
|
// meta-block reads.
|
|
|
|
rep->compression_dict_handle = BlockHandle::NullBlockHandle();
|
|
|
|
|
2018-12-07 22:15:09 +01:00
|
|
|
// Read metaindex
|
2013-11-13 07:46:51 +01:00
|
|
|
std::unique_ptr<Block> meta;
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<InternalIterator> meta_iter;
|
2017-08-11 20:59:13 +02:00
|
|
|
s = ReadMetaBlock(rep, prefetch_buffer.get(), &meta, &meta_iter);
|
2014-10-31 19:41:15 +01:00
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2018-12-07 22:15:09 +01:00
|
|
|
s = ReadPropertiesBlock(rep, prefetch_buffer.get(), meta_iter.get(),
|
|
|
|
largest_seqno);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
s = ReadRangeDelBlock(rep, prefetch_buffer.get(), meta_iter.get(),
|
2018-12-21 06:57:18 +01:00
|
|
|
internal_comparator);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
2018-12-07 22:15:09 +01:00
|
|
|
s = PrefetchIndexAndFilterBlocks(rep, prefetch_buffer.get(), meta_iter.get(),
|
|
|
|
new_table.get(), prefix_extractor,
|
|
|
|
prefetch_all, table_options, level,
|
|
|
|
prefetch_index_and_filter_in_cache);
|
|
|
|
|
|
|
|
if (s.ok()) {
|
|
|
|
// Update tail prefetch stats
|
|
|
|
assert(prefetch_buffer.get() != nullptr);
|
|
|
|
if (tail_prefetch_stats != nullptr) {
|
|
|
|
assert(prefetch_buffer->min_offset_read() < file_size);
|
|
|
|
tail_prefetch_stats->RecordEffectiveSize(
|
|
|
|
static_cast<size_t>(file_size) - prefetch_buffer->min_offset_read());
|
2015-09-03 00:36:47 +02:00
|
|
|
}
|
2018-12-07 22:15:09 +01:00
|
|
|
|
|
|
|
*table_reader = std::move(new_table);
|
2015-09-03 00:36:47 +02:00
|
|
|
}
|
|
|
|
|
2018-12-07 22:15:09 +01:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
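// Prefetch the file tail (footer plus meta-blocks) in a single read. The size
// comes from TailPrefetchStats history when available, otherwise a heuristic
// (512KB when index/filter will be read, 4KB otherwise). Buffered files rely
// on file->Prefetch(); direct-I/O files pull the bytes into the
// FilePrefetchBuffer instead.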
Status BlockBasedTable::PrefetchTail(
|
|
|
|
RandomAccessFileReader* file, uint64_t file_size,
|
|
|
|
TailPrefetchStats* tail_prefetch_stats, const bool prefetch_all,
|
|
|
|
const bool preload_all,
|
|
|
|
std::unique_ptr<FilePrefetchBuffer>* prefetch_buffer) {
|
|
|
|
size_t tail_prefetch_size = 0;
|
|
|
|
if (tail_prefetch_stats != nullptr) {
|
|
|
|
// Multiple threads may get a 0 (no history) when running in parallel,
|
|
|
|
// but it will get cleared after the first of them finishes.
|
|
|
|
tail_prefetch_size = tail_prefetch_stats->GetSuggestedPrefetchSize();
|
|
|
|
}
|
|
|
|
if (tail_prefetch_size == 0) {
|
|
|
|
    // Before reading the footer, prefetch the tail of the file. Do more
|
|
|
|
// readahead if we're going to read index/filter.
|
|
|
|
// TODO: This may incorrectly select small readahead in case partitioned
|
|
|
|
// index/filter is enabled and top-level partition pinning is enabled.
|
|
|
|
// That's because we need to issue readahead before we read the properties,
|
|
|
|
// at which point we don't yet know the index type.
|
|
|
|
tail_prefetch_size = prefetch_all || preload_all ? 512 * 1024 : 4 * 1024;
|
|
|
|
}
|
|
|
|
size_t prefetch_off;
|
|
|
|
size_t prefetch_len;
|
|
|
|
if (file_size < tail_prefetch_size) {
|
|
|
|
prefetch_off = 0;
|
|
|
|
prefetch_len = static_cast<size_t>(file_size);
|
|
|
|
} else {
|
|
|
|
prefetch_off = static_cast<size_t>(file_size - tail_prefetch_size);
|
|
|
|
prefetch_len = tail_prefetch_size;
|
|
|
|
}
|
|
|
|
TEST_SYNC_POINT_CALLBACK("BlockBasedTable::Open::TailPrefetchLen",
|
|
|
|
&tail_prefetch_size);
|
|
|
|
Status s;
|
|
|
|
  // TODO: this special logic should not be needed in the future.
|
|
|
|
if (!file->use_direct_io()) {
|
|
|
|
prefetch_buffer->reset(new FilePrefetchBuffer(nullptr, 0, 0, false, true));
|
|
|
|
s = file->Prefetch(prefetch_off, prefetch_len);
|
|
|
|
} else {
|
|
|
|
prefetch_buffer->reset(new FilePrefetchBuffer(nullptr, 0, 0, true, true));
|
|
|
|
s = (*prefetch_buffer)->Prefetch(file, prefetch_off, prefetch_len);
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2019-02-11 20:37:07 +01:00
|
|
|
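// Recompute the checksum of buf[0, len) with the requested algorithm and
// compare it to the expected value taken from the block trailer; kCRC32c
// values are stored masked and are unmasked before the comparison.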
Status VerifyChecksum(const ChecksumType type, const char* buf, size_t len,
|
|
|
|
uint32_t expected) {
|
|
|
|
Status s;
|
|
|
|
uint32_t actual = 0;
|
|
|
|
switch (type) {
|
|
|
|
case kNoChecksum:
|
|
|
|
break;
|
|
|
|
case kCRC32c:
|
|
|
|
expected = crc32c::Unmask(expected);
|
|
|
|
actual = crc32c::Value(buf, len);
|
|
|
|
break;
|
|
|
|
case kxxHash:
|
|
|
|
actual = XXH32(buf, static_cast<int>(len), 0);
|
|
|
|
break;
|
|
|
|
case kxxHash64:
|
|
|
|
actual = static_cast<uint32_t>(XXH64(buf, static_cast<int>(len), 0) &
|
|
|
|
uint64_t{0xffffffff});
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
s = Status::Corruption("unknown checksum type");
|
|
|
|
}
|
|
|
|
if (s.ok() && actual != expected) {
|
|
|
|
s = Status::Corruption("properties block checksum mismatched");
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2019-03-26 18:15:43 +01:00
|
|
|
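// Retry path for ingested files: re-read the properties block without
// checksum verification, restore the global seqno field to 0 in the copy,
// and verify the checksum against that restored image, which is what the
// block looked like when SstFileWriter computed it.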
Status BlockBasedTable::TryReadPropertiesWithGlobalSeqno(
|
|
|
|
Rep* rep, FilePrefetchBuffer* prefetch_buffer, const Slice& handle_value,
|
|
|
|
TableProperties** table_properties) {
|
|
|
|
assert(table_properties != nullptr);
|
|
|
|
// If this is an external SST file ingested with write_global_seqno set to
|
|
|
|
  // true, then we expect a checksum mismatch because the checksum was written
|
|
|
|
// by SstFileWriter, but its global seqno in the properties block may have
|
|
|
|
// been changed during ingestion. In this case, we read the properties
|
|
|
|
// block, copy it to a memory buffer, change the global seqno to its
|
|
|
|
// original value, i.e. 0, and verify the checksum again.
|
|
|
|
BlockHandle props_block_handle;
|
|
|
|
CacheAllocationPtr tmp_buf;
|
|
|
|
Status s = ReadProperties(handle_value, rep->file.get(), prefetch_buffer,
|
|
|
|
rep->footer, rep->ioptions, table_properties,
|
|
|
|
false /* verify_checksum */, &props_block_handle,
|
|
|
|
&tmp_buf, false /* compression_type_missing */,
|
|
|
|
nullptr /* memory_allocator */);
|
|
|
|
if (s.ok() && tmp_buf) {
|
|
|
|
const auto seqno_pos_iter =
|
|
|
|
(*table_properties)
|
|
|
|
->properties_offsets.find(
|
|
|
|
ExternalSstFilePropertyNames::kGlobalSeqno);
|
2019-04-23 00:59:16 +02:00
|
|
|
size_t block_size = static_cast<size_t>(props_block_handle.size());
|
2019-03-26 18:15:43 +01:00
|
|
|
if (seqno_pos_iter != (*table_properties)->properties_offsets.end()) {
|
|
|
|
uint64_t global_seqno_offset = seqno_pos_iter->second;
|
|
|
|
EncodeFixed64(
|
|
|
|
tmp_buf.get() + global_seqno_offset - props_block_handle.offset(), 0);
|
|
|
|
}
|
|
|
|
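    // The block trailer is a 1-byte compression type followed by a 4-byte
    // checksum, and the checksum covers the block data plus the type byte;
    // hence the block_size + 1 offset and length below.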
uint32_t value = DecodeFixed32(tmp_buf.get() + block_size + 1);
|
|
|
|
s = rocksdb::VerifyChecksum(rep->footer.checksum(), tmp_buf.get(),
|
|
|
|
block_size + 1, value);
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2018-12-07 22:15:09 +01:00
|
|
|
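// Locate and decode the properties meta-block. A checksum failure falls back
// to TryReadPropertiesWithGlobalSeqno() to handle ingested files whose global
// seqno was rewritten in place. The decoded properties then drive the
// compression flags, prefix extractor and global seqno stored in Rep.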
Status BlockBasedTable::ReadPropertiesBlock(
|
|
|
|
Rep* rep, FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
|
|
|
|
const SequenceNumber largest_seqno) {
|
RocksDB 2.8 to be able to read files generated by 2.6
Summary:
From 2.6 to 2.7, the property block name was renamed from rocksdb.stats to rocksdb.properties, so older property blocks could not be loaded. In 2.8, we seem to have added some logic that uses the property block without checking null pointers, which creates segmentation faults.
In this patch, we fix it by:
(1) try rocksdb.stats if rocksdb.properties is not found
(2) add some null checking before consuming rep->table_properties
Test Plan: make sure a file generated in 2.7 that previously couldn't be opened now can be opened.
Reviewers: haobo, igor, yhchiang
Reviewed By: igor
CC: ljin, xjin, dhruba, kailiu, leveldb
Differential Revision: https://reviews.facebook.net/D17961
2014-04-17 04:30:33 +02:00
|
|
|
bool found_properties_block = true;
|
2018-12-07 22:15:09 +01:00
|
|
|
Status s;
|
|
|
|
s = SeekToPropertiesBlock(meta_iter, &found_properties_block);
|
2014-04-17 04:30:33 +02:00
|
|
|
|
2014-10-31 19:41:15 +01:00
|
|
|
if (!s.ok()) {
|
2017-03-16 03:22:52 +01:00
|
|
|
ROCKS_LOG_WARN(rep->ioptions.info_log,
|
|
|
|
"Error when seeking to properties block from file: %s",
|
|
|
|
s.ToString().c_str());
|
2014-10-31 19:41:15 +01:00
|
|
|
} else if (found_properties_block) {
|
2013-11-13 07:46:51 +01:00
|
|
|
s = meta_iter->status();
|
2014-02-08 04:26:49 +01:00
|
|
|
TableProperties* table_properties = nullptr;
|
2013-11-13 07:46:51 +01:00
|
|
|
if (s.ok()) {
|
2019-02-11 20:37:07 +01:00
|
|
|
s = ReadProperties(
|
|
|
|
meta_iter->value(), rep->file.get(), prefetch_buffer, rep->footer,
|
|
|
|
rep->ioptions, &table_properties, true /* verify_checksum */,
|
|
|
|
nullptr /* ret_block_handle */, nullptr /* ret_block_contents */,
|
|
|
|
false /* compression_type_missing */, nullptr /* memory_allocator */);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s.IsCorruption()) {
|
2019-03-26 18:15:43 +01:00
|
|
|
s = TryReadPropertiesWithGlobalSeqno(
|
|
|
|
rep, prefetch_buffer, meta_iter->value(), &table_properties);
|
2019-02-11 20:37:07 +01:00
|
|
|
}
|
|
|
|
std::unique_ptr<TableProperties> props_guard;
|
|
|
|
if (table_properties != nullptr) {
|
|
|
|
props_guard.reset(table_properties);
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
if (!s.ok()) {
|
2017-03-16 03:22:52 +01:00
|
|
|
ROCKS_LOG_WARN(rep->ioptions.info_log,
|
|
|
|
"Encountered error while reading data from properties "
|
|
|
|
"block %s",
|
|
|
|
s.ToString().c_str());
|
2014-02-08 04:26:49 +01:00
|
|
|
} else {
|
2018-02-08 00:42:35 +01:00
|
|
|
assert(table_properties != nullptr);
|
2019-02-11 20:37:07 +01:00
|
|
|
rep->table_properties.reset(props_guard.release());
|
2018-02-08 00:42:35 +01:00
|
|
|
rep->blocks_maybe_compressed = rep->table_properties->compression_name !=
|
|
|
|
CompressionTypeToString(kNoCompression);
|
2019-01-24 03:11:08 +01:00
|
|
|
rep->blocks_definitely_zstd_compressed =
|
|
|
|
(rep->table_properties->compression_name ==
|
|
|
|
CompressionTypeToString(kZSTD) ||
|
|
|
|
rep->table_properties->compression_name ==
|
|
|
|
CompressionTypeToString(kZSTDNotFinalCompression));
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
2014-04-22 02:49:47 +02:00
|
|
|
} else {
|
2017-03-16 03:22:52 +01:00
|
|
|
ROCKS_LOG_ERROR(rep->ioptions.info_log,
|
|
|
|
"Cannot find Properties block from file.");
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
2018-06-27 00:56:26 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
|
|
|
if (rep->table_properties) {
|
|
|
|
ParseSliceTransform(rep->table_properties->prefix_extractor_name,
|
|
|
|
&(rep->table_prefix_extractor));
|
|
|
|
}
|
|
|
|
#endif // ROCKSDB_LITE
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2018-07-14 07:40:23 +02:00
|
|
|
// Read the table properties, if provided.
|
|
|
|
if (rep->table_properties) {
|
|
|
|
rep->whole_key_filtering &=
|
|
|
|
IsFeatureSupported(*(rep->table_properties),
|
|
|
|
BlockBasedTablePropertyNames::kWholeKeyFiltering,
|
|
|
|
rep->ioptions.info_log);
|
|
|
|
rep->prefix_filtering &= IsFeatureSupported(
|
|
|
|
*(rep->table_properties),
|
|
|
|
BlockBasedTablePropertyNames::kPrefixFiltering, rep->ioptions.info_log);
|
|
|
|
|
2018-07-28 01:00:26 +02:00
|
|
|
s = GetGlobalSequenceNumber(*(rep->table_properties), largest_seqno,
|
|
|
|
&(rep->global_seqno));
|
|
|
|
if (!s.ok()) {
|
|
|
|
ROCKS_LOG_ERROR(rep->ioptions.info_log, "%s", s.ToString().c_str());
|
|
|
|
}
|
2018-07-14 07:40:23 +02:00
|
|
|
}
|
2018-12-07 22:15:09 +01:00
|
|
|
return s;
|
|
|
|
}
|
2018-07-14 07:40:23 +02:00
|
|
|
|
2018-12-07 22:15:09 +01:00
|
|
|
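// Read the range deletion meta-block, if any, and eagerly fragment its
// tombstones into rep->fragmented_range_dels so that iterators and Get()
// can share the pre-fragmented list for the lifetime of the table.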
Status BlockBasedTable::ReadRangeDelBlock(
|
|
|
|
Rep* rep, FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
|
|
|
|
const InternalKeyComparator& internal_comparator) {
|
|
|
|
Status s;
|
2016-08-20 00:10:31 +02:00
|
|
|
bool found_range_del_block;
|
2018-12-21 06:57:18 +01:00
|
|
|
BlockHandle range_del_handle;
|
|
|
|
s = SeekToRangeDelBlock(meta_iter, &found_range_del_block, &range_del_handle);
|
2016-08-20 00:10:31 +02:00
|
|
|
if (!s.ok()) {
|
2017-03-16 03:22:52 +01:00
|
|
|
ROCKS_LOG_WARN(
|
|
|
|
rep->ioptions.info_log,
|
2016-08-20 00:10:31 +02:00
|
|
|
"Error when seeking to range delete tombstones block from file: %s",
|
|
|
|
s.ToString().c_str());
|
2018-12-21 06:57:18 +01:00
|
|
|
} else if (found_range_del_block && !range_del_handle.IsNull()) {
|
Cache fragmented range tombstones in BlockBasedTableReader (#4493)
Summary:
This allows tombstone fragmenting to only be performed when the table is opened, and cached for subsequent accesses.
On the same DB used in #4449, running `readrandom` results in the following:
```
readrandom : 0.983 micros/op 1017076 ops/sec; 78.3 MB/s (63103 of 100000 found)
```
Now that Get performance in the presence of range tombstones is reasonable, I also compared the performance between a DB with range tombstones, "expanded" range tombstones (several point tombstones that cover the same keys the equivalent range tombstone would cover, a common workaround for DeleteRange), and no range tombstones. The created DBs had 5 million keys each, and DeleteRange was called at regular intervals (depending on the total number of range tombstones being written) after 4.5 million Puts. The table below summarizes the results of a `readwhilewriting` benchmark (in order to provide somewhat more realistic results):
```
Tombstones? | avg micros/op | stddev micros/op | avg ops/s | stddev ops/s
----------------- | ------------- | ---------------- | ------------ | ------------
None | 0.6186 | 0.04637 | 1,625,252.90 | 124,679.41
500 Expanded | 0.6019 | 0.03628 | 1,666,670.40 | 101,142.65
500 Unexpanded | 0.6435 | 0.03994 | 1,559,979.40 | 104,090.52
1k Expanded | 0.6034 | 0.04349 | 1,665,128.10 | 125,144.57
1k Unexpanded | 0.6261 | 0.03093 | 1,600,457.50 | 79,024.94
5k Expanded | 0.6163 | 0.05926 | 1,636,668.80 | 154,888.85
5k Unexpanded | 0.6402 | 0.04002 | 1,567,804.70 | 100,965.55
10k Expanded | 0.6036 | 0.05105 | 1,667,237.70 | 142,830.36
10k Unexpanded | 0.6128 | 0.02598 | 1,634,633.40 | 72,161.82
25k Expanded | 0.6198 | 0.04542 | 1,620,980.50 | 116,662.93
25k Unexpanded | 0.5478 | 0.0362 | 1,833,059.10 | 121,233.81
50k Expanded | 0.5104 | 0.04347 | 1,973,107.90 | 184,073.49
50k Unexpanded | 0.4528 | 0.03387 | 2,219,034.50 | 170,984.32
```
After a large enough quantity of range tombstones are written, range tombstone Gets can become faster than reading from an equivalent DB with several point tombstones.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4493
Differential Revision: D10842844
Pulled By: abhimadan
fbshipit-source-id: a7d44534f8120e6aabb65779d26c6b9df954c509
2018-10-26 04:25:00 +02:00
|
|
|
ReadOptions read_options;
|
2018-12-21 06:57:18 +01:00
|
|
|
std::unique_ptr<InternalIterator> iter(NewDataBlockIterator<DataBlockIter>(
|
|
|
|
rep, read_options, range_del_handle, nullptr /* input_iter */,
|
|
|
|
false /* is_index */, true /* key_includes_seq */,
|
|
|
|
true /* index_key_is_full */, nullptr /* get_context */, Status(),
|
|
|
|
prefetch_buffer));
|
|
|
|
assert(iter != nullptr);
|
|
|
|
s = iter->status();
|
2018-10-26 04:25:00 +02:00
|
|
|
if (!s.ok()) {
|
|
|
|
ROCKS_LOG_WARN(
|
|
|
|
rep->ioptions.info_log,
|
|
|
|
"Encountered error while reading data from range del block %s",
|
|
|
|
s.ToString().c_str());
|
2018-12-21 06:57:18 +01:00
|
|
|
} else {
|
|
|
|
rep->fragmented_range_dels =
|
|
|
|
std::make_shared<FragmentedRangeTombstoneList>(std::move(iter),
|
|
|
|
internal_comparator);
|
2016-08-20 00:10:31 +02:00
|
|
|
}
|
|
|
|
}
|
2018-12-07 22:15:09 +01:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
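// Fetch the compression dictionary meta-block into its own BlockContents.
// It is read with decompress=false, consistent with the note above that
// meta-blocks are not dictionary compressed.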
Status BlockBasedTable::ReadCompressionDictBlock(
|
|
|
|
Rep* rep, FilePrefetchBuffer* prefetch_buffer,
|
2019-01-24 03:11:08 +01:00
|
|
|
std::unique_ptr<const BlockContents>* compression_dict_block) {
|
|
|
|
assert(compression_dict_block != nullptr);
|
2018-12-07 22:15:09 +01:00
|
|
|
Status s;
|
2019-01-24 03:11:08 +01:00
|
|
|
if (!rep->compression_dict_handle.IsNull()) {
|
2018-12-07 22:15:09 +01:00
|
|
|
std::unique_ptr<BlockContents> compression_dict_cont{new BlockContents()};
|
|
|
|
PersistentCacheOptions cache_options;
|
|
|
|
ReadOptions read_options;
|
2019-01-24 03:11:08 +01:00
|
|
|
read_options.verify_checksums = true;
|
2018-12-07 22:15:09 +01:00
|
|
|
BlockFetcher compression_block_fetcher(
|
|
|
|
rep->file.get(), prefetch_buffer, rep->footer, read_options,
|
2019-01-24 03:11:08 +01:00
|
|
|
rep->compression_dict_handle, compression_dict_cont.get(),
|
|
|
|
rep->ioptions, false /* decompress */, false /*maybe_compressed*/,
|
|
|
|
UncompressionDict::GetEmptyDict(), cache_options);
|
2018-12-07 22:15:09 +01:00
|
|
|
s = compression_block_fetcher.ReadBlockContents();
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
|
|
|
ROCKS_LOG_WARN(
|
|
|
|
rep->ioptions.info_log,
|
|
|
|
"Encountered error while reading data from compression dictionary "
|
|
|
|
"block %s",
|
|
|
|
s.ToString().c_str());
|
|
|
|
} else {
|
2019-01-24 03:11:08 +01:00
|
|
|
*compression_dict_block = std::move(compression_dict_cont);
|
2018-12-07 22:15:09 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
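// Resolve the filter and compression dictionary handles, then either warm
// the block cache (when cache_index_and_filter_blocks is set) or preload the
// index/filter readers into Rep so they live as long as the table, honoring
// the pin and prefetch options along the way.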
Status BlockBasedTable::PrefetchIndexAndFilterBlocks(
|
|
|
|
Rep* rep, FilePrefetchBuffer* prefetch_buffer, InternalIterator* meta_iter,
|
|
|
|
BlockBasedTable* new_table, const SliceTransform* prefix_extractor,
|
|
|
|
bool prefetch_all, const BlockBasedTableOptions& table_options,
|
|
|
|
const int level, const bool prefetch_index_and_filter_in_cache) {
|
|
|
|
Status s;
|
|
|
|
|
|
|
|
// Find filter handle and filter type
|
|
|
|
if (rep->filter_policy) {
|
|
|
|
for (auto filter_type :
|
|
|
|
{Rep::FilterType::kFullFilter, Rep::FilterType::kPartitionedFilter,
|
|
|
|
Rep::FilterType::kBlockFilter}) {
|
|
|
|
std::string prefix;
|
|
|
|
switch (filter_type) {
|
|
|
|
case Rep::FilterType::kFullFilter:
|
|
|
|
prefix = kFullFilterBlockPrefix;
|
|
|
|
break;
|
|
|
|
case Rep::FilterType::kPartitionedFilter:
|
|
|
|
prefix = kPartitionedFilterBlockPrefix;
|
|
|
|
break;
|
|
|
|
case Rep::FilterType::kBlockFilter:
|
|
|
|
prefix = kFilterBlockPrefix;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
assert(0);
|
|
|
|
}
|
|
|
|
std::string filter_block_key = prefix;
|
|
|
|
filter_block_key.append(rep->filter_policy->Name());
|
|
|
|
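      // The resulting key is the filter meta-block name; for example, with
      // the built-in bloom policy a full filter would typically be registered
      // as "fullfilter.rocksdb.BuiltinBloomFilter" (illustrative only; the
      // prefix constants are defined elsewhere in this file).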
if (FindMetaBlock(meta_iter, filter_block_key, &rep->filter_handle)
|
|
|
|
.ok()) {
|
|
|
|
rep->filter_type = filter_type;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-08-20 00:10:31 +02:00
|
|
|
|
2019-01-24 03:11:08 +01:00
|
|
|
{
|
|
|
|
// Find compression dictionary handle
|
|
|
|
bool found_compression_dict;
|
|
|
|
s = SeekToCompressionDictBlock(meta_iter, &found_compression_dict,
|
|
|
|
&rep->compression_dict_handle);
|
|
|
|
}
|
|
|
|
|
2018-06-27 00:56:26 +02:00
|
|
|
bool need_upper_bound_check =
|
|
|
|
PrefixExtractorChanged(rep->table_properties.get(), prefix_extractor);
|
|
|
|
|
2018-06-23 00:14:05 +02:00
|
|
|
BlockBasedTableOptions::IndexType index_type = new_table->UpdateIndexType();
|
|
|
|
// prefetch the first level of index
|
|
|
|
const bool prefetch_index =
|
|
|
|
prefetch_all ||
|
|
|
|
(table_options.pin_top_level_index_and_filter &&
|
|
|
|
index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);
|
|
|
|
// prefetch the first level of filter
|
|
|
|
const bool prefetch_filter =
|
|
|
|
prefetch_all || (table_options.pin_top_level_index_and_filter &&
|
|
|
|
rep->filter_type == Rep::FilterType::kPartitionedFilter);
|
|
|
|
  // Partition filters cannot be enabled without partition indexes
|
2018-06-29 18:13:57 +02:00
|
|
|
assert(!prefetch_filter || prefetch_index);
|
2018-06-23 00:14:05 +02:00
|
|
|
// pin both index and filters, down to all partitions
|
|
|
|
const bool pin_all =
|
2017-08-23 16:48:54 +02:00
|
|
|
rep->table_options.pin_l0_filter_and_index_blocks_in_cache && level == 0;
|
2018-06-23 00:14:05 +02:00
|
|
|
// pin the first level of index
|
|
|
|
const bool pin_index =
|
|
|
|
pin_all || (table_options.pin_top_level_index_and_filter &&
|
|
|
|
index_type == BlockBasedTableOptions::kTwoLevelIndexSearch);
|
|
|
|
// pin the first level of filter
|
|
|
|
const bool pin_filter =
|
|
|
|
pin_all || (table_options.pin_top_level_index_and_filter &&
|
|
|
|
rep->filter_type == Rep::FilterType::kPartitionedFilter);
|
2017-08-23 16:48:54 +02:00
|
|
|
// pre-fetching of blocks is turned on
|
2019-01-24 03:11:08 +01:00
|
|
|
  // Will use block cache for meta-block access
|
2016-07-20 20:23:31 +02:00
|
|
|
// Always prefetch index and filter for level 0
|
2019-01-24 03:11:08 +01:00
|
|
|
// TODO(ajkr): also prefetch compression dictionary block
|
2016-07-20 20:23:31 +02:00
|
|
|
if (table_options.cache_index_and_filter_blocks) {
|
2018-06-23 00:14:05 +02:00
|
|
|
assert(table_options.block_cache != nullptr);
|
|
|
|
if (prefetch_index) {
|
2015-02-26 01:34:26 +01:00
|
|
|
// Hack: Call NewIndexIterator() to implicitly add index to the
|
|
|
|
// block_cache
|
2017-08-18 19:53:03 +02:00
|
|
|
CachableEntry<IndexReader> index_entry;
|
2018-05-21 23:33:55 +02:00
|
|
|
// check prefix_extractor match only if hash based index is used
|
2018-06-27 00:56:26 +02:00
|
|
|
bool disable_prefix_seek =
|
|
|
|
rep->index_type == BlockBasedTableOptions::kHashSearch &&
|
|
|
|
need_upper_bound_check;
|
2019-01-24 03:11:08 +01:00
|
|
|
if (s.ok()) {
|
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> iter(
|
|
|
|
new_table->NewIndexIterator(ReadOptions(), disable_prefix_seek,
|
|
|
|
nullptr, &index_entry));
|
|
|
|
s = iter->status();
|
|
|
|
}
|
2015-02-26 01:34:26 +01:00
|
|
|
if (s.ok()) {
|
2017-09-28 08:57:07 +02:00
|
|
|
// This is the first call to NewIndexIterator() since we're in Open().
|
|
|
|
// On success it should give us ownership of the `CachableEntry` by
|
|
|
|
// populating `index_entry`.
|
2019-05-10 20:53:33 +02:00
|
|
|
assert(index_entry.GetValue() != nullptr);
|
2018-06-23 00:14:05 +02:00
|
|
|
if (prefetch_all) {
|
2019-05-10 20:53:33 +02:00
|
|
|
index_entry.GetValue()->CacheDependencies(pin_all);
|
2018-06-23 00:14:05 +02:00
|
|
|
}
|
|
|
|
if (pin_index) {
|
2017-09-28 08:57:07 +02:00
|
|
|
rep->index_entry = std::move(index_entry);
|
|
|
|
}
|
2018-06-23 00:14:05 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (s.ok() && prefetch_filter) {
|
|
|
|
// Hack: Call GetFilter() to implicitly add filter to the block_cache
|
2018-06-27 00:56:26 +02:00
|
|
|
auto filter_entry =
|
|
|
|
new_table->GetFilter(rep->table_prefix_extractor.get());
|
2019-05-10 20:53:33 +02:00
|
|
|
if (filter_entry.GetValue() != nullptr && prefetch_all) {
|
|
|
|
filter_entry.GetValue()->CacheDependencies(
|
2018-06-27 00:56:26 +02:00
|
|
|
pin_all, rep->table_prefix_extractor.get());
|
2018-06-23 00:14:05 +02:00
|
|
|
}
|
|
|
|
// if pin_filter is true then save it in rep_->filter_entry; it will be
|
|
|
|
// released in the destructor only, hence it will be pinned in the
|
|
|
|
// cache while this reader is alive
|
|
|
|
if (pin_filter) {
|
2019-05-10 20:53:33 +02:00
|
|
|
rep->filter_entry = std::move(filter_entry);
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
2016-07-20 20:23:31 +02:00
|
|
|
}
|
|
|
|
} else {
|
2019-01-24 03:11:08 +01:00
|
|
|
// If we don't use block cache for meta-block access, we'll pre-load these
|
|
|
|
    // blocks, which will be kept in member variables in Rep and have the same
|
|
|
|
    // lifetime as this table object.
|
2016-07-20 20:23:31 +02:00
|
|
|
IndexReader* index_reader = nullptr;
|
2019-01-24 03:11:08 +01:00
|
|
|
if (s.ok()) {
|
|
|
|
s = new_table->CreateIndexReader(prefetch_buffer, &index_reader,
|
|
|
|
meta_iter, level);
|
|
|
|
}
|
|
|
|
std::unique_ptr<const BlockContents> compression_dict_block;
|
2016-07-20 20:23:31 +02:00
|
|
|
if (s.ok()) {
|
|
|
|
rep->index_reader.reset(index_reader);
|
2017-08-23 16:48:54 +02:00
|
|
|
// The partitions of partitioned index are always stored in cache. They
|
|
|
|
      // hence follow the configuration for pin and prefetch regardless of
|
|
|
|
// the value of cache_index_and_filter_blocks
|
|
|
|
if (prefetch_index_and_filter_in_cache || level == 0) {
|
2018-06-23 00:14:05 +02:00
|
|
|
rep->index_reader->CacheDependencies(pin_all);
|
2017-08-23 16:48:54 +02:00
|
|
|
}
|
2015-02-26 01:34:26 +01:00
|
|
|
|
2016-07-20 20:23:31 +02:00
|
|
|
// Set filter block
|
|
|
|
if (rep->filter_policy) {
|
2017-03-22 17:11:23 +01:00
|
|
|
const bool is_a_filter_partition = true;
|
2018-12-07 22:15:09 +01:00
|
|
|
auto filter = new_table->ReadFilter(prefetch_buffer, rep->filter_handle,
|
|
|
|
!is_a_filter_partition,
|
|
|
|
rep->table_prefix_extractor.get());
|
2017-08-23 16:48:54 +02:00
|
|
|
rep->filter.reset(filter);
|
|
|
|
      // Refer to the comment above about partitioned indexes always being
|
|
|
|
// cached
|
|
|
|
if (filter && (prefetch_index_and_filter_in_cache || level == 0)) {
|
2018-06-27 00:56:26 +02:00
|
|
|
filter->CacheDependencies(pin_all, rep->table_prefix_extractor.get());
|
2017-03-22 17:11:23 +01:00
|
|
|
}
|
2015-02-26 01:34:26 +01:00
|
|
|
}
|
2019-01-24 03:11:08 +01:00
|
|
|
s = ReadCompressionDictBlock(rep, prefetch_buffer,
|
|
|
|
&compression_dict_block);
|
2016-07-20 20:23:31 +02:00
|
|
|
} else {
|
|
|
|
delete index_reader;
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
2019-01-24 03:11:08 +01:00
|
|
|
if (s.ok() && !rep->compression_dict_handle.IsNull()) {
|
|
|
|
assert(compression_dict_block != nullptr);
|
|
|
|
// TODO(ajkr): find a way to avoid the `compression_dict_block` data copy
|
|
|
|
rep->uncompression_dict.reset(new UncompressionDict(
|
|
|
|
compression_dict_block->data.ToString(),
|
|
|
|
rep->blocks_definitely_zstd_compressed, rep->ioptions.statistics));
|
|
|
|
}
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2013-10-29 01:54:09 +01:00
|
|
|
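// Map the access_hint_on_compaction_start option onto an advisory access
// hint for the underlying file before compaction reads this table.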
void BlockBasedTable::SetupForCompaction() {
|
2014-09-23 23:18:57 +02:00
|
|
|
switch (rep_->ioptions.access_hint_on_compaction_start) {
|
2013-05-18 00:53:01 +02:00
|
|
|
case Options::NONE:
|
|
|
|
break;
|
|
|
|
case Options::NORMAL:
|
2015-09-23 03:21:10 +02:00
|
|
|
rep_->file->file()->Hint(RandomAccessFile::NORMAL);
|
2013-05-18 00:53:01 +02:00
|
|
|
break;
|
|
|
|
case Options::SEQUENTIAL:
|
2015-09-23 03:21:10 +02:00
|
|
|
rep_->file->file()->Hint(RandomAccessFile::SEQUENTIAL);
|
2013-05-18 00:53:01 +02:00
|
|
|
break;
|
|
|
|
case Options::WILLNEED:
|
2015-09-23 03:21:10 +02:00
|
|
|
rep_->file->file()->Hint(RandomAccessFile::WILLNEED);
|
2013-05-18 00:53:01 +02:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
assert(false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-02-08 04:26:49 +01:00
|
|
|
std::shared_ptr<const TableProperties> BlockBasedTable::GetTableProperties()
|
|
|
|
const {
|
2013-11-20 01:29:42 +01:00
|
|
|
return rep_->table_properties;
|
2013-10-10 20:43:24 +02:00
|
|
|
}
|
2012-04-17 17:36:46 +02:00
|
|
|
|
2014-08-05 20:27:34 +02:00
|
|
|
size_t BlockBasedTable::ApproximateMemoryUsage() const {
|
|
|
|
size_t usage = 0;
|
|
|
|
if (rep_->filter) {
|
|
|
|
usage += rep_->filter->ApproximateMemoryUsage();
|
|
|
|
}
|
|
|
|
if (rep_->index_reader) {
|
|
|
|
usage += rep_->index_reader->ApproximateMemoryUsage();
|
|
|
|
}
|
2019-01-24 03:11:08 +01:00
|
|
|
if (rep_->uncompression_dict) {
|
|
|
|
usage += rep_->uncompression_dict->ApproximateMemoryUsage();
|
|
|
|
}
|
2014-08-05 20:27:34 +02:00
|
|
|
return usage;
|
|
|
|
}
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
// Load the meta-block from the file. On success, return the loaded meta block
|
|
|
|
// and its iterator.
|
2015-10-13 00:06:38 +02:00
|
|
|
Status BlockBasedTable::ReadMetaBlock(Rep* rep,
|
2017-08-11 20:59:13 +02:00
|
|
|
FilePrefetchBuffer* prefetch_buffer,
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<Block>* meta_block,
|
|
|
|
std::unique_ptr<InternalIterator>* iter) {
|
2012-04-17 17:36:46 +02:00
|
|
|
// TODO(sanjay): Skip this if footer.metaindex_handle() size indicates
|
|
|
|
// it is an empty block.
|
2015-02-19 23:07:38 +01:00
|
|
|
std::unique_ptr<Block> meta;
|
2013-11-13 07:46:51 +01:00
|
|
|
Status s = ReadBlockFromFile(
|
2017-08-11 20:59:13 +02:00
|
|
|
rep->file.get(), prefetch_buffer, rep->footer, ReadOptions(),
|
2016-07-19 18:44:03 +02:00
|
|
|
rep->footer.metaindex_handle(), &meta, rep->ioptions,
|
2018-11-29 02:58:08 +01:00
|
|
|
true /* decompress */, true /*maybe_compressed*/,
|
2019-01-24 03:11:08 +01:00
|
|
|
UncompressionDict::GetEmptyDict(), rep->persistent_cache_options,
|
2018-11-29 02:58:08 +01:00
|
|
|
kDisableGlobalSequenceNumber, 0 /* read_amp_bytes_per_bit */,
|
|
|
|
GetMemoryAllocator(rep->table_options));
|
2013-10-10 20:43:24 +02:00
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
if (!s.ok()) {
|
2017-03-16 03:22:52 +01:00
|
|
|
ROCKS_LOG_ERROR(rep->ioptions.info_log,
|
|
|
|
"Encountered error while reading data from properties"
|
|
|
|
" block %s",
|
|
|
|
s.ToString().c_str());
|
2013-11-13 07:46:51 +01:00
|
|
|
return s;
|
2012-04-17 17:36:46 +02:00
|
|
|
}
|
2013-10-10 20:43:24 +02:00
|
|
|
|
2015-02-19 23:07:38 +01:00
|
|
|
*meta_block = std::move(meta);
|
2013-11-13 07:46:51 +01:00
|
|
|
// meta block uses bytewise comparator.
|
2018-07-16 18:58:58 +02:00
|
|
|
iter->reset(meta_block->get()->NewIterator<DataBlockIter>(
|
|
|
|
BytewiseComparator(), BytewiseComparator()));
|
2013-11-13 07:46:51 +01:00
|
|
|
return Status::OK();
|
2012-04-17 17:36:46 +02:00
|
|
|
}
|
|
|
|
|
2014-03-01 03:19:07 +01:00
|
|
|
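// Two-tier lookup: try the uncompressed block cache first; on a miss,
// consult the compressed block cache, uncompress the hit, and, when
// fill_cache is set, insert the uncompressed block back into the block cache
// while updating per-GetContext or global statistics.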
Status BlockBasedTable::GetDataBlockFromCache(
|
|
|
|
const Slice& block_cache_key, const Slice& compressed_block_cache_key,
|
2018-11-14 02:00:49 +01:00
|
|
|
Cache* block_cache, Cache* block_cache_compressed, Rep* rep,
|
2019-05-10 20:53:33 +02:00
|
|
|
const ReadOptions& read_options, CachableEntry<Block>* block,
|
2019-01-24 03:11:08 +01:00
|
|
|
const UncompressionDict& uncompression_dict, size_t read_amp_bytes_per_bit,
|
|
|
|
bool is_index, GetContext* get_context) {
|
2019-05-10 20:53:33 +02:00
|
|
|
|
|
|
|
assert(block);
|
|
|
|
assert(block->IsEmpty());
|
|
|
|
|
2014-03-01 03:19:07 +01:00
|
|
|
Status s;
|
2018-11-14 02:00:49 +01:00
|
|
|
BlockContents* compressed_block = nullptr;
|
2014-03-01 03:19:07 +01:00
|
|
|
Cache::Handle* block_cache_compressed_handle = nullptr;
|
2018-11-14 02:00:49 +01:00
|
|
|
Statistics* statistics = rep->ioptions.statistics;
|
2014-03-01 03:19:07 +01:00
|
|
|
|
|
|
|
// Lookup uncompressed cache first
|
|
|
|
if (block_cache != nullptr) {
|
2019-05-10 20:53:33 +02:00
|
|
|
auto cache_handle = GetEntryFromCache(
|
2018-12-21 22:15:47 +01:00
|
|
|
block_cache, block_cache_key, rep->level,
|
2017-03-22 17:11:23 +01:00
|
|
|
is_index ? BLOCK_CACHE_INDEX_MISS : BLOCK_CACHE_DATA_MISS,
|
2018-07-21 01:43:13 +02:00
|
|
|
is_index ? BLOCK_CACHE_INDEX_HIT : BLOCK_CACHE_DATA_HIT,
|
|
|
|
get_context
|
|
|
|
? (is_index ? &get_context->get_context_stats_.num_cache_index_miss
|
|
|
|
: &get_context->get_context_stats_.num_cache_data_miss)
|
|
|
|
: nullptr,
|
|
|
|
get_context
|
|
|
|
? (is_index ? &get_context->get_context_stats_.num_cache_index_hit
|
|
|
|
: &get_context->get_context_stats_.num_cache_data_hit)
|
|
|
|
: nullptr,
|
|
|
|
statistics, get_context);
|
2019-05-10 20:53:33 +02:00
|
|
|
if (cache_handle != nullptr) {
|
|
|
|
block->SetCachedValue(
|
|
|
|
reinterpret_cast<Block*>(block_cache->Value(cache_handle)),
|
|
|
|
block_cache, cache_handle);
|
2014-03-01 03:19:07 +01:00
|
|
|
return s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
  // If not found, search the compressed block cache.
|
2019-05-10 20:53:33 +02:00
|
|
|
assert(block->IsEmpty());
|
2014-03-01 03:19:07 +01:00
|
|
|
|
|
|
|
if (block_cache_compressed == nullptr) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(!compressed_block_cache_key.empty());
|
|
|
|
block_cache_compressed_handle =
|
|
|
|
block_cache_compressed->Lookup(compressed_block_cache_key);
|
|
|
|
// if we found in the compressed cache, then uncompress and insert into
|
|
|
|
// uncompressed cache
|
|
|
|
if (block_cache_compressed_handle == nullptr) {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS);
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
// found compressed block
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT);
|
2018-11-14 02:00:49 +01:00
|
|
|
compressed_block = reinterpret_cast<BlockContents*>(
|
2014-03-01 03:19:07 +01:00
|
|
|
block_cache_compressed->Value(block_cache_compressed_handle));
|
2018-11-14 02:00:49 +01:00
|
|
|
CompressionType compression_type = compressed_block->get_compression_type();
|
|
|
|
assert(compression_type != kNoCompression);
|
2014-03-01 03:19:07 +01:00
|
|
|
|
|
|
|
// Retrieve the uncompressed contents into a new buffer
|
|
|
|
BlockContents contents;
|
2019-01-19 04:10:17 +01:00
|
|
|
UncompressionContext context(compression_type);
|
2019-01-24 03:11:08 +01:00
|
|
|
UncompressionInfo info(context, uncompression_dict, compression_type);
|
2019-01-19 04:10:17 +01:00
|
|
|
s = UncompressBlockContents(info, compressed_block->data.data(),
|
2018-11-14 02:00:49 +01:00
|
|
|
compressed_block->data.size(), &contents,
|
|
|
|
rep->table_options.format_version, rep->ioptions,
|
|
|
|
GetMemoryAllocator(rep->table_options));
|
2014-03-01 03:19:07 +01:00
|
|
|
|
|
|
|
// Insert uncompressed block into block cache
|
|
|
|
if (s.ok()) {
|
2019-05-10 20:53:33 +02:00
|
|
|
std::unique_ptr<Block> block_holder(
|
|
|
|
new Block(std::move(contents), rep->get_global_seqno(is_index),
|
|
|
|
read_amp_bytes_per_bit, statistics)); // uncompressed block
|
|
|
|
|
|
|
|
if (block_cache != nullptr && block_holder->own_bytes() &&
|
2014-03-01 03:19:07 +01:00
|
|
|
read_options.fill_cache) {
|
2019-05-10 20:53:33 +02:00
|
|
|
size_t charge = block_holder->ApproximateMemoryUsage();
|
|
|
|
Cache::Handle* cache_handle = nullptr;
|
|
|
|
s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
|
2018-06-29 17:55:33 +02:00
|
|
|
&DeleteCachedEntry<Block>,
|
2019-05-10 20:53:33 +02:00
|
|
|
&cache_handle);
|
2018-11-14 02:00:49 +01:00
|
|
|
#ifndef NDEBUG
|
2018-06-29 17:55:33 +02:00
|
|
|
block_cache->TEST_mark_as_data_block(block_cache_key, charge);
|
2018-11-14 02:00:49 +01:00
|
|
|
#endif // NDEBUG
|
2016-03-11 02:35:19 +01:00
|
|
|
if (s.ok()) {
|
2019-05-10 20:53:33 +02:00
|
|
|
assert(cache_handle != nullptr);
|
|
|
|
block->SetCachedValue(block_holder.release(), block_cache,
|
|
|
|
cache_handle);
|
|
|
|
|
2017-12-13 06:06:26 +01:00
|
|
|
if (get_context != nullptr) {
|
2018-07-21 01:43:13 +02:00
|
|
|
get_context->get_context_stats_.num_cache_add++;
|
|
|
|
get_context->get_context_stats_.num_cache_bytes_write += charge;
|
2017-03-22 17:11:23 +01:00
|
|
|
} else {
|
2017-12-13 06:06:26 +01:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_ADD);
|
2018-06-29 17:55:33 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, charge);
|
2017-03-22 17:11:23 +01:00
|
|
|
}
|
2017-12-13 06:06:26 +01:00
|
|
|
if (is_index) {
|
|
|
|
if (get_context != nullptr) {
|
2018-07-21 01:43:13 +02:00
|
|
|
get_context->get_context_stats_.num_cache_index_add++;
|
|
|
|
get_context->get_context_stats_.num_cache_index_bytes_insert +=
|
|
|
|
charge;
|
2017-12-13 06:06:26 +01:00
|
|
|
} else {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
|
2018-06-29 17:55:33 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, charge);
|
2017-12-13 06:06:26 +01:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (get_context != nullptr) {
|
2018-07-21 01:43:13 +02:00
|
|
|
get_context->get_context_stats_.num_cache_data_add++;
|
|
|
|
get_context->get_context_stats_.num_cache_data_bytes_insert +=
|
|
|
|
charge;
|
2017-12-13 06:06:26 +01:00
|
|
|
} else {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
|
2018-06-29 17:55:33 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT, charge);
|
2017-12-13 06:06:26 +01:00
|
|
|
}
|
|
|
|
}
|
2016-03-11 02:35:19 +01:00
|
|
|
} else {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
|
|
|
|
}
|
2019-05-10 20:53:33 +02:00
|
|
|
} else {
|
|
|
|
block->SetOwnedValue(block_holder.release());
|
2014-03-01 03:19:07 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Release hold on compressed cache entry
|
|
|
|
block_cache_compressed->Release(block_cache_compressed_handle);
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
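// Insert a freshly read block into the caches: the raw (possibly compressed)
// contents go to the compressed block cache, the uncompressed Block goes to
// the block cache, and cached_block ends up holding either a cache-backed
// reference or outright ownership of the block.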
Status BlockBasedTable::PutDataBlockToCache(
|
|
|
|
const Slice& block_cache_key, const Slice& compressed_block_cache_key,
|
|
|
|
Cache* block_cache, Cache* block_cache_compressed,
|
2018-03-05 22:08:17 +01:00
|
|
|
const ReadOptions& /*read_options*/, const ImmutableCFOptions& ioptions,
|
2018-11-14 02:00:49 +01:00
|
|
|
CachableEntry<Block>* cached_block, BlockContents* raw_block_contents,
|
|
|
|
CompressionType raw_block_comp_type, uint32_t format_version,
|
2019-01-24 03:11:08 +01:00
|
|
|
const UncompressionDict& uncompression_dict, SequenceNumber seq_no,
|
2018-11-29 02:58:08 +01:00
|
|
|
size_t read_amp_bytes_per_bit, MemoryAllocator* memory_allocator,
|
|
|
|
bool is_index, Cache::Priority priority, GetContext* get_context) {
|
2019-05-10 20:53:33 +02:00
|
|
|
|
|
|
|
assert(cached_block);
|
|
|
|
assert(cached_block->IsEmpty());
|
2018-11-14 02:00:49 +01:00
|
|
|
assert(raw_block_comp_type == kNoCompression ||
|
2014-03-01 03:19:07 +01:00
|
|
|
block_cache_compressed != nullptr);
|
|
|
|
|
|
|
|
Status s;
|
2016-07-19 18:44:03 +02:00
|
|
|
Statistics* statistics = ioptions.statistics;
|
2019-05-10 20:53:33 +02:00
|
|
|
|
|
|
|
std::unique_ptr<Block> block_holder;
|
2018-11-14 02:00:49 +01:00
|
|
|
if (raw_block_comp_type != kNoCompression) {
|
2019-05-10 20:53:33 +02:00
|
|
|
// Retrieve the uncompressed contents into a new buffer
|
|
|
|
BlockContents uncompressed_block_contents;
|
2019-01-19 04:10:17 +01:00
|
|
|
UncompressionContext context(raw_block_comp_type);
|
2019-01-24 03:11:08 +01:00
|
|
|
UncompressionInfo info(context, uncompression_dict, raw_block_comp_type);
|
2019-01-19 04:10:17 +01:00
|
|
|
s = UncompressBlockContents(info, raw_block_contents->data.data(),
|
|
|
|
raw_block_contents->data.size(),
|
|
|
|
&uncompressed_block_contents, format_version,
|
|
|
|
ioptions, memory_allocator);
|
2019-05-10 20:53:33 +02:00
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
2014-03-01 03:19:07 +01:00
|
|
|
|
2019-05-10 20:53:33 +02:00
|
|
|
block_holder.reset(new Block(std::move(uncompressed_block_contents), seq_no,
|
|
|
|
read_amp_bytes_per_bit, statistics));
|
2014-03-01 03:19:07 +01:00
|
|
|
} else {
|
2019-05-10 20:53:33 +02:00
|
|
|
block_holder.reset(new Block(std::move(*raw_block_contents), seq_no,
|
|
|
|
read_amp_bytes_per_bit, statistics));
|
2014-03-01 03:19:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Insert compressed block into compressed block cache.
|
|
|
|
// Release the hold on the compressed cache entry immediately.
|
2018-11-14 02:00:49 +01:00
|
|
|
if (block_cache_compressed != nullptr &&
|
|
|
|
raw_block_comp_type != kNoCompression && raw_block_contents != nullptr &&
|
|
|
|
raw_block_contents->own_bytes()) {
|
|
|
|
#ifndef NDEBUG
|
|
|
|
assert(raw_block_contents->is_raw_block);
|
|
|
|
#endif // NDEBUG
|
|
|
|
|
|
|
|
// We cannot directly put raw_block_contents because this could point to
|
|
|
|
    // an object on the stack.
|
|
|
|
BlockContents* block_cont_for_comp_cache =
|
|
|
|
new BlockContents(std::move(*raw_block_contents));
|
|
|
|
s = block_cache_compressed->Insert(
|
|
|
|
compressed_block_cache_key, block_cont_for_comp_cache,
|
|
|
|
block_cont_for_comp_cache->ApproximateMemoryUsage(),
|
|
|
|
&DeleteCachedEntry<BlockContents>);
|
2016-03-11 02:35:19 +01:00
|
|
|
if (s.ok()) {
|
|
|
|
// Avoid the following code to delete this cached block.
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD);
|
|
|
|
} else {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES);
|
2018-11-14 02:00:49 +01:00
|
|
|
delete block_cont_for_comp_cache;
|
2016-03-11 02:35:19 +01:00
|
|
|
}
|
2014-03-01 03:19:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// insert into uncompressed block cache
|
2019-05-10 20:53:33 +02:00
|
|
|
if (block_cache != nullptr && block_holder->own_bytes()) {
|
|
|
|
size_t charge = block_holder->ApproximateMemoryUsage();
|
|
|
|
Cache::Handle* cache_handle = nullptr;
|
|
|
|
s = block_cache->Insert(block_cache_key, block_holder.get(), charge,
|
2018-11-14 02:00:49 +01:00
|
|
|
&DeleteCachedEntry<Block>,
|
2019-05-10 20:53:33 +02:00
|
|
|
&cache_handle, priority);
|
2018-11-14 02:00:49 +01:00
|
|
|
#ifndef NDEBUG
|
2018-06-29 17:55:33 +02:00
|
|
|
block_cache->TEST_mark_as_data_block(block_cache_key, charge);
|
2018-11-14 02:00:49 +01:00
|
|
|
#endif // NDEBUG
|
2016-03-11 02:35:19 +01:00
|
|
|
if (s.ok()) {
|
2019-05-10 20:53:33 +02:00
|
|
|
assert(cache_handle != nullptr);
|
|
|
|
cached_block->SetCachedValue(block_holder.release(), block_cache,
|
|
|
|
cache_handle);
|
|
|
|
|
2017-12-13 06:06:26 +01:00
|
|
|
if (get_context != nullptr) {
|
2018-07-21 01:43:13 +02:00
|
|
|
get_context->get_context_stats_.num_cache_add++;
|
|
|
|
get_context->get_context_stats_.num_cache_bytes_write += charge;
|
2017-03-22 17:11:23 +01:00
|
|
|
} else {
|
2017-12-13 06:06:26 +01:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_ADD);
|
2018-06-29 17:55:33 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, charge);
|
2017-03-22 17:11:23 +01:00
|
|
|
}
|
2017-12-13 06:06:26 +01:00
|
|
|
if (is_index) {
|
|
|
|
if (get_context != nullptr) {
|
2018-07-21 01:43:13 +02:00
|
|
|
get_context->get_context_stats_.num_cache_index_add++;
|
|
|
|
get_context->get_context_stats_.num_cache_index_bytes_insert +=
|
|
|
|
charge;
|
2017-12-13 06:06:26 +01:00
|
|
|
} else {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
|
2018-06-29 17:55:33 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, charge);
|
2017-12-13 06:06:26 +01:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (get_context != nullptr) {
|
2018-07-21 01:43:13 +02:00
|
|
|
get_context->get_context_stats_.num_cache_data_add++;
|
|
|
|
get_context->get_context_stats_.num_cache_data_bytes_insert += charge;
|
2017-12-13 06:06:26 +01:00
|
|
|
} else {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_DATA_ADD);
|
2018-06-29 17:55:33 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_DATA_BYTES_INSERT, charge);
|
2017-12-13 06:06:26 +01:00
|
|
|
}
|
|
|
|
}
|
2018-11-14 02:00:49 +01:00
|
|
|
assert(reinterpret_cast<Block*>(block_cache->Value(
|
2019-05-10 20:53:33 +02:00
|
|
|
cached_block->GetCacheHandle())) == cached_block->GetValue());
|
2016-03-11 02:35:19 +01:00
|
|
|
} else {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
|
|
|
|
}
|
2019-05-10 20:53:33 +02:00
|
|
|
} else {
|
|
|
|
cached_block->SetOwnedValue(block_holder.release());
|
2014-03-01 03:19:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2017-03-22 17:11:23 +01:00
|
|
|
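// Read the filter block contents from the file (filters are stored
// uncompressed, hence decompress=false) and wrap them in the reader matching
// rep->filter_type: partitioned, block-based, or full filter.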
FilterBlockReader* BlockBasedTable::ReadFilter(
|
2017-08-11 20:59:13 +02:00
|
|
|
FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_handle,
|
2018-05-21 23:33:55 +02:00
|
|
|
const bool is_a_filter_partition,
|
|
|
|
const SliceTransform* prefix_extractor) const {
|
2017-03-22 17:11:23 +01:00
|
|
|
auto& rep = rep_;
|
2013-11-13 07:46:51 +01:00
|
|
|
// TODO: We might want to unify with ReadBlockFromFile() if we start
|
|
|
|
// requiring checksum verification in Table::Open.
|
2015-09-03 00:36:47 +02:00
|
|
|
if (rep->filter_type == Rep::FilterType::kNoFilter) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
BlockContents block;
|
2017-12-12 00:16:37 +01:00
|
|
|
|
2018-11-29 02:58:08 +01:00
|
|
|
BlockFetcher block_fetcher(
|
|
|
|
rep->file.get(), prefetch_buffer, rep->footer, ReadOptions(),
|
|
|
|
filter_handle, &block, rep->ioptions, false /* decompress */,
|
2019-01-24 03:11:08 +01:00
|
|
|
false /*maybe_compressed*/, UncompressionDict::GetEmptyDict(),
|
2018-11-29 02:58:08 +01:00
|
|
|
rep->persistent_cache_options, GetMemoryAllocator(rep->table_options));
|
2017-12-12 00:16:37 +01:00
|
|
|
Status s = block_fetcher.ReadBlockContents();
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
2015-09-03 00:36:47 +02:00
|
|
|
// Error reading the block
|
|
|
|
return nullptr;
|
|
|
|
}
|
|
|
|
|
|
|
|
assert(rep->filter_policy);
|
|
|
|
|
2017-03-22 17:11:23 +01:00
|
|
|
auto filter_type = rep->filter_type;
|
|
|
|
if (rep->filter_type == Rep::FilterType::kPartitionedFilter &&
|
|
|
|
is_a_filter_partition) {
|
|
|
|
filter_type = Rep::FilterType::kFullFilter;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (filter_type) {
|
|
|
|
case Rep::FilterType::kPartitionedFilter: {
|
|
|
|
return new PartitionedFilterBlockReader(
|
2018-05-21 23:33:55 +02:00
|
|
|
rep->prefix_filtering ? prefix_extractor : nullptr,
|
2017-03-22 17:11:23 +01:00
|
|
|
rep->whole_key_filtering, std::move(block), nullptr,
|
2018-06-07 01:44:52 +02:00
|
|
|
rep->ioptions.statistics, rep->internal_comparator, this,
|
|
|
|
rep_->table_properties == nullptr ||
|
2018-08-10 01:49:45 +02:00
|
|
|
rep_->table_properties->index_key_is_user_key == 0,
|
|
|
|
rep_->table_properties == nullptr ||
|
|
|
|
rep_->table_properties->index_value_is_delta_encoded == 0);
|
2017-03-22 17:11:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
case Rep::FilterType::kBlockFilter:
|
|
|
|
return new BlockBasedFilterBlockReader(
|
2018-05-21 23:33:55 +02:00
|
|
|
rep->prefix_filtering ? prefix_extractor : nullptr,
|
2017-03-22 17:11:23 +01:00
|
|
|
rep->table_options, rep->whole_key_filtering, std::move(block),
|
|
|
|
rep->ioptions.statistics);
|
|
|
|
|
|
|
|
case Rep::FilterType::kFullFilter: {
|
|
|
|
auto filter_bits_reader =
|
|
|
|
rep->filter_policy->GetFilterBitsReader(block.data);
|
|
|
|
assert(filter_bits_reader != nullptr);
|
2015-09-03 00:36:47 +02:00
|
|
|
return new FullFilterBlockReader(
|
2018-05-21 23:33:55 +02:00
|
|
|
rep->prefix_filtering ? prefix_extractor : nullptr,
|
2016-06-03 19:47:47 +02:00
|
|
|
rep->whole_key_filtering, std::move(block), filter_bits_reader,
|
|
|
|
rep->ioptions.statistics);
|
2014-09-08 19:37:05 +02:00
|
|
|
}
|
2015-09-03 00:36:47 +02:00
|
|
|
|
2017-03-22 17:11:23 +01:00
|
|
|
default:
|
|
|
|
// filter_type is either kNoFilter (exited the function at the first if),
|
|
|
|
// or it must be covered in this switch block
|
|
|
|
assert(false);
|
|
|
|
return nullptr;
|
|
|
|
}
|
2012-04-17 17:36:46 +02:00
|
|
|
}
|
|
|
|
|
2019-05-10 20:53:33 +02:00
|
|
|
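// Convenience overload for the table's own (non-partitioned) filter block;
// it simply delegates to the handle-based overload below.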
CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
|
2018-05-21 23:33:55 +02:00
|
|
|
const SliceTransform* prefix_extractor, FilePrefetchBuffer* prefetch_buffer,
|
|
|
|
bool no_io, GetContext* get_context) const {
|
2017-03-22 17:11:23 +01:00
|
|
|
const BlockHandle& filter_blk_handle = rep_->filter_handle;
|
|
|
|
const bool is_a_filter_partition = true;
|
2017-08-23 16:48:54 +02:00
|
|
|
return GetFilter(prefetch_buffer, filter_blk_handle, !is_a_filter_partition,
|
2018-05-21 23:33:55 +02:00
|
|
|
no_io, get_context, prefix_extractor);
|
2017-03-22 17:11:23 +01:00
|
|
|
}
|
|
|
|
|
2019-05-10 20:53:33 +02:00
|
|
|
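// Preference order: (1) the preloaded rep_->filter when filters are not
// cached, (2) the pinned rep_->filter_entry, (3) the block cache, and only
// then (4) a read from the file, whose result is inserted into the cache
// unless no_io is set.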
CachableEntry<FilterBlockReader> BlockBasedTable::GetFilter(
|
2017-08-23 16:48:54 +02:00
|
|
|
FilePrefetchBuffer* prefetch_buffer, const BlockHandle& filter_blk_handle,
|
2018-05-21 23:33:55 +02:00
|
|
|
const bool is_a_filter_partition, bool no_io, GetContext* get_context,
|
|
|
|
const SliceTransform* prefix_extractor) const {
|
2014-10-22 20:52:35 +02:00
|
|
|
// If cache_index_and_filter_blocks is false, filter should be pre-populated.
|
|
|
|
// We will return rep_->filter anyway. rep_->filter can be nullptr if filter
|
|
|
|
// read fails at Open() time. We don't want to reload again since it will
|
|
|
|
// most probably fail again.
|
2017-03-22 17:11:23 +01:00
|
|
|
if (!is_a_filter_partition &&
|
|
|
|
!rep_->table_options.cache_index_and_filter_blocks) {
|
2019-05-10 20:53:33 +02:00
|
|
|
return {rep_->filter.get(), nullptr /* cache */,
|
|
|
|
nullptr /* cache_handle */, false /* own_value */};
|
2014-02-20 00:38:57 +01:00
|
|
|
}
|
|
|
|
|
2014-08-25 23:22:05 +02:00
|
|
|
Cache* block_cache = rep_->table_options.block_cache.get();
|
|
|
|
if (rep_->filter_policy == nullptr /* do not use filter */ ||
|
|
|
|
block_cache == nullptr /* no block cache at all */) {
|
2019-05-10 20:53:33 +02:00
|
|
|
return CachableEntry<FilterBlockReader>();
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
|
2019-05-10 20:53:33 +02:00
|
|
|
if (!is_a_filter_partition && rep_->filter_entry.IsCached()) {
|
|
|
|
return {rep_->filter_entry.GetValue(), nullptr /* cache */,
|
|
|
|
nullptr /* cache_handle */, false /* own_value */};
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when a L0 block based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead the table reader takes ownership of them, hence pinning them, ie. the LRU cache will never push them out. Meanwhile in the table reader, further accesses will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
PERF_TIMER_GUARD(read_filter_block_nanos);
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
// Fetching from the cache
|
|
|
|
char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
2014-09-08 19:37:05 +02:00
|
|
|
auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
|
2017-03-22 17:11:23 +01:00
|
|
|
filter_blk_handle, cache_key);
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2014-09-05 01:18:36 +02:00
|
|
|
Statistics* statistics = rep_->ioptions.statistics;
|
2019-05-10 20:53:33 +02:00
|
|
|
Cache::Handle* cache_handle = GetEntryFromCache(
|
2019-03-28 00:13:08 +01:00
|
|
|
block_cache, key, rep_->level, BLOCK_CACHE_FILTER_MISS,
|
|
|
|
BLOCK_CACHE_FILTER_HIT,
|
2018-07-21 01:43:13 +02:00
|
|
|
get_context ? &get_context->get_context_stats_.num_cache_filter_miss
|
|
|
|
: nullptr,
|
|
|
|
get_context ? &get_context->get_context_stats_.num_cache_filter_hit
|
|
|
|
: nullptr,
|
|
|
|
statistics, get_context);
|
2013-11-13 07:46:51 +01:00
|
|
|
|
|
|
|
FilterBlockReader* filter = nullptr;
|
|
|
|
if (cache_handle != nullptr) {
|
2018-12-08 00:04:20 +01:00
|
|
|
PERF_COUNTER_ADD(block_cache_filter_hit_count, 1);
|
2019-03-28 00:13:08 +01:00
|
|
|
filter =
|
|
|
|
reinterpret_cast<FilterBlockReader*>(block_cache->Value(cache_handle));
|
2013-11-13 07:46:51 +01:00
|
|
|
} else if (no_io) {
|
|
|
|
// Do not invoke any io.
|
|
|
|
return CachableEntry<FilterBlockReader>();
|
|
|
|
} else {
|
2018-05-21 23:33:55 +02:00
|
|
|
filter = ReadFilter(prefetch_buffer, filter_blk_handle,
|
|
|
|
is_a_filter_partition, prefix_extractor);
|
2015-09-03 00:36:47 +02:00
|
|
|
if (filter != nullptr) {
|
2018-06-29 17:55:33 +02:00
|
|
|
size_t usage = filter->ApproximateMemoryUsage();
|
2016-08-23 22:44:13 +02:00
|
|
|
Status s = block_cache->Insert(
|
2018-06-29 17:55:33 +02:00
|
|
|
key, filter, usage, &DeleteCachedFilterEntry, &cache_handle,
|
2016-08-23 22:44:13 +02:00
|
|
|
rep_->table_options.cache_index_and_filter_blocks_with_high_priority
|
|
|
|
? Cache::Priority::HIGH
|
|
|
|
: Cache::Priority::LOW);
|
2016-03-11 02:35:19 +01:00
|
|
|
if (s.ok()) {
|
2018-12-08 00:04:20 +01:00
|
|
|
PERF_COUNTER_ADD(filter_block_read_count, 1);
|
2017-12-13 06:06:26 +01:00
|
|
|
if (get_context != nullptr) {
|
2018-07-21 01:43:13 +02:00
|
|
|
get_context->get_context_stats_.num_cache_add++;
|
|
|
|
get_context->get_context_stats_.num_cache_bytes_write += usage;
|
|
|
|
get_context->get_context_stats_.num_cache_filter_add++;
|
|
|
|
get_context->get_context_stats_.num_cache_filter_bytes_insert +=
|
|
|
|
usage;
|
2017-12-13 06:06:26 +01:00
|
|
|
} else {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_ADD);
|
2018-06-29 17:55:33 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usage);
|
2017-12-13 06:06:26 +01:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_FILTER_ADD);
|
2018-06-29 17:55:33 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, usage);
|
2017-12-13 06:06:26 +01:00
|
|
|
}
|
2016-03-11 02:35:19 +01:00
|
|
|
} else {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
|
|
|
|
delete filter;
|
|
|
|
return CachableEntry<FilterBlockReader>();
|
|
|
|
}
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-10 20:53:33 +02:00
|
|
|
return {filter, cache_handle ? block_cache : nullptr, cache_handle,
|
|
|
|
false /* own_value */};
|
2013-11-13 07:46:51 +01:00
|
|
|
}
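// Illustrative sketch, not part of this translation unit: the caching path
// above is driven by BlockBasedTableOptions. Assuming the public API of this
// RocksDB version (helper name is hypothetical), filter blocks are opted into
// the block cache roughly like this:
#if 0
#include "rocksdb/cache.h"
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

rocksdb::Options MakeOptionsWithCachedFilters() {
  rocksdb::BlockBasedTableOptions table_options;
  // One shared LRU block cache; filter/index blocks compete with data blocks.
  table_options.block_cache = rocksdb::NewLRUCache(256 << 20 /* 256 MiB */);
  table_options.cache_index_and_filter_blocks = true;
  // Keep filter/index entries in the cache's high-priority pool.
  table_options.cache_index_and_filter_blocks_with_high_priority = true;
  table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));

  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}
#endif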
|
|
|
|
|
2019-05-10 20:53:33 +02:00
|
|
|
CachableEntry<UncompressionDict>
|
2019-01-24 03:11:08 +01:00
|
|
|
BlockBasedTable::GetUncompressionDict(Rep* rep,
|
|
|
|
FilePrefetchBuffer* prefetch_buffer,
|
|
|
|
bool no_io, GetContext* get_context) {
|
|
|
|
if (!rep->table_options.cache_index_and_filter_blocks) {
|
|
|
|
// block cache is either disabled or not used for meta-blocks. In either
|
|
|
|
// case, BlockBasedTableReader is the owner of the uncompression dictionary.
|
2019-05-10 20:53:33 +02:00
|
|
|
return {rep->uncompression_dict.get(), nullptr /* cache */,
|
|
|
|
nullptr /* cache_handle */, false /* own_value */};
|
2019-01-24 03:11:08 +01:00
|
|
|
}
|
|
|
|
if (rep->compression_dict_handle.IsNull()) {
|
2019-05-10 20:53:33 +02:00
|
|
|
return CachableEntry<UncompressionDict>();
|
2019-01-24 03:11:08 +01:00
|
|
|
}
|
|
|
|
char cache_key_buf[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
|
|
|
auto cache_key =
|
|
|
|
GetCacheKey(rep->cache_key_prefix, rep->cache_key_prefix_size,
|
|
|
|
rep->compression_dict_handle, cache_key_buf);
|
|
|
|
auto cache_handle = GetEntryFromCache(
|
|
|
|
rep->table_options.block_cache.get(), cache_key, rep->level,
|
|
|
|
BLOCK_CACHE_COMPRESSION_DICT_MISS, BLOCK_CACHE_COMPRESSION_DICT_HIT,
|
|
|
|
get_context
|
|
|
|
? &get_context->get_context_stats_.num_cache_compression_dict_miss
|
|
|
|
: nullptr,
|
|
|
|
get_context
|
|
|
|
? &get_context->get_context_stats_.num_cache_compression_dict_hit
|
|
|
|
: nullptr,
|
|
|
|
rep->ioptions.statistics, get_context);
|
|
|
|
UncompressionDict* dict = nullptr;
|
|
|
|
if (cache_handle != nullptr) {
|
|
|
|
dict = reinterpret_cast<UncompressionDict*>(
|
|
|
|
rep->table_options.block_cache->Value(cache_handle));
|
|
|
|
} else if (no_io) {
|
|
|
|
// Do not invoke any io.
|
|
|
|
} else {
|
|
|
|
std::unique_ptr<const BlockContents> compression_dict_block;
|
|
|
|
Status s =
|
|
|
|
ReadCompressionDictBlock(rep, prefetch_buffer, &compression_dict_block);
|
|
|
|
size_t usage = 0;
|
|
|
|
if (s.ok()) {
|
|
|
|
assert(compression_dict_block != nullptr);
|
|
|
|
// TODO(ajkr): find a way to avoid the `compression_dict_block` data copy
|
|
|
|
dict = new UncompressionDict(compression_dict_block->data.ToString(),
|
|
|
|
rep->blocks_definitely_zstd_compressed,
|
|
|
|
rep->ioptions.statistics);
|
|
|
|
usage = dict->ApproximateMemoryUsage();
|
|
|
|
s = rep->table_options.block_cache->Insert(
|
|
|
|
cache_key, dict, usage, &DeleteCachedUncompressionDictEntry,
|
|
|
|
&cache_handle,
|
|
|
|
rep->table_options.cache_index_and_filter_blocks_with_high_priority
|
|
|
|
? Cache::Priority::HIGH
|
|
|
|
: Cache::Priority::LOW);
|
|
|
|
}
|
|
|
|
if (s.ok()) {
|
|
|
|
PERF_COUNTER_ADD(compression_dict_block_read_count, 1);
|
|
|
|
if (get_context != nullptr) {
|
|
|
|
get_context->get_context_stats_.num_cache_add++;
|
|
|
|
get_context->get_context_stats_.num_cache_bytes_write += usage;
|
|
|
|
get_context->get_context_stats_.num_cache_compression_dict_add++;
|
|
|
|
get_context->get_context_stats_
|
|
|
|
.num_cache_compression_dict_bytes_insert += usage;
|
|
|
|
} else {
|
|
|
|
RecordTick(rep->ioptions.statistics, BLOCK_CACHE_ADD);
|
|
|
|
RecordTick(rep->ioptions.statistics, BLOCK_CACHE_BYTES_WRITE, usage);
|
|
|
|
RecordTick(rep->ioptions.statistics, BLOCK_CACHE_COMPRESSION_DICT_ADD);
|
|
|
|
RecordTick(rep->ioptions.statistics,
|
|
|
|
BLOCK_CACHE_COMPRESSION_DICT_BYTES_INSERT, usage);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// There should be no way to get here if block cache insertion succeeded.
|
|
|
|
// Though it is still possible something failed earlier.
|
|
|
|
RecordTick(rep->ioptions.statistics, BLOCK_CACHE_ADD_FAILURES);
|
|
|
|
delete dict;
|
|
|
|
dict = nullptr;
|
|
|
|
assert(cache_handle == nullptr);
|
|
|
|
}
|
|
|
|
}
|
2019-05-10 20:53:33 +02:00
|
|
|
return {dict, cache_handle ? rep->table_options.block_cache.get() : nullptr,
|
|
|
|
cache_handle, false /* own_value */};
|
2019-01-24 03:11:08 +01:00
|
|
|
}
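// Illustrative sketch, not part of this translation unit: a compression
// dictionary block only exists, and is only looked up above, when dictionary
// compression was enabled at write time. Assuming the public API of this
// RocksDB version (helper name is hypothetical):
#if 0
#include "rocksdb/options.h"

rocksdb::Options MakeOptionsWithCompressionDict() {
  rocksdb::Options options;
  options.compression = rocksdb::kZSTD;
  // Train a dictionary of at most 16 KiB from up to ~1 MiB of sampled data.
  options.compression_opts.max_dict_bytes = 16 * 1024;
  options.compression_opts.zstd_max_train_bytes = 1024 * 1024;
  return options;
}
#endif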
|
|
|
|
|
2018-05-21 23:33:55 +02:00
|
|
|
// disable_prefix_seek should be set to true when prefix_extractor found in SST
|
|
|
|
// differs from the one in mutable_cf_options and index type is HashBasedIndex
|
2018-08-10 01:49:45 +02:00
|
|
|
InternalIteratorBase<BlockHandle>* BlockBasedTable::NewIndexIterator(
|
2018-05-21 23:33:55 +02:00
|
|
|
const ReadOptions& read_options, bool disable_prefix_seek,
|
2018-07-13 02:19:57 +02:00
|
|
|
IndexBlockIter* input_iter, CachableEntry<IndexReader>* index_entry,
|
2018-05-21 23:33:55 +02:00
|
|
|
GetContext* get_context) {
|
2014-03-01 03:19:07 +01:00
|
|
|
// index reader has already been pre-populated.
|
|
|
|
if (rep_->index_reader) {
|
2018-11-14 02:00:49 +01:00
|
|
|
// We don't return pinned data from index blocks, so no need
|
|
|
|
// to set `block_contents_pinned`.
|
2014-08-26 01:14:30 +02:00
|
|
|
return rep_->index_reader->NewIterator(
|
2018-05-21 23:33:55 +02:00
|
|
|
input_iter, read_options.total_order_seek || disable_prefix_seek,
|
|
|
|
read_options.fill_cache);
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
2016-04-01 19:42:39 +02:00
|
|
|
// we have a pinned index block
|
2019-05-10 20:53:33 +02:00
|
|
|
if (rep_->index_entry.IsCached()) {
|
2018-11-14 02:00:49 +01:00
|
|
|
// We don't return pinned data from index blocks, so no need
|
|
|
|
// to set `block_contents_pinned`.
|
2019-05-10 20:53:33 +02:00
|
|
|
return rep_->index_entry.GetValue()->NewIterator(
|
2018-05-21 23:33:55 +02:00
|
|
|
input_iter, read_options.total_order_seek || disable_prefix_seek,
|
|
|
|
read_options.fill_cache);
|
2016-04-01 19:42:39 +02:00
|
|
|
}
|
|
|
|
|
2015-07-09 01:34:48 +02:00
|
|
|
PERF_TIMER_GUARD(read_index_block_nanos);
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2017-03-22 17:11:23 +01:00
|
|
|
const bool no_io = read_options.read_tier == kBlockCacheTier;
|
2014-08-25 23:22:05 +02:00
|
|
|
Cache* block_cache = rep_->table_options.block_cache.get();
|
2014-03-01 03:19:07 +01:00
|
|
|
char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
Index Reader should not be reused after DB restart
Summary:
In block based table reader, wow we put index reader to block cache, which can be retrieved after DB restart. However, index reader may reference internal comparator, which can be destroyed after DB restarts, causing problems.
Fix it by making cache key identical per table reader.
Test Plan: Add a new test which failed with out the commit but now pass.
Reviewers: IslamAbdelRahman
Reviewed By: IslamAbdelRahman
Subscribers: maro, yhchiang, kradhakrishnan, leveldb, andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D55287
2016-03-11 00:16:11 +01:00
|
|
|
auto key =
|
|
|
|
GetCacheKeyFromOffset(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
|
|
|
|
rep_->dummy_index_reader_offset, cache_key);
|
2014-09-05 01:18:36 +02:00
|
|
|
Statistics* statistics = rep_->ioptions.statistics;
|
2018-07-21 01:43:13 +02:00
|
|
|
auto cache_handle = GetEntryFromCache(
|
2019-03-28 00:13:08 +01:00
|
|
|
block_cache, key, rep_->level, BLOCK_CACHE_INDEX_MISS,
|
|
|
|
BLOCK_CACHE_INDEX_HIT,
|
2018-07-21 01:43:13 +02:00
|
|
|
get_context ? &get_context->get_context_stats_.num_cache_index_miss
|
|
|
|
: nullptr,
|
|
|
|
get_context ? &get_context->get_context_stats_.num_cache_index_hit
|
|
|
|
: nullptr,
|
|
|
|
statistics, get_context);
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2014-03-01 03:19:07 +01:00
|
|
|
if (cache_handle == nullptr && no_io) {
|
2014-07-31 01:34:35 +02:00
|
|
|
if (input_iter != nullptr) {
|
Change and clarify the relationship between Valid(), status() and Seek*() for all iterators. Also fix some bugs
Summary:
Before this PR, Iterator/InternalIterator may simultaneously have non-ok status() and Valid() = true. That state means that the last operation failed, but the iterator is nevertheless positioned on some unspecified record. Likely intended uses of that are:
* If some sst files are corrupted, a normal iterator can be used to read the data from files that are not corrupted.
* When using read_tier = kBlockCacheTier, read the data that's in block cache, skipping over the data that is not.
However, this behavior wasn't documented well (and until recently the wiki on github had misleading incorrect information). In the code there's a lot of confusion about the relationship between status() and Valid(), and about whether Seek()/SeekToLast()/etc reset the status or not. There were a number of bugs caused by this confusion, both inside rocksdb and in the code that uses rocksdb (including ours).
This PR changes the convention to:
* If status() is not ok, Valid() always returns false.
* Any seek operation resets status. (Before the PR, it depended on iterator type and on particular error.)
This does sacrifice the two use cases listed above, but siying said it's ok.
Overview of the changes:
* A commit that adds missing status checks in MergingIterator. This fixes a bug that actually affects us, and we need it fixed. `DBIteratorTest.NonBlockingIterationBugRepro` explains the scenario.
* Changes to lots of iterator types to make all of them conform to the new convention. Some bug fixes along the way. By far the biggest changes are in DBIter, which is a big messy piece of code; I tried to make it less big and messy but mostly failed.
* A stress-test for DBIter, to gain some confidence that I didn't break it. It does a few million random operations on the iterator, while occasionally modifying the underlying data (like ForwardIterator does) and occasionally returning non-ok status from internal iterator.
To find the iterator types that needed changes I searched for "public .*Iterator" in the code. Here's an overview of all 27 iterator types:
Iterators that didn't need changes:
* status() is always ok(), or Valid() is always false: MemTableIterator, ModelIter, TestIterator, KVIter (2 classes with this name anonymous namespaces), LoggingForwardVectorIterator, VectorIterator, MockTableIterator, EmptyIterator, EmptyInternalIterator.
* Thin wrappers that always pass through Valid() and status(): ArenaWrappedDBIter, TtlIterator, InternalIteratorFromIterator.
Iterators with changes (see inline comments for details):
* DBIter - an overhaul:
- It used to silently skip corrupted keys (`FindParseableKey()`), which seems dangerous. This PR makes it just stop immediately after encountering a corrupted key, just like it would for other kinds of corruption. Let me know if there was actually some deeper meaning in this behavior and I should put it back.
- It had a few code paths silently discarding subiterator's status. The stress test caught a few.
- The backwards iteration code path was expecting the internal iterator's set of keys to be immutable. It's probably always true in practice at the moment, since ForwardIterator doesn't support backwards iteration, but this PR fixes it anyway. See added DBIteratorTest.ReverseToForwardBug for an example.
- Some parts of backwards iteration code path even did things like `assert(iter_->Valid())` after a seek, which is never a safe assumption.
- It used to not reset status on seek for some types of errors.
- Some simplifications and better comments.
- Some things got more complicated from the added error handling. I'm open to ideas for how to make it nicer.
* MergingIterator - check status after every operation on every subiterator, and in some places assert that valid subiterators have ok status.
* ForwardIterator - changed to the new convention, also slightly simplified.
* ForwardLevelIterator - fixed some bugs and simplified.
* LevelIterator - simplified.
* TwoLevelIterator - changed to the new convention. Also fixed a bug that would make SeekForPrev() sometimes silently ignore errors from first_level_iter_.
* BlockBasedTableIterator - minor changes.
* BlockIter - replaced `SetStatus()` with `Invalidate()` to make sure non-ok BlockIter is always invalid.
* PlainTableIterator - some seeks used to not reset status.
* CuckooTableIterator - tiny code cleanup.
* ManagedIterator - fixed some bugs.
* BaseDeltaIterator - changed to the new convention and fixed a bug.
* BlobDBIterator - seeks used to not reset status.
* KeyConvertingIterator - some small change.
Closes https://github.com/facebook/rocksdb/pull/3810
Differential Revision: D7888019
Pulled By: al13n321
fbshipit-source-id: 4aaf6d3421c545d16722a815b2fa2e7912bc851d
2018-05-17 11:44:14 +02:00
|
|
|
input_iter->Invalidate(Status::Incomplete("no blocking io"));
|
2014-07-31 01:34:35 +02:00
|
|
|
return input_iter;
|
|
|
|
} else {
|
2018-08-10 01:49:45 +02:00
|
|
|
return NewErrorInternalIterator<BlockHandle>(
|
|
|
|
Status::Incomplete("no blocking io"));
|
2014-07-31 01:34:35 +02:00
|
|
|
}
|
2014-03-01 03:19:07 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
IndexReader* index_reader = nullptr;
|
|
|
|
if (cache_handle != nullptr) {
|
2018-12-08 00:04:20 +01:00
|
|
|
PERF_COUNTER_ADD(block_cache_index_hit_count, 1);
|
2014-03-01 03:19:07 +01:00
|
|
|
index_reader =
|
|
|
|
reinterpret_cast<IndexReader*>(block_cache->Value(cache_handle));
|
2013-11-13 07:46:51 +01:00
|
|
|
} else {
|
2014-03-01 03:19:07 +01:00
|
|
|
// Create index reader and put it in the cache.
|
|
|
|
Status s;
|
2016-08-24 03:20:41 +02:00
|
|
|
TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:2");
|
2017-08-11 20:59:13 +02:00
|
|
|
s = CreateIndexReader(nullptr /* prefetch_buffer */, &index_reader);
|
2016-08-24 03:20:41 +02:00
|
|
|
TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:1");
|
|
|
|
TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:3");
|
|
|
|
TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:4");
|
2018-06-29 17:55:33 +02:00
|
|
|
size_t charge = 0;
|
2016-03-11 02:35:19 +01:00
|
|
|
if (s.ok()) {
|
2016-07-09 02:50:51 +02:00
|
|
|
assert(index_reader != nullptr);
|
2018-06-29 17:55:33 +02:00
|
|
|
charge = index_reader->ApproximateMemoryUsage();
|
2016-08-23 22:44:13 +02:00
|
|
|
s = block_cache->Insert(
|
2018-06-29 17:55:33 +02:00
|
|
|
key, index_reader, charge, &DeleteCachedIndexEntry, &cache_handle,
|
2016-08-23 22:44:13 +02:00
|
|
|
rep_->table_options.cache_index_and_filter_blocks_with_high_priority
|
|
|
|
? Cache::Priority::HIGH
|
|
|
|
: Cache::Priority::LOW);
|
2016-03-11 02:35:19 +01:00
|
|
|
}
|
2014-03-01 03:19:07 +01:00
|
|
|
|
2016-03-11 02:35:19 +01:00
|
|
|
if (s.ok()) {
|
2017-12-13 06:06:26 +01:00
|
|
|
if (get_context != nullptr) {
|
2018-07-21 01:43:13 +02:00
|
|
|
get_context->get_context_stats_.num_cache_add++;
|
|
|
|
get_context->get_context_stats_.num_cache_bytes_write += charge;
|
2017-12-13 06:06:26 +01:00
|
|
|
} else {
|
|
|
|
RecordTick(statistics, BLOCK_CACHE_ADD);
|
2018-06-29 17:55:33 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, charge);
|
2017-12-13 06:06:26 +01:00
|
|
|
}
|
2018-12-08 00:04:20 +01:00
|
|
|
PERF_COUNTER_ADD(index_block_read_count, 1);
|
2016-10-11 20:59:05 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_INDEX_ADD);
|
2018-06-29 17:55:33 +02:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, charge);
|
2016-03-11 02:35:19 +01:00
|
|
|
} else {
|
2016-07-09 02:50:51 +02:00
|
|
|
if (index_reader != nullptr) {
|
|
|
|
delete index_reader;
|
|
|
|
}
|
2016-03-11 02:35:19 +01:00
|
|
|
RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES);
|
2014-03-01 03:19:07 +01:00
|
|
|
// Make sure that if something goes wrong, index_reader remains intact.
|
2014-07-31 01:34:35 +02:00
|
|
|
if (input_iter != nullptr) {
|
2018-05-17 11:44:14 +02:00
|
|
|
input_iter->Invalidate(s);
|
2014-07-31 01:34:35 +02:00
|
|
|
return input_iter;
|
|
|
|
} else {
|
2018-08-10 01:49:45 +02:00
|
|
|
return NewErrorInternalIterator<BlockHandle>(s);
|
2014-07-31 01:34:35 +02:00
|
|
|
}
|
2014-03-01 03:19:07 +01:00
|
|
|
}
|
2013-11-13 07:46:51 +01:00
|
|
|
}
|
2014-03-01 03:19:07 +01:00
|
|
|
|
|
|
|
assert(cache_handle);
|
2018-11-14 02:00:49 +01:00
|
|
|
// We don't return pinned data from index blocks, so no need
|
|
|
|
// to set `block_contents_pinned`.
|
2014-08-26 01:14:30 +02:00
|
|
|
auto* iter = index_reader->NewIterator(
|
2018-05-21 23:33:55 +02:00
|
|
|
input_iter, read_options.total_order_seek || disable_prefix_seek);
|
2016-04-01 19:42:39 +02:00
|
|
|
|
|
|
|
// the caller would like to take ownership of the index block
|
|
|
|
// don't call RegisterCleanup() in this case, the caller will take care of it
|
|
|
|
if (index_entry != nullptr) {
|
2019-05-10 20:53:33 +02:00
|
|
|
*index_entry = {index_reader, block_cache, cache_handle,
|
|
|
|
false /* own_value */};
|
2016-04-01 19:42:39 +02:00
|
|
|
} else {
|
|
|
|
iter->RegisterCleanup(&ReleaseCachedEntry, block_cache, cache_handle);
|
|
|
|
}
|
|
|
|
|
2013-11-13 07:46:51 +01:00
|
|
|
return iter;
|
|
|
|
}
|
|
|
|
|
2014-04-25 21:22:23 +02:00
|
|
|
// Convert an index iterator value (i.e., an encoded BlockHandle)
|
|
|
|
// into an iterator over the contents of the corresponding block.
|
2014-07-31 01:34:35 +02:00
|
|
|
// If input_iter is null, allocate a new iterator
|
|
|
|
// If input_iter is not null, update this iter and return it
|
2018-07-13 02:19:57 +02:00
|
|
|
template <typename TBlockIter>
|
|
|
|
TBlockIter* BlockBasedTable::NewDataBlockIterator(
|
2017-03-22 17:11:23 +01:00
|
|
|
Rep* rep, const ReadOptions& ro, const BlockHandle& handle,
|
2018-07-13 02:19:57 +02:00
|
|
|
TBlockIter* input_iter, bool is_index, bool key_includes_seq,
|
2018-08-10 01:49:45 +02:00
|
|
|
bool index_key_is_full, GetContext* get_context, Status s,
|
|
|
|
FilePrefetchBuffer* prefetch_buffer) {
|
2015-07-09 01:34:48 +02:00
|
|
|
PERF_TIMER_GUARD(new_table_block_iter_nanos);
|
|
|
|
|
2014-08-25 23:22:05 +02:00
|
|
|
Cache* block_cache = rep->table_options.block_cache.get();
|
2014-04-25 21:22:23 +02:00
|
|
|
CachableEntry<Block> block;
|
2018-07-13 02:19:57 +02:00
|
|
|
TBlockIter* iter;
|
2019-01-24 03:11:08 +01:00
|
|
|
{
|
|
|
|
const bool no_io = (ro.read_tier == kBlockCacheTier);
|
|
|
|
auto uncompression_dict_storage =
|
|
|
|
GetUncompressionDict(rep, prefetch_buffer, no_io, get_context);
|
|
|
|
const UncompressionDict& uncompression_dict =
|
2019-05-10 20:53:33 +02:00
|
|
|
uncompression_dict_storage.GetValue() == nullptr
|
2019-01-24 03:11:08 +01:00
|
|
|
? UncompressionDict::GetEmptyDict()
|
2019-05-10 20:53:33 +02:00
|
|
|
: *uncompression_dict_storage.GetValue();
|
2016-11-05 17:10:51 +01:00
|
|
|
if (s.ok()) {
|
2019-01-24 03:11:08 +01:00
|
|
|
s = MaybeReadBlockAndLoadToCache(prefetch_buffer, rep, ro, handle,
|
|
|
|
uncompression_dict, &block, is_index,
|
|
|
|
get_context);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (input_iter != nullptr) {
|
|
|
|
iter = input_iter;
|
|
|
|
} else {
|
|
|
|
iter = new TBlockIter;
|
|
|
|
}
|
|
|
|
// Didn't get any data from block caches.
|
2019-05-10 20:53:33 +02:00
|
|
|
if (s.ok() && block.GetValue() == nullptr) {
|
2019-01-24 03:11:08 +01:00
|
|
|
if (no_io) {
|
|
|
|
// Could not read from block_cache and can't do IO
|
|
|
|
iter->Invalidate(Status::Incomplete("no blocking io"));
|
|
|
|
return iter;
|
|
|
|
}
|
|
|
|
std::unique_ptr<Block> block_value;
|
|
|
|
{
|
|
|
|
StopWatch sw(rep->ioptions.env, rep->ioptions.statistics,
|
|
|
|
READ_BLOCK_GET_MICROS);
|
|
|
|
s = ReadBlockFromFile(
|
|
|
|
rep->file.get(), prefetch_buffer, rep->footer, ro, handle,
|
|
|
|
&block_value, rep->ioptions,
|
|
|
|
rep->blocks_maybe_compressed /*do_decompress*/,
|
|
|
|
rep->blocks_maybe_compressed, uncompression_dict,
|
|
|
|
rep->persistent_cache_options,
|
|
|
|
is_index ? kDisableGlobalSequenceNumber : rep->global_seqno,
|
|
|
|
rep->table_options.read_amp_bytes_per_bit,
|
|
|
|
GetMemoryAllocator(rep->table_options));
|
|
|
|
}
|
|
|
|
if (s.ok()) {
|
2019-05-10 20:53:33 +02:00
|
|
|
block.SetOwnedValue(block_value.release());
|
2019-01-24 03:11:08 +01:00
|
|
|
}
|
2016-11-05 17:10:51 +01:00
|
|
|
}
|
2019-01-24 03:11:08 +01:00
|
|
|
// TODO(ajkr): also pin compression dictionary block when
|
|
|
|
// `pin_l0_filter_and_index_blocks_in_cache == true`.
|
2016-11-05 17:10:51 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (s.ok()) {
|
2019-05-10 20:53:33 +02:00
|
|
|
assert(block.GetValue() != nullptr);
|
2018-07-13 02:19:57 +02:00
|
|
|
const bool kTotalOrderSeek = true;
|
2018-11-14 02:00:49 +01:00
|
|
|
// Block contents are pinned, and they remain pinned after the iterator
|
2018-12-21 06:57:18 +01:00
|
|
|
// is destroyed as long as cleanup functions are moved to another object,
|
2018-11-14 02:00:49 +01:00
|
|
|
// when:
|
|
|
|
// 1. block cache handle is set to be released in cleanup function, or
|
2018-12-21 06:57:18 +01:00
|
|
|
// 2. it's pointing to immortal source. If own_bytes is true then we are
|
|
|
|
// not reading data from the original source, whether immortal or not.
|
2018-11-14 02:00:49 +01:00
|
|
|
// Otherwise, the block is pinned iff the source is immortal.
|
|
|
|
bool block_contents_pinned =
|
2019-05-10 20:53:33 +02:00
|
|
|
(block.IsCached() ||
|
|
|
|
(!block.GetValue()->own_bytes() && rep->immortal_table));
|
|
|
|
iter = block.GetValue()->NewIterator<TBlockIter>(
|
2018-05-26 03:41:31 +02:00
|
|
|
&rep->internal_comparator, rep->internal_comparator.user_comparator(),
|
2018-08-10 01:49:45 +02:00
|
|
|
iter, rep->ioptions.statistics, kTotalOrderSeek, key_includes_seq,
|
2018-11-14 02:00:49 +01:00
|
|
|
index_key_is_full, block_contents_pinned);
|
2019-05-10 20:53:33 +02:00
|
|
|
if (!block.IsCached()) {
|
2018-01-29 23:34:56 +01:00
|
|
|
if (!ro.fill_cache && rep->cache_key_prefix_size != 0) {
|
|
|
|
// insert a dummy record to block cache to track the memory usage
|
|
|
|
Cache::Handle* cache_handle;
|
|
|
|
// There are two other types of cache keys: 1) SST cache key added in
|
2018-11-14 02:00:49 +01:00
|
|
|
// `MaybeReadBlockAndLoadToCache` 2) dummy cache key added in
|
2018-01-29 23:34:56 +01:00
|
|
|
// `write_buffer_manager`. Use longer prefix (41 bytes) to differentiate
|
|
|
|
// from the SST cache key (31 bytes), and use a non-zero prefix to
|
|
|
|
// differentiate from `write_buffer_manager`
|
|
|
|
const size_t kExtraCacheKeyPrefix = kMaxVarint64Length * 4 + 1;
|
|
|
|
char cache_key[kExtraCacheKeyPrefix + kMaxVarint64Length];
|
|
|
|
// Prefix: use rep->cache_key_prefix padded by 0s
|
|
|
|
memset(cache_key, 0, kExtraCacheKeyPrefix + kMaxVarint64Length);
|
|
|
|
assert(rep->cache_key_prefix_size != 0);
|
|
|
|
assert(rep->cache_key_prefix_size <= kExtraCacheKeyPrefix);
|
|
|
|
memcpy(cache_key, rep->cache_key_prefix, rep->cache_key_prefix_size);
|
|
|
|
char* end = EncodeVarint64(cache_key + kExtraCacheKeyPrefix,
|
|
|
|
next_cache_key_id_++);
|
|
|
|
assert(end - cache_key <=
|
|
|
|
static_cast<int>(kExtraCacheKeyPrefix + kMaxVarint64Length));
|
|
|
|
Slice unique_key =
|
|
|
|
Slice(cache_key, static_cast<size_t>(end - cache_key));
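// Resulting key layout: bytes [0, cache_key_prefix_size) hold this table's
// cache key prefix, the rest up to byte 41 (kExtraCacheKeyPrefix) is zero
// padding, and the varint-encoded dummy id follows, so the key cannot
// collide with an SST block cache key, whose prefix is at most 31 bytes.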
|
2018-06-29 17:55:33 +02:00
|
|
|
s = block_cache->Insert(unique_key, nullptr,
|
2019-05-10 20:53:33 +02:00
|
|
|
block.GetValue()->ApproximateMemoryUsage(),
|
|
|
|
nullptr, &cache_handle);
|
2018-01-29 23:34:56 +01:00
|
|
|
if (s.ok()) {
|
|
|
|
if (cache_handle != nullptr) {
|
|
|
|
iter->RegisterCleanup(&ForceReleaseCachedEntry, block_cache,
|
|
|
|
cache_handle);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2016-11-05 17:10:51 +01:00
|
|
|
}
|
2019-05-10 20:53:33 +02:00
|
|
|
|
|
|
|
block.TransferTo(iter);
|
2016-11-05 17:10:51 +01:00
|
|
|
} else {
|
2019-05-10 20:53:33 +02:00
|
|
|
assert(block.GetValue() == nullptr);
|
2018-05-17 11:44:14 +02:00
|
|
|
iter->Invalidate(s);
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
2016-11-05 17:10:51 +01:00
|
|
|
return iter;
|
|
|
|
}
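// Illustrative sketch, not part of this translation unit: callers reach the
// `no_io` branches above by reading with read_tier = kBlockCacheTier, e.g.
// (helper name is hypothetical):
#if 0
#include "rocksdb/db.h"

rocksdb::Status CacheOnlyGet(rocksdb::DB* db, const rocksdb::Slice& key,
                             std::string* value) {
  rocksdb::ReadOptions ro;
  ro.read_tier = rocksdb::kBlockCacheTier;  // never issue file I/O
  rocksdb::Status s = db->Get(ro, key, value);
  // s.IsIncomplete() is true when the needed blocks are not already cached.
  return s;
}
#endif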
|
|
|
|
|
2018-11-14 02:00:49 +01:00
|
|
|
Status BlockBasedTable::MaybeReadBlockAndLoadToCache(
|
2017-08-18 19:53:03 +02:00
|
|
|
FilePrefetchBuffer* prefetch_buffer, Rep* rep, const ReadOptions& ro,
|
2019-01-24 03:11:08 +01:00
|
|
|
const BlockHandle& handle, const UncompressionDict& uncompression_dict,
|
2017-12-13 06:06:26 +01:00
|
|
|
CachableEntry<Block>* block_entry, bool is_index, GetContext* get_context) {
|
2017-08-23 23:55:26 +02:00
|
|
|
assert(block_entry != nullptr);
|
2016-11-05 17:10:51 +01:00
|
|
|
const bool no_io = (ro.read_tier == kBlockCacheTier);
|
|
|
|
Cache* block_cache = rep->table_options.block_cache.get();
|
2018-11-14 02:00:49 +01:00
|
|
|
|
|
|
|
// No point in caching compressed blocks if the table never goes away
|
2016-11-05 17:10:51 +01:00
|
|
|
Cache* block_cache_compressed =
|
2018-11-14 02:00:49 +01:00
|
|
|
rep->immortal_table ? nullptr
|
|
|
|
: rep->table_options.block_cache_compressed.get();
|
2014-04-25 21:22:23 +02:00
|
|
|
|
2018-11-14 02:00:49 +01:00
|
|
|
// First, try to get the block from the cache
|
|
|
|
//
|
2014-04-25 21:22:23 +02:00
|
|
|
// If either block cache is enabled, we'll try to read from it.
|
2016-11-05 17:10:51 +01:00
|
|
|
Status s;
|
2018-11-14 02:00:49 +01:00
|
|
|
char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
|
|
|
char compressed_cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
|
|
|
Slice key /* key to the block cache */;
|
|
|
|
Slice ckey /* key to the compressed block cache */;
|
2014-04-25 21:22:23 +02:00
|
|
|
if (block_cache != nullptr || block_cache_compressed != nullptr) {
|
|
|
|
// create key for block cache
|
|
|
|
if (block_cache != nullptr) {
|
2014-09-08 19:37:05 +02:00
|
|
|
key = GetCacheKey(rep->cache_key_prefix, rep->cache_key_prefix_size,
|
|
|
|
handle, cache_key);
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (block_cache_compressed != nullptr) {
|
|
|
|
ckey = GetCacheKey(rep->compressed_cache_key_prefix,
|
|
|
|
rep->compressed_cache_key_prefix_size, handle,
|
|
|
|
compressed_cache_key);
|
|
|
|
}
|
|
|
|
|
2018-11-14 02:00:49 +01:00
|
|
|
s = GetDataBlockFromCache(key, ckey, block_cache, block_cache_compressed,
|
2019-01-24 03:11:08 +01:00
|
|
|
rep, ro, block_entry, uncompression_dict,
|
2018-11-14 02:00:49 +01:00
|
|
|
rep->table_options.read_amp_bytes_per_bit,
|
|
|
|
is_index, get_context);
|
2014-04-25 21:22:23 +02:00
|
|
|
|
2018-11-14 02:00:49 +01:00
|
|
|
// Couldn't find the block in the cache. If I/O is allowed, read from the
|
|
|
|
// file.
|
2019-05-10 20:53:33 +02:00
|
|
|
if (block_entry->GetValue() == nullptr && !no_io && ro.fill_cache) {
|
2018-11-14 02:00:49 +01:00
|
|
|
Statistics* statistics = rep->ioptions.statistics;
|
|
|
|
bool do_decompress =
|
|
|
|
block_cache_compressed == nullptr && rep->blocks_maybe_compressed;
|
|
|
|
CompressionType raw_block_comp_type;
|
|
|
|
BlockContents raw_block_contents;
|
2014-04-25 21:22:23 +02:00
|
|
|
{
|
2014-09-05 01:18:36 +02:00
|
|
|
StopWatch sw(rep->ioptions.env, statistics, READ_BLOCK_GET_MICROS);
|
2018-11-14 02:00:49 +01:00
|
|
|
BlockFetcher block_fetcher(
|
2017-08-18 19:53:03 +02:00
|
|
|
rep->file.get(), prefetch_buffer, rep->footer, ro, handle,
|
2018-11-14 02:00:49 +01:00
|
|
|
&raw_block_contents, rep->ioptions,
|
2018-11-29 02:58:08 +01:00
|
|
|
do_decompress /* do uncompress */, rep->blocks_maybe_compressed,
|
2019-01-24 03:11:08 +01:00
|
|
|
uncompression_dict, rep->persistent_cache_options,
|
2018-11-29 02:58:08 +01:00
|
|
|
GetMemoryAllocator(rep->table_options),
|
|
|
|
GetMemoryAllocatorForCompressedBlock(rep->table_options));
|
2018-11-14 02:00:49 +01:00
|
|
|
s = block_fetcher.ReadBlockContents();
|
|
|
|
raw_block_comp_type = block_fetcher.get_compression_type();
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (s.ok()) {
|
2018-11-14 02:00:49 +01:00
|
|
|
SequenceNumber seq_no = rep->get_global_seqno(is_index);
|
|
|
|
// If filling cache is allowed and a cache is configured, try to put the
|
|
|
|
// block into the cache.
|
2016-08-27 03:55:58 +02:00
|
|
|
s = PutDataBlockToCache(
|
|
|
|
key, ckey, block_cache, block_cache_compressed, ro, rep->ioptions,
|
2018-11-14 02:00:49 +01:00
|
|
|
block_entry, &raw_block_contents, raw_block_comp_type,
|
2019-01-24 03:11:08 +01:00
|
|
|
rep->table_options.format_version, uncompression_dict, seq_no,
|
2018-11-29 02:58:08 +01:00
|
|
|
rep->table_options.read_amp_bytes_per_bit,
|
|
|
|
GetMemoryAllocator(rep->table_options), is_index,
|
2017-12-13 06:06:26 +01:00
|
|
|
is_index && rep->table_options
|
|
|
|
.cache_index_and_filter_blocks_with_high_priority
|
2017-03-22 17:11:23 +01:00
|
|
|
? Cache::Priority::HIGH
|
2017-12-13 06:06:26 +01:00
|
|
|
: Cache::Priority::LOW,
|
2018-11-29 02:58:08 +01:00
|
|
|
get_context);
|
2014-04-25 21:22:23 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2019-05-10 20:53:33 +02:00
|
|
|
assert(s.ok() || block_entry->GetValue() == nullptr);
|
2016-11-05 17:10:51 +01:00
|
|
|
return s;
|
2012-09-27 10:05:38 +02:00
|
|
|
}
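// Illustrative sketch, not part of this translation unit: both cache tiers
// consulted above come from BlockBasedTableOptions (helper name is
// hypothetical):
#if 0
#include "rocksdb/cache.h"
#include "rocksdb/table.h"

rocksdb::BlockBasedTableOptions MakeTwoTierCacheOptions() {
  rocksdb::BlockBasedTableOptions table_options;
  // Uncompressed block cache, always consulted first.
  table_options.block_cache = rocksdb::NewLRUCache(128 << 20);
  // Optional compressed block cache; skipped for immortal tables above.
  table_options.block_cache_compressed = rocksdb::NewLRUCache(64 << 20);
  return table_options;
}
#endif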
|
|
|
|
|
2018-02-13 01:57:56 +01:00
|
|
|
BlockBasedTable::PartitionedIndexIteratorState::PartitionedIndexIteratorState(
|
|
|
|
BlockBasedTable* table,
|
2018-05-26 03:41:31 +02:00
|
|
|
std::unordered_map<uint64_t, CachableEntry<Block>>* block_map,
|
2018-08-10 01:49:45 +02:00
|
|
|
bool index_key_includes_seq, bool index_key_is_full)
|
2018-05-26 03:41:31 +02:00
|
|
|
: table_(table),
|
|
|
|
block_map_(block_map),
|
2018-08-10 01:49:45 +02:00
|
|
|
index_key_includes_seq_(index_key_includes_seq),
|
|
|
|
index_key_is_full_(index_key_is_full) {}
|
2017-02-07 01:29:29 +01:00
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
InternalIteratorBase<BlockHandle>*
|
2018-02-13 01:57:56 +01:00
|
|
|
BlockBasedTable::PartitionedIndexIteratorState::NewSecondaryIterator(
|
2018-08-10 01:49:45 +02:00
|
|
|
const BlockHandle& handle) {
|
2017-02-07 01:29:29 +01:00
|
|
|
// Return a block iterator on the index partition
|
2018-02-13 01:57:56 +01:00
|
|
|
auto rep = table_->get_rep();
|
|
|
|
auto block = block_map_->find(handle.offset());
|
|
|
|
// This is a possible scenario since block cache might not have had space
|
|
|
|
// for the partition
|
|
|
|
if (block != block_map_->end()) {
|
|
|
|
PERF_COUNTER_ADD(block_cache_hit_count, 1);
|
|
|
|
RecordTick(rep->ioptions.statistics, BLOCK_CACHE_INDEX_HIT);
|
|
|
|
RecordTick(rep->ioptions.statistics, BLOCK_CACHE_HIT);
|
|
|
|
Cache* block_cache = rep->table_options.block_cache.get();
|
|
|
|
assert(block_cache);
|
|
|
|
RecordTick(rep->ioptions.statistics, BLOCK_CACHE_BYTES_READ,
|
2019-05-10 20:53:33 +02:00
|
|
|
block_cache->GetUsage(block->second.GetCacheHandle()));
|
2018-07-13 02:19:57 +02:00
|
|
|
Statistics* kNullStats = nullptr;
|
2018-11-14 02:00:49 +01:00
|
|
|
// We don't return pinned data from index blocks, so no need
|
|
|
|
// to set `block_contents_pinned`.
|
2019-05-10 20:53:33 +02:00
|
|
|
return block->second.GetValue()->NewIterator<IndexBlockIter>(
|
2018-05-26 03:41:31 +02:00
|
|
|
&rep->internal_comparator, rep->internal_comparator.user_comparator(),
|
2018-08-10 01:49:45 +02:00
|
|
|
nullptr, kNullStats, true, index_key_includes_seq_, index_key_is_full_);
|
2018-02-13 01:57:56 +01:00
|
|
|
}
|
|
|
|
// Create an empty iterator
|
2018-08-10 01:49:45 +02:00
|
|
|
return new IndexBlockIter();
|
2017-05-06 00:01:04 +02:00
|
|
|
}
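// Illustrative sketch, not part of this translation unit: the partitioned
// index iterator state above is only exercised with a two-level index.
// Assuming the public API of this RocksDB version (helper name is
// hypothetical), partitioned indexes/filters are enabled roughly like this:
#if 0
#include "rocksdb/table.h"

rocksdb::BlockBasedTableOptions MakePartitionedIndexOptions() {
  rocksdb::BlockBasedTableOptions table_options;
  table_options.index_type =
      rocksdb::BlockBasedTableOptions::kTwoLevelIndexSearch;
  table_options.partition_filters = true;    // requires kTwoLevelIndexSearch
  table_options.metadata_block_size = 4096;  // target partition block size
  return table_options;
}
#endif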
|
|
|
|
|
2013-08-13 23:04:56 +02:00
|
|
|
// This will be broken if the user specifies an unusual implementation
|
|
|
|
// of Options.comparator, or if the user specifies an unusual
|
2014-08-25 23:22:05 +02:00
|
|
|
// definition of prefixes in BlockBasedTableOptions.filter_policy.
|
|
|
|
// In particular, we require the following three properties:
|
2013-08-13 23:04:56 +02:00
|
|
|
//
|
|
|
|
// 1) key.starts_with(prefix(key))
|
|
|
|
// 2) Compare(prefix(key), key) <= 0.
|
|
|
|
// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
|
2013-08-23 23:49:57 +02:00
|
|
|
//
|
2013-11-13 07:46:51 +01:00
|
|
|
// Otherwise, this method guarantees no I/O will be incurred.
|
|
|
|
//
|
|
|
|
// REQUIRES: this method shouldn't be called while the DB lock is held.
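//
// For example (illustrative, assuming the default bytewise comparator), a
// fixed-length prefix extractor with prefix(key) = first 3 bytes of key
// satisfies all three properties: "abcdef" starts with "abc", "abc" compares
// <= "abcdef", and key ordering is preserved by the 3-byte prefixes.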
|
2018-06-27 00:56:26 +02:00
|
|
|
bool BlockBasedTable::PrefixMayMatch(
|
|
|
|
const Slice& internal_key, const ReadOptions& read_options,
|
|
|
|
const SliceTransform* options_prefix_extractor,
|
|
|
|
const bool need_upper_bound_check) {
|
2014-08-25 23:22:05 +02:00
|
|
|
if (!rep_->filter_policy) {
|
2014-06-10 18:36:59 +02:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-06-27 00:56:26 +02:00
|
|
|
const SliceTransform* prefix_extractor;
|
|
|
|
|
|
|
|
if (rep_->table_prefix_extractor == nullptr) {
|
|
|
|
if (need_upper_bound_check) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
prefix_extractor = options_prefix_extractor;
|
|
|
|
} else {
|
|
|
|
prefix_extractor = rep_->table_prefix_extractor.get();
|
|
|
|
}
|
2016-01-26 23:47:42 +01:00
|
|
|
auto user_key = ExtractUserKey(internal_key);
|
2018-05-21 23:33:55 +02:00
|
|
|
if (!prefix_extractor->InDomain(user_key)) {
|
2016-01-26 23:47:42 +01:00
|
|
|
return true;
|
|
|
|
}
|
2014-04-25 21:22:23 +02:00
|
|
|
|
2013-08-13 23:04:56 +02:00
|
|
|
bool may_match = true;
|
|
|
|
Status s;
|
|
|
|
|
2014-09-08 19:37:05 +02:00
|
|
|
// First, try to check with the full filter
|
2018-05-21 23:33:55 +02:00
|
|
|
auto filter_entry = GetFilter(prefix_extractor);
|
2019-05-10 20:53:33 +02:00
|
|
|
FilterBlockReader* filter = filter_entry.GetValue();
|
2018-06-27 00:56:26 +02:00
|
|
|
bool filter_checked = true;
|
2016-04-13 22:02:33 +02:00
|
|
|
if (filter != nullptr) {
|
|
|
|
if (!filter->IsBlockBased()) {
|
2017-03-22 17:11:23 +01:00
|
|
|
const Slice* const const_ikey_ptr = &internal_key;
|
2018-06-27 00:56:26 +02:00
|
|
|
may_match = filter->RangeMayExist(
|
|
|
|
read_options.iterate_upper_bound, user_key, prefix_extractor,
|
|
|
|
rep_->internal_comparator.user_comparator(), const_ikey_ptr,
|
|
|
|
&filter_checked, need_upper_bound_check);
|
2016-04-13 22:02:33 +02:00
|
|
|
} else {
|
2018-06-27 00:56:26 +02:00
|
|
|
// if prefix_extractor changed for block based filter, skip filter
|
|
|
|
if (need_upper_bound_check) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
auto prefix = prefix_extractor->Transform(user_key);
|
2017-03-22 17:11:23 +01:00
|
|
|
InternalKey internal_key_prefix(prefix, kMaxSequenceNumber, kTypeValue);
|
|
|
|
auto internal_prefix = internal_key_prefix.Encode();
|
|
|
|
|
|
|
|
// To prevent any io operation in this method, we set `read_tier` to make
|
|
|
|
// sure we always read index or filter only when they have already been
|
|
|
|
// loaded to memory.
|
|
|
|
ReadOptions no_io_read_options;
|
|
|
|
no_io_read_options.read_tier = kBlockCacheTier;
|
|
|
|
|
2016-04-13 22:02:33 +02:00
|
|
|
// Then, try to find it within each block
|
2018-05-21 23:33:55 +02:00
|
|
|
// we already know prefix_extractor and prefix_extractor_name must match
|
|
|
|
// because `CheckPrefixMayMatch` first checks `check_filter_ == true`
|
2018-11-09 20:17:34 +01:00
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> iiter(
|
2018-06-27 00:56:26 +02:00
|
|
|
NewIndexIterator(no_io_read_options,
|
|
|
|
/* need_upper_bound_check */ false));
|
2016-04-13 22:02:33 +02:00
|
|
|
iiter->Seek(internal_prefix);
|
|
|
|
|
|
|
|
if (!iiter->Valid()) {
|
|
|
|
// we're past end of file
|
|
|
|
// if it's incomplete, it means that we avoided I/O
|
|
|
|
// and we're not really sure that we're past the end
|
|
|
|
// of the file
|
|
|
|
may_match = iiter->status().IsIncomplete();
|
2018-05-29 21:09:01 +02:00
|
|
|
} else if ((rep_->table_properties &&
|
|
|
|
rep_->table_properties->index_key_is_user_key
|
2018-05-26 03:41:31 +02:00
|
|
|
? iiter->key()
|
|
|
|
: ExtractUserKey(iiter->key()))
|
2016-04-13 22:02:33 +02:00
|
|
|
.starts_with(ExtractUserKey(internal_prefix))) {
|
|
|
|
// we need to check for this subtle case because our only
|
|
|
|
// guarantee is that "the key is a string >= last key in that data
|
|
|
|
// block" according to the doc/table_format.txt spec.
|
|
|
|
//
|
|
|
|
// Suppose iiter->key() starts with the desired prefix; it is not
|
|
|
|
// necessarily the case that the corresponding data block will
|
|
|
|
// contain the prefix, since iiter->key() need not be in the
|
|
|
|
// block. However, the next data block may contain the prefix, so
|
|
|
|
// we return true to play it safe.
|
|
|
|
may_match = true;
|
|
|
|
} else if (filter->IsBlockBased()) {
|
|
|
|
// iiter->key() does NOT start with the desired prefix. Because
|
|
|
|
// Seek() finds the first key that is >= the seek target, this
|
|
|
|
// means that iiter->key() > prefix. Thus, any data blocks coming
|
|
|
|
// after the data block corresponding to iiter->key() cannot
|
|
|
|
// possibly contain the key. Thus, the corresponding data block
|
|
|
|
// is the only one that could potentially contain the prefix.
|
2018-08-10 01:49:45 +02:00
|
|
|
BlockHandle handle = iiter->value();
|
2018-05-21 23:33:55 +02:00
|
|
|
may_match =
|
|
|
|
filter->PrefixMayMatch(prefix, prefix_extractor, handle.offset());
|
2016-04-13 22:02:33 +02:00
|
|
|
}
|
2014-09-08 19:37:05 +02:00
|
|
|
}
|
2013-08-13 23:04:56 +02:00
|
|
|
}
|
2013-08-23 23:49:57 +02:00
|
|
|
|
2018-06-27 00:56:26 +02:00
|
|
|
if (filter_checked) {
|
|
|
|
Statistics* statistics = rep_->ioptions.statistics;
|
|
|
|
RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
|
|
|
|
if (!may_match) {
|
|
|
|
RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
|
|
|
|
}
|
2013-08-23 23:49:57 +02:00
|
|
|
}
|
|
|
|
|
2013-08-13 23:04:56 +02:00
|
|
|
return may_match;
|
|
|
|
}
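// Illustrative sketch, not part of this translation unit: PrefixMayMatch()
// only has work to do when both a filter policy and a prefix extractor are
// configured, e.g. (assuming the public API of this RocksDB version; helper
// name is hypothetical):
#if 0
#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"

rocksdb::Options MakeOptionsWithPrefixBloom() {
  rocksdb::BlockBasedTableOptions table_options;
  // Full-format bloom filter covering whole keys and prefixes.
  table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
  table_options.whole_key_filtering = true;

  rocksdb::Options options;
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(3));
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}
#endif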
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::Seek(const Slice& target) {
|
2018-07-26 02:03:39 +02:00
|
|
|
is_out_of_bound_ = false;
|
2018-06-27 00:56:26 +02:00
|
|
|
if (!CheckPrefixMayMatch(target)) {
|
2018-02-13 01:57:56 +01:00
|
|
|
ResetDataIter();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-05-01 23:23:48 +02:00
|
|
|
bool need_seek_index = true;
|
2019-05-10 21:36:40 +02:00
|
|
|
if (block_iter_points_to_real_block_ && block_iter_.Valid()) {
|
2019-05-01 23:23:48 +02:00
|
|
|
// Reseek.
|
|
|
|
prev_index_value_ = index_iter_->value();
|
|
|
|
// We can avoid an index seek if:
|
|
|
|
// 1. The new seek key is larger than the current key
|
|
|
|
// 2. The new seek key is within the upper bound of the block
|
|
|
|
// Since we don't necessarily know the internal key for either
|
|
|
|
// the current key or the upper bound, we check user keys and
|
|
|
|
// exclude the equality case. Considering internal keys can
|
|
|
|
// improve for the boundary cases, but it would complicate the
|
|
|
|
// code.
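// For example (hypothetical keys), if the iterator is on user key "k05" in a
// block whose index entry (upper bound) is "k20", Seek("k12") can reuse the
// current block, while Seek("k03"), Seek("k20") or Seek("k25") fall back to
// an index seek because one of the strict comparisons below fails.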
|
|
|
|
if (user_comparator_.Compare(ExtractUserKey(target),
|
|
|
|
block_iter_.user_key()) > 0 &&
|
|
|
|
user_comparator_.Compare(ExtractUserKey(target),
|
|
|
|
index_iter_->user_key()) < 0) {
|
|
|
|
need_seek_index = false;
|
|
|
|
}
|
2018-02-13 01:57:56 +01:00
|
|
|
}
|
|
|
|
|
2019-05-01 23:23:48 +02:00
|
|
|
if (need_seek_index) {
|
|
|
|
index_iter_->Seek(target);
|
|
|
|
if (!index_iter_->Valid()) {
|
|
|
|
ResetDataIter();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
InitDataBlock();
|
|
|
|
}
|
2018-02-13 01:57:56 +01:00
|
|
|
|
2018-07-13 02:19:57 +02:00
|
|
|
block_iter_.Seek(target);
|
2018-02-13 01:57:56 +01:00
|
|
|
|
|
|
|
FindKeyForward();
|
2019-04-16 20:32:03 +02:00
|
|
|
CheckOutOfBound();
|
2018-07-13 02:19:57 +02:00
|
|
|
assert(
|
|
|
|
!block_iter_.Valid() ||
|
|
|
|
(key_includes_seq_ && icomp_.Compare(target, block_iter_.key()) <= 0) ||
|
2019-03-27 18:24:16 +01:00
|
|
|
(!key_includes_seq_ && user_comparator_.Compare(ExtractUserKey(target),
|
|
|
|
block_iter_.key()) <= 0));
|
2018-02-13 01:57:56 +01:00
|
|
|
}
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::SeekForPrev(
|
|
|
|
const Slice& target) {
|
2018-07-26 02:03:39 +02:00
|
|
|
is_out_of_bound_ = false;
|
2018-06-27 00:56:26 +02:00
|
|
|
if (!CheckPrefixMayMatch(target)) {
|
2018-02-13 01:57:56 +01:00
|
|
|
ResetDataIter();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
SavePrevIndexValue();
|
|
|
|
|
|
|
|
// Call Seek() rather than SeekForPrev() in the index block, because the
|
|
|
|
// target data block will likely contain the position for `target`, the
|
|
|
|
// same as Seek(), rather than before.
|
|
|
|
// For example, if we have three data blocks, each containing two keys:
|
|
|
|
// [2, 4] [6, 8] [10, 12]
|
|
|
|
// (the keys in the index block would be [4, 8, 12])
|
|
|
|
// and the user calls SeekForPrev(7), we need to go to the second block,
|
|
|
|
// just like if they call Seek(7).
|
|
|
|
// The only case where the block is different is when they seek to a position
|
|
|
|
// on the boundary. For example, if they SeekForPrev(5), we should go to the
|
|
|
|
// first block, rather than the second. However, we don't have the information
|
|
|
|
// to distinguish the two unless we read the second block. In this case, we'll
|
|
|
|
// end up reading two blocks.
|
|
|
|
index_iter_->Seek(target);
|
|
|
|
|
|
|
|
if (!index_iter_->Valid()) {
|
|
|
|
index_iter_->SeekToLast();
|
|
|
|
if (!index_iter_->Valid()) {
|
|
|
|
ResetDataIter();
|
|
|
|
block_iter_points_to_real_block_ = false;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
InitDataBlock();
|
|
|
|
|
2018-07-13 02:19:57 +02:00
|
|
|
block_iter_.SeekForPrev(target);
|
2018-02-13 01:57:56 +01:00
|
|
|
|
|
|
|
FindKeyBackward();
|
2018-07-13 02:19:57 +02:00
|
|
|
assert(!block_iter_.Valid() ||
|
|
|
|
icomp_.Compare(target, block_iter_.key()) >= 0);
|
2018-02-13 01:57:56 +01:00
|
|
|
}
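The block-selection example in the comment above can be reproduced with a toy index. This standalone sketch uses the same three blocks [2, 4] [6, 8] [10, 12] with separator keys {4, 8, 12}; the containers and values are illustrative only:

```
#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  std::vector<int> separators = {4, 8, 12};
  // SeekForPrev(7) should land in the second block, which is exactly what an
  // index Seek(7) (first separator >= 7) selects.
  auto it = std::lower_bound(separators.begin(), separators.end(), 7);
  assert(*it == 8);
  // Boundary case from the comment: SeekForPrev(5) really belongs in the
  // first block, but Seek on the index picks the second; the reader only
  // discovers this after loading that block, hence the possible extra read.
  it = std::lower_bound(separators.begin(), separators.end(), 5);
  assert(*it == 8);
  return 0;
}
```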
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::SeekToFirst() {
|
2018-07-26 02:03:39 +02:00
|
|
|
is_out_of_bound_ = false;
|
2018-02-13 01:57:56 +01:00
|
|
|
SavePrevIndexValue();
|
|
|
|
index_iter_->SeekToFirst();
|
|
|
|
if (!index_iter_->Valid()) {
|
|
|
|
ResetDataIter();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
InitDataBlock();
|
2018-07-13 02:19:57 +02:00
|
|
|
block_iter_.SeekToFirst();
|
2018-02-13 01:57:56 +01:00
|
|
|
FindKeyForward();
|
2019-04-16 20:32:03 +02:00
|
|
|
CheckOutOfBound();
|
2018-02-13 01:57:56 +01:00
|
|
|
}
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::SeekToLast() {
|
2018-07-26 02:03:39 +02:00
|
|
|
is_out_of_bound_ = false;
|
2018-02-13 01:57:56 +01:00
|
|
|
SavePrevIndexValue();
|
|
|
|
index_iter_->SeekToLast();
|
|
|
|
if (!index_iter_->Valid()) {
|
|
|
|
ResetDataIter();
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
InitDataBlock();
|
2018-07-13 02:19:57 +02:00
|
|
|
block_iter_.SeekToLast();
|
2018-02-13 01:57:56 +01:00
|
|
|
FindKeyBackward();
|
|
|
|
}
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::Next() {
|
2018-02-13 01:57:56 +01:00
|
|
|
assert(block_iter_points_to_real_block_);
|
2018-07-13 02:19:57 +02:00
|
|
|
block_iter_.Next();
|
2018-02-13 01:57:56 +01:00
|
|
|
FindKeyForward();
|
|
|
|
}
|
|
|
|
|
2019-04-18 20:08:33 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
bool BlockBasedTableIterator<TBlockIter, TValue>::NextAndGetResult(
|
2019-05-17 19:23:38 +02:00
|
|
|
IterateResult* result) {
|
2019-04-18 20:08:33 +02:00
|
|
|
Next();
|
|
|
|
bool is_valid = Valid();
|
|
|
|
if (is_valid) {
|
2019-05-17 19:23:38 +02:00
|
|
|
result->key = key();
|
|
|
|
result->may_be_out_of_upper_bound = MayBeOutOfUpperBound();
|
2019-04-18 20:08:33 +02:00
|
|
|
}
|
|
|
|
return is_valid;
|
|
|
|
}
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::Prev() {
|
2018-02-13 01:57:56 +01:00
|
|
|
assert(block_iter_points_to_real_block_);
|
2018-07-13 02:19:57 +02:00
|
|
|
block_iter_.Prev();
|
2018-02-13 01:57:56 +01:00
|
|
|
FindKeyBackward();
|
|
|
|
}
|
|
|
|
|
2019-04-27 06:20:25 +02:00
|
|
|
// Based on experiments, a 256 KB readahead size provides the best
|
|
|
|
// performance for auto readahead. Experiment data is in PR #3282.
|
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
const size_t
|
|
|
|
BlockBasedTableIterator<TBlockIter, TValue>::kMaxAutoReadaheadSize =
|
|
|
|
256 * 1024;
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::InitDataBlock() {
|
|
|
|
BlockHandle data_block_handle = index_iter_->value();
|
2018-02-13 01:57:56 +01:00
|
|
|
if (!block_iter_points_to_real_block_ ||
|
2018-08-10 01:49:45 +02:00
|
|
|
data_block_handle.offset() != prev_index_value_.offset() ||
|
Change and clarify the relationship between Valid(), status() and Seek*() for all iterators. Also fix some bugs
Summary:
Before this PR, Iterator/InternalIterator may simultaneously have non-ok status() and Valid() = true. That state means that the last operation failed, but the iterator is nevertheless positioned on some unspecified record. Likely intended uses of that are:
* If some sst files are corrupted, a normal iterator can be used to read the data from files that are not corrupted.
* When using read_tier = kBlockCacheTier, read the data that's in block cache, skipping over the data that is not.
However, this behavior wasn't documented well (and until recently the wiki on github had misleading incorrect information). In the code there's a lot of confusion about the relationship between status() and Valid(), and about whether Seek()/SeekToLast()/etc reset the status or not. There were a number of bugs caused by this confusion, both inside rocksdb and in the code that uses rocksdb (including ours).
This PR changes the convention to:
* If status() is not ok, Valid() always returns false.
* Any seek operation resets status. (Before the PR, it depended on iterator type and on particular error.)
This does sacrifice the two use cases listed above, but siying said it's ok.
Overview of the changes:
* A commit that adds missing status checks in MergingIterator. This fixes a bug that actually affects us, and we need it fixed. `DBIteratorTest.NonBlockingIterationBugRepro` explains the scenario.
* Changes to lots of iterator types to make all of them conform to the new convention. Some bug fixes along the way. By far the biggest changes are in DBIter, which is a big messy piece of code; I tried to make it less big and messy but mostly failed.
* A stress-test for DBIter, to gain some confidence that I didn't break it. It does a few million random operations on the iterator, while occasionally modifying the underlying data (like ForwardIterator does) and occasionally returning non-ok status from internal iterator.
To find the iterator types that needed changes I searched for "public .*Iterator" in the code. Here's an overview of all 27 iterator types:
Iterators that didn't need changes:
* status() is always ok(), or Valid() is always false: MemTableIterator, ModelIter, TestIterator, KVIter (2 classes with this name anonymous namespaces), LoggingForwardVectorIterator, VectorIterator, MockTableIterator, EmptyIterator, EmptyInternalIterator.
* Thin wrappers that always pass through Valid() and status(): ArenaWrappedDBIter, TtlIterator, InternalIteratorFromIterator.
Iterators with changes (see inline comments for details):
* DBIter - an overhaul:
- It used to silently skip corrupted keys (`FindParseableKey()`), which seems dangerous. This PR makes it just stop immediately after encountering a corrupted key, just like it would for other kinds of corruption. Let me know if there was actually some deeper meaning in this behavior and I should put it back.
- It had a few code paths silently discarding subiterator's status. The stress test caught a few.
- The backwards iteration code path was expecting the internal iterator's set of keys to be immutable. It's probably always true in practice at the moment, since ForwardIterator doesn't support backwards iteration, but this PR fixes it anyway. See added DBIteratorTest.ReverseToForwardBug for an example.
- Some parts of backwards iteration code path even did things like `assert(iter_->Valid())` after a seek, which is never a safe assumption.
- It used to not reset status on seek for some types of errors.
- Some simplifications and better comments.
- Some things got more complicated from the added error handling. I'm open to ideas for how to make it nicer.
* MergingIterator - check status after every operation on every subiterator, and in some places assert that valid subiterators have ok status.
* ForwardIterator - changed to the new convention, also slightly simplified.
* ForwardLevelIterator - fixed some bugs and simplified.
* LevelIterator - simplified.
* TwoLevelIterator - changed to the new convention. Also fixed a bug that would make SeekForPrev() sometimes silently ignore errors from first_level_iter_.
* BlockBasedTableIterator - minor changes.
* BlockIter - replaced `SetStatus()` with `Invalidate()` to make sure non-ok BlockIter is always invalid.
* PlainTableIterator - some seeks used to not reset status.
* CuckooTableIterator - tiny code cleanup.
* ManagedIterator - fixed some bugs.
* BaseDeltaIterator - changed to the new convention and fixed a bug.
* BlobDBIterator - seeks used to not reset status.
* KeyConvertingIterator - some small change.
Closes https://github.com/facebook/rocksdb/pull/3810
Differential Revision: D7888019
Pulled By: al13n321
fbshipit-source-id: 4aaf6d3421c545d16722a815b2fa2e7912bc851d
2018-05-17 11:44:14 +02:00
|
|
|
// if previous attempt of reading the block missed cache, try again
|
2018-07-13 02:19:57 +02:00
|
|
|
block_iter_.status().IsIncomplete()) {
|
2018-02-13 01:57:56 +01:00
|
|
|
if (block_iter_points_to_real_block_) {
|
|
|
|
ResetDataIter();
|
|
|
|
}
|
|
|
|
auto* rep = table_->get_rep();
|
|
|
|
|
2019-04-27 06:20:25 +02:00
|
|
|
// Prefetch additional data for range scans (iterators). Enabled only for
|
|
|
|
// user reads.
|
|
|
|
// Implicit auto readahead:
|
|
|
|
// Enabled after 2 sequential IOs when ReadOptions.readahead_size == 0.
|
|
|
|
// Explicit user requested readahead:
|
|
|
|
// Enabled from the very first IO when ReadOptions.readahead_size is set.
|
|
|
|
if (!for_compaction_) {
|
|
|
|
if (read_options_.readahead_size == 0) {
|
|
|
|
// Implicit auto readahead
|
|
|
|
num_file_reads_++;
|
|
|
|
if (num_file_reads_ > kMinNumFileReadsToStartAutoReadahead) {
|
|
|
|
if (!rep->file->use_direct_io() &&
|
|
|
|
(data_block_handle.offset() +
|
|
|
|
static_cast<size_t>(data_block_handle.size()) +
|
|
|
|
kBlockTrailerSize >
|
|
|
|
readahead_limit_)) {
|
|
|
|
// Buffered I/O
|
|
|
|
// Discarding the return status of Prefetch calls intentionally, as
|
|
|
|
// we can fall back to reading from disk if Prefetch fails.
|
|
|
|
rep->file->Prefetch(data_block_handle.offset(), readahead_size_);
|
|
|
|
readahead_limit_ = static_cast<size_t>(data_block_handle.offset() +
|
|
|
|
readahead_size_);
|
|
|
|
// Keep exponentially increasing readahead size until
|
|
|
|
// kMaxAutoReadaheadSize.
|
|
|
|
readahead_size_ =
|
|
|
|
std::min(kMaxAutoReadaheadSize, readahead_size_ * 2);
|
|
|
|
} else if (rep->file->use_direct_io() && !prefetch_buffer_) {
|
|
|
|
// Direct I/O
|
|
|
|
// Let FilePrefetchBuffer take care of the readahead.
|
|
|
|
prefetch_buffer_.reset(
|
|
|
|
new FilePrefetchBuffer(rep->file.get(), kInitAutoReadaheadSize,
|
|
|
|
kMaxAutoReadaheadSize));
|
|
|
|
}
|
Improve direct IO range scan performance with readahead (#3884)
Summary:
This PR extends the improvements in #3282 to also work when using Direct IO.
We see **4.5X performance improvement** in seekrandom benchmark doing long range scans, when using direct reads, on flash.
**Description:**
This change improves the performance of iterators doing long range scans (e.g. big/full index or table scans in MyRocks) by using readahead and prefetching additional data on each disk IO, and storing in a local buffer. This prefetching is automatically enabled on noticing more than 2 IOs for the same table file during iteration. The readahead size starts with 8KB and is exponentially increased on each additional sequential IO, up to a max of 256 KB. This helps in cutting down the number of IOs needed to complete the range scan.
**Implementation Details:**
- Used `FilePrefetchBuffer` as the underlying buffer to store the readahead data. `FilePrefetchBuffer` can now take file_reader, readahead_size and max_readahead_size as input to the constructor, and automatically do readahead.
- `FilePrefetchBuffer::TryReadFromCache` can now call `FilePrefetchBuffer::Prefetch` if readahead is enabled.
- `AlignedBuffer` (which is the underlying store for `FilePrefetchBuffer`) now takes a few additional args in `AlignedBuffer::AllocateNewBuffer` to allow copying data from the old buffer.
- Made sure not to re-read partial chunks of data that were already available in the buffer, from device again.
- Fixed a couple of cases where `AlignedBuffer::cursize_` was not being properly kept up-to-date.
**Constraints:**
- Similar to #3282, this gets currently enabled only when ReadOptions.readahead_size = 0 (which is the default value).
- Since the prefetched data is stored in a temporary buffer allocated on heap, this could increase the memory usage if you have many iterators doing long range scans simultaneously.
- Enabled only for user reads, and disabled for compactions. Compaction reads are controlled by the options `use_direct_io_for_flush_and_compaction` and `compaction_readahead_size`, and the current feature takes precautions not to mess with them.
**Benchmarks:**
I used the same benchmark as used in #3282.
Data fill:
```
TEST_TMPDIR=/data/users/$USER/benchmarks/iter ./db_bench -benchmarks=fillrandom -num=1000000000 -compression_type="none" -level_compaction_dynamic_level_bytes
```
Do a long range scan: Seekrandom with large number of nexts
```
TEST_TMPDIR=/data/users/$USER/benchmarks/iter ./db_bench -benchmarks=seekrandom -use_direct_reads -duration=60 -num=1000000000 -use_existing_db -seek_nexts=10000 -statistics -histogram
```
```
Before:
seekrandom : 37939.906 micros/op 26 ops/sec; 29.2 MB/s (1636 of 1999 found)
With this change:
seekrandom : 8527.720 micros/op 117 ops/sec; 129.7 MB/s (6530 of 7999 found)
```
~4.5X perf improvement. Taken on an average of 3 runs.
Closes https://github.com/facebook/rocksdb/pull/3884
Differential Revision: D8082143
Pulled By: sagar0
fbshipit-source-id: 4d7a8561cbac03478663713df4d31ad2620253bb
2018-06-21 20:02:49 +02:00
|
|
|
}
|
2019-04-27 06:20:25 +02:00
|
|
|
} else if (!prefetch_buffer_) {
|
|
|
|
// Explicit user requested readahead
|
|
|
|
// The actual condition is:
|
|
|
|
// if (read_options_.readahead_size != 0 && !prefetch_buffer_)
|
|
|
|
prefetch_buffer_.reset(new FilePrefetchBuffer(
|
|
|
|
rep->file.get(), read_options_.readahead_size,
|
|
|
|
read_options_.readahead_size));
|
2018-02-13 01:57:56 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
Status s;
|
2018-07-13 02:19:57 +02:00
|
|
|
BlockBasedTable::NewDataBlockIterator<TBlockIter>(
|
|
|
|
rep, read_options_, data_block_handle, &block_iter_, is_index_,
|
2018-08-10 01:49:45 +02:00
|
|
|
key_includes_seq_, index_key_is_full_,
|
2018-07-13 02:19:57 +02:00
|
|
|
/* get_context */ nullptr, s, prefetch_buffer_.get());
|
2018-02-13 01:57:56 +01:00
|
|
|
block_iter_points_to_real_block_ = true;
|
2019-05-17 19:23:38 +02:00
|
|
|
if (read_options_.iterate_upper_bound != nullptr) {
|
|
|
|
data_block_within_upper_bound_ =
|
|
|
|
(user_comparator_.Compare(*read_options_.iterate_upper_bound,
|
|
|
|
index_iter_->user_key()) > 0);
|
|
|
|
}
|
2018-02-13 01:57:56 +01:00
|
|
|
}
|
|
|
|
}
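The implicit auto-readahead policy above (kick in after more than two reads from the file, then double the window on every further read up to 256 KB) can be condensed into a small state machine. A sketch with constants local to the sketch (kInit, kMax, kMinReads are illustrative names, not the RocksDB symbols):

```
#include <cassert>
#include <cstddef>

struct AutoReadahead {
  static const size_t kInit = 8 * 1024;    // initial window
  static const size_t kMax = 256 * 1024;   // analogue of kMaxAutoReadaheadSize
  static const int kMinReads = 2;          // reads before readahead starts
  int num_reads = 0;
  size_t window = kInit;

  // Returns the readahead size to use for this read; 0 means no readahead.
  size_t OnRead() {
    if (++num_reads <= kMinReads) {
      return 0;
    }
    size_t use = window;
    size_t next = window * 2;
    window = next > kMax ? kMax : next;  // exponential growth, capped
    return use;
  }
};

int main() {
  AutoReadahead ra;
  assert(ra.OnRead() == 0);          // 1st read: no readahead yet
  assert(ra.OnRead() == 0);          // 2nd read: still none
  assert(ra.OnRead() == 8 * 1024);   // 3rd read: initial window
  assert(ra.OnRead() == 16 * 1024);  // window doubles
  for (int i = 0; i < 10; ++i) {
    ra.OnRead();
  }
  assert(ra.OnRead() == 256 * 1024);  // saturates at the cap
  return 0;
}
```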
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
2019-04-18 20:08:33 +02:00
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::FindBlockForward() {
|
2018-02-13 01:57:56 +01:00
|
|
|
// TODO: the while loop is inherited from two-level-iterator. We don't know
|
|
|
|
// whether a block can be empty; if not, the loop could be replaced by an "if".
|
2019-04-18 20:08:33 +02:00
|
|
|
do {
|
2018-07-13 02:19:57 +02:00
|
|
|
if (!block_iter_.status().ok()) {
|
2018-02-13 01:57:56 +01:00
|
|
|
return;
|
|
|
|
}
|
2019-04-16 20:32:03 +02:00
|
|
|
// Whether the next data block is out of the upper bound, if one is set.
|
2019-05-17 19:23:38 +02:00
|
|
|
bool next_block_is_out_of_bound =
|
|
|
|
read_options_.iterate_upper_bound != nullptr &&
|
|
|
|
block_iter_points_to_real_block_ && !data_block_within_upper_bound_;
|
2018-02-13 01:57:56 +01:00
|
|
|
ResetDataIter();
|
|
|
|
index_iter_->Next();
|
2019-04-16 20:32:03 +02:00
|
|
|
if (next_block_is_out_of_bound) {
|
|
|
|
// The next block is out of bound. No need to read it.
|
|
|
|
TEST_SYNC_POINT_CALLBACK("BlockBasedTableIterator:out_of_bound", nullptr);
|
|
|
|
// We need to make sure this is not the last data block before setting
|
|
|
|
// is_out_of_bound_, since the index key for the last data block can be
|
|
|
|
// larger than the smallest key of the next file on the same level.
|
|
|
|
if (index_iter_->Valid()) {
|
|
|
|
is_out_of_bound_ = true;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
2018-02-13 01:57:56 +01:00
|
|
|
|
|
|
|
if (index_iter_->Valid()) {
|
|
|
|
InitDataBlock();
|
2018-07-13 02:19:57 +02:00
|
|
|
block_iter_.SeekToFirst();
|
2018-02-13 01:57:56 +01:00
|
|
|
} else {
|
|
|
|
return;
|
|
|
|
}
|
2019-04-18 20:08:33 +02:00
|
|
|
} while (!block_iter_.Valid());
|
|
|
|
}
|
|
|
|
|
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::FindKeyForward() {
|
|
|
|
assert(!is_out_of_bound_);
|
|
|
|
|
|
|
|
if (!block_iter_.Valid()) {
|
|
|
|
FindBlockForward();
|
2018-02-13 01:57:56 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::FindKeyBackward() {
|
2018-07-13 02:19:57 +02:00
|
|
|
while (!block_iter_.Valid()) {
|
|
|
|
if (!block_iter_.status().ok()) {
|
2018-02-13 01:57:56 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
ResetDataIter();
|
|
|
|
index_iter_->Prev();
|
|
|
|
|
|
|
|
if (index_iter_->Valid()) {
|
|
|
|
InitDataBlock();
|
2018-07-13 02:19:57 +02:00
|
|
|
block_iter_.SeekToLast();
|
2018-02-13 01:57:56 +01:00
|
|
|
} else {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// We could check the lower bound here too, but we opt not to do it for
|
|
|
|
// code simplicity.
|
|
|
|
}
|
|
|
|
|
2019-04-16 20:32:03 +02:00
|
|
|
template <class TBlockIter, typename TValue>
|
|
|
|
void BlockBasedTableIterator<TBlockIter, TValue>::CheckOutOfBound() {
|
|
|
|
if (read_options_.iterate_upper_bound != nullptr &&
|
|
|
|
block_iter_points_to_real_block_ && block_iter_.Valid()) {
|
|
|
|
is_out_of_bound_ = user_comparator_.Compare(
|
|
|
|
*read_options_.iterate_upper_bound, user_key()) <= 0;
|
|
|
|
}
|
|
|
|
}
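CheckOutOfBound treats iterate_upper_bound as exclusive: the iterator goes out of bound as soon as the bound compares less than or equal to the current user key. A tiny sketch of that predicate, assuming plain byte-wise string ordering:

```
#include <cassert>
#include <string>

// Exclusive upper bound: out of bound once upper_bound <= user key.
bool OutOfBound(const std::string& upper_bound, const std::string& user_key) {
  return upper_bound.compare(user_key) <= 0;
}

int main() {
  assert(!OutOfBound("m", "a"));  // "a" < "m": still in range
  assert(OutOfBound("m", "m"));   // the bound itself is excluded
  assert(OutOfBound("m", "z"));   // past the bound
  return 0;
}
```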
|
|
|
|
|
2018-05-21 23:33:55 +02:00
|
|
|
InternalIterator* BlockBasedTable::NewIterator(
|
|
|
|
const ReadOptions& read_options, const SliceTransform* prefix_extractor,
|
2018-06-25 22:07:38 +02:00
|
|
|
Arena* arena, bool skip_filters, bool for_compaction) {
|
2018-06-27 00:56:26 +02:00
|
|
|
bool need_upper_bound_check =
|
2018-06-01 06:27:08 +02:00
|
|
|
PrefixExtractorChanged(rep_->table_properties.get(), prefix_extractor);
|
2018-05-26 03:41:31 +02:00
|
|
|
const bool kIsNotIndex = false;
|
2018-02-13 01:57:56 +01:00
|
|
|
if (arena == nullptr) {
|
2018-07-13 02:19:57 +02:00
|
|
|
return new BlockBasedTableIterator<DataBlockIter>(
|
2018-02-13 01:57:56 +01:00
|
|
|
this, read_options, rep_->internal_comparator,
|
2018-05-21 23:33:55 +02:00
|
|
|
NewIndexIterator(
|
|
|
|
read_options,
|
2018-06-27 00:56:26 +02:00
|
|
|
need_upper_bound_check &&
|
2018-05-26 03:41:31 +02:00
|
|
|
rep_->index_type == BlockBasedTableOptions::kHashSearch),
|
2018-02-13 01:57:56 +01:00
|
|
|
!skip_filters && !read_options.total_order_seek &&
|
2018-06-27 00:56:26 +02:00
|
|
|
prefix_extractor != nullptr,
|
|
|
|
need_upper_bound_check, prefix_extractor, kIsNotIndex,
|
2019-03-26 18:09:26 +01:00
|
|
|
true /*key_includes_seq*/, true /*index_key_is_full*/, for_compaction);
|
2018-02-13 01:57:56 +01:00
|
|
|
} else {
|
2018-07-13 02:19:57 +02:00
|
|
|
auto* mem =
|
|
|
|
arena->AllocateAligned(sizeof(BlockBasedTableIterator<DataBlockIter>));
|
|
|
|
return new (mem) BlockBasedTableIterator<DataBlockIter>(
|
2018-02-13 01:57:56 +01:00
|
|
|
this, read_options, rep_->internal_comparator,
|
2018-06-27 00:56:26 +02:00
|
|
|
NewIndexIterator(read_options, need_upper_bound_check),
|
2018-02-13 01:57:56 +01:00
|
|
|
!skip_filters && !read_options.total_order_seek &&
|
2018-06-27 00:56:26 +02:00
|
|
|
prefix_extractor != nullptr,
|
|
|
|
need_upper_bound_check, prefix_extractor, kIsNotIndex,
|
2019-03-26 18:09:26 +01:00
|
|
|
true /*key_includes_seq*/, true /*index_key_is_full*/, for_compaction);
|
2018-02-13 01:57:56 +01:00
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
}
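NewIterator above either heap-allocates the iterator or placement-news it into arena memory so its lifetime follows the arena. A generic sketch of that pattern with a stand-in Arena and iterator type (these are not the RocksDB classes):

```
#include <cassert>
#include <cstddef>
#include <new>
#include <vector>

// Stand-in arena: hands out max-aligned chunks from one buffer it owns.
struct Arena {
  std::vector<char> buf = std::vector<char>(1 << 12);
  size_t used = 0;
  void* AllocateAligned(size_t n) {
    const size_t a = alignof(std::max_align_t);
    size_t off = (used + a - 1) / a * a;
    used = off + n;
    return buf.data() + off;
  }
};

struct DummyIter {
  int pos = 0;
};

DummyIter* NewIter(Arena* arena) {
  if (arena == nullptr) {
    return new DummyIter();  // heap path: the caller must delete it
  }
  void* mem = arena->AllocateAligned(sizeof(DummyIter));
  // Arena path: lifetime tied to the arena. DummyIter is trivially
  // destructible, so no explicit destructor call is needed later.
  return new (mem) DummyIter();
}

int main() {
  Arena arena;
  DummyIter* arena_it = NewIter(&arena);  // never deleted; arena owns the bytes
  assert(arena_it->pos == 0);
  DummyIter* heap_it = NewIter(nullptr);
  delete heap_it;
  return 0;
}
```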
|
|
|
|
|
2018-11-29 00:26:56 +01:00
|
|
|
FragmentedRangeTombstoneIterator* BlockBasedTable::NewRangeTombstoneIterator(
|
2018-11-15 01:18:16 +01:00
|
|
|
const ReadOptions& read_options) {
|
Cache fragmented range tombstones in BlockBasedTableReader (#4493)
Summary:
This allows tombstone fragmenting to only be performed when the table is opened, and cached for subsequent accesses.
On the same DB used in #4449, running `readrandom` results in the following:
```
readrandom : 0.983 micros/op 1017076 ops/sec; 78.3 MB/s (63103 of 100000 found)
```
Now that Get performance in the presence of range tombstones is reasonable, I also compared the performance between a DB with range tombstones, "expanded" range tombstones (several point tombstones that cover the same keys the equivalent range tombstone would cover, a common workaround for DeleteRange), and no range tombstones. The created DBs had 5 million keys each, and DeleteRange was called at regular intervals (depending on the total number of range tombstones being written) after 4.5 million Puts. The table below summarizes the results of a `readwhilewriting` benchmark (in order to provide somewhat more realistic results):
```
Tombstones? | avg micros/op | stddev micros/op | avg ops/s | stddev ops/s
----------------- | ------------- | ---------------- | ------------ | ------------
None | 0.6186 | 0.04637 | 1,625,252.90 | 124,679.41
500 Expanded | 0.6019 | 0.03628 | 1,666,670.40 | 101,142.65
500 Unexpanded | 0.6435 | 0.03994 | 1,559,979.40 | 104,090.52
1k Expanded | 0.6034 | 0.04349 | 1,665,128.10 | 125,144.57
1k Unexpanded | 0.6261 | 0.03093 | 1,600,457.50 | 79,024.94
5k Expanded | 0.6163 | 0.05926 | 1,636,668.80 | 154,888.85
5k Unexpanded | 0.6402 | 0.04002 | 1,567,804.70 | 100,965.55
10k Expanded | 0.6036 | 0.05105 | 1,667,237.70 | 142,830.36
10k Unexpanded | 0.6128 | 0.02598 | 1,634,633.40 | 72,161.82
25k Expanded | 0.6198 | 0.04542 | 1,620,980.50 | 116,662.93
25k Unexpanded | 0.5478 | 0.0362 | 1,833,059.10 | 121,233.81
50k Expanded | 0.5104 | 0.04347 | 1,973,107.90 | 184,073.49
50k Unexpanded | 0.4528 | 0.03387 | 2,219,034.50 | 170,984.32
```
After a large enough quantity of range tombstones are written, range tombstone Gets can become faster than reading from an equivalent DB with several point tombstones.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4493
Differential Revision: D10842844
Pulled By: abhimadan
fbshipit-source-id: a7d44534f8120e6aabb65779d26c6b9df954c509
2018-10-26 04:25:00 +02:00
|
|
|
if (rep_->fragmented_range_dels == nullptr) {
|
|
|
|
return nullptr;
|
|
|
|
}
|
2018-11-15 01:18:16 +01:00
|
|
|
SequenceNumber snapshot = kMaxSequenceNumber;
|
|
|
|
if (read_options.snapshot != nullptr) {
|
|
|
|
snapshot = read_options.snapshot->GetSequenceNumber();
|
|
|
|
}
|
|
|
|
return new FragmentedRangeTombstoneIterator(
|
2018-12-11 20:44:24 +01:00
|
|
|
rep_->fragmented_range_dels, rep_->internal_comparator, snapshot);
|
2018-10-26 04:25:00 +02:00
|
|
|
}
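The snapshot handling above follows the usual pattern: a read without an explicit snapshot sees everything (maximum sequence number), otherwise only entries at or below the snapshot's sequence number are visible. A minimal sketch with stand-in types; the constant value here is illustrative, not the actual RocksDB kMaxSequenceNumber:

```
#include <cassert>
#include <cstdint>
#include <limits>

using SequenceNumber = uint64_t;
const SequenceNumber kToyMaxSequenceNumber =
    std::numeric_limits<uint64_t>::max();

struct Snapshot {
  SequenceNumber seq;
  SequenceNumber GetSequenceNumber() const { return seq; }
};

SequenceNumber ReadSequence(const Snapshot* snapshot) {
  SequenceNumber s = kToyMaxSequenceNumber;  // no snapshot: see everything
  if (snapshot != nullptr) {
    s = snapshot->GetSequenceNumber();       // snapshot: bounded visibility
  }
  return s;
}

int main() {
  assert(ReadSequence(nullptr) == kToyMaxSequenceNumber);
  Snapshot snap{42};
  assert(ReadSequence(&snap) == 42);
  return 0;
}
```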
|
|
|
|
|
2018-05-21 23:33:55 +02:00
|
|
|
bool BlockBasedTable::FullFilterKeyMayMatch(
|
|
|
|
const ReadOptions& read_options, FilterBlockReader* filter,
|
|
|
|
const Slice& internal_key, const bool no_io,
|
|
|
|
const SliceTransform* prefix_extractor) const {
|
2015-02-03 02:42:57 +01:00
|
|
|
if (filter == nullptr || filter->IsBlockBased()) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
Slice user_key = ExtractUserKey(internal_key);
|
2017-03-22 17:11:23 +01:00
|
|
|
const Slice* const const_ikey_ptr = &internal_key;
|
2018-04-06 00:54:24 +02:00
|
|
|
bool may_match = true;
|
2016-06-10 00:48:45 +02:00
|
|
|
if (filter->whole_key_filtering()) {
|
2018-05-21 23:33:55 +02:00
|
|
|
may_match = filter->KeyMayMatch(user_key, prefix_extractor, kNotValid,
|
|
|
|
no_io, const_ikey_ptr);
|
|
|
|
} else if (!read_options.total_order_seek && prefix_extractor &&
|
2018-04-06 00:54:24 +02:00
|
|
|
rep_->table_properties->prefix_extractor_name.compare(
|
2018-05-21 23:33:55 +02:00
|
|
|
prefix_extractor->Name()) == 0 &&
|
|
|
|
prefix_extractor->InDomain(user_key) &&
|
|
|
|
!filter->PrefixMayMatch(prefix_extractor->Transform(user_key),
|
|
|
|
prefix_extractor, kNotValid, false,
|
|
|
|
const_ikey_ptr)) {
|
2018-04-06 00:54:24 +02:00
|
|
|
may_match = false;
|
|
|
|
}
|
|
|
|
if (may_match) {
|
|
|
|
RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_POSITIVE);
|
2018-10-24 21:10:59 +02:00
|
|
|
PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_positive, 1, rep_->level);
|
2015-02-03 02:42:57 +01:00
|
|
|
}
|
2018-04-06 00:54:24 +02:00
|
|
|
return may_match;
|
2015-02-03 02:42:57 +01:00
|
|
|
}
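FullFilterKeyMayMatch above boils down to: consult the whole-key filter when the table has one; otherwise trust a negative prefix-filter answer only when total_order_seek is off, the table was built with the same prefix extractor, and the key is in the extractor's domain. A condensed sketch with a stand-in filter interface (ToyFilter and its lambdas are illustrative, not the RocksDB FilterBlockReader API):

```
#include <cassert>
#include <functional>
#include <string>

struct ToyFilter {
  bool whole_key_filtering;
  std::function<bool(const std::string&)> key_may_match;
  std::function<bool(const std::string&)> prefix_may_match;
};

bool KeyMayMatch(const ToyFilter& filter, const std::string& user_key,
                 bool total_order_seek, bool same_prefix_extractor,
                 bool in_domain, const std::string& prefix) {
  if (filter.whole_key_filtering) {
    return filter.key_may_match(user_key);  // whole-key bloom check
  }
  if (!total_order_seek && same_prefix_extractor && in_domain &&
      !filter.prefix_may_match(prefix)) {
    return false;  // the prefix filter can definitively rule the key out
  }
  return true;  // otherwise the filter cannot exclude the key
}

int main() {
  // A prefix-only filter that always answers "definitely not present".
  ToyFilter f{false, nullptr, [](const std::string&) { return false; }};
  // All preconditions hold, so the negative answer is trusted.
  assert(!KeyMayMatch(f, "user1-k", false, true, true, "user1"));
  // With total_order_seek the prefix filter is bypassed: assume "maybe".
  assert(KeyMayMatch(f, "user1-k", true, true, true, "user1"));
  return 0;
}
```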
|
|
|
|
|
Introduce a new MultiGet batching implementation (#5011)
Summary:
This PR introduces a new MultiGet() API, with the underlying implementation grouping keys based on SST file and batching lookups in a file. The reason for the new API is twofold - the definition allows callers to allocate storage for status and values on stack instead of std::vector, as well as return values as PinnableSlices in order to avoid copying, and it keeps the original MultiGet() implementation intact while we experiment with batching.
Batching is useful when there is some spatial locality to the keys being queries, as well as larger batch sizes. The main benefits are due to -
1. Fewer function calls, especially to BlockBasedTableReader::MultiGet() and FullFilterBlockReader::KeysMayMatch()
2. Bloom filter cachelines can be prefetched, hiding the cache miss latency
The next step is to optimize the binary searches in the level_storage_info, index blocks and data blocks, since we could reduce the number of key comparisons if the keys are relatively close to each other. The batching optimizations also need to be extended to other formats, such as PlainTable and filter formats. This also needs to be added to db_stress.
Benchmark results from db_bench for various batch size/locality of reference combinations are given below. Locality was simulated by offsetting the keys in a batch by a stride length. Each SST file is about 8.6MB uncompressed and key/value size is 16/100 uncompressed. To focus on the cpu benefit of batching, the runs were single threaded and bound to the same cpu to eliminate interference from other system events. The results show a 10-25% improvement in micros/op from smaller to larger batch sizes (4 - 32).
Batch Sizes
1 | 2 | 4 | 8 | 16 | 32
Random pattern (Stride length 0)
4.158 | 4.109 | 4.026 | 4.05 | 4.1 | 4.074 - Get
4.438 | 4.302 | 4.165 | 4.122 | 4.096 | 4.075 - MultiGet (no batching)
4.461 | 4.256 | 4.277 | 4.11 | 4.182 | 4.14 - MultiGet (w/ batching)
Good locality (Stride length 16)
4.048 | 3.659 | 3.248 | 2.99 | 2.84 | 2.753
4.429 | 3.728 | 3.406 | 3.053 | 2.911 | 2.781
4.452 | 3.45 | 2.833 | 2.451 | 2.233 | 2.135
Good locality (Stride length 256)
4.066 | 3.786 | 3.581 | 3.447 | 3.415 | 3.232
4.406 | 4.005 | 3.644 | 3.49 | 3.381 | 3.268
4.393 | 3.649 | 3.186 | 2.882 | 2.676 | 2.62
Medium locality (Stride length 4096)
4.012 | 3.922 | 3.768 | 3.61 | 3.582 | 3.555
4.364 | 4.057 | 3.791 | 3.65 | 3.57 | 3.465
4.479 | 3.758 | 3.316 | 3.077 | 2.959 | 2.891
dbbench command used (on a DB with 4 levels, 12 million keys)-
TEST_TMPDIR=/dev/shm numactl -C 10 ./db_bench.tmp -use_existing_db=true -benchmarks="readseq,multireadrandom" -write_buffer_size=4194304 -target_file_size_base=4194304 -max_bytes_for_level_base=16777216 -num=12000000 -reads=12000000 -duration=90 -threads=1 -compression_type=none -cache_size=4194304000 -batch_size=32 -disable_auto_compactions=true -bloom_bits=10 -cache_index_and_filter_blocks=true -pin_l0_filter_and_index_blocks_in_cache=true -multiread_batched=true -multiread_stride=4
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5011
Differential Revision: D14348703
Pulled By: anand1976
fbshipit-source-id: 774406dab3776d979c809522a67bedac6c17f84b
2019-04-11 23:24:09 +02:00
|
|
|
void BlockBasedTable::FullFilterKeysMayMatch(
|
|
|
|
const ReadOptions& read_options, FilterBlockReader* filter,
|
|
|
|
MultiGetRange* range, const bool no_io,
|
|
|
|
const SliceTransform* prefix_extractor) const {
|
|
|
|
if (filter == nullptr || filter->IsBlockBased()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (filter->whole_key_filtering()) {
|
|
|
|
filter->KeysMayMatch(range, prefix_extractor, kNotValid, no_io);
|
|
|
|
} else if (!read_options.total_order_seek && prefix_extractor &&
|
|
|
|
rep_->table_properties->prefix_extractor_name.compare(
|
|
|
|
prefix_extractor->Name()) == 0) {
|
|
|
|
for (auto iter = range->begin(); iter != range->end(); ++iter) {
|
|
|
|
Slice user_key = iter->lkey->user_key();
|
|
|
|
|
|
|
|
if (!prefix_extractor->InDomain(user_key)) {
|
|
|
|
range->SkipKey(iter);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
filter->PrefixesMayMatch(range, prefix_extractor, kNotValid, false);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
Skip bottom-level filter block caching when hit-optimized
Summary:
When Get() or NewIterator() trigger file loads, skip caching the filter block if
(1) optimize_filters_for_hits is set and (2) the file is on the bottommost
level. Also skip checking filters under the same conditions, which means that
for a preloaded file or a file that was trivially-moved to the bottom level, its
filter block will eventually expire from the cache.
- added parameters/instance variables in various places in order to propagate the config ("skip_filters") from version_set to block_based_table_reader
- in BlockBasedTable::Rep, this optimization prevents filter from being loaded when the file is opened simply by setting filter_policy = nullptr
- in BlockBasedTable::Get/BlockBasedTable::NewIterator, this optimization prevents filter from being used (even if it was loaded already) by setting filter = nullptr
Test Plan:
updated unit test:
$ ./db_test --gtest_filter=DBTest.OptimizeFiltersForHits
will also run 'make check'
Reviewers: sdong, igor, paultuckfield, anthony, rven, kradhakrishnan, IslamAbdelRahman, yhchiang
Reviewed By: yhchiang
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D51633
2015-12-23 19:15:07 +01:00
|
|
|
Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
|
2018-05-21 23:33:55 +02:00
|
|
|
GetContext* get_context,
|
|
|
|
const SliceTransform* prefix_extractor,
|
|
|
|
bool skip_filters) {
|
2018-05-26 03:41:31 +02:00
|
|
|
assert(key.size() >= 8); // key must be internal key
|
2012-04-17 17:36:46 +02:00
|
|
|
Status s;
|
2017-03-22 17:11:23 +01:00
|
|
|
const bool no_io = read_options.read_tier == kBlockCacheTier;
|
2015-12-23 19:15:07 +01:00
|
|
|
CachableEntry<FilterBlockReader> filter_entry;
|
2019-04-11 23:24:09 +02:00
|
|
|
bool may_match;
|
|
|
|
FilterBlockReader* filter = nullptr;
|
|
|
|
{
|
|
|
|
if (!skip_filters) {
|
|
|
|
filter_entry =
|
|
|
|
GetFilter(prefix_extractor, /*prefetch_buffer*/ nullptr,
|
|
|
|
read_options.read_tier == kBlockCacheTier, get_context);
|
|
|
|
}
|
2019-05-10 20:53:33 +02:00
|
|
|
filter = filter_entry.GetValue();
|
[RocksDB] [MergeOperator] The new Merge Interface! Uses merge sequences.
Summary:
Here are the major changes to the Merge Interface. It has been expanded
to handle cases where the MergeOperator is not associative. It does so by stacking
up merge operations while scanning through the key history (i.e.: during Get() or
Compaction), until a valid Put/Delete/end-of-history is encountered; it then
applies all of the merge operations in the correct sequence starting with the
base/sentinel value.
I have also introduced an "AssociativeMerge" function which allows the user to
take advantage of associative merge operations (such as in the case of counters).
The implementation will always attempt to merge the operations/operands themselves
together when they are encountered, and will resort to the "stacking" method if
and only if the "associative-merge" fails.
This implementation is conjectured to allow MergeOperator to handle the general
case, while still providing the user with the ability to take advantage of certain
efficiencies in their own merge-operator / data-structure.
NOTE: This is a preliminary diff. This must still go through a lot of review,
revision, and testing. Feedback welcome!
Test Plan:
-This is a preliminary diff. I have only just begun testing/debugging it.
-I will be testing this with the existing MergeOperator use-cases and unit-tests
(counters, string-append, and redis-lists)
-I will be "desk-checking" and walking through the code with the help gdb.
-I will find a way of stress-testing the new interface / implementation using
db_bench, db_test, merge_test, and/or db_stress.
-I will ensure that my tests cover all cases: Get-Memtable,
Get-Immutable-Memtable, Get-from-Disk, Iterator-Range-Scan, Flush-Memtable-to-L0,
Compaction-L0-L1, Compaction-Ln-L(n+1), Put/Delete found, Put/Delete not-found,
end-of-history, end-of-file, etc.
-A lot of feedback from the reviewers.
Reviewers: haobo, dhruba, zshao, emayanke
Reviewed By: haobo
CC: leveldb
Differential Revision: https://reviews.facebook.net/D11499
2013-08-06 05:14:32 +02:00
|
|
|
|
2019-04-11 23:24:09 +02:00
|
|
|
// First check the full filter
|
|
|
|
// If the full filter is not useful, then go into each block
|
|
|
|
may_match = FullFilterKeyMayMatch(read_options, filter, key, no_io,
|
|
|
|
prefix_extractor);
|
|
|
|
}
|
|
|
|
if (!may_match) {
|
2014-09-08 19:37:05 +02:00
|
|
|
RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
|
2018-10-24 21:10:59 +02:00
|
|
|
PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
|
2014-09-08 19:37:05 +02:00
|
|
|
} else {
|
2018-07-13 02:19:57 +02:00
|
|
|
IndexBlockIter iiter_on_stack;
|
2018-05-21 23:33:55 +02:00
|
|
|
// If the prefix_extractor found in the table differs from the options, disable
|
|
|
|
// BlockPrefixIndex. Only do this check when index_type is kHashSearch.
|
2018-06-27 00:56:26 +02:00
|
|
|
bool need_upper_bound_check = false;
|
2018-05-21 23:33:55 +02:00
|
|
|
if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
|
2018-06-27 00:56:26 +02:00
|
|
|
need_upper_bound_check = PrefixExtractorChanged(
|
2018-06-05 04:59:44 +02:00
|
|
|
rep_->table_properties.get(), prefix_extractor);
|
2018-05-21 23:33:55 +02:00
|
|
|
}
|
2018-06-27 00:56:26 +02:00
|
|
|
auto iiter =
|
|
|
|
NewIndexIterator(read_options, need_upper_bound_check, &iiter_on_stack,
|
|
|
|
/* index_entry */ nullptr, get_context);
|
2018-08-10 01:49:45 +02:00
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> iiter_unique_ptr;
|
2017-02-07 01:29:29 +01:00
|
|
|
if (iiter != &iiter_on_stack) {
|
2017-03-22 17:11:23 +01:00
|
|
|
iiter_unique_ptr.reset(iiter);
|
2017-02-07 01:29:29 +01:00
|
|
|
}
|
2014-09-08 19:37:05 +02:00
|
|
|
|
2018-04-06 00:54:24 +02:00
|
|
|
bool matched = false;  // if such user key matched a key in SST
|
2014-09-08 19:37:05 +02:00
|
|
|
bool done = false;
|
2017-02-07 01:29:29 +01:00
|
|
|
for (iiter->Seek(key); iiter->Valid() && !done; iiter->Next()) {
|
2018-08-10 01:49:45 +02:00
|
|
|
BlockHandle handle = iiter->value();
|
2014-01-27 22:53:22 +01:00
|
|
|
|
2014-09-08 19:37:05 +02:00
|
|
|
bool not_exist_in_filter =
|
|
|
|
filter != nullptr && filter->IsBlockBased() == true &&
|
2018-05-21 23:33:55 +02:00
|
|
|
!filter->KeyMayMatch(ExtractUserKey(key), prefix_extractor,
|
|
|
|
handle.offset(), no_io);
|
2014-09-08 19:37:05 +02:00
|
|
|
|
|
|
|
if (not_exist_in_filter) {
|
|
|
|
// Not found
|
|
|
|
// TODO: think about interaction with Merge. If a user key cannot
|
|
|
|
// cross more than one data block, we should be fine.
|
|
|
|
RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
|
2018-10-24 21:10:59 +02:00
|
|
|
PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_useful, 1, rep_->level);
|
2014-09-08 19:37:05 +02:00
|
|
|
break;
|
|
|
|
} else {
|
2018-07-13 02:19:57 +02:00
|
|
|
DataBlockIter biter;
|
|
|
|
NewDataBlockIterator<DataBlockIter>(
|
|
|
|
rep_, read_options, iiter->value(), &biter, false,
|
2019-01-24 00:36:31 +01:00
|
|
|
true /* key_includes_seq */, true /* index_key_is_full */,
|
|
|
|
get_context);
|
2014-09-08 19:37:05 +02:00
|
|
|
|
2016-02-09 20:20:22 +01:00
|
|
|
if (read_options.read_tier == kBlockCacheTier &&
|
2016-12-30 00:48:24 +01:00
|
|
|
biter.status().IsIncomplete()) {
|
2014-09-08 19:37:05 +02:00
|
|
|
// couldn't get block from block_cache
|
2018-02-13 01:57:56 +01:00
|
|
|
// Update Saver.state to Found because we are only looking for
|
|
|
|
// whether we can guarantee the key is not there when "no_io" is set
|
2014-09-29 20:09:09 +02:00
|
|
|
get_context->MarkKeyMayExist();
|
2013-03-21 23:59:47 +01:00
|
|
|
break;
|
|
|
|
}
|
2016-12-30 00:48:24 +01:00
|
|
|
if (!biter.status().ok()) {
|
|
|
|
s = biter.status();
|
2014-09-08 19:37:05 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2018-08-15 23:27:47 +02:00
|
|
|
bool may_exist = biter.SeekForGet(key);
|
|
|
|
if (!may_exist) {
|
|
|
|
// HashSeek cannot find the key in this block and the iter is not at
|
|
|
|
// the end of the block, i.e. the key cannot be in the following blocks
|
|
|
|
// either. In this case, the seek_key cannot be found, so we break
|
|
|
|
// from the top level for-loop.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2014-09-08 19:37:05 +02:00
|
|
|
// Call the *saver function on each entry/block until it returns false
|
2018-08-15 23:27:47 +02:00
|
|
|
for (; biter.Valid(); biter.Next()) {
|
2014-09-08 19:37:05 +02:00
|
|
|
ParsedInternalKey parsed_key;
|
2016-12-30 00:48:24 +01:00
|
|
|
if (!ParseInternalKey(biter.key(), &parsed_key)) {
|
2014-09-08 19:37:05 +02:00
|
|
|
s = Status::Corruption(Slice());
|
|
|
|
}
|
|
|
|
|
Copy Get() result when file reads use mmap
Summary:
For iterator reads, a `SuperVersion` is pinned to preserve a snapshot of SST files, and `Block`s are pinned to allow `key()` and `value()` to return pointers directly into a RocksDB memory region. This works for both non-mmap reads, where the block owns the memory region, and mmap reads, where the file owns the memory region.
For point reads with `PinnableSlice`, only the `Block` object is pinned. This works for non-mmap reads because the block owns the memory region, so even if the file is deleted after compaction, the memory region survives. However, for mmap reads, file deletion causes the memory region to which the `PinnableSlice` refers to be unmapped. The result is usually a segfault upon accessing the `PinnableSlice`, although sometimes it returned wrong results (I repro'd this a bunch of times with `db_stress`).
This PR copies the value into the `PinnableSlice` when it comes from mmap'd memory. We can tell whether the `Block` owns its memory using `Block::cachable()`, which is unset when reads do not use the provided buffer as is the case with mmap file reads. When that is false we ensure the result of `Get()` is copied.
This feels like a short-term solution as ideally we'd have the `PinnableSlice` pin the mmap'd memory so we can do zero-copy reads. It seemed hard so I chose this approach to fix correctness in the meantime.
Closes https://github.com/facebook/rocksdb/pull/3881
Differential Revision: D8076288
Pulled By: ajkr
fbshipit-source-id: 31d78ec010198723522323dbc6ea325122a46b08
2018-06-02 01:46:32 +02:00
|
|
|
if (!get_context->SaveValue(
|
|
|
|
parsed_key, biter.value(), &matched,
|
|
|
|
biter.IsValuePinned() ? &biter : nullptr)) {
|
2014-09-08 19:37:05 +02:00
|
|
|
done = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2016-12-30 00:48:24 +01:00
|
|
|
s = biter.status();
|
2012-04-17 17:36:46 +02:00
|
|
|
}
|
2017-03-22 17:11:23 +01:00
|
|
|
if (done) {
|
|
|
|
// Avoid the extra Next which is expensive in two-level indexes
|
|
|
|
break;
|
|
|
|
}
|
2014-09-08 19:37:05 +02:00
|
|
|
}
|
2018-04-06 00:54:24 +02:00
|
|
|
if (matched && filter != nullptr && !filter->IsBlockBased()) {
|
|
|
|
RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
|
2018-10-24 21:10:59 +02:00
|
|
|
PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
|
|
|
|
rep_->level);
|
2018-04-06 00:54:24 +02:00
|
|
|
}
|
2014-09-08 19:37:05 +02:00
|
|
|
if (s.ok()) {
|
2017-02-07 01:29:29 +01:00
|
|
|
s = iiter->status();
|
2012-04-17 17:36:46 +02:00
|
|
|
}
|
|
|
|
}
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2012-04-17 17:36:46 +02:00
|
|
|
return s;
|
|
|
|
}
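Stripped of block caching, bloom filters, and statistics, the lookup loop above walks the candidate data blocks in index order, lets the per-block check rule blocks out, and scans entries until the value is saved or the key is proven absent. A toy sketch using std::map blocks in place of the index and data block iterators; the names and containers are illustrative only:

```
#include <cassert>
#include <map>
#include <string>
#include <vector>

using Block = std::map<std::string, std::string>;

bool Get(const std::vector<Block>& blocks, const std::string& key,
         std::string* value) {
  for (const Block& block : blocks) {
    // Per-block skip stand-in: blocks whose key range ends before the key
    // cannot match (the real code would also consult a bloom filter here).
    if (block.empty() || key > block.rbegin()->first) {
      continue;
    }
    auto it = block.find(key);  // data block seek
    if (it != block.end()) {
      *value = it->second;      // stand-in for GetContext::SaveValue
      return true;
    }
    return false;  // key would have been in this block; stop searching
  }
  return false;
}

int main() {
  std::vector<Block> blocks = {{{"a", "1"}, {"c", "2"}}, {{"f", "3"}}};
  std::string v;
  assert(Get(blocks, "f", &v) && v == "3");
  assert(!Get(blocks, "d", &v));
  return 0;
}
```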
|
|
|
|
|
2019-04-11 23:24:09 +02:00
|
|
|
using MultiGetRange = MultiGetContext::Range;
|
|
|
|
void BlockBasedTable::MultiGet(const ReadOptions& read_options,
|
|
|
|
const MultiGetRange* mget_range,
|
|
|
|
const SliceTransform* prefix_extractor,
|
|
|
|
bool skip_filters) {
|
|
|
|
const bool no_io = read_options.read_tier == kBlockCacheTier;
|
|
|
|
CachableEntry<FilterBlockReader> filter_entry;
|
|
|
|
FilterBlockReader* filter = nullptr;
|
|
|
|
MultiGetRange sst_file_range(*mget_range, mget_range->begin(),
|
|
|
|
mget_range->end());
|
|
|
|
{
|
|
|
|
if (!skip_filters) {
|
|
|
|
// TODO: Figure out where the stats should go
|
|
|
|
filter_entry = GetFilter(prefix_extractor, /*prefetch_buffer*/ nullptr,
|
|
|
|
read_options.read_tier == kBlockCacheTier,
|
|
|
|
nullptr /*get_context*/);
|
|
|
|
}
|
2019-05-10 20:53:33 +02:00
|
|
|
filter = filter_entry.GetValue();
|
Introduce a new MultiGet batching implementation (#5011)
Summary:
This PR introduces a new MultiGet() API, with the underlying implementation grouping keys based on SST file and batching lookups in a file. The reason for the new API is twofold - the definition allows callers to allocate storage for status and values on stack instead of std::vector, as well as return values as PinnableSlices in order to avoid copying, and it keeps the original MultiGet() implementation intact while we experiment with batching.
Batching is useful when there is some spatial locality to the keys being queries, as well as larger batch sizes. The main benefits are due to -
1. Fewer function calls, especially to BlockBasedTableReader::MultiGet() and FullFilterBlockReader::KeysMayMatch()
2. Bloom filter cachelines can be prefetched, hiding the cache miss latency
The next step is to optimize the binary searches in the level_storage_info, index blocks and data blocks, since we could reduce the number of key comparisons if the keys are relatively close to each other. The batching optimizations also need to be extended to other formats, such as PlainTable and filter formats. This also needs to be added to db_stress.
Benchmark results from db_bench for various batch size/locality of reference combinations are given below. Locality was simulated by offsetting the keys in a batch by a stride length. Each SST file is about 8.6MB uncompressed and key/value size is 16/100 uncompressed. To focus on the cpu benefit of batching, the runs were single threaded and bound to the same cpu to eliminate interference from other system events. The results show a 10-25% improvement in micros/op from smaller to larger batch sizes (4 - 32).
Batch Sizes (micros/op)
1 | 2 | 4 | 8 | 16 | 32
Random pattern (Stride length 0)
4.158 | 4.109 | 4.026 | 4.05 | 4.1 | 4.074 - Get
4.438 | 4.302 | 4.165 | 4.122 | 4.096 | 4.075 - MultiGet (no batching)
4.461 | 4.256 | 4.277 | 4.11 | 4.182 | 4.14 - MultiGet (w/ batching)
Good locality (Stride length 16)
4.048 | 3.659 | 3.248 | 2.99 | 2.84 | 2.753 - Get
4.429 | 3.728 | 3.406 | 3.053 | 2.911 | 2.781 - MultiGet (no batching)
4.452 | 3.45 | 2.833 | 2.451 | 2.233 | 2.135 - MultiGet (w/ batching)
Good locality (Stride length 256)
4.066 | 3.786 | 3.581 | 3.447 | 3.415 | 3.232 - Get
4.406 | 4.005 | 3.644 | 3.49 | 3.381 | 3.268 - MultiGet (no batching)
4.393 | 3.649 | 3.186 | 2.882 | 2.676 | 2.62 - MultiGet (w/ batching)
Medium locality (Stride length 4096)
4.012 | 3.922 | 3.768 | 3.61 | 3.582 | 3.555 - Get
4.364 | 4.057 | 3.791 | 3.65 | 3.57 | 3.465 - MultiGet (no batching)
4.479 | 3.758 | 3.316 | 3.077 | 2.959 | 2.891 - MultiGet (w/ batching)
db_bench command used (on a DB with 4 levels, 12 million keys):
TEST_TMPDIR=/dev/shm numactl -C 10 ./db_bench.tmp -use_existing_db=true -benchmarks="readseq,multireadrandom" -write_buffer_size=4194304 -target_file_size_base=4194304 -max_bytes_for_level_base=16777216 -num=12000000 -reads=12000000 -duration=90 -threads=1 -compression_type=none -cache_size=4194304000 -batch_size=32 -disable_auto_compactions=true -bloom_bits=10 -cache_index_and_filter_blocks=true -pin_l0_filter_and_index_blocks_in_cache=true -multiread_batched=true -multiread_stride=4
Pull Request resolved: https://github.com/facebook/rocksdb/pull/5011
Differential Revision: D14348703
Pulled By: anand1976
fbshipit-source-id: 774406dab3776d979c809522a67bedac6c17f84b
2019-04-11 23:24:09 +02:00
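A minimal usage sketch of the batched lookup described above, assuming the DB::MultiGet() overload that takes parallel key/value/status arrays (variable names and the batch of four keys are illustrative only):
// Requires <array> and "rocksdb/db.h"; 'db' is an open rocksdb::DB*.
std::array<rocksdb::Slice, 4> keys{{"k0", "k1", "k2", "k3"}};
std::array<rocksdb::PinnableSlice, 4> values;  // stack storage, no std::vector
std::array<rocksdb::Status, 4> statuses;
db->MultiGet(rocksdb::ReadOptions(), db->DefaultColumnFamily(), keys.size(),
             keys.data(), values.data(), statuses.data());
for (size_t i = 0; i < keys.size(); ++i) {
  if (statuses[i].ok()) {
    // values[i] is a PinnableSlice, so the value was not copied when the
    // underlying block could be pinned.
  }
}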
|
|
|
|
|
|
|
// First check the full filter
|
|
|
|
// If the full filter is not useful, then go into each block
|
|
|
|
FullFilterKeysMayMatch(read_options, filter, &sst_file_range, no_io,
|
|
|
|
prefix_extractor);
|
|
|
|
}
|
|
|
|
if (skip_filters || !sst_file_range.empty()) {
|
|
|
|
IndexBlockIter iiter_on_stack;
|
|
|
|
// if prefix_extractor found in block differs from options, disable
|
|
|
|
// BlockPrefixIndex. Only do this check when index_type is kHashSearch.
|
|
|
|
bool need_upper_bound_check = false;
|
|
|
|
if (rep_->index_type == BlockBasedTableOptions::kHashSearch) {
|
|
|
|
need_upper_bound_check = PrefixExtractorChanged(
|
|
|
|
rep_->table_properties.get(), prefix_extractor);
|
|
|
|
}
|
|
|
|
auto iiter = NewIndexIterator(
|
|
|
|
read_options, need_upper_bound_check, &iiter_on_stack,
|
|
|
|
/* index_entry */ nullptr, sst_file_range.begin()->get_context);
|
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> iiter_unique_ptr;
|
|
|
|
if (iiter != &iiter_on_stack) {
|
|
|
|
iiter_unique_ptr.reset(iiter);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto miter = sst_file_range.begin(); miter != sst_file_range.end();
|
|
|
|
++miter) {
|
|
|
|
Status s;
|
|
|
|
GetContext* get_context = miter->get_context;
|
|
|
|
const Slice& key = miter->ikey;
|
|
|
|
bool matched = false;  // whether this user key matched a key in the SST file
|
|
|
|
bool done = false;
|
|
|
|
for (iiter->Seek(key); iiter->Valid() && !done; iiter->Next()) {
|
|
|
|
DataBlockIter biter;
|
|
|
|
NewDataBlockIterator<DataBlockIter>(
|
|
|
|
rep_, read_options, iiter->value(), &biter, false,
|
|
|
|
true /* key_includes_seq */, get_context);
|
|
|
|
|
|
|
|
if (read_options.read_tier == kBlockCacheTier &&
|
|
|
|
biter.status().IsIncomplete()) {
|
|
|
|
// couldn't get block from block_cache
|
|
|
|
// Update Saver.state to Found because we are only looking for
|
|
|
|
// whether we can guarantee the key is not there when "no_io" is set
|
|
|
|
get_context->MarkKeyMayExist();
|
|
|
|
break;
|
|
|
|
}
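// (Illustrative aside, not from the original source: the cache-only case
//  handled above corresponds to a caller setting
//    rocksdb::ReadOptions ro;
//    ro.read_tier = rocksdb::kBlockCacheTier;  // memtable/block cache only
//  in which case a lookup that would require file IO reports
//  Status::Incomplete() instead of a definitive found/not-found answer.)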
|
|
|
|
if (!biter.status().ok()) {
|
|
|
|
s = biter.status();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
bool may_exist = biter.SeekForGet(key);
|
|
|
|
if (!may_exist) {
|
|
|
|
// HashSeek cannot find the key in this block and the iter is not at
|
|
|
|
// the end of the block, i.e. the key cannot be in the following blocks
|
|
|
|
// either. In this case, the seek_key cannot be found, so we break
|
|
|
|
// from the top level for-loop.
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Call the *saver function on each entry/block until it returns false
|
|
|
|
for (; biter.Valid(); biter.Next()) {
|
|
|
|
ParsedInternalKey parsed_key;
|
|
|
|
if (!ParseInternalKey(biter.key(), &parsed_key)) {
|
|
|
|
s = Status::Corruption(Slice());
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!get_context->SaveValue(
|
|
|
|
parsed_key, biter.value(), &matched,
|
|
|
|
biter.IsValuePinned() ? &biter : nullptr)) {
|
|
|
|
done = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s = biter.status();
|
|
|
|
if (done) {
|
|
|
|
// Avoid the extra Next which is expensive in two-level indexes
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (matched && filter != nullptr && !filter->IsBlockBased()) {
|
|
|
|
RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_FULL_TRUE_POSITIVE);
|
|
|
|
PERF_COUNTER_BY_LEVEL_ADD(bloom_filter_full_true_positive, 1,
|
|
|
|
rep_->level);
|
|
|
|
}
|
|
|
|
if (s.ok()) {
|
|
|
|
s = iiter->status();
|
|
|
|
}
|
|
|
|
*(miter->s) = s;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-03 02:07:03 +01:00
|
|
|
Status BlockBasedTable::Prefetch(const Slice* const begin,
|
|
|
|
const Slice* const end) {
|
|
|
|
auto& comparator = rep_->internal_comparator;
|
2018-05-26 03:41:31 +02:00
|
|
|
auto user_comparator = comparator.user_comparator();
|
2015-03-03 02:07:03 +01:00
|
|
|
// pre-condition
|
|
|
|
if (begin && end && comparator.Compare(*begin, *end) > 0) {
|
|
|
|
return Status::InvalidArgument(*begin, *end);
|
|
|
|
}
|
|
|
|
|
2018-07-13 02:19:57 +02:00
|
|
|
IndexBlockIter iiter_on_stack;
|
2018-05-21 23:33:55 +02:00
|
|
|
auto iiter = NewIndexIterator(ReadOptions(), false, &iiter_on_stack);
|
2018-08-10 01:49:45 +02:00
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> iiter_unique_ptr;
|
2017-02-07 01:29:29 +01:00
|
|
|
if (iiter != &iiter_on_stack) {
|
2018-08-10 01:49:45 +02:00
|
|
|
iiter_unique_ptr =
|
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>>(iiter);
|
2017-02-07 01:29:29 +01:00
|
|
|
}
|
2015-03-03 02:07:03 +01:00
|
|
|
|
2017-02-07 01:29:29 +01:00
|
|
|
if (!iiter->status().ok()) {
|
2015-03-03 02:07:03 +01:00
|
|
|
// error opening index iterator
|
2017-02-07 01:29:29 +01:00
|
|
|
return iiter->status();
|
2015-03-03 02:07:03 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// indicates if we are on the last page that needs to be pre-fetched
|
|
|
|
bool prefetching_boundary_page = false;
|
|
|
|
|
2017-02-07 01:29:29 +01:00
|
|
|
for (begin ? iiter->Seek(*begin) : iiter->SeekToFirst(); iiter->Valid();
|
|
|
|
iiter->Next()) {
|
2018-08-10 01:49:45 +02:00
|
|
|
BlockHandle block_handle = iiter->value();
|
2018-05-29 21:09:01 +02:00
|
|
|
const bool is_user_key = rep_->table_properties &&
|
|
|
|
rep_->table_properties->index_key_is_user_key > 0;
|
2018-05-26 03:41:31 +02:00
|
|
|
if (end &&
|
|
|
|
((!is_user_key && comparator.Compare(iiter->key(), *end) >= 0) ||
|
|
|
|
(is_user_key &&
|
|
|
|
user_comparator->Compare(iiter->key(), ExtractUserKey(*end)) >= 0))) {
|
2015-03-03 02:07:03 +01:00
|
|
|
if (prefetching_boundary_page) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
// The index entry represents the last key in the data block.
|
|
|
|
// We should load this page into memory as well, but no more pages after it.
|
|
|
|
prefetching_boundary_page = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Load the block specified by the block_handle into the block cache
|
2018-07-13 02:19:57 +02:00
|
|
|
DataBlockIter biter;
|
|
|
|
NewDataBlockIterator<DataBlockIter>(rep_, ReadOptions(), block_handle,
|
|
|
|
&biter);
|
2015-03-03 02:07:03 +01:00
|
|
|
|
|
|
|
if (!biter.status().ok()) {
|
|
|
|
// there was an unexpected error while pre-fetching
|
|
|
|
return biter.status();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2017-08-10 00:49:40 +02:00
|
|
|
Status BlockBasedTable::VerifyChecksum() {
|
|
|
|
Status s;
|
|
|
|
// Check Meta blocks
|
|
|
|
std::unique_ptr<Block> meta;
|
|
|
|
std::unique_ptr<InternalIterator> meta_iter;
|
2017-08-11 20:59:13 +02:00
|
|
|
s = ReadMetaBlock(rep_, nullptr /* prefetch buffer */, &meta, &meta_iter);
|
2017-08-10 00:49:40 +02:00
|
|
|
if (s.ok()) {
|
2019-03-26 18:15:43 +01:00
|
|
|
s = VerifyChecksumInMetaBlocks(meta_iter.get());
|
2017-08-10 00:49:40 +02:00
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
// Check Data blocks
|
2018-07-13 02:19:57 +02:00
|
|
|
IndexBlockIter iiter_on_stack;
|
2018-08-10 01:49:45 +02:00
|
|
|
InternalIteratorBase<BlockHandle>* iiter =
|
2018-05-21 23:33:55 +02:00
|
|
|
NewIndexIterator(ReadOptions(), false, &iiter_on_stack);
|
2018-08-10 01:49:45 +02:00
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> iiter_unique_ptr;
|
2017-08-10 00:49:40 +02:00
|
|
|
if (iiter != &iiter_on_stack) {
|
2018-08-10 01:49:45 +02:00
|
|
|
iiter_unique_ptr =
|
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>>(iiter);
|
2017-08-10 00:49:40 +02:00
|
|
|
}
|
|
|
|
if (!iiter->status().ok()) {
|
|
|
|
// error opening index iterator
|
|
|
|
return iiter->status();
|
|
|
|
}
|
|
|
|
s = VerifyChecksumInBlocks(iiter);
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
Status BlockBasedTable::VerifyChecksumInBlocks(
|
|
|
|
InternalIteratorBase<BlockHandle>* index_iter) {
|
2017-08-10 00:49:40 +02:00
|
|
|
Status s;
|
|
|
|
for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
|
|
|
|
s = index_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
2018-08-10 01:49:45 +02:00
|
|
|
BlockHandle handle = index_iter->value();
|
|
|
|
BlockContents contents;
|
2018-11-29 02:58:08 +01:00
|
|
|
BlockFetcher block_fetcher(
|
|
|
|
rep_->file.get(), nullptr /* prefetch buffer */, rep_->footer,
|
|
|
|
ReadOptions(), handle, &contents, rep_->ioptions,
|
|
|
|
false /* decompress */, false /*maybe_compressed*/,
|
2019-01-24 03:11:08 +01:00
|
|
|
UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
|
2018-08-10 01:49:45 +02:00
|
|
|
s = block_fetcher.ReadBlockContents();
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2019-03-26 18:15:43 +01:00
|
|
|
Status BlockBasedTable::VerifyChecksumInMetaBlocks(
|
2018-08-10 01:49:45 +02:00
|
|
|
InternalIteratorBase<Slice>* index_iter) {
|
|
|
|
Status s;
|
|
|
|
for (index_iter->SeekToFirst(); index_iter->Valid(); index_iter->Next()) {
|
|
|
|
s = index_iter->status();
|
2017-08-10 00:49:40 +02:00
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
2018-08-10 01:49:45 +02:00
|
|
|
BlockHandle handle;
|
|
|
|
Slice input = index_iter->value();
|
|
|
|
s = handle.DecodeFrom(&input);
|
2017-08-10 00:49:40 +02:00
|
|
|
BlockContents contents;
|
2018-11-29 02:58:08 +01:00
|
|
|
BlockFetcher block_fetcher(
|
|
|
|
rep_->file.get(), nullptr /* prefetch buffer */, rep_->footer,
|
|
|
|
ReadOptions(), handle, &contents, rep_->ioptions,
|
|
|
|
false /* decompress */, false /*maybe_compressed*/,
|
2019-01-24 03:11:08 +01:00
|
|
|
UncompressionDict::GetEmptyDict(), rep_->persistent_cache_options);
|
2017-12-12 00:16:37 +01:00
|
|
|
s = block_fetcher.ReadBlockContents();
|
2019-03-26 18:15:43 +01:00
|
|
|
if (s.IsCorruption() && index_iter->key() == kPropertiesBlock) {
|
|
|
|
TableProperties* table_properties;
|
|
|
|
s = TryReadPropertiesWithGlobalSeqno(rep_, nullptr /* prefetch_buffer */,
|
|
|
|
index_iter->value(),
|
|
|
|
&table_properties);
|
|
|
|
delete table_properties;
|
|
|
|
}
|
2017-08-10 00:49:40 +02:00
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2013-10-29 01:54:09 +01:00
|
|
|
bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
|
|
|
|
const Slice& key) {
|
2018-08-10 01:49:45 +02:00
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> iiter(
|
|
|
|
NewIndexIterator(options));
|
2014-06-20 10:23:02 +02:00
|
|
|
iiter->Seek(key);
|
|
|
|
assert(iiter->Valid());
|
|
|
|
CachableEntry<Block> block;
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
BlockHandle handle = iiter->value();
|
2014-08-25 23:22:05 +02:00
|
|
|
Cache* block_cache = rep_->table_options.block_cache.get();
|
2014-06-20 10:23:02 +02:00
|
|
|
assert(block_cache != nullptr);
|
|
|
|
|
|
|
|
char cache_key_storage[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
|
|
|
Slice cache_key =
|
2018-02-13 01:57:56 +01:00
|
|
|
GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size, handle,
|
|
|
|
cache_key_storage);
|
2014-06-20 10:23:02 +02:00
|
|
|
Slice ckey;
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
Status s;
|
2019-01-24 03:11:08 +01:00
|
|
|
if (!rep_->compression_dict_handle.IsNull()) {
|
|
|
|
std::unique_ptr<const BlockContents> compression_dict_block;
|
|
|
|
s = ReadCompressionDictBlock(rep_, nullptr /* prefetch_buffer */,
|
|
|
|
&compression_dict_block);
|
|
|
|
if (s.ok()) {
|
|
|
|
assert(compression_dict_block != nullptr);
|
|
|
|
UncompressionDict uncompression_dict(
|
|
|
|
compression_dict_block->data.ToString(),
|
|
|
|
rep_->blocks_definitely_zstd_compressed);
|
|
|
|
s = GetDataBlockFromCache(cache_key, ckey, block_cache, nullptr, rep_,
|
|
|
|
options, &block, uncompression_dict,
|
|
|
|
0 /* read_amp_bytes_per_bit */);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
s = GetDataBlockFromCache(
|
|
|
|
cache_key, ckey, block_cache, nullptr, rep_, options, &block,
|
|
|
|
UncompressionDict::GetEmptyDict(), 0 /* read_amp_bytes_per_bit */);
|
|
|
|
}
|
2014-06-20 10:23:02 +02:00
|
|
|
assert(s.ok());
|
2019-05-10 20:53:33 +02:00
|
|
|
return block.IsCached();
|
2013-02-01 00:20:24 +01:00
|
|
|
}
|
2012-04-17 17:36:46 +02:00
|
|
|
|
2018-06-23 00:14:05 +02:00
|
|
|
BlockBasedTableOptions::IndexType BlockBasedTable::UpdateIndexType() {
|
2014-03-01 03:19:07 +01:00
|
|
|
// Some old versions of block-based tables don't have the index type present in
|
|
|
|
// the table properties. If that's the case, we can safely use kBinarySearch.
|
2018-06-23 00:14:05 +02:00
|
|
|
BlockBasedTableOptions::IndexType index_type_on_file =
|
|
|
|
BlockBasedTableOptions::kBinarySearch;
|
Use a different approach to make sure BlockBasedTableReader can use hash index on older files
Summary:
A recent commit https://github.com/facebook/rocksdb/commit/e37dd216f9384bfdabc6760fa296e8ee28c79d30 makes sure the hash index can be used when reading existing files. This patch uses another way to achieve the same goal:
(1) Currently, always write kBinarySearch to files, regardless of the BlockBasedTableOptions.IndexType setting.
(2) When reading a file, read out the field and make sure it is kBinarySearch, while always using the index type chosen by the user.
The reason for doing this is to reserve the kHashSearch property on disk for the future. If we wrote out a binary index for both kHashSearch and kBinarySearch now, we would have to use a new flag for the on-disk hash index later, otherwise compatibility would break. Also, we want the real index type and the type shown in the properties block to be consistent.
Test Plan: make all check
Reviewers: haobo, kailiu
Reviewed By: kailiu
CC: igor, ljin, yhchiang, xjin, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D18009
2014-04-18 03:00:58 +02:00
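For context, a hedged sketch of how a user opts into the hash index that this on-disk property describes (public option names from include/rocksdb/table.h; the prefix length 8 is just an example):
rocksdb::BlockBasedTableOptions table_options;
table_options.index_type = rocksdb::BlockBasedTableOptions::kHashSearch;
rocksdb::Options options;
options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_options));
// kHashSearch only takes effect with a prefix extractor configured.
options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(8));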
|
|
|
if (rep_->table_properties) {
|
|
|
|
auto& props = rep_->table_properties->user_collected_properties;
|
|
|
|
auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
|
|
|
|
if (pos != props.end()) {
|
|
|
|
index_type_on_file = static_cast<BlockBasedTableOptions::IndexType>(
|
|
|
|
DecodeFixed32(pos->second.c_str()));
|
2018-05-21 23:33:55 +02:00
|
|
|
// update index_type with the true type
|
|
|
|
rep_->index_type = index_type_on_file;
|
Use a different approach to make sure BlockBasedTableReader can use hash index on older files
Summary:
A recent commit https://github.com/facebook/rocksdb/commit/e37dd216f9384bfdabc6760fa296e8ee28c79d30 makes sure the hash index can be used when reading existing files. This patch uses another way to achieve the same goal:
(1) Currently, always write kBinarySearch to files, regardless of the BlockBasedTableOptions.IndexType setting.
(2) When reading a file, read out the field and make sure it is kBinarySearch, while always using the index type chosen by the user.
The reason for doing this is to reserve the kHashSearch property on disk for the future. If we wrote out a binary index for both kHashSearch and kBinarySearch now, we would have to use a new flag for the on-disk hash index later, otherwise compatibility would break. Also, we want the real index type and the type shown in the properties block to be consistent.
Test Plan: make all check
Reviewers: haobo, kailiu
Reviewed By: kailiu
CC: igor, ljin, yhchiang, xjin, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D18009
2014-04-18 03:00:58 +02:00
|
|
|
}
|
|
|
|
}
|
2018-06-23 00:14:05 +02:00
|
|
|
return index_type_on_file;
|
|
|
|
}
|
|
|
|
|
|
|
|
// REQUIRES: The following fields of rep_ should have already been populated:
|
|
|
|
// 1. file
|
|
|
|
// 2. index_handle,
|
|
|
|
// 3. options
|
|
|
|
// 4. internal_comparator
|
|
|
|
// 5. index_type
|
|
|
|
Status BlockBasedTable::CreateIndexReader(
|
|
|
|
FilePrefetchBuffer* prefetch_buffer, IndexReader** index_reader,
|
|
|
|
InternalIterator* preloaded_meta_index_iter, int level) {
|
2019-05-08 05:17:48 +02:00
|
|
|
auto index_type_on_file = rep_->index_type;
|
Use a different approach to make sure BlockBasedTableReader can use hash index on older files
Summary:
A recent commit https://github.com/facebook/rocksdb/commit/e37dd216f9384bfdabc6760fa296e8ee28c79d30 makes sure the hash index can be used when reading existing files. This patch uses another way to achieve the same goal:
(1) Currently, always write kBinarySearch to files, regardless of the BlockBasedTableOptions.IndexType setting.
(2) When reading a file, read out the field and make sure it is kBinarySearch, while always using the index type chosen by the user.
The reason for doing this is to reserve the kHashSearch property on disk for the future. If we wrote out a binary index for both kHashSearch and kBinarySearch now, we would have to use a new flag for the on-disk hash index later, otherwise compatibility would break. Also, we want the real index type and the type shown in the properties block to be consistent.
Test Plan: make all check
Reviewers: haobo, kailiu
Reviewed By: kailiu
CC: igor, ljin, yhchiang, xjin, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D18009
2014-04-18 03:00:58 +02:00
|
|
|
|
2014-04-10 23:19:43 +02:00
|
|
|
auto file = rep_->file.get();
|
2017-05-06 00:01:04 +02:00
|
|
|
const InternalKeyComparator* icomparator = &rep_->internal_comparator;
|
2014-05-01 20:09:32 +02:00
|
|
|
const Footer& footer = rep_->footer;
|
2018-05-21 23:33:55 +02:00
|
|
|
|
|
|
|
// kHashSearch requires non-empty prefix_extractor but bypass checking
|
|
|
|
// prefix_extractor here since we have no access to MutableCFOptions.
|
2018-06-27 00:56:26 +02:00
|
|
|
// Add need_upper_bound_check flag in BlockBasedTable::NewIndexIterator.
|
2018-05-21 23:33:55 +02:00
|
|
|
// If prefix_extractor does not match prefix_extractor_name from table
|
|
|
|
// properties, turn off Hash Index by setting total_order_seek to true
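// (Illustrative aside: this is the same effect a caller gets by explicitly
//  setting
//    ReadOptions ro;
//    ro.total_order_seek = true;
//  which bypasses the prefix hash index and seeks in total order.)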
|
2014-06-20 00:32:31 +02:00
|
|
|
|
2014-05-15 23:09:03 +02:00
|
|
|
switch (index_type_on_file) {
|
2017-02-07 01:29:29 +01:00
|
|
|
case BlockBasedTableOptions::kTwoLevelIndexSearch: {
|
|
|
|
return PartitionIndexReader::Create(
|
2017-08-11 20:59:13 +02:00
|
|
|
this, file, prefetch_buffer, footer, footer.index_handle(),
|
|
|
|
rep_->ioptions, icomparator, index_reader,
|
2018-05-26 03:41:31 +02:00
|
|
|
rep_->persistent_cache_options, level,
|
2018-05-29 21:09:01 +02:00
|
|
|
rep_->table_properties == nullptr ||
|
2018-08-10 01:49:45 +02:00
|
|
|
rep_->table_properties->index_key_is_user_key == 0,
|
|
|
|
rep_->table_properties == nullptr ||
|
2018-11-29 02:58:08 +01:00
|
|
|
rep_->table_properties->index_value_is_delta_encoded == 0,
|
|
|
|
GetMemoryAllocator(rep_->table_options));
|
2017-02-07 01:29:29 +01:00
|
|
|
}
|
2014-03-01 03:19:07 +01:00
|
|
|
case BlockBasedTableOptions::kBinarySearch: {
|
2014-05-01 20:09:32 +02:00
|
|
|
return BinarySearchIndexReader::Create(
|
2017-08-11 20:59:13 +02:00
|
|
|
file, prefetch_buffer, footer, footer.index_handle(), rep_->ioptions,
|
2018-05-26 03:41:31 +02:00
|
|
|
icomparator, index_reader, rep_->persistent_cache_options,
|
2018-05-29 21:09:01 +02:00
|
|
|
rep_->table_properties == nullptr ||
|
2018-08-10 01:49:45 +02:00
|
|
|
rep_->table_properties->index_key_is_user_key == 0,
|
|
|
|
rep_->table_properties == nullptr ||
|
2018-11-29 02:58:08 +01:00
|
|
|
rep_->table_properties->index_value_is_delta_encoded == 0,
|
|
|
|
GetMemoryAllocator(rep_->table_options));
|
2014-04-10 23:19:43 +02:00
|
|
|
}
|
|
|
|
case BlockBasedTableOptions::kHashSearch: {
|
2014-05-15 23:09:03 +02:00
|
|
|
std::unique_ptr<Block> meta_guard;
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<InternalIterator> meta_iter_guard;
|
2014-05-15 23:09:03 +02:00
|
|
|
auto meta_index_iter = preloaded_meta_index_iter;
|
|
|
|
if (meta_index_iter == nullptr) {
|
2017-08-11 20:59:13 +02:00
|
|
|
auto s =
|
|
|
|
ReadMetaBlock(rep_, prefetch_buffer, &meta_guard, &meta_iter_guard);
|
2014-05-15 23:09:03 +02:00
|
|
|
if (!s.ok()) {
|
2014-06-20 00:32:31 +02:00
|
|
|
// we simply fall back to binary search in case there is any
|
|
|
|
// problem with prefix hash index loading.
|
2017-03-16 03:22:52 +01:00
|
|
|
ROCKS_LOG_WARN(rep_->ioptions.info_log,
|
|
|
|
"Unable to read the metaindex block."
|
|
|
|
" Fall back to binary search index.");
|
2014-06-20 00:32:31 +02:00
|
|
|
return BinarySearchIndexReader::Create(
|
2017-08-11 20:59:13 +02:00
|
|
|
file, prefetch_buffer, footer, footer.index_handle(),
|
|
|
|
rep_->ioptions, icomparator, index_reader,
|
2018-05-26 03:41:31 +02:00
|
|
|
rep_->persistent_cache_options,
|
2018-05-29 21:09:01 +02:00
|
|
|
rep_->table_properties == nullptr ||
|
2018-08-10 01:49:45 +02:00
|
|
|
rep_->table_properties->index_key_is_user_key == 0,
|
|
|
|
rep_->table_properties == nullptr ||
|
2018-11-29 02:58:08 +01:00
|
|
|
rep_->table_properties->index_value_is_delta_encoded == 0,
|
|
|
|
GetMemoryAllocator(rep_->table_options));
|
2014-05-15 23:09:03 +02:00
|
|
|
}
|
|
|
|
meta_index_iter = meta_iter_guard.get();
|
|
|
|
}
|
|
|
|
|
2014-04-10 23:19:43 +02:00
|
|
|
return HashIndexReader::Create(
|
2017-08-11 20:59:13 +02:00
|
|
|
rep_->internal_prefix_transform.get(), footer, file, prefetch_buffer,
|
|
|
|
rep_->ioptions, icomparator, footer.index_handle(), meta_index_iter,
|
|
|
|
index_reader, rep_->hash_index_allow_collision,
|
2018-05-26 03:41:31 +02:00
|
|
|
rep_->persistent_cache_options,
|
2018-05-29 21:09:01 +02:00
|
|
|
rep_->table_properties == nullptr ||
|
2018-08-10 01:49:45 +02:00
|
|
|
rep_->table_properties->index_key_is_user_key == 0,
|
|
|
|
rep_->table_properties == nullptr ||
|
2018-11-29 02:58:08 +01:00
|
|
|
rep_->table_properties->index_value_is_delta_encoded == 0,
|
|
|
|
GetMemoryAllocator(rep_->table_options));
|
2014-03-01 03:19:07 +01:00
|
|
|
}
|
|
|
|
default: {
|
|
|
|
std::string error_message =
|
2017-06-13 19:59:22 +02:00
|
|
|
"Unrecognized index type: " + ToString(index_type_on_file);
|
2014-03-02 08:40:08 +01:00
|
|
|
return Status::InvalidArgument(error_message.c_str());
|
2014-03-01 03:19:07 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-10-29 01:54:09 +01:00
|
|
|
uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key) {
|
2018-11-09 20:17:34 +01:00
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> index_iter(
|
2018-08-10 01:49:45 +02:00
|
|
|
NewIndexIterator(ReadOptions()));
|
2013-11-13 07:46:51 +01:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
index_iter->Seek(key);
|
|
|
|
uint64_t result;
|
|
|
|
if (index_iter->Valid()) {
|
2018-08-10 01:49:45 +02:00
|
|
|
BlockHandle handle = index_iter->value();
|
|
|
|
result = handle.offset();
|
2011-03-18 23:37:00 +01:00
|
|
|
} else {
|
2014-03-01 05:37:32 +01:00
|
|
|
// key is past the last key in the file. If table_properties is not
|
|
|
|
// available, approximate the offset by returning the offset of the
|
|
|
|
// metaindex block (which is right near the end of the file).
|
RocksDB 2.8 to be able to read files generated by 2.6
Summary:
From 2.6 to 2.7, the property block name was renamed from rocksdb.stats to rocksdb.properties, so older properties could not be loaded. In 2.8, we seem to have added some logic that uses the property block without checking null pointers, which creates segmentation faults.
In this patch, we fix it by:
(1) try rocksdb.stats if rocksdb.properties is not found
(2) add some null checking before consuming rep->table_properties
Test Plan: make sure a file generated in 2.7 that couldn't be opened before can now be opened.
Reviewers: haobo, igor, yhchiang
Reviewed By: igor
CC: ljin, xjin, dhruba, kailiu, leveldb
Differential Revision: https://reviews.facebook.net/D17961
2014-04-17 04:30:33 +02:00
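A minimal sketch of the (1) fallback described above, assuming a metaindex iterator meta_iter and the kPropertiesBlock / kPropertiesBlockOldName ("rocksdb.stats") constants and FindMetaBlock() helper from table/meta_blocks.h:
BlockHandle props_handle;
Status ps = FindMetaBlock(meta_iter.get(), kPropertiesBlock, &props_handle);
if (!ps.ok()) {
  // Fall back to the pre-2.7 property block name used by older files.
  ps = FindMetaBlock(meta_iter.get(), kPropertiesBlockOldName, &props_handle);
}
// (2) Null-check before consuming the properties, as the code below does with
// rep_->table_properties.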
|
|
|
result = 0;
|
|
|
|
if (rep_->table_properties) {
|
|
|
|
result = rep_->table_properties->data_size;
|
|
|
|
}
|
2014-03-01 05:37:32 +01:00
|
|
|
// table_properties is not present in the table.
|
|
|
|
if (result == 0) {
|
2014-05-01 20:09:32 +02:00
|
|
|
result = rep_->footer.metaindex_handle().offset();
|
2014-03-01 05:37:32 +01:00
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2014-02-20 00:38:57 +01:00
|
|
|
bool BlockBasedTable::TEST_filter_block_preloaded() const {
|
|
|
|
return rep_->filter != nullptr;
|
|
|
|
}
|
|
|
|
|
2014-03-01 03:19:07 +01:00
|
|
|
bool BlockBasedTable::TEST_index_reader_preloaded() const {
|
|
|
|
return rep_->index_reader != nullptr;
|
2014-02-20 00:38:57 +01:00
|
|
|
}
|
|
|
|
|
2016-08-01 23:50:19 +02:00
|
|
|
Status BlockBasedTable::GetKVPairsFromDataBlocks(
|
|
|
|
std::vector<KVPairBlock>* kv_pair_blocks) {
|
2018-08-10 01:49:45 +02:00
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> blockhandles_iter(
|
2016-08-01 23:50:19 +02:00
|
|
|
NewIndexIterator(ReadOptions()));
|
|
|
|
|
|
|
|
Status s = blockhandles_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
// Cannot read Index Block
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
|
|
|
|
blockhandles_iter->Next()) {
|
|
|
|
s = blockhandles_iter->status();
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
std::unique_ptr<InternalIterator> datablock_iter;
|
2018-07-13 02:19:57 +02:00
|
|
|
datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
|
|
|
|
rep_, ReadOptions(), blockhandles_iter->value()));
|
2016-08-01 23:50:19 +02:00
|
|
|
s = datablock_iter->status();
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
|
|
|
// Error reading the block - Skipped
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
KVPairBlock kv_pair_block;
|
|
|
|
for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
|
|
|
|
datablock_iter->Next()) {
|
|
|
|
s = datablock_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
// Error reading the block - Skipped
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
const Slice& key = datablock_iter->key();
|
|
|
|
const Slice& value = datablock_iter->value();
|
|
|
|
std::string key_copy = std::string(key.data(), key.size());
|
|
|
|
std::string value_copy = std::string(value.data(), value.size());
|
|
|
|
|
|
|
|
kv_pair_block.push_back(
|
|
|
|
std::make_pair(std::move(key_copy), std::move(value_copy)));
|
|
|
|
}
|
|
|
|
kv_pair_blocks->push_back(std::move(kv_pair_block));
|
|
|
|
}
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2018-05-21 23:33:55 +02:00
|
|
|
Status BlockBasedTable::DumpTable(WritableFile* out_file,
|
|
|
|
const SliceTransform* prefix_extractor) {
|
2014-12-23 22:24:07 +01:00
|
|
|
// Output Footer
|
|
|
|
out_file->Append(
|
|
|
|
"Footer Details:\n"
|
|
|
|
"--------------------------------------\n"
|
|
|
|
" ");
|
|
|
|
out_file->Append(rep_->footer.ToString().c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
|
|
|
|
// Output MetaIndex
|
|
|
|
out_file->Append(
|
|
|
|
"Metaindex Details:\n"
|
|
|
|
"--------------------------------------\n");
|
|
|
|
std::unique_ptr<Block> meta;
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<InternalIterator> meta_iter;
|
2017-08-11 20:59:13 +02:00
|
|
|
Status s =
|
|
|
|
ReadMetaBlock(rep_, nullptr /* prefetch_buffer */, &meta, &meta_iter);
|
2014-12-23 22:24:07 +01:00
|
|
|
if (s.ok()) {
|
|
|
|
for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) {
|
|
|
|
s = meta_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
if (meta_iter->key() == rocksdb::kPropertiesBlock) {
|
|
|
|
out_file->Append(" Properties block handle: ");
|
|
|
|
out_file->Append(meta_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
Shared dictionary compression using reference block
Summary:
This adds a new metablock containing a shared dictionary that is used
to compress all data blocks in the SST file. The size of the shared dictionary
is configurable in CompressionOptions and defaults to 0. It's currently only
used for zlib/lz4/lz4hc, but the block will be stored in the SST regardless of
the compression type if the user chooses a nonzero dictionary size.
During compaction, the dictionary is computed by randomly sampling the first
output file in each subcompaction. The intervals to sample are pre-computed
by assuming the output file will have the maximum allowable length. In case
the file is smaller, some of the pre-computed sampling intervals can be beyond
end-of-file, in which case we skip over those samples and the dictionary will
be a bit smaller. After the dictionary is generated using the first file in a
subcompaction, it is loaded into the compression library before writing each
block in each subsequent file of that subcompaction.
On the read path, the dictionary is read from the metablock, if it exists, and then
loaded into the compression library before reading each block.
Test Plan: new unit test
Reviewers: yhchiang, IslamAbdelRahman, cyan, sdong
Reviewed By: sdong
Subscribers: andrewkr, yoshinorim, kradhakrishnan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D52287
2016-04-28 02:36:03 +02:00
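A hedged configuration sketch for enabling the shared dictionary described above (CompressionOptions::max_dict_bytes is the size knob; 16 KB is an arbitrary example):
rocksdb::Options options;
options.compression = rocksdb::kZlibCompression;      // dictionary applies to zlib/lz4/lz4hc
options.compression_opts.max_dict_bytes = 16 * 1024;  // 0 (the default) disables the dictionary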
|
|
|
} else if (meta_iter->key() == rocksdb::kCompressionDictBlock) {
|
|
|
|
out_file->Append(" Compression dictionary block handle: ");
|
|
|
|
out_file->Append(meta_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
2014-12-23 22:24:07 +01:00
|
|
|
} else if (strstr(meta_iter->key().ToString().c_str(),
|
|
|
|
"filter.rocksdb.") != nullptr) {
|
|
|
|
out_file->Append(" Filter block handle: ");
|
|
|
|
out_file->Append(meta_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
2016-11-12 18:23:05 +01:00
|
|
|
} else if (meta_iter->key() == rocksdb::kRangeDelBlock) {
|
|
|
|
out_file->Append(" Range deletion block handle: ");
|
|
|
|
out_file->Append(meta_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
2014-12-23 22:24:07 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
out_file->Append("\n");
|
|
|
|
} else {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Output TableProperties
|
|
|
|
const rocksdb::TableProperties* table_properties;
|
|
|
|
table_properties = rep_->table_properties.get();
|
|
|
|
|
|
|
|
if (table_properties != nullptr) {
|
|
|
|
out_file->Append(
|
|
|
|
"Table Properties:\n"
|
|
|
|
"--------------------------------------\n"
|
|
|
|
" ");
|
|
|
|
out_file->Append(table_properties->ToString("\n ", ": ").c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
|
2018-08-21 01:46:45 +02:00
|
|
|
// Output Filter blocks
|
|
|
|
if (!rep_->filter && !table_properties->filter_policy_name.empty()) {
|
|
|
|
// Only BloomFilter is supported as of now
|
|
|
|
rocksdb::BlockBasedTableOptions table_options;
|
|
|
|
table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(1));
|
|
|
|
if (table_properties->filter_policy_name.compare(
|
|
|
|
table_options.filter_policy->Name()) == 0) {
|
|
|
|
std::string filter_block_key = kFilterBlockPrefix;
|
|
|
|
filter_block_key.append(table_properties->filter_policy_name);
|
|
|
|
BlockHandle handle;
|
|
|
|
if (FindMetaBlock(meta_iter.get(), filter_block_key, &handle).ok()) {
|
|
|
|
BlockContents block;
|
|
|
|
BlockFetcher block_fetcher(
|
|
|
|
rep_->file.get(), nullptr /* prefetch_buffer */, rep_->footer,
|
|
|
|
ReadOptions(), handle, &block, rep_->ioptions,
|
2018-11-29 02:58:08 +01:00
|
|
|
false /*decompress*/, false /*maybe_compressed*/,
|
2019-01-24 03:11:08 +01:00
|
|
|
UncompressionDict::GetEmptyDict(),
|
2018-11-29 02:58:08 +01:00
|
|
|
rep_->persistent_cache_options);
|
2018-08-21 01:46:45 +02:00
|
|
|
s = block_fetcher.ReadBlockContents();
|
|
|
|
if (s.ok()) {
|
|
|
|
rep_->filter.reset(new BlockBasedFilterBlockReader(
|
|
|
|
prefix_extractor, table_options,
|
|
|
|
table_options.whole_key_filtering, std::move(block),
|
|
|
|
rep_->ioptions.statistics));
|
|
|
|
}
|
2014-12-23 22:24:07 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (rep_->filter) {
|
|
|
|
out_file->Append(
|
|
|
|
"Filter Details:\n"
|
|
|
|
"--------------------------------------\n"
|
|
|
|
" ");
|
|
|
|
out_file->Append(rep_->filter->ToString().c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Output Index block
|
|
|
|
s = DumpIndexBlock(out_file);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
2017-02-03 21:37:02 +01:00
|
|
|
|
|
|
|
// Output compression dictionary
|
2019-01-24 03:11:08 +01:00
|
|
|
if (!rep_->compression_dict_handle.IsNull()) {
|
|
|
|
std::unique_ptr<const BlockContents> compression_dict_block;
|
|
|
|
s = ReadCompressionDictBlock(rep_, nullptr /* prefetch_buffer */,
|
|
|
|
&compression_dict_block);
|
|
|
|
if (!s.ok()) {
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
assert(compression_dict_block != nullptr);
|
|
|
|
auto compression_dict = compression_dict_block->data;
|
2017-02-03 21:37:02 +01:00
|
|
|
out_file->Append(
|
|
|
|
"Compression Dictionary:\n"
|
|
|
|
"--------------------------------------\n");
|
|
|
|
out_file->Append(" size (bytes): ");
|
|
|
|
out_file->Append(rocksdb::ToString(compression_dict.size()));
|
|
|
|
out_file->Append("\n\n");
|
|
|
|
out_file->Append(" HEX ");
|
|
|
|
out_file->Append(compression_dict.ToString(true).c_str());
|
|
|
|
out_file->Append("\n\n");
|
|
|
|
}
|
|
|
|
|
2016-11-12 18:23:05 +01:00
|
|
|
// Output range deletions block
|
2016-11-16 21:02:39 +01:00
|
|
|
auto* range_del_iter = NewRangeTombstoneIterator(ReadOptions());
|
2016-11-21 21:07:09 +01:00
|
|
|
if (range_del_iter != nullptr) {
|
|
|
|
range_del_iter->SeekToFirst();
|
|
|
|
if (range_del_iter->Valid()) {
|
|
|
|
out_file->Append(
|
|
|
|
"Range deletions:\n"
|
|
|
|
"--------------------------------------\n"
|
|
|
|
" ");
|
|
|
|
for (; range_del_iter->Valid(); range_del_iter->Next()) {
|
|
|
|
DumpKeyValue(range_del_iter->key(), range_del_iter->value(), out_file);
|
|
|
|
}
|
|
|
|
out_file->Append("\n");
|
2016-11-12 18:23:05 +01:00
|
|
|
}
|
2016-11-21 21:07:09 +01:00
|
|
|
delete range_del_iter;
|
2016-11-12 18:23:05 +01:00
|
|
|
}
|
2014-12-23 22:24:07 +01:00
|
|
|
// Output Data blocks
|
|
|
|
s = DumpDataBlocks(out_file);
|
|
|
|
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when an L0 block-based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead, the table reader takes ownership of them, hence pinning them, i.e. the LRU cache will never push them out. Meanwhile, further accesses in the table reader will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
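A minimal options sketch for the feature described above (both flags live in BlockBasedTableOptions; cache_index_and_filter_blocks makes the index and filter go through the block cache in the first place):
rocksdb::BlockBasedTableOptions table_options;
table_options.cache_index_and_filter_blocks = true;
table_options.pin_l0_filter_and_index_blocks_in_cache = true;  // pin L0 index/filter
rocksdb::Options options;
options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_options));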
|
|
|
void BlockBasedTable::Close() {
|
2017-07-28 05:16:25 +02:00
|
|
|
if (rep_->closed) {
|
|
|
|
return;
|
|
|
|
}
|
2019-04-05 01:18:07 +02:00
|
|
|
|
|
|
|
Cache* const cache = rep_->table_options.block_cache.get();
|
|
|
|
|
|
|
|
// cleanup index, filter, and compression dictionary blocks
|
|
|
|
// to avoid accessing dangling pointers
|
2017-05-02 22:39:09 +02:00
|
|
|
if (!rep_->table_options.no_block_cache) {
|
|
|
|
char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
|
2019-04-05 01:18:07 +02:00
|
|
|
|
2017-05-02 22:39:09 +02:00
|
|
|
// Get the filter block key
|
|
|
|
auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
|
2017-05-06 05:10:56 +02:00
|
|
|
rep_->filter_handle, cache_key);
|
2019-04-05 01:18:07 +02:00
|
|
|
cache->Erase(key);
|
|
|
|
|
2017-05-02 22:39:09 +02:00
|
|
|
// Get the index block key
|
|
|
|
key = GetCacheKeyFromOffset(rep_->cache_key_prefix,
|
|
|
|
rep_->cache_key_prefix_size,
|
|
|
|
rep_->dummy_index_reader_offset, cache_key);
|
2019-04-05 01:18:07 +02:00
|
|
|
cache->Erase(key);
|
|
|
|
|
|
|
|
if (!rep_->compression_dict_handle.IsNull()) {
|
|
|
|
// Get the compression dictionary block key
|
|
|
|
key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
|
|
|
|
rep_->compression_dict_handle, cache_key);
|
|
|
|
cache->Erase(key);
|
|
|
|
}
|
2016-06-03 19:47:47 +02:00
|
|
|
}
|
2019-04-05 01:18:07 +02:00
|
|
|
|
2017-07-28 05:16:25 +02:00
|
|
|
rep_->closed = true;
|
Adding pin_l0_filter_and_index_blocks_in_cache feature and related fixes.
Summary:
When a block based table file is opened, if prefetch_index_and_filter is true, it will prefetch the index and filter blocks, putting them into the block cache.
What this feature adds: when an L0 block-based table file is opened, if pin_l0_filter_and_index_blocks_in_cache is true in the options (and prefetch_index_and_filter is true), then the filter and index blocks aren't released back to the block cache at the end of BlockBasedTableReader::Open(). Instead, the table reader takes ownership of them, hence pinning them, i.e. the LRU cache will never push them out. Meanwhile, further accesses in the table reader will not hit the block cache, thus avoiding lock contention.
Test Plan:
'export TEST_TMPDIR=/dev/shm/ && DISABLE_JEMALLOC=1 OPT=-g make all valgrind_check -j32' is OK.
I didn't run the Java tests, I don't have Java set up on my devserver.
Reviewers: sdong
Reviewed By: sdong
Subscribers: andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D56133
2016-04-01 19:42:39 +02:00
|
|
|
}
|
|
|
|
|
2014-12-23 22:24:07 +01:00
|
|
|
Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) {
|
|
|
|
out_file->Append(
|
|
|
|
"Index Details:\n"
|
|
|
|
"--------------------------------------\n");
|
2018-08-10 01:49:45 +02:00
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> blockhandles_iter(
|
2015-10-13 00:06:38 +02:00
|
|
|
NewIndexIterator(ReadOptions()));
|
2014-12-23 22:24:07 +01:00
|
|
|
Status s = blockhandles_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
out_file->Append("Can not read Index Block \n\n");
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
|
|
|
out_file->Append(" Block key hex dump: Data block handle\n");
|
|
|
|
out_file->Append(" Block key ascii\n\n");
|
|
|
|
for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
|
|
|
|
blockhandles_iter->Next()) {
|
|
|
|
s = blockhandles_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
Slice key = blockhandles_iter->key();
|
2018-05-26 03:41:31 +02:00
|
|
|
Slice user_key;
|
2014-12-23 22:24:07 +01:00
|
|
|
InternalKey ikey;
|
2018-05-29 21:09:01 +02:00
|
|
|
if (rep_->table_properties &&
|
|
|
|
rep_->table_properties->index_key_is_user_key != 0) {
|
|
|
|
user_key = key;
|
|
|
|
} else {
|
2018-05-26 03:41:31 +02:00
|
|
|
ikey.DecodeFrom(key);
|
|
|
|
user_key = ikey.user_key();
|
|
|
|
}
|
2014-12-23 22:24:07 +01:00
|
|
|
|
|
|
|
out_file->Append(" HEX ");
|
2018-05-26 03:41:31 +02:00
|
|
|
out_file->Append(user_key.ToString(true).c_str());
|
2014-12-23 22:24:07 +01:00
|
|
|
out_file->Append(": ");
|
|
|
|
out_file->Append(blockhandles_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
|
2018-05-26 03:41:31 +02:00
|
|
|
std::string str_key = user_key.ToString();
|
2014-12-23 22:24:07 +01:00
|
|
|
std::string res_key("");
|
|
|
|
char cspace = ' ';
|
|
|
|
for (size_t i = 0; i < str_key.size(); i++) {
|
|
|
|
res_key.append(&str_key[i], 1);
|
|
|
|
res_key.append(1, cspace);
|
|
|
|
}
|
|
|
|
out_file->Append(" ASCII ");
|
|
|
|
out_file->Append(res_key.c_str());
|
|
|
|
out_file->Append("\n ------\n");
|
|
|
|
}
|
|
|
|
out_file->Append("\n");
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) {
|
2018-08-10 01:49:45 +02:00
|
|
|
std::unique_ptr<InternalIteratorBase<BlockHandle>> blockhandles_iter(
|
2015-10-13 00:06:38 +02:00
|
|
|
NewIndexIterator(ReadOptions()));
|
2014-12-23 22:24:07 +01:00
|
|
|
Status s = blockhandles_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
out_file->Append("Can not read Index Block \n\n");
|
|
|
|
return s;
|
|
|
|
}
|
|
|
|
|
2016-08-13 01:34:11 +02:00
|
|
|
uint64_t datablock_size_min = std::numeric_limits<uint64_t>::max();
|
|
|
|
uint64_t datablock_size_max = 0;
|
|
|
|
uint64_t datablock_size_sum = 0;
|
|
|
|
|
2014-12-23 22:24:07 +01:00
|
|
|
size_t block_id = 1;
|
|
|
|
for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid();
|
|
|
|
block_id++, blockhandles_iter->Next()) {
|
|
|
|
s = blockhandles_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2018-08-10 01:49:45 +02:00
|
|
|
BlockHandle bh = blockhandles_iter->value();
|
2016-08-13 01:34:11 +02:00
|
|
|
uint64_t datablock_size = bh.size();
|
|
|
|
datablock_size_min = std::min(datablock_size_min, datablock_size);
|
|
|
|
datablock_size_max = std::max(datablock_size_max, datablock_size);
|
|
|
|
datablock_size_sum += datablock_size;
|
|
|
|
|
2014-12-23 22:24:07 +01:00
|
|
|
out_file->Append("Data Block # ");
|
2015-04-24 04:17:57 +02:00
|
|
|
out_file->Append(rocksdb::ToString(block_id));
|
2014-12-23 22:24:07 +01:00
|
|
|
out_file->Append(" @ ");
|
|
|
|
out_file->Append(blockhandles_iter->value().ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
out_file->Append("--------------------------------------\n");
|
|
|
|
|
2015-10-13 00:06:38 +02:00
|
|
|
std::unique_ptr<InternalIterator> datablock_iter;
|
2018-07-13 02:19:57 +02:00
|
|
|
datablock_iter.reset(NewDataBlockIterator<DataBlockIter>(
|
|
|
|
rep_, ReadOptions(), blockhandles_iter->value()));
|
2014-12-23 22:24:07 +01:00
|
|
|
s = datablock_iter->status();
|
|
|
|
|
|
|
|
if (!s.ok()) {
|
|
|
|
out_file->Append("Error reading the block - Skipped \n\n");
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (datablock_iter->SeekToFirst(); datablock_iter->Valid();
|
|
|
|
datablock_iter->Next()) {
|
|
|
|
s = datablock_iter->status();
|
|
|
|
if (!s.ok()) {
|
|
|
|
out_file->Append("Error reading the block - Skipped \n");
|
|
|
|
break;
|
|
|
|
}
|
2016-11-12 18:23:05 +01:00
|
|
|
DumpKeyValue(datablock_iter->key(), datablock_iter->value(), out_file);
|
2014-12-23 22:24:07 +01:00
|
|
|
}
|
|
|
|
out_file->Append("\n");
|
|
|
|
}
|
2016-08-13 01:34:11 +02:00
|
|
|
|
|
|
|
uint64_t num_datablocks = block_id - 1;
|
|
|
|
if (num_datablocks) {
|
|
|
|
double datablock_size_avg =
|
|
|
|
static_cast<double>(datablock_size_sum) / num_datablocks;
|
|
|
|
out_file->Append("Data Block Summary:\n");
|
|
|
|
out_file->Append("--------------------------------------");
|
|
|
|
out_file->Append("\n # data blocks: ");
|
|
|
|
out_file->Append(rocksdb::ToString(num_datablocks));
|
|
|
|
out_file->Append("\n min data block size: ");
|
|
|
|
out_file->Append(rocksdb::ToString(datablock_size_min));
|
|
|
|
out_file->Append("\n max data block size: ");
|
|
|
|
out_file->Append(rocksdb::ToString(datablock_size_max));
|
|
|
|
out_file->Append("\n avg data block size: ");
|
|
|
|
out_file->Append(rocksdb::ToString(datablock_size_avg));
|
|
|
|
out_file->Append("\n");
|
|
|
|
}
|
|
|
|
|
2014-12-23 22:24:07 +01:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2016-11-12 18:23:05 +01:00
|
|
|
void BlockBasedTable::DumpKeyValue(const Slice& key, const Slice& value,
|
|
|
|
WritableFile* out_file) {
|
|
|
|
InternalKey ikey;
|
|
|
|
ikey.DecodeFrom(key);
|
|
|
|
|
|
|
|
out_file->Append(" HEX ");
|
|
|
|
out_file->Append(ikey.user_key().ToString(true).c_str());
|
|
|
|
out_file->Append(": ");
|
|
|
|
out_file->Append(value.ToString(true).c_str());
|
|
|
|
out_file->Append("\n");
|
|
|
|
|
|
|
|
std::string str_key = ikey.user_key().ToString();
|
|
|
|
std::string str_value = value.ToString();
|
|
|
|
std::string res_key(""), res_value("");
|
|
|
|
char cspace = ' ';
|
|
|
|
for (size_t i = 0; i < str_key.size(); i++) {
|
2017-11-29 02:20:21 +01:00
|
|
|
if (str_key[i] == '\0') {
|
|
|
|
res_key.append("\\0", 2);
|
|
|
|
} else {
|
|
|
|
res_key.append(&str_key[i], 1);
|
|
|
|
}
|
2016-11-12 18:23:05 +01:00
|
|
|
res_key.append(1, cspace);
|
|
|
|
}
|
|
|
|
for (size_t i = 0; i < str_value.size(); i++) {
|
2017-11-29 02:20:21 +01:00
|
|
|
if (str_value[i] == '\0') {
|
|
|
|
res_value.append("\\0", 2);
|
|
|
|
} else {
|
|
|
|
res_value.append(&str_value[i], 1);
|
|
|
|
}
|
2016-11-12 18:23:05 +01:00
|
|
|
res_value.append(1, cspace);
|
|
|
|
}
|
|
|
|
|
|
|
|
out_file->Append(" ASCII ");
|
|
|
|
out_file->Append(res_key.c_str());
|
|
|
|
out_file->Append(": ");
|
|
|
|
out_file->Append(res_value.c_str());
|
|
|
|
out_file->Append("\n ------\n");
|
|
|
|
}
|
|
|
|
|
2016-06-03 19:47:47 +02:00
|
|
|
namespace {
|
|
|
|
|
2018-03-05 22:08:17 +01:00
|
|
|
void DeleteCachedFilterEntry(const Slice& /*key*/, void* value) {
|
2016-06-03 19:47:47 +02:00
|
|
|
FilterBlockReader* filter = reinterpret_cast<FilterBlockReader*>(value);
|
|
|
|
if (filter->statistics() != nullptr) {
|
|
|
|
RecordTick(filter->statistics(), BLOCK_CACHE_FILTER_BYTES_EVICT,
|
2018-06-29 17:55:33 +02:00
|
|
|
filter->ApproximateMemoryUsage());
|
2016-06-03 19:47:47 +02:00
|
|
|
}
|
|
|
|
delete filter;
|
|
|
|
}
|
|
|
|
|
2018-03-05 22:08:17 +01:00
|
|
|
void DeleteCachedIndexEntry(const Slice& /*key*/, void* value) {
|
2016-06-03 19:47:47 +02:00
|
|
|
IndexReader* index_reader = reinterpret_cast<IndexReader*>(value);
|
|
|
|
if (index_reader->statistics() != nullptr) {
|
|
|
|
RecordTick(index_reader->statistics(), BLOCK_CACHE_INDEX_BYTES_EVICT,
|
2018-06-29 17:55:33 +02:00
|
|
|
index_reader->ApproximateMemoryUsage());
|
2016-06-03 19:47:47 +02:00
|
|
|
}
|
|
|
|
delete index_reader;
|
|
|
|
}
|
|
|
|
|
2019-01-24 03:11:08 +01:00
|
|
|
void DeleteCachedUncompressionDictEntry(const Slice& /*key*/, void* value) {
|
|
|
|
UncompressionDict* dict = reinterpret_cast<UncompressionDict*>(value);
|
|
|
|
RecordTick(dict->statistics(), BLOCK_CACHE_COMPRESSION_DICT_BYTES_EVICT,
|
|
|
|
dict->ApproximateMemoryUsage());
|
|
|
|
delete dict;
|
|
|
|
}
|
|
|
|
|
2016-06-03 19:47:47 +02:00
|
|
|
} // anonymous namespace
|
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
} // namespace rocksdb
|