// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <stdint.h>
#include <string>

#include "file/file_prefetch_buffer.h"
#include "file/random_access_file_reader.h"
#include "rocksdb/options.h"
#include "rocksdb/slice.h"
#include "rocksdb/status.h"
#include "rocksdb/table.h"

#include "memory/memory_allocator.h"
#include "options/cf_options.h"
#include "port/malloc.h"
#include "port/port.h"  // noexcept
#include "table/persistent_cache_options.h"

namespace ROCKSDB_NAMESPACE {

class RandomAccessFile;
struct ReadOptions;

extern bool ShouldReportDetailedTime(Env* env, Statistics* stats);

// The length of the magic number in bytes.
const int kMagicNumberLengthByte = 8;

// BlockHandle is a pointer to the extent of a file that stores a data
// block or a meta block.
class BlockHandle {
 public:
  // Creates a block handle with special values indicating "uninitialized,"
  // distinct from the "null" block handle.
  BlockHandle();
  BlockHandle(uint64_t offset, uint64_t size);

  // The offset of the block in the file.
  uint64_t offset() const { return offset_; }
  void set_offset(uint64_t _offset) { offset_ = _offset; }

  // The size of the stored block.
  uint64_t size() const { return size_; }
  void set_size(uint64_t _size) { size_ = _size; }

  void EncodeTo(std::string* dst) const;
  Status DecodeFrom(Slice* input);
  Status DecodeSizeFrom(uint64_t offset, Slice* input);

  // Return a string that contains a copy of this handle.
  std::string ToString(bool hex = true) const;

  // If the block handle's offset and size are both 0, we will view it
  // as a null block handle that points nowhere.
  bool IsNull() const { return offset_ == 0 && size_ == 0; }

  static const BlockHandle& NullBlockHandle() { return kNullBlockHandle; }

  // Maximum encoding length of a BlockHandle
  enum { kMaxEncodedLength = 10 + 10 };

  inline bool operator==(const BlockHandle& rhs) const {
    return offset_ == rhs.offset_ && size_ == rhs.size_;
  }
  inline bool operator!=(const BlockHandle& rhs) const {
    return !(*this == rhs);
  }

 private:
  uint64_t offset_;
  uint64_t size_;

  static const BlockHandle kNullBlockHandle;
};
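
// Example (illustrative sketch, not part of the original header): a
// BlockHandle round-trips through EncodeTo()/DecodeFrom().
//
//   BlockHandle h(/*offset=*/4096, /*size=*/1024);
//   std::string encoded;
//   h.EncodeTo(&encoded);  // appends at most kMaxEncodedLength (20) bytes
//   Slice in(encoded);
//   BlockHandle decoded;
//   Status s = decoded.DecodeFrom(&in);
//   assert(s.ok() && decoded == h);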

// Value in block-based table file index.
//
// The index entry for block n is: y -> h, [x],
// where: y is some key between the last key of block n (inclusive) and the
// first key of block n+1 (exclusive); h is BlockHandle pointing to block n;
// x, if present, is the first key of block n (unshortened).
// This struct represents the "h, [x]" part.
struct IndexValue {
  BlockHandle handle;
  // Empty means unknown.
  Slice first_internal_key;

  IndexValue() = default;
  IndexValue(BlockHandle _handle, Slice _first_internal_key)
      : handle(_handle), first_internal_key(_first_internal_key) {}

  // have_first_key indicates whether the `first_internal_key` is used.
  // If previous_handle is not null, delta encoding is used;
  // in this case, the two handles must point to consecutive blocks:
  // handle.offset() ==
  //     previous_handle->offset() + previous_handle->size() + kBlockTrailerSize
  void EncodeTo(std::string* dst, bool have_first_key,
                const BlockHandle* previous_handle) const;
  Status DecodeFrom(Slice* input, bool have_first_key,
                    const BlockHandle* previous_handle);

  std::string ToString(bool hex, bool have_first_key) const;
};
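
// Example (hedged sketch): delta-encoding an index entry against the
// previous block's handle; the two handles point to consecutive blocks
// separated by the 5-byte block trailer. "user-key" stands in for a real
// internal key and is illustrative only.
//
//   BlockHandle prev(/*offset=*/0, /*size=*/4096);
//   BlockHandle cur(prev.offset() + prev.size() + kBlockTrailerSize,
//                   /*size=*/4096);
//   IndexValue v(cur, Slice("user-key"));
//   std::string encoded;
//   v.EncodeTo(&encoded, /*have_first_key=*/true, /*previous_handle=*/&prev);
//   Slice in(encoded);
//   IndexValue decoded;
//   Status s = decoded.DecodeFrom(&in, /*have_first_key=*/true, &prev);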

inline uint32_t GetCompressFormatForVersion(uint32_t format_version) {
  // As of format_version 2, we encode compressed block with
  // compress_format_version == 2. Before that, the version is 1.
  // DO NOT CHANGE THIS FUNCTION, it affects disk format
  return format_version >= 2 ? 2 : 1;
}

inline bool BlockBasedTableSupportedVersion(uint32_t version) {
  return version <= 5;
}
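
// Example (sketch): a reader would typically validate the footer's version
// before trusting the rest of the file.
//
//   if (!BlockBasedTableSupportedVersion(footer.version())) {
//     return Status::Corruption("unsupported format_version");
//   }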

// Footer encapsulates the fixed information stored at the tail
// end of every table file.
class Footer {
 public:
  // Constructs a footer without specifying its table magic number.
  // In that case, the table magic number of the footer should be
  // initialized via @ReadFooterFromFile().
  // Use this when you plan to load Footer with DecodeFrom(). Never use this
  // when you plan to EncodeTo().
  Footer() : Footer(kInvalidTableMagicNumber, 0) {}

  // Use this constructor when you plan to write out the footer using
  // EncodeTo(). Never use this constructor with DecodeFrom().
  Footer(uint64_t table_magic_number, uint32_t version);

  // The version of the footer in this file
  uint32_t version() const { return version_; }

  // The checksum type used in this file
  ChecksumType checksum() const { return checksum_; }
  void set_checksum(const ChecksumType c) { checksum_ = c; }

  // The block handle for the metaindex block of the table
  const BlockHandle& metaindex_handle() const { return metaindex_handle_; }
  void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; }

  // The block handle for the index block of the table
  const BlockHandle& index_handle() const { return index_handle_; }
  void set_index_handle(const BlockHandle& h) { index_handle_ = h; }

  uint64_t table_magic_number() const { return table_magic_number_; }

  void EncodeTo(std::string* dst) const;

  // Set the current footer based on the input slice.
  //
  // REQUIRES: table_magic_number_ is not set (i.e.,
  // HasInitializedTableMagicNumber() is false). The function will initialize
  // the magic number.
  Status DecodeFrom(Slice* input);

  // Encoded length of a Footer. Note that the serialization of a Footer will
  // always occupy at least kMinEncodedLength bytes. If fields are changed,
  // the version number should be incremented and kMaxEncodedLength should be
  // increased accordingly.
  enum {
    // Footer version 0 (legacy) will always occupy exactly this many bytes.
    // It consists of two block handles, padding, and a magic number.
    kVersion0EncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8,
    // Footer of versions 1 and higher will always occupy exactly this many
    // bytes. It consists of the checksum type, two block handles, padding,
    // a version number (at least 1), and a magic number.
    kNewVersionsEncodedLength = 1 + 2 * BlockHandle::kMaxEncodedLength + 4 + 8,
    kMinEncodedLength = kVersion0EncodedLength,
    kMaxEncodedLength = kNewVersionsEncodedLength,
  };
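
  // For reference, with BlockHandle::kMaxEncodedLength == 20, the legacy
  // footer occupies 2 * 20 + 8 = 48 bytes (kVersion0EncodedLength) and the
  // versioned footer occupies 1 + 2 * 20 + 4 + 8 = 53 bytes
  // (kNewVersionsEncodedLength).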

  static const uint64_t kInvalidTableMagicNumber = 0;

  // Convert this object to a human-readable form.
  std::string ToString() const;

 private:
  // REQUIRES: magic number wasn't initialized.
  void set_table_magic_number(uint64_t magic_number) {
    assert(!HasInitializedTableMagicNumber());
    table_magic_number_ = magic_number;
  }

  // Return true if @table_magic_number_ is set to a value different
  // from @kInvalidTableMagicNumber.
  bool HasInitializedTableMagicNumber() const {
    return (table_magic_number_ != kInvalidTableMagicNumber);
  }

  uint32_t version_;
  ChecksumType checksum_;
  BlockHandle metaindex_handle_;
  BlockHandle index_handle_;
  uint64_t table_magic_number_ = 0;
};
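
// Example (illustrative sketch, not part of the original header): writing a
// footer out. kExampleMagicNumber is a hypothetical placeholder; real table
// formats define their own magic numbers. kCRC32c is a ChecksumType from
// rocksdb/table.h.
//
//   Footer footer(kExampleMagicNumber, /*version=*/4);
//   footer.set_checksum(kCRC32c);
//   footer.set_metaindex_handle(BlockHandle(/*offset=*/8192, /*size=*/64));
//   footer.set_index_handle(BlockHandle(/*offset=*/4096, /*size=*/4096));
//   std::string encoded;
//   footer.EncodeTo(&encoded);
//   // For version >= 1, the encoding occupies kNewVersionsEncodedLength
//   // bytes.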

// Read the footer from file.
// If enforce_table_magic_number != 0, ReadFooterFromFile() will return
// corruption if the table magic number is not equal to
// enforce_table_magic_number.
Status ReadFooterFromFile(const IOOptions& opts, RandomAccessFileReader* file,
                          FilePrefetchBuffer* prefetch_buffer,
                          uint64_t file_size, Footer* footer,
                          uint64_t enforce_table_magic_number = 0);

// 1-byte compression type + 32-bit checksum
static const size_t kBlockTrailerSize = 5;

// Make block size calculation for IO less error prone
inline uint64_t block_size(const BlockHandle& handle) {
  return handle.size() + kBlockTrailerSize;
}

inline CompressionType get_block_compression_type(const char* block_data,
                                                  size_t block_size) {
  return static_cast<CompressionType>(block_data[block_size]);
}
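
// Example (sketch): for a block at `handle`, an IO of block_size(handle)
// bytes starting at handle.offset() covers both the payload and the 5-byte
// trailer; the compression type byte sits immediately after the payload.
// `buf` is assumed to hold those bytes as read from the file.
//
//   CompressionType type = get_block_compression_type(buf, handle.size());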

// Represents the contents of a block read from an SST file. Depending on how
// it's created, it may or may not own the actual block bytes. As an example,
// BlockContents objects representing data read from mmapped files only point
// into the mmapped region.
struct BlockContents {
  Slice data;  // Actual contents of data
  CacheAllocationPtr allocation;

#ifndef NDEBUG
  // Whether the block is a raw block, which contains the compression type
  // byte. It is only used for assertions.
  bool is_raw_block = false;
#endif  // NDEBUG

  BlockContents() {}

  // Does not take ownership of the underlying data bytes.
  BlockContents(const Slice& _data) : data(_data) {}

  // Takes ownership of the underlying data bytes.
  BlockContents(CacheAllocationPtr&& _data, size_t _size)
      : data(_data.get(), _size), allocation(std::move(_data)) {}

  // Takes ownership of the underlying data bytes.
  BlockContents(std::unique_ptr<char[]>&& _data, size_t _size)
      : data(_data.get(), _size) {
    allocation.reset(_data.release());
  }

  // Returns whether the object has ownership of the underlying data bytes.
  bool own_bytes() const { return allocation.get() != nullptr; }

  // It's the caller's responsibility to make sure that this is
  // for raw block contents, which contain the compression
  // type byte at the end.
  CompressionType get_compression_type() const {
    assert(is_raw_block);
    return get_block_compression_type(data.data(), data.size());
  }

  // The additional memory space taken by the block data.
  size_t usable_size() const {
    if (allocation.get() != nullptr) {
      auto allocator = allocation.get_deleter().allocator;
      if (allocator) {
        return allocator->UsableSize(allocation.get(), data.size());
      }
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
      return malloc_usable_size(allocation.get());
#else
      return data.size();
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
    } else {
      return 0;  // no extra memory is occupied by the data
    }
  }

  size_t ApproximateMemoryUsage() const {
    return usable_size() + sizeof(*this);
  }

  BlockContents(BlockContents&& other) ROCKSDB_NOEXCEPT {
    *this = std::move(other);
  }

  BlockContents& operator=(BlockContents&& other) {
    data = std::move(other.data);
    allocation = std::move(other.allocation);
#ifndef NDEBUG
    is_raw_block = other.is_raw_block;
#endif  // NDEBUG
    return *this;
  }
};
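
// Example (illustrative sketch): the ownership semantics above in action.
// `mapped_region` is an assumed Slice into memory that outlives `borrowed`.
//
//   BlockContents borrowed(mapped_region);  // non-owning
//   assert(!borrowed.own_bytes());
//
//   std::unique_ptr<char[]> buf(new char[1024]);
//   BlockContents owned(std::move(buf), 1024);  // takes ownership
//   assert(owned.own_bytes());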

// Read the block identified by "handle" from "file". On failure
// return non-OK. On success fill *contents and return OK.
extern Status ReadBlockContents(
    RandomAccessFileReader* file, FilePrefetchBuffer* prefetch_buffer,
    const Footer& footer, const ReadOptions& options, const BlockHandle& handle,
    BlockContents* contents, const ImmutableCFOptions& ioptions,
    bool do_uncompress = true, const Slice& compression_dict = Slice(),
    const PersistentCacheOptions& cache_options = PersistentCacheOptions());
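
// Example (hedged sketch): reading and decompressing one block. The names
// file_reader, footer, handle, and ioptions are assumptions, standing in
// for state set up elsewhere.
//
//   BlockContents contents;
//   Status s = ReadBlockContents(file_reader, /*prefetch_buffer=*/nullptr,
//                                footer, ReadOptions(), handle, &contents,
//                                ioptions, /*do_uncompress=*/true);
//   if (s.ok()) {
//     Slice payload = contents.data;  // uncompressed block payload
//   }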

// The 'data' points to the raw block contents read in from file.
// This method allocates a new heap buffer and the raw block
// contents are uncompressed into this buffer. This buffer is
// returned via 'contents' and it is up to the caller to
// free this buffer.
// For description of compress_format_version and possible values, see
// util/compression.h
extern Status UncompressBlockContents(const UncompressionInfo& info,
                                      const char* data, size_t n,
                                      BlockContents* contents,
                                      uint32_t compress_format_version,
                                      const ImmutableCFOptions& ioptions,
                                      MemoryAllocator* allocator = nullptr);
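
// Example (hedged sketch): `info`, `raw`, `n`, and `ioptions` are assumed
// inputs set up elsewhere; format_version 4 is just an example value,
// mapped to a compress_format_version by GetCompressFormatForVersion()
// above.
//
//   BlockContents uncompressed;
//   Status s = UncompressBlockContents(
//       info, raw, n, &uncompressed,
//       GetCompressFormatForVersion(/*format_version=*/4), ioptions);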

// This is an extension to UncompressBlockContents that accepts
// a specific compression type. This is used by un-wrapped blocks
// with no compression header.
extern Status UncompressBlockContentsForCompressionType(
    const UncompressionInfo& info, const char* data, size_t n,
    BlockContents* contents, uint32_t compress_format_version,
    const ImmutableCFOptions& ioptions, MemoryAllocator* allocator = nullptr);

// Replace db_host_id contents with the real hostname if necessary
extern Status ReifyDbHostIdProperty(Env* env, std::string* db_host_id);

// Implementation details follow. Clients should ignore them.

// TODO(andrewkr): we should prefer one way of representing a null/uninitialized
// BlockHandle. Currently we use zeros for null and use negation-of-zeros for
// uninitialized.
inline BlockHandle::BlockHandle()
    : BlockHandle(~static_cast<uint64_t>(0), ~static_cast<uint64_t>(0)) {}

inline BlockHandle::BlockHandle(uint64_t _offset, uint64_t _size)
    : offset_(_offset), size_(_size) {}

}  // namespace ROCKSDB_NAMESPACE