// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once
#ifndef ROCKSDB_LITE

#include <memory>
#include <string>
#include <utility>
#include <vector>

#include "db/dbformat.h"
#include "file/writable_file_writer.h"
#include "options/cf_options.h"

namespace ROCKSDB_NAMESPACE {
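
// Illustrative usage sketch (hypothetical call site; the real callers are
// tools such as sst_dump). Construct a dumper for an SST file, check the
// initialization status, then scan or dump its contents:
//
//   Options options;
//   SstFileDumper dumper(options, "/path/to/file.sst",
//                        /*readahead_size=*/2 * 1024 * 1024,
//                        /*verify_checksum=*/true, /*output_hex=*/false,
//                        /*decode_blob_index=*/false);
//   Status s = dumper.getStatus();
//   if (s.ok()) {
//     s = dumper.ReadSequential(/*print_kv=*/true, /*read_num=*/0,
//                               /*has_from=*/false, /*from_key=*/"",
//                               /*has_to=*/false, /*to_key=*/"");
//   }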
class SstFileDumper {
 public:
  explicit SstFileDumper(const Options& options, const std::string& file_name,
                         size_t readahead_size, bool verify_checksum,
                         bool output_hex, bool decode_blob_index,
                         bool silent = false);
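
  // Scans the table sequentially, starting at from_key when has_from is set,
  // stopping at to_key when has_to is set (or when the prefix no longer
  // matches if use_from_as_prefix is set), and after read_num entries when
  // read_num is non-zero; when print_kv is true each key/value is printed.
  // Descriptive summary inferred from the signature and typical sst_dump use.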
  Status ReadSequential(bool print_kv, uint64_t read_num, bool has_from,
                        const std::string& from_key, bool has_to,
                        const std::string& to_key,
                        bool use_from_as_prefix = false);

  Status ReadTableProperties(
      std::shared_ptr<const TableProperties>* table_properties);
  uint64_t GetReadNumber() { return read_num_; }
  TableProperties* GetInitTableProperties() { return table_properties_.get(); }
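
  // VerifyChecksum() checks the block checksums of the entire file;
  // DumpTable() writes a human-readable dump of the file's contents to
  // out_filename; getStatus() reports whether opening the file in the
  // constructor succeeded.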
  Status VerifyChecksum();
  Status DumpTable(const std::string& out_filename);
  Status getStatus() { return init_result_; }
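
  // Re-encodes the file's data with each of the given compression types
  // (and, where supported, each level in [compress_level_from,
  // compress_level_to]) and reports the resulting sizes; useful for
  // comparing compression settings against real data.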
  int ShowAllCompressionSizes(
      size_t block_size,
      const std::vector<std::pair<CompressionType, const char*>>&
          compression_types,
      int32_t compress_level_from,
      int32_t compress_level_to);

  int ShowCompressionSize(
      size_t block_size,
      CompressionType compress_type,
      const CompressionOptions& compress_opt);

 private:
  // Get the TableReader implementation for the sst file
  Status GetTableReader(const std::string& file_path);
  Status ReadTableProperties(uint64_t table_magic_number,
                             RandomAccessFileReader* file, uint64_t file_size,
                             FilePrefetchBuffer* prefetch_buffer);
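
  // Rebuilds the table contents with the options in tb_options and returns
  // the size of the rewritten file, reporting the number of data blocks
  // written through num_data_blocks; used by the ShowCompressionSize helpers.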
  uint64_t CalculateCompressedTableSize(const TableBuilderOptions& tb_options,
                                        size_t block_size,
                                        uint64_t* num_data_blocks);
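
  // Configure options_.table_factory based on the table format implied by
  // the file's footer magic number; SetOldTableOptions() is the fallback
  // when no magic number can be read. Descriptive summary; see the
  // implementation for the exact behavior.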
  Status SetTableOptionsByMagicNumber(uint64_t table_magic_number);
  Status SetOldTableOptions();

  // Helper function to call the factory with settings specific to the
  // factory implementation
  Status NewTableReader(const ImmutableCFOptions& ioptions,
                        const EnvOptions& soptions,
                        const InternalKeyComparator& internal_comparator,
                        uint64_t file_size,
                        std::unique_ptr<TableReader>* table_reader);

  std::string file_name_;
  uint64_t read_num_;
  bool output_hex_;
  bool decode_blob_index_;
  EnvOptions soptions_;
  // less verbose in stdout/stderr
  bool silent_;

  // options_ and internal_comparator_ will also be used in
  // ReadSequential internally (specifically, seek-related operations)
  Options options_;

  Status init_result_;
  std::unique_ptr<TableReader> table_reader_;
  std::unique_ptr<RandomAccessFileReader> file_;

  const ImmutableCFOptions ioptions_;
  const MutableCFOptions moptions_;
  ReadOptions read_options_;
  InternalKeyComparator internal_comparator_;
  std::unique_ptr<TableProperties> table_properties_;
};

}  // namespace ROCKSDB_NAMESPACE

#endif  // ROCKSDB_LITE