// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#ifndef ROCKSDB_LITE

#include "tools/sst_dump_tool_imp.h"

#include <algorithm>
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <limits>
#include <map>
#include <memory>
#include <sstream>
#include <vector>

#include "db/blob/blob_index.h"
#include "db/memtable.h"
#include "db/write_batch_internal.h"
#include "env/composite_env_wrapper.h"
#include "options/cf_options.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/iterator.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/status.h"
#include "rocksdb/table_properties.h"
#include "rocksdb/utilities/ldb_cmd.h"
#include "table/block_based/block.h"
#include "table/block_based/block_based_table_builder.h"
#include "table/block_based/block_based_table_factory.h"
#include "table/block_based/block_builder.h"
#include "table/format.h"
#include "table/meta_blocks.h"
#include "table/plain/plain_table_factory.h"
#include "table/table_reader.h"
#include "util/compression.h"
#include "util/random.h"

#include "port/port.h"

namespace ROCKSDB_NAMESPACE {
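
// SstFileDumper opens a single SST file through a TableReader and implements
// the sst_dump subcommands on top of it. A minimal sketch of how a caller
// drives it, mirroring what SSTDumpTool::Run() below does per file (the file
// path is hypothetical):
//
//   Options options;
//   SstFileDumper dumper(options, "/db/000123.sst", /*verify_checksum=*/true,
//                        /*output_hex=*/false, /*decode_blob_index=*/false);
//   if (dumper.getStatus().ok()) {
//     // Scan and print every key/value pair; read_num == 0 means no limit.
//     dumper.ReadSequential(/*print_kv=*/true, /*read_num=*/0,
//                           /*has_from=*/false, "", /*has_to=*/false, "",
//                           /*use_from_as_prefix=*/false);
//   }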
SstFileDumper::SstFileDumper(const Options& options,
                             const std::string& file_path, bool verify_checksum,
                             bool output_hex, bool decode_blob_index)
    : file_name_(file_path),
      read_num_(0),
      verify_checksum_(verify_checksum),
      output_hex_(output_hex),
      decode_blob_index_(decode_blob_index),
      options_(options),
      ioptions_(options_),
      moptions_(ColumnFamilyOptions(options_)),
      internal_comparator_(BytewiseComparator()) {
  fprintf(stdout, "Process %s\n", file_path.c_str());
  init_result_ = GetTableReader(file_name_);
}

extern const uint64_t kBlockBasedTableMagicNumber;
extern const uint64_t kLegacyBlockBasedTableMagicNumber;
extern const uint64_t kPlainTableMagicNumber;
extern const uint64_t kLegacyPlainTableMagicNumber;

const char* testFileName = "test_file_name";

static const std::vector<std::pair<CompressionType, const char*>>
    kCompressions = {
        {CompressionType::kNoCompression, "kNoCompression"},
        {CompressionType::kSnappyCompression, "kSnappyCompression"},
        {CompressionType::kZlibCompression, "kZlibCompression"},
        {CompressionType::kBZip2Compression, "kBZip2Compression"},
        {CompressionType::kLZ4Compression, "kLZ4Compression"},
        {CompressionType::kLZ4HCCompression, "kLZ4HCCompression"},
        {CompressionType::kXpressCompression, "kXpressCompression"},
        {CompressionType::kZSTD, "kZSTD"}};
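
// GetTableReader() bootstraps the dumper: open the file, read the footer to
// learn the table magic number, switch to mmap reads for plain-table files,
// pick the matching table factory (falling back to SetOldTableOptions() when
// the properties block cannot be read), and finally build the TableReader
// that every other method uses.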
Status SstFileDumper::GetTableReader(const std::string& file_path) {
  // Warning about 'magic_number' being uninitialized shows up only in UBsan
  // builds. Though access is guarded by 's.ok()' checks, fix the issue to
  // avoid any warnings.
  uint64_t magic_number = Footer::kInvalidTableMagicNumber;

  // Read the table magic number from the file footer.
  Footer footer;

  std::unique_ptr<RandomAccessFile> file;
  uint64_t file_size = 0;
  Status s = options_.env->NewRandomAccessFile(file_path, &file, soptions_);
  if (s.ok()) {
    s = options_.env->GetFileSize(file_path, &file_size);
  }

  file_.reset(new RandomAccessFileReader(NewLegacyRandomAccessFileWrapper(file),
                                         file_path));

  if (s.ok()) {
    s = ReadFooterFromFile(file_.get(), nullptr /* prefetch_buffer */,
                           file_size, &footer);
  }
  if (s.ok()) {
    magic_number = footer.table_magic_number();
  }

  if (s.ok()) {
    if (magic_number == kPlainTableMagicNumber ||
        magic_number == kLegacyPlainTableMagicNumber) {
      soptions_.use_mmap_reads = true;
      options_.env->NewRandomAccessFile(file_path, &file, soptions_);
      file_.reset(new RandomAccessFileReader(
          NewLegacyRandomAccessFileWrapper(file), file_path));
    }
    options_.comparator = &internal_comparator_;
    // For old SST formats, ReadTableProperties might fail, but the file can
    // still be read.
    if (ReadTableProperties(magic_number, file_.get(), file_size).ok()) {
      SetTableOptionsByMagicNumber(magic_number);
    } else {
      SetOldTableOptions();
    }
  }

  if (s.ok()) {
    s = NewTableReader(ioptions_, soptions_, internal_comparator_, file_size,
                       &table_reader_);
  }
  return s;
}
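
// NewTableReader() asks the configured table factory for a TableReader over
// file_. The formal parameters are unused (hence commented out) because the
// values are taken from the corresponding members instead.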
Status SstFileDumper::NewTableReader(
    const ImmutableCFOptions& /*ioptions*/, const EnvOptions& /*soptions*/,
    const InternalKeyComparator& /*internal_comparator*/, uint64_t file_size,
    std::unique_ptr<TableReader>* /*table_reader*/) {
  auto t_opt = TableReaderOptions(ioptions_, moptions_.prefix_extractor.get(),
                                  soptions_, internal_comparator_);
  // Allow opening files with a global sequence number for backward
  // compatibility.
  t_opt.largest_seqno = kMaxSequenceNumber;

  // We need to turn off pre-fetching of index and filter nodes for
  // BlockBasedTable
  if (BlockBasedTableFactory::kName == options_.table_factory->Name()) {
    return options_.table_factory->NewTableReader(t_opt, std::move(file_),
                                                  file_size, &table_reader_,
                                                  /*enable_prefetch=*/false);
  }

  // For all other factory implementations
  return options_.table_factory->NewTableReader(t_opt, std::move(file_),
                                                file_size, &table_reader_);
}
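
// VerifyChecksum() backs `--command=verify`: it iterates over all blocks in
// the file and validates their checksums via TableReader::VerifyChecksum().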
Status SstFileDumper::VerifyChecksum() {
  // We could pass specific readahead setting into read options if needed.
  return table_reader_->VerifyChecksum(ReadOptions(),
                                       TableReaderCaller::kSSTDumpTool);
}
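
// DumpTable() backs `--command=raw`: it writes all the table contents in a
// readable form to out_filename via TableReader::DumpTable().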
Status SstFileDumper::DumpTable(const std::string& out_filename) {
  std::unique_ptr<WritableFile> out_file;
  Env* env = options_.env;
  // Bail out early if the output file cannot be created; otherwise the
  // nullptr out_file would be dereferenced below.
  Status s = env->NewWritableFile(out_filename, &out_file, soptions_);
  if (!s.ok()) {
    return s;
  }
  s = table_reader_->DumpTable(out_file.get());
  out_file->Close();
  return s;
}
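
// CalculateCompressedTableSize() estimates how large the open table would be
// if rebuilt with the compression settings in tb_options: every key/value
// pair is re-added to a scratch table hosted in an in-memory Env (NewMemEnv),
// and the resulting file size is returned, with the data block count reported
// through *num_data_blocks.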
uint64_t SstFileDumper::CalculateCompressedTableSize(
    const TableBuilderOptions& tb_options, size_t block_size,
    uint64_t* num_data_blocks) {
  std::unique_ptr<WritableFile> out_file;
  std::unique_ptr<Env> env(NewMemEnv(options_.env));
  env->NewWritableFile(testFileName, &out_file, soptions_);
  std::unique_ptr<WritableFileWriter> dest_writer;
  dest_writer.reset(
      new WritableFileWriter(NewLegacyWritableFileWrapper(std::move(out_file)),
                             testFileName, soptions_));
  BlockBasedTableOptions table_options;
  table_options.block_size = block_size;
  BlockBasedTableFactory block_based_tf(table_options);
  std::unique_ptr<TableBuilder> table_builder;
  table_builder.reset(block_based_tf.NewTableBuilder(
      tb_options,
      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
      dest_writer.get()));
  std::unique_ptr<InternalIterator> iter(table_reader_->NewIterator(
      ReadOptions(), moptions_.prefix_extractor.get(), /*arena=*/nullptr,
      /*skip_filters=*/false, TableReaderCaller::kSSTDumpTool));
  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
    if (!iter->status().ok()) {
      fputs(iter->status().ToString().c_str(), stderr);
      exit(1);
    }
    table_builder->Add(iter->key(), iter->value());
  }
  Status s = table_builder->Finish();
  if (!s.ok()) {
    fputs(s.ToString().c_str(), stderr);
    exit(1);
  }
  uint64_t size = table_builder->FileSize();
  assert(num_data_blocks != nullptr);
  *num_data_blocks = table_builder->GetTableProperties().num_data_blocks;
  env->DeleteFile(testFileName);
  return size;
}
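
// ShowAllCompressionSizes() backs `--command=recompress`. For each supported
// compression type it rebuilds the table via CalculateCompressedTableSize()
// and breaks the data blocks down using the statistics tickers:
//   Compressed              = NUMBER_BLOCK_COMPRESSED
//   Not compressed (abort)  = NUMBER_BLOCK_NOT_COMPRESSED
//   Not compressed (ratio)  = num_data_blocks - Compressed - abort
// Each count is also printed as a percentage of num_data_blocks.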
int SstFileDumper::ShowAllCompressionSizes(
    size_t block_size,
    const std::vector<std::pair<CompressionType, const char*>>&
        compression_types) {
  ReadOptions read_options;
  Options opts;
  opts.statistics = ROCKSDB_NAMESPACE::CreateDBStatistics();
  opts.statistics->set_stats_level(StatsLevel::kAll);
  const ImmutableCFOptions imoptions(opts);
  const ColumnFamilyOptions cfo(opts);
  const MutableCFOptions moptions(cfo);
  ROCKSDB_NAMESPACE::InternalKeyComparator ikc(opts.comparator);
  std::vector<std::unique_ptr<IntTblPropCollectorFactory> >
      block_based_table_factories;

  fprintf(stdout, "Block Size: %" ROCKSDB_PRIszt "\n", block_size);

  for (auto& i : compression_types) {
    if (CompressionTypeSupported(i.first)) {
      CompressionOptions compress_opt;
      std::string column_family_name;
      int unknown_level = -1;
      TableBuilderOptions tb_opts(
          imoptions, moptions, ikc, &block_based_table_factories, i.first,
          0 /* sample_for_compression */, compress_opt,
          false /* skip_filters */, column_family_name, unknown_level);
      uint64_t num_data_blocks = 0;
      uint64_t file_size =
          CalculateCompressedTableSize(tb_opts, block_size, &num_data_blocks);
      fprintf(stdout, "Compression: %-24s", i.second);
      fprintf(stdout, " Size: %10" PRIu64, file_size);
      fprintf(stdout, " Blocks: %6" PRIu64, num_data_blocks);
      const uint64_t compressed_blocks =
          opts.statistics->getAndResetTickerCount(NUMBER_BLOCK_COMPRESSED);
      const uint64_t not_compressed_blocks =
          opts.statistics->getAndResetTickerCount(NUMBER_BLOCK_NOT_COMPRESSED);
      // When the option enable_index_compression is true,
      // NUMBER_BLOCK_COMPRESSED is incremented for index block(s).
      if ((compressed_blocks + not_compressed_blocks) > num_data_blocks) {
        num_data_blocks = compressed_blocks + not_compressed_blocks;
      }
      const uint64_t ratio_not_compressed_blocks =
          (num_data_blocks - compressed_blocks) - not_compressed_blocks;
      const double compressed_pcnt =
          (0 == num_data_blocks) ? 0.0
                                 : ((static_cast<double>(compressed_blocks) /
                                     static_cast<double>(num_data_blocks)) *
                                    100.0);
      const double ratio_not_compressed_pcnt =
          (0 == num_data_blocks)
              ? 0.0
              : ((static_cast<double>(ratio_not_compressed_blocks) /
                  static_cast<double>(num_data_blocks)) *
                 100.0);
      const double not_compressed_pcnt =
          (0 == num_data_blocks)
              ? 0.0
              : ((static_cast<double>(not_compressed_blocks) /
                  static_cast<double>(num_data_blocks)) *
                 100.0);
      fprintf(stdout, " Compressed: %6" PRIu64 " (%5.1f%%)", compressed_blocks,
              compressed_pcnt);
      fprintf(stdout, " Not compressed (ratio): %6" PRIu64 " (%5.1f%%)",
              ratio_not_compressed_blocks, ratio_not_compressed_pcnt);
      fprintf(stdout, " Not compressed (abort): %6" PRIu64 " (%5.1f%%)\n",
              not_compressed_blocks, not_compressed_pcnt);
    } else {
      fprintf(stdout, "Unsupported compression type: %s.\n", i.second);
    }
  }
  return 0;
}
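
// This ReadTableProperties() overload decodes the properties block straight
// from the file (given its magic number) and caches it in table_properties_;
// the shared_ptr overload further below returns the properties held by the
// live TableReader instead.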
Status SstFileDumper::ReadTableProperties(uint64_t table_magic_number,
                                          RandomAccessFileReader* file,
                                          uint64_t file_size) {
  TableProperties* table_properties = nullptr;
  Status s = ROCKSDB_NAMESPACE::ReadTableProperties(
      file, file_size, table_magic_number, ioptions_, &table_properties);
  if (s.ok()) {
    table_properties_.reset(table_properties);
  } else {
    fprintf(stdout, "Not able to read table properties\n");
  }
  return s;
}
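
// SetTableOptionsByMagicNumber() selects the table factory matching the
// footer magic number: BlockBasedTableFactory for (legacy) block-based files,
// a full-scan-mode PlainTableFactory for (legacy) plain-table files, and
// Status::InvalidArgument for anything else.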
Status SstFileDumper::SetTableOptionsByMagicNumber(
    uint64_t table_magic_number) {
  assert(table_properties_);
  if (table_magic_number == kBlockBasedTableMagicNumber ||
      table_magic_number == kLegacyBlockBasedTableMagicNumber) {
    options_.table_factory = std::make_shared<BlockBasedTableFactory>();
    fprintf(stdout, "Sst file format: block-based\n");

    auto& props = table_properties_->user_collected_properties;
    auto pos = props.find(BlockBasedTablePropertyNames::kIndexType);
    if (pos != props.end()) {
      auto index_type_on_file = static_cast<BlockBasedTableOptions::IndexType>(
          DecodeFixed32(pos->second.c_str()));
      if (index_type_on_file ==
          BlockBasedTableOptions::IndexType::kHashSearch) {
        options_.prefix_extractor.reset(NewNoopTransform());
      }
    }
  } else if (table_magic_number == kPlainTableMagicNumber ||
             table_magic_number == kLegacyPlainTableMagicNumber) {
    options_.allow_mmap_reads = true;

    PlainTableOptions plain_table_options;
    plain_table_options.user_key_len = kPlainTableVariableLength;
    plain_table_options.bloom_bits_per_key = 0;
    plain_table_options.hash_table_ratio = 0;
    plain_table_options.index_sparseness = 1;
    plain_table_options.huge_page_tlb_size = 0;
    plain_table_options.encoding_type = kPlain;
    plain_table_options.full_scan_mode = true;

    options_.table_factory.reset(NewPlainTableFactory(plain_table_options));
    fprintf(stdout, "Sst file format: plain table\n");
  } else {
    char error_msg_buffer[80];
    // Use PRIx64 so the full 64-bit magic number is printed even on
    // platforms where long is 32 bits.
    snprintf(error_msg_buffer, sizeof(error_msg_buffer) - 1,
             "Unsupported table magic number --- %" PRIx64,
             table_magic_number);
    return Status::InvalidArgument(error_msg_buffer);
  }

  return Status::OK();
}

Status SstFileDumper::SetOldTableOptions() {
  assert(table_properties_ == nullptr);
  options_.table_factory = std::make_shared<BlockBasedTableFactory>();
  fprintf(stdout, "Sst file format: block-based(old version)\n");

  return Status::OK();
}
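
// ReadSequential() implements the check/scan commands: seek to from_key (or
// the first key), iterate over internal keys, and stop after read_num
// entries, at to_key, or once keys no longer carry the --prefix. When
// print_kv is set, each pair is printed, with blob indexes decoded if
// decode_blob_index_ is enabled.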
Status SstFileDumper::ReadSequential(bool print_kv, uint64_t read_num,
                                     bool has_from, const std::string& from_key,
                                     bool has_to, const std::string& to_key,
                                     bool use_from_as_prefix) {
  if (!table_reader_) {
    return init_result_;
  }

  InternalIterator* iter = table_reader_->NewIterator(
      ReadOptions(verify_checksum_, false), moptions_.prefix_extractor.get(),
      /*arena=*/nullptr, /*skip_filters=*/false,
      TableReaderCaller::kSSTDumpTool);
  uint64_t i = 0;
  if (has_from) {
    InternalKey ikey;
    ikey.SetMinPossibleForUserKey(from_key);
    iter->Seek(ikey.Encode());
  } else {
    iter->SeekToFirst();
  }
  for (; iter->Valid(); iter->Next()) {
    Slice key = iter->key();
    Slice value = iter->value();
    ++i;
    if (read_num > 0 && i > read_num) {
      break;
    }

    ParsedInternalKey ikey;
    if (!ParseInternalKey(key, &ikey)) {
      std::cerr << "Internal Key [" << key.ToString(true /* in hex*/)
                << "] parse error!\n";
      continue;
    }

    // Stop if the key returned is no longer prefixed with our 'from' key.
    if (use_from_as_prefix && !ikey.user_key.starts_with(from_key)) {
      break;
    }

    // If an end marker was specified, we stop before it.
    if (has_to && BytewiseComparator()->Compare(ikey.user_key, to_key) >= 0) {
      break;
    }

    if (print_kv) {
      if (!decode_blob_index_ || ikey.type != kTypeBlobIndex) {
        fprintf(stdout, "%s => %s\n", ikey.DebugString(output_hex_).c_str(),
                value.ToString(output_hex_).c_str());
      } else {
        BlobIndex blob_index;

        const Status s = blob_index.DecodeFrom(value);
        if (!s.ok()) {
          fprintf(stderr, "%s => error decoding blob index\n",
                  ikey.DebugString(output_hex_).c_str());
          continue;
        }

        fprintf(stdout, "%s => %s\n", ikey.DebugString(output_hex_).c_str(),
                blob_index.DebugString(output_hex_).c_str());
      }
    }
  }

  read_num_ += i;

  Status ret = iter->status();
  delete iter;
  return ret;
}

Status SstFileDumper::ReadTableProperties(
    std::shared_ptr<const TableProperties>* table_properties) {
  if (!table_reader_) {
    return init_result_;
  }

  *table_properties = table_reader_->GetTableProperties();
  return init_result_;
}

namespace {
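
// print_help() documents the sst_dump command line; the flags map one-to-one
// onto the parsing loop in SSTDumpTool::Run() below. Two illustrative
// invocations (the paths are hypothetical):
//
//   sst_dump --file=/path/to/db --command=scan --read_num=5 --output_hex
//   sst_dump --file=/path/to/000123.sst --command=recompress \
//       --set_block_size=4096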
void print_help() {
  fprintf(
      stderr,
      R"(sst_dump --file=<data_dir_OR_sst_file> [--command=check|scan|raw|verify|recompress]
    --file=<data_dir_OR_sst_file>
      Path to SST file or directory containing SST files

    --env_uri=<uri of underlying Env>
      URI of underlying Env

    --command=check|scan|raw|verify|recompress
        check: Iterate over entries in files but don't print anything except if an error is encountered (default command)
        scan: Iterate over entries in files and print them to screen
        raw: Dump all the table contents to <file_name>_dump.txt
        verify: Iterate all the blocks in files verifying checksum to detect possible corruption but don't print anything except if a corruption is encountered
        recompress: reports the SST file size if recompressed with different
                    compression types

    --output_hex
      Can be combined with scan command to print the keys and values in Hex

    --decode_blob_index
      Decode blob indexes and print them in a human-readable format during scans.

    --from=<user_key>
      Key to start reading from when executing check|scan

    --to=<user_key>
      Key to stop reading at when executing check|scan

    --prefix=<user_key>
      Returns all keys with this prefix when executing check|scan
      Cannot be used in conjunction with --from

    --read_num=<num>
      Maximum number of entries to read when executing check|scan

    --verify_checksum
      Verify file checksum when executing check|scan

    --input_key_hex
      Can be combined with --from and --to to indicate that these values are encoded in Hex

    --show_properties
      Print table properties after iterating over the file when executing
      check|scan|raw

    --show_summary
      Print the aggregated table properties (file count and total data, index
      and filter block sizes) after iterating over all files

    --set_block_size=<block_size>
      Can be combined with --command=recompress to set the block size that will
      be used when trying different compression algorithms

    --compression_types=<comma-separated list of CompressionType members, e.g.,
      kSnappyCompression>
      Can be combined with --command=recompress to run recompression for this
      list of compression types

    --parse_internal_key=<0xKEY>
      Convenience option to parse an internal key on the command line. Dumps the
      internal key in hex format {'key' @ SN: type}
)");
}

}  // namespace
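
// SSTDumpTool::Run() is the sst_dump entry point. It parses the flags
// documented in print_help(), optionally loads a custom Env from --env_uri,
// expands --file into the list of ".sst" files it names (when it is a
// directory), and runs the requested command through one SstFileDumper per
// file.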
int SSTDumpTool::Run(int argc, char** argv, Options options) {
  const char* env_uri = nullptr;
  const char* dir_or_file = nullptr;
  uint64_t read_num = std::numeric_limits<uint64_t>::max();
  std::string command;

  char junk;
  uint64_t n;
  bool verify_checksum = false;
  bool output_hex = false;
  bool decode_blob_index = false;
  bool input_key_hex = false;
  bool has_from = false;
  bool has_to = false;
  bool use_from_as_prefix = false;
  bool show_properties = false;
  bool show_summary = false;
  bool set_block_size = false;
  std::string from_key;
  std::string to_key;
  std::string block_size_str;
  size_t block_size = 0;
  std::vector<std::pair<CompressionType, const char*>> compression_types;
  uint64_t total_num_files = 0;
  uint64_t total_num_data_blocks = 0;
  uint64_t total_data_block_size = 0;
  uint64_t total_index_block_size = 0;
  uint64_t total_filter_block_size = 0;
  for (int i = 1; i < argc; i++) {
    if (strncmp(argv[i], "--env_uri=", 10) == 0) {
      env_uri = argv[i] + 10;
    } else if (strncmp(argv[i], "--file=", 7) == 0) {
      dir_or_file = argv[i] + 7;
    } else if (strcmp(argv[i], "--output_hex") == 0) {
      output_hex = true;
    } else if (strcmp(argv[i], "--decode_blob_index") == 0) {
      decode_blob_index = true;
    } else if (strcmp(argv[i], "--input_key_hex") == 0) {
      input_key_hex = true;
    } else if (sscanf(argv[i], "--read_num=%lu%c", (unsigned long*)&n, &junk) ==
               1) {
      read_num = n;
    } else if (strcmp(argv[i], "--verify_checksum") == 0) {
      verify_checksum = true;
    } else if (strncmp(argv[i], "--command=", 10) == 0) {
      command = argv[i] + 10;
    } else if (strncmp(argv[i], "--from=", 7) == 0) {
      from_key = argv[i] + 7;
      has_from = true;
    } else if (strncmp(argv[i], "--to=", 5) == 0) {
      to_key = argv[i] + 5;
      has_to = true;
    } else if (strncmp(argv[i], "--prefix=", 9) == 0) {
      from_key = argv[i] + 9;
      use_from_as_prefix = true;
    } else if (strcmp(argv[i], "--show_properties") == 0) {
      show_properties = true;
    } else if (strcmp(argv[i], "--show_summary") == 0) {
      show_summary = true;
    } else if (strncmp(argv[i], "--set_block_size=", 17) == 0) {
      set_block_size = true;
      block_size_str = argv[i] + 17;
      std::istringstream iss(block_size_str);
      iss >> block_size;
      if (iss.fail()) {
        fprintf(stderr, "block size must be numeric\n");
        exit(1);
      }
    } else if (strncmp(argv[i], "--compression_types=", 20) == 0) {
      std::string compression_types_csv = argv[i] + 20;
      std::istringstream iss(compression_types_csv);
      std::string compression_type;
      while (std::getline(iss, compression_type, ',')) {
        auto iter = std::find_if(
            kCompressions.begin(), kCompressions.end(),
            [&compression_type](std::pair<CompressionType, const char*> curr) {
              return curr.second == compression_type;
            });
        if (iter == kCompressions.end()) {
          fprintf(stderr, "%s is not a valid CompressionType\n",
                  compression_type.c_str());
          exit(1);
        }
        compression_types.emplace_back(*iter);
      }
    } else if (strncmp(argv[i], "--parse_internal_key=", 21) == 0) {
      std::string in_key(argv[i] + 21);
      try {
        in_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(in_key);
      } catch (...) {
        std::cerr << "ERROR: Invalid key input '" << in_key
                  << "' Use 0x{hex representation of internal rocksdb key}"
                  << std::endl;
        return -1;
      }
      Slice sl_key = ROCKSDB_NAMESPACE::Slice(in_key);
      ParsedInternalKey ikey;
      int retc = 0;
      if (!ParseInternalKey(sl_key, &ikey)) {
        std::cerr << "Internal Key [" << sl_key.ToString(true /* in hex*/)
                  << "] parse error!\n";
        retc = -1;
      }
      fprintf(stdout, "key=%s\n", ikey.DebugString(true).c_str());
      return retc;
    } else {
      fprintf(stderr, "Unrecognized argument '%s'\n\n", argv[i]);
      print_help();
      exit(1);
    }
  }

  if (use_from_as_prefix && has_from) {
    fprintf(stderr, "Cannot specify --prefix and --from\n\n");
    exit(1);
  }

  if (input_key_hex) {
    if (has_from || use_from_as_prefix) {
      from_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(from_key);
    }
    if (has_to) {
      to_key = ROCKSDB_NAMESPACE::LDBCommand::HexToString(to_key);
    }
  }

  if (dir_or_file == nullptr) {
    fprintf(stderr, "file or directory must be specified.\n\n");
    print_help();
    exit(1);
  }

  std::shared_ptr<ROCKSDB_NAMESPACE::Env> env_guard;

  // If the caller of SSTDumpTool::Run(...) does not specify an env other than
  // Env::Default(), then try to load a custom env based on env_uri.
  // Otherwise, the caller is responsible for creating the custom env.
  if (!options.env || options.env == ROCKSDB_NAMESPACE::Env::Default()) {
    Env* env = Env::Default();
    Status s = Env::LoadEnv(env_uri ? env_uri : "", &env, &env_guard);
    if (!s.ok() && !s.IsNotFound()) {
      fprintf(stderr, "LoadEnv: %s\n", s.ToString().c_str());
      exit(1);
    }
    options.env = env;
  } else {
    fprintf(stdout, "options.env is %p\n", options.env);
  }

  std::vector<std::string> filenames;
  ROCKSDB_NAMESPACE::Env* env = options.env;
  ROCKSDB_NAMESPACE::Status st = env->GetChildren(dir_or_file, &filenames);
  bool dir = true;
  if (!st.ok()) {
    filenames.clear();
    filenames.push_back(dir_or_file);
    dir = false;
  }

  fprintf(stdout, "from [%s] to [%s]\n",
          ROCKSDB_NAMESPACE::Slice(from_key).ToString(true).c_str(),
          ROCKSDB_NAMESPACE::Slice(to_key).ToString(true).c_str());

  uint64_t total_read = 0;
  for (size_t i = 0; i < filenames.size(); i++) {
    std::string filename = filenames.at(i);
    if (filename.length() <= 4 ||
        filename.rfind(".sst") != filename.length() - 4) {
      // ignore
      continue;
    }
    if (dir) {
      filename = std::string(dir_or_file) + "/" + filename;
    }

    ROCKSDB_NAMESPACE::SstFileDumper dumper(options, filename, verify_checksum,
                                            output_hex, decode_blob_index);
    if (!dumper.getStatus().ok()) {
      fprintf(stderr, "%s: %s\n", filename.c_str(),
              dumper.getStatus().ToString().c_str());
      continue;
    }

    if (command == "recompress") {
      dumper.ShowAllCompressionSizes(
          set_block_size ? block_size : 16384,
          compression_types.empty() ? kCompressions : compression_types);
      return 0;
    }

    if (command == "raw") {
      std::string out_filename = filename.substr(0, filename.length() - 4);
      out_filename.append("_dump.txt");

      st = dumper.DumpTable(out_filename);
      if (!st.ok()) {
        fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str());
        exit(1);
      } else {
        fprintf(stdout, "raw dump written to file %s\n", &out_filename[0]);
      }
      continue;
    }

    // Scan all files in the given file path.
    if (command == "" || command == "scan" || command == "check") {
      st = dumper.ReadSequential(
          command == "scan", read_num > 0 ? (read_num - total_read) : read_num,
          has_from || use_from_as_prefix, from_key, has_to, to_key,
          use_from_as_prefix);
      if (!st.ok()) {
        fprintf(stderr, "%s: %s\n", filename.c_str(),
                st.ToString().c_str());
      }
      total_read += dumper.GetReadNumber();
      if (read_num > 0 && total_read > read_num) {
        break;
      }
    }

    if (command == "verify") {
      st = dumper.VerifyChecksum();
      if (!st.ok()) {
        fprintf(stderr, "%s is corrupted: %s\n", filename.c_str(),
                st.ToString().c_str());
      } else {
        fprintf(stdout, "The file is ok\n");
      }
      continue;
    }

    if (show_properties || show_summary) {
      const ROCKSDB_NAMESPACE::TableProperties* table_properties;

      std::shared_ptr<const ROCKSDB_NAMESPACE::TableProperties>
          table_properties_from_reader;
      st = dumper.ReadTableProperties(&table_properties_from_reader);
      if (!st.ok()) {
        fprintf(stderr, "%s: %s\n", filename.c_str(), st.ToString().c_str());
        fprintf(stderr, "Try to use initial table properties\n");
        table_properties = dumper.GetInitTableProperties();
      } else {
        table_properties = table_properties_from_reader.get();
      }
      if (table_properties != nullptr) {
        if (show_properties) {
          fprintf(stdout,
                  "Table Properties:\n"
                  "------------------------------\n"
                  "  %s",
                  table_properties->ToString("\n  ", ": ").c_str());
        }
        total_num_files += 1;
        total_num_data_blocks += table_properties->num_data_blocks;
        total_data_block_size += table_properties->data_size;
        total_index_block_size += table_properties->index_size;
        total_filter_block_size += table_properties->filter_size;
        if (show_properties) {
          fprintf(stdout,
                  "Raw user collected properties\n"
                  "------------------------------\n");
          for (const auto& kv : table_properties->user_collected_properties) {
            std::string prop_name = kv.first;
            std::string prop_val = Slice(kv.second).ToString(true);
            fprintf(stdout, "  # %s: 0x%s\n", prop_name.c_str(),
                    prop_val.c_str());
          }
        }
      } else {
        fprintf(stderr, "Reader unexpectedly returned null properties\n");
      }
    }
  }
  if (show_summary) {
    fprintf(stdout, "total number of files: %" PRIu64 "\n", total_num_files);
    fprintf(stdout, "total number of data blocks: %" PRIu64 "\n",
            total_num_data_blocks);
    fprintf(stdout, "total data block size: %" PRIu64 "\n",
            total_data_block_size);
    fprintf(stdout, "total index block size: %" PRIu64 "\n",
            total_index_block_size);
    fprintf(stdout, "total filter block size: %" PRIu64 "\n",
            total_filter_block_size);
  }
  return 0;
}
}  // namespace ROCKSDB_NAMESPACE

#endif  // ROCKSDB_LITE