2016-02-10 00:12:00 +01:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-16 01:03:42 +02:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2013-10-16 23:59:46 +02:00
|
|
|
//
|
2014-04-15 22:39:26 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
2016-05-07 01:09:09 +02:00
|
|
|
#include "rocksdb/utilities/ldb_cmd.h"
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
|
2015-09-09 00:46:16 +02:00
|
|
|
#ifndef __STDC_FORMAT_MACROS
|
|
|
|
#define __STDC_FORMAT_MACROS
|
|
|
|
#endif
|
|
|
|
|
|
|
|
#include <inttypes.h>
|
|
|
|
|
2013-06-21 01:02:36 +02:00
|
|
|
#include "db/db_impl.h"
|
2016-04-01 20:06:06 +02:00
|
|
|
#include "db/dbformat.h"
|
|
|
|
#include "db/log_reader.h"
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
#include "db/write_batch_internal.h"
|
2016-04-01 20:06:06 +02:00
|
|
|
#include "port/dirent.h"
|
[CF] Rethink table cache
Summary:
Adapting table cache to column families is interesting. We want table cache to be global LRU, so if some column families are use not as often as others, we want them to be evicted from cache. However, current TableCache object also constructs tables on its own. If table is not found in the cache, TableCache automatically creates new table. We want each column family to be able to specify different table factory.
To solve the problem, we still have a single LRU, but we provide the LRUCache object to TableCache on construction. We have one TableCache per column family, but the underyling cache is shared by all TableCache objects.
This allows us to have a global LRU, but still be able to support different table factories for different column families. Also, in the future it will also be able to support different directories for different column families.
Test Plan: make check
Reviewers: dhruba, haobo, kailiu, sdong
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15915
2014-02-05 18:07:55 +01:00
|
|
|
#include "rocksdb/cache.h"
|
2014-11-24 19:04:16 +01:00
|
|
|
#include "rocksdb/table_properties.h"
|
2016-07-14 23:09:31 +02:00
|
|
|
#include "rocksdb/utilities/backupable_db.h"
|
2017-03-21 19:49:08 +01:00
|
|
|
#include "rocksdb/utilities/checkpoint.h"
|
2017-05-12 23:59:57 +02:00
|
|
|
#include "rocksdb/utilities/debug.h"
|
2017-01-26 00:54:09 +01:00
|
|
|
#include "rocksdb/utilities/object_registry.h"
|
2017-04-20 19:16:13 +02:00
|
|
|
#include "rocksdb/utilities/options_util.h"
|
2016-04-01 20:06:06 +02:00
|
|
|
#include "rocksdb/write_batch.h"
|
2016-06-21 03:01:03 +02:00
|
|
|
#include "rocksdb/write_buffer_manager.h"
|
2015-10-13 00:06:38 +02:00
|
|
|
#include "table/scoped_arena_iterator.h"
|
2016-05-07 01:09:09 +02:00
|
|
|
#include "tools/ldb_cmd_impl.h"
|
2015-10-15 02:08:28 +02:00
|
|
|
#include "tools/sst_dump_tool_imp.h"
|
2017-07-29 01:23:50 +02:00
|
|
|
#include "util/cast_util.h"
|
2013-06-19 04:57:54 +02:00
|
|
|
#include "util/coding.h"
|
2017-04-04 03:27:24 +02:00
|
|
|
#include "util/filename.h"
|
2016-04-01 20:06:06 +02:00
|
|
|
#include "util/stderr_logger.h"
|
2015-04-24 04:17:57 +02:00
|
|
|
#include "util/string_util.h"
|
2014-04-29 05:34:20 +02:00
|
|
|
#include "utilities/ttl/db_ttl_impl.h"
|
2013-06-19 04:57:54 +02:00
|
|
|
|
2015-04-24 04:17:57 +02:00
|
|
|
#include <cstdlib>
|
2014-11-01 03:22:49 +01:00
|
|
|
#include <ctime>
|
2016-12-16 20:17:26 +01:00
|
|
|
#include <fstream>
|
|
|
|
#include <functional>
|
2016-05-07 01:09:09 +02:00
|
|
|
#include <iostream>
|
2014-11-01 03:22:49 +01:00
|
|
|
#include <limits>
|
|
|
|
#include <sstream>
|
|
|
|
#include <stdexcept>
|
2016-05-07 01:09:09 +02:00
|
|
|
#include <string>
|
2014-11-01 03:22:49 +01:00
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
namespace rocksdb {
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Names of the command-line arguments recognized by ldb. These are shared
// between option parsing and the help/usage text of the individual commands.
const std::string LDBCommand::ARG_DB = "db";
const std::string LDBCommand::ARG_PATH = "path";
const std::string LDBCommand::ARG_HEX = "hex";
const std::string LDBCommand::ARG_KEY_HEX = "key_hex";
const std::string LDBCommand::ARG_VALUE_HEX = "value_hex";
const std::string LDBCommand::ARG_CF_NAME = "column_family";
const std::string LDBCommand::ARG_TTL = "ttl";
const std::string LDBCommand::ARG_TTL_START = "start_time";
const std::string LDBCommand::ARG_TTL_END = "end_time";
const std::string LDBCommand::ARG_TIMESTAMP = "timestamp";
const std::string LDBCommand::ARG_TRY_LOAD_OPTIONS = "try_load_options";
const std::string LDBCommand::ARG_IGNORE_UNKNOWN_OPTIONS =
    "ignore_unknown_options";
const std::string LDBCommand::ARG_FROM = "from";
const std::string LDBCommand::ARG_TO = "to";
const std::string LDBCommand::ARG_MAX_KEYS = "max_keys";
const std::string LDBCommand::ARG_BLOOM_BITS = "bloom_bits";
const std::string LDBCommand::ARG_FIX_PREFIX_LEN = "fix_prefix_len";
const std::string LDBCommand::ARG_COMPRESSION_TYPE = "compression_type";
const std::string LDBCommand::ARG_COMPRESSION_MAX_DICT_BYTES =
    "compression_max_dict_bytes";
const std::string LDBCommand::ARG_BLOCK_SIZE = "block_size";
const std::string LDBCommand::ARG_AUTO_COMPACTION = "auto_compaction";
const std::string LDBCommand::ARG_DB_WRITE_BUFFER_SIZE = "db_write_buffer_size";
const std::string LDBCommand::ARG_WRITE_BUFFER_SIZE = "write_buffer_size";
const std::string LDBCommand::ARG_FILE_SIZE = "file_size";
const std::string LDBCommand::ARG_CREATE_IF_MISSING = "create_if_missing";
const std::string LDBCommand::ARG_NO_VALUE = "no_value";

// Separator printed between key and value in dump/scan output.
const char* LDBCommand::DELIM = " ==> ";
|
2012-12-06 00:37:03 +01:00
|
|
|
|
2016-01-06 23:19:08 +01:00
|
|
|
namespace {

// Prints every record of the given WAL file to stdout (optionally with a
// header line and the record values); defined later in this file. Errors are
// reported through exec_state.
void DumpWalFile(std::string wal_file, bool print_header, bool print_values,
                 bool is_write_committed, LDBCommandExecuteResult* exec_state);

// Prints the contents (and optionally the table properties) of the given SST
// file; defined later in this file.
void DumpSstFile(std::string filename, bool output_hex, bool show_properties);

};
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Convenience overload: converts raw argc/argv (skipping argv[0], the program
// name) into a string vector and delegates to the vector-based overload with
// the default command selector.
LDBCommand* LDBCommand::InitFromCmdLineArgs(
    int argc, char** argv, const Options& options,
    const LDBOptions& ldb_options,
    const std::vector<ColumnFamilyDescriptor>* column_families) {
  std::vector<std::string> args;
  if (argc > 1) {
    args.assign(argv + 1, argv + argc);
  }
  return InitFromCmdLineArgs(args, options, ldb_options, column_families,
                             SelectCommand);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Parse the command-line arguments and create the appropriate LDBCommand2
|
|
|
|
* instance.
|
|
|
|
* The command line arguments must be in the following format:
|
2014-11-01 03:22:49 +01:00
|
|
|
* ./ldb --db=PATH_TO_DB [--commonOpt1=commonOpt1Val] ..
|
|
|
|
* COMMAND <PARAM1> <PARAM2> ... [-cmdSpecificOpt1=cmdSpecificOpt1Val] ..
|
2013-01-11 20:09:23 +01:00
|
|
|
* This is similar to the command line format used by HBaseClientTool.
|
|
|
|
* Command name is not included in args.
|
2013-02-20 03:12:20 +01:00
|
|
|
* Returns nullptr if the command-line cannot be parsed.
|
2013-01-11 20:09:23 +01:00
|
|
|
*/
|
2013-04-12 05:21:49 +02:00
|
|
|
LDBCommand* LDBCommand::InitFromCmdLineArgs(
|
2016-05-20 16:42:18 +02:00
|
|
|
const std::vector<std::string>& args, const Options& options,
|
2016-01-23 00:46:32 +01:00
|
|
|
const LDBOptions& ldb_options,
|
2018-03-05 22:08:17 +01:00
|
|
|
const std::vector<ColumnFamilyDescriptor>* /*column_families*/,
|
2016-05-27 22:04:07 +02:00
|
|
|
const std::function<LDBCommand*(const ParsedParams&)>& selector) {
|
|
|
|
// --x=y command line arguments are added as x->y map entries in
|
|
|
|
// parsed_params.option_map.
|
|
|
|
//
|
|
|
|
// Command-line arguments of the form --hex end up in this array as hex to
|
|
|
|
// parsed_params.flags
|
|
|
|
ParsedParams parsed_params;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Everything other than option_map and flags. Represents commands
|
2014-11-01 03:22:49 +01:00
|
|
|
// and their parameters. For eg: put key1 value1 go into this vector.
|
2016-05-20 16:42:18 +02:00
|
|
|
std::vector<std::string> cmdTokens;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
const std::string OPTION_PREFIX = "--";
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2013-06-21 01:02:36 +02:00
|
|
|
for (const auto& arg : args) {
|
2014-11-01 03:22:49 +01:00
|
|
|
if (arg[0] == '-' && arg[1] == '-'){
|
2016-05-20 16:42:18 +02:00
|
|
|
std::vector<std::string> splits = StringSplit(arg, '=');
|
2013-01-11 20:09:23 +01:00
|
|
|
if (splits.size() == 2) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string optionKey = splits[0].substr(OPTION_PREFIX.size());
|
2016-05-27 22:04:07 +02:00
|
|
|
parsed_params.option_map[optionKey] = splits[1];
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string optionKey = splits[0].substr(OPTION_PREFIX.size());
|
2016-05-27 22:04:07 +02:00
|
|
|
parsed_params.flags.push_back(optionKey);
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
2012-12-06 00:37:03 +01:00
|
|
|
} else {
|
2013-06-21 01:02:36 +02:00
|
|
|
cmdTokens.push_back(arg);
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cmdTokens.size() < 1) {
|
|
|
|
fprintf(stderr, "Command not specified!");
|
2013-02-20 03:12:20 +01:00
|
|
|
return nullptr;
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
2016-05-27 22:04:07 +02:00
|
|
|
parsed_params.cmd = cmdTokens[0];
|
|
|
|
parsed_params.cmd_params.assign(cmdTokens.begin() + 1, cmdTokens.end());
|
|
|
|
|
|
|
|
LDBCommand* command = selector(parsed_params);
|
2013-04-12 05:21:49 +02:00
|
|
|
|
|
|
|
if (command) {
|
2014-06-20 08:54:13 +02:00
|
|
|
command->SetDBOptions(options);
|
|
|
|
command->SetLDBOptions(ldb_options);
|
2013-04-12 05:21:49 +02:00
|
|
|
}
|
|
|
|
return command;
|
|
|
|
}
|
|
|
|
|
2016-05-27 22:04:07 +02:00
|
|
|
// Maps the parsed command name to a concrete LDBCommand subclass, forwarding
// the positional parameters, --key=value options and bare flags to its
// constructor. Returns nullptr when the name matches no known command (the
// caller reports the error).
LDBCommand* LDBCommand::SelectCommand(const ParsedParams& parsed_params) {
  if (parsed_params.cmd == GetCommand::Name()) {
    return new GetCommand(parsed_params.cmd_params, parsed_params.option_map,
                          parsed_params.flags);
  } else if (parsed_params.cmd == PutCommand::Name()) {
    return new PutCommand(parsed_params.cmd_params, parsed_params.option_map,
                          parsed_params.flags);
  } else if (parsed_params.cmd == BatchPutCommand::Name()) {
    return new BatchPutCommand(parsed_params.cmd_params,
                               parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == ScanCommand::Name()) {
    return new ScanCommand(parsed_params.cmd_params, parsed_params.option_map,
                           parsed_params.flags);
  } else if (parsed_params.cmd == DeleteCommand::Name()) {
    return new DeleteCommand(parsed_params.cmd_params, parsed_params.option_map,
                             parsed_params.flags);
  } else if (parsed_params.cmd == DeleteRangeCommand::Name()) {
    return new DeleteRangeCommand(parsed_params.cmd_params,
                                  parsed_params.option_map,
                                  parsed_params.flags);
  } else if (parsed_params.cmd == ApproxSizeCommand::Name()) {
    return new ApproxSizeCommand(parsed_params.cmd_params,
                                 parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == DBQuerierCommand::Name()) {
    return new DBQuerierCommand(parsed_params.cmd_params,
                                parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == CompactorCommand::Name()) {
    return new CompactorCommand(parsed_params.cmd_params,
                                parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == WALDumperCommand::Name()) {
    return new WALDumperCommand(parsed_params.cmd_params,
                                parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == ReduceDBLevelsCommand::Name()) {
    return new ReduceDBLevelsCommand(parsed_params.cmd_params,
                                     parsed_params.option_map,
                                     parsed_params.flags);
  } else if (parsed_params.cmd == ChangeCompactionStyleCommand::Name()) {
    return new ChangeCompactionStyleCommand(parsed_params.cmd_params,
                                            parsed_params.option_map,
                                            parsed_params.flags);
  } else if (parsed_params.cmd == DBDumperCommand::Name()) {
    return new DBDumperCommand(parsed_params.cmd_params,
                               parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == DBLoaderCommand::Name()) {
    return new DBLoaderCommand(parsed_params.cmd_params,
                               parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == ManifestDumpCommand::Name()) {
    return new ManifestDumpCommand(parsed_params.cmd_params,
                                   parsed_params.option_map,
                                   parsed_params.flags);
  } else if (parsed_params.cmd == ListColumnFamiliesCommand::Name()) {
    return new ListColumnFamiliesCommand(parsed_params.cmd_params,
                                         parsed_params.option_map,
                                         parsed_params.flags);
  } else if (parsed_params.cmd == CreateColumnFamilyCommand::Name()) {
    return new CreateColumnFamilyCommand(parsed_params.cmd_params,
                                         parsed_params.option_map,
                                         parsed_params.flags);
  } else if (parsed_params.cmd == DBFileDumperCommand::Name()) {
    return new DBFileDumperCommand(parsed_params.cmd_params,
                                   parsed_params.option_map,
                                   parsed_params.flags);
  } else if (parsed_params.cmd == InternalDumpCommand::Name()) {
    return new InternalDumpCommand(parsed_params.cmd_params,
                                   parsed_params.option_map,
                                   parsed_params.flags);
  } else if (parsed_params.cmd == CheckConsistencyCommand::Name()) {
    return new CheckConsistencyCommand(parsed_params.cmd_params,
                                       parsed_params.option_map,
                                       parsed_params.flags);
  } else if (parsed_params.cmd == CheckPointCommand::Name()) {
    return new CheckPointCommand(parsed_params.cmd_params,
                                 parsed_params.option_map,
                                 parsed_params.flags);
  } else if (parsed_params.cmd == RepairCommand::Name()) {
    return new RepairCommand(parsed_params.cmd_params, parsed_params.option_map,
                             parsed_params.flags);
  } else if (parsed_params.cmd == BackupCommand::Name()) {
    return new BackupCommand(parsed_params.cmd_params, parsed_params.option_map,
                             parsed_params.flags);
  } else if (parsed_params.cmd == RestoreCommand::Name()) {
    return new RestoreCommand(parsed_params.cmd_params,
                              parsed_params.option_map, parsed_params.flags);
  } else if (parsed_params.cmd == WriteExternalSstFilesCommand::Name()) {
    return new WriteExternalSstFilesCommand(parsed_params.cmd_params,
                                            parsed_params.option_map,
                                            parsed_params.flags);
  } else if (parsed_params.cmd == IngestExternalSstFilesCommand::Name()) {
    return new IngestExternalSstFilesCommand(parsed_params.cmd_params,
                                             parsed_params.option_map,
                                             parsed_params.flags);
  }
  // Unrecognized command name.
  return nullptr;
}
|
|
|
|
|
2016-05-07 01:09:09 +02:00
|
|
|
/* Run the command, and return the execute result. */
|
|
|
|
void LDBCommand::Run() {
|
|
|
|
if (!exec_state_.IsNotStarted()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (db_ == nullptr && !NoDBOpen()) {
|
|
|
|
OpenDB();
|
2017-04-20 19:16:13 +02:00
|
|
|
if (exec_state_.IsFailed() && try_load_options_) {
|
|
|
|
// We don't always return if there is a failure because a WAL file or
|
|
|
|
// manifest file can be given to "dump" command so we should continue.
|
|
|
|
// --try_load_options is not valid in those cases.
|
|
|
|
return;
|
|
|
|
}
|
2016-05-07 01:09:09 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// We'll intentionally proceed even if the DB can't be opened because users
|
|
|
|
// can also specify a filename, not just a directory.
|
|
|
|
DoCommand();
|
|
|
|
|
|
|
|
if (exec_state_.IsNotStarted()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Succeed("");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (db_ != nullptr) {
|
|
|
|
CloseDB();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Base-class constructor: stores the raw parsed options/flags and derives the
// common settings (DB path, column family, hex modes, TTL, option-loading
// behavior) every command shares.
LDBCommand::LDBCommand(const std::map<std::string, std::string>& options,
                       const std::vector<std::string>& flags, bool is_read_only,
                       const std::vector<std::string>& valid_cmd_line_options)
    : db_(nullptr),
      db_ttl_(nullptr),
      is_read_only_(is_read_only),
      is_key_hex_(false),
      is_value_hex_(false),
      is_db_ttl_(false),
      timestamp_(false),
      try_load_options_(false),
      ignore_unknown_options_(false),
      create_if_missing_(false),
      option_map_(options),
      flags_(flags),
      valid_cmd_line_options_(valid_cmd_line_options) {
  // Pull the DB path out of the option map when the user supplied one.
  const auto db_entry = options.find(ARG_DB);
  if (db_entry != options.end()) {
    db_path_ = db_entry->second;
  }

  // Operate on the requested column family, defaulting to "default".
  const auto cf_entry = options.find(ARG_CF_NAME);
  column_family_name_ = (cf_entry != options.end()) ? cf_entry->second
                                                    : kDefaultColumnFamilyName;

  // Boolean behavior switches arrive as bare flags.
  is_key_hex_ = IsKeyHex(options, flags);
  is_value_hex_ = IsValueHex(options, flags);
  is_db_ttl_ = IsFlagPresent(flags, ARG_TTL);
  timestamp_ = IsFlagPresent(flags, ARG_TIMESTAMP);
  try_load_options_ = IsFlagPresent(flags, ARG_TRY_LOAD_OPTIONS);
  ignore_unknown_options_ = IsFlagPresent(flags, ARG_IGNORE_UNKNOWN_OPTIONS);
}
|
|
|
|
|
|
|
|
void LDBCommand::OpenDB() {
|
2017-04-20 19:16:13 +02:00
|
|
|
if (!create_if_missing_ && try_load_options_) {
|
2017-11-29 02:20:47 +01:00
|
|
|
Status s = LoadLatestOptions(db_path_, Env::Default(), &options_,
|
2017-06-14 01:55:08 +02:00
|
|
|
&column_families_, ignore_unknown_options_);
|
2017-12-07 01:38:49 +01:00
|
|
|
if (!s.ok() && !s.IsNotFound()) {
|
2017-04-20 19:16:13 +02:00
|
|
|
// Option file exists but load option file error.
|
|
|
|
std::string msg = s.ToString();
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(msg);
|
|
|
|
db_ = nullptr;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
2017-11-29 02:20:47 +01:00
|
|
|
options_ = PrepareOptionsForOpenDB();
|
2016-05-07 01:09:09 +02:00
|
|
|
if (!exec_state_.IsNotStarted()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// Open the DB.
|
|
|
|
Status st;
|
|
|
|
std::vector<ColumnFamilyHandle*> handles_opened;
|
|
|
|
if (is_db_ttl_) {
|
|
|
|
// ldb doesn't yet support TTL DB with multiple column families
|
|
|
|
if (!column_family_name_.empty() || !column_families_.empty()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"ldb doesn't support TTL DB with multiple column families");
|
|
|
|
}
|
|
|
|
if (is_read_only_) {
|
2017-11-29 02:20:47 +01:00
|
|
|
st = DBWithTTL::Open(options_, db_path_, &db_ttl_, 0, true);
|
2016-05-07 01:09:09 +02:00
|
|
|
} else {
|
2017-11-29 02:20:47 +01:00
|
|
|
st = DBWithTTL::Open(options_, db_path_, &db_ttl_);
|
2016-05-07 01:09:09 +02:00
|
|
|
}
|
|
|
|
db_ = db_ttl_;
|
|
|
|
} else {
|
2017-11-29 02:20:47 +01:00
|
|
|
if (column_families_.empty()) {
|
2016-05-07 01:09:09 +02:00
|
|
|
// Try to figure out column family lists
|
|
|
|
std::vector<std::string> cf_list;
|
|
|
|
st = DB::ListColumnFamilies(DBOptions(), db_path_, &cf_list);
|
|
|
|
// There is possible the DB doesn't exist yet, for "create if not
|
|
|
|
// "existing case". The failure is ignored here. We rely on DB::Open()
|
|
|
|
// to give us the correct error message for problem with opening
|
|
|
|
// existing DB.
|
|
|
|
if (st.ok() && cf_list.size() > 1) {
|
|
|
|
// Ignore single column family DB.
|
|
|
|
for (auto cf_name : cf_list) {
|
2017-11-29 02:20:47 +01:00
|
|
|
column_families_.emplace_back(cf_name, options_);
|
2016-05-07 01:09:09 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (is_read_only_) {
|
|
|
|
if (column_families_.empty()) {
|
2017-11-29 02:20:47 +01:00
|
|
|
st = DB::OpenForReadOnly(options_, db_path_, &db_);
|
2016-05-07 01:09:09 +02:00
|
|
|
} else {
|
2017-11-29 02:20:47 +01:00
|
|
|
st = DB::OpenForReadOnly(options_, db_path_, column_families_,
|
2016-05-07 01:09:09 +02:00
|
|
|
&handles_opened, &db_);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if (column_families_.empty()) {
|
2017-11-29 02:20:47 +01:00
|
|
|
st = DB::Open(options_, db_path_, &db_);
|
2016-05-07 01:09:09 +02:00
|
|
|
} else {
|
2017-12-01 08:39:56 +01:00
|
|
|
st = DB::Open(options_, db_path_, column_families_, &handles_opened,
|
|
|
|
&db_);
|
2016-05-07 01:09:09 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!st.ok()) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string msg = st.ToString();
|
2016-05-07 01:09:09 +02:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(msg);
|
|
|
|
} else if (!handles_opened.empty()) {
|
|
|
|
assert(handles_opened.size() == column_families_.size());
|
|
|
|
bool found_cf_name = false;
|
|
|
|
for (size_t i = 0; i < handles_opened.size(); i++) {
|
|
|
|
cf_handles_[column_families_[i].name] = handles_opened[i];
|
|
|
|
if (column_family_name_ == column_families_[i].name) {
|
|
|
|
found_cf_name = true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found_cf_name) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Non-existing column family " + column_family_name_);
|
|
|
|
CloseDB();
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
// We successfully opened DB in single column family mode.
|
|
|
|
assert(column_families_.empty());
|
|
|
|
if (column_family_name_ != kDefaultColumnFamilyName) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Non-existing column family " + column_family_name_);
|
|
|
|
CloseDB();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void LDBCommand::CloseDB() {
|
|
|
|
if (db_ != nullptr) {
|
|
|
|
for (auto& pair : cf_handles_) {
|
|
|
|
delete pair.second;
|
|
|
|
}
|
|
|
|
delete db_;
|
|
|
|
db_ = nullptr;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Returns the handle for the column family the command operates on. When the
// DB was opened with explicit column families, looks up column_family_name_
// in cf_handles_ and records a failure in exec_state_ if it is missing (the
// default handle is still returned in that case, matching prior behavior).
ColumnFamilyHandle* LDBCommand::GetCfHandle() {
  if (!cf_handles_.empty()) {
    auto found = cf_handles_.find(column_family_name_);
    if (found != cf_handles_.end()) {
      return found->second;
    }
    exec_state_ = LDBCommandExecuteResult::Failed(
        "Cannot find column family " + column_family_name_);
  }
  return db_->DefaultColumnFamily();
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
std::vector<std::string> LDBCommand::BuildCmdLineOptions(
|
|
|
|
std::vector<std::string> options) {
|
|
|
|
std::vector<std::string> ret = {ARG_DB,
|
|
|
|
ARG_BLOOM_BITS,
|
|
|
|
ARG_BLOCK_SIZE,
|
|
|
|
ARG_AUTO_COMPACTION,
|
|
|
|
ARG_COMPRESSION_TYPE,
|
|
|
|
ARG_COMPRESSION_MAX_DICT_BYTES,
|
|
|
|
ARG_WRITE_BUFFER_SIZE,
|
|
|
|
ARG_FILE_SIZE,
|
|
|
|
ARG_FIX_PREFIX_LEN,
|
2017-04-20 19:16:13 +02:00
|
|
|
ARG_TRY_LOAD_OPTIONS,
|
2017-06-14 01:55:08 +02:00
|
|
|
ARG_IGNORE_UNKNOWN_OPTIONS,
|
2016-05-20 16:42:18 +02:00
|
|
|
ARG_CF_NAME};
|
2016-05-07 01:09:09 +02:00
|
|
|
ret.insert(ret.end(), options.begin(), options.end());
|
|
|
|
return ret;
|
|
|
|
}
|
2013-04-12 05:21:49 +02:00
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
/**
|
|
|
|
* Parses the specific integer option and fills in the value.
|
|
|
|
* Returns true if the option is found.
|
|
|
|
* Returns false if the option is not found or if there is an error parsing the
|
|
|
|
* value. If there is an error, the specified exec_state is also
|
|
|
|
* updated.
|
|
|
|
*/
|
2016-05-20 16:42:18 +02:00
|
|
|
bool LDBCommand::ParseIntOption(
|
2018-03-05 22:08:17 +01:00
|
|
|
const std::map<std::string, std::string>& /*options*/,
|
2016-05-20 16:42:18 +02:00
|
|
|
const std::string& option, int& value,
|
|
|
|
LDBCommandExecuteResult& exec_state) {
|
|
|
|
std::map<std::string, std::string>::const_iterator itr =
|
|
|
|
option_map_.find(option);
|
2013-04-12 05:21:49 +02:00
|
|
|
if (itr != option_map_.end()) {
|
2013-01-11 20:09:23 +01:00
|
|
|
try {
|
2015-04-24 04:17:57 +02:00
|
|
|
#if defined(CYGWIN)
|
|
|
|
value = strtol(itr->second.c_str(), 0, 10);
|
|
|
|
#else
|
2016-05-20 16:42:18 +02:00
|
|
|
value = std::stoi(itr->second);
|
2015-04-24 04:17:57 +02:00
|
|
|
#endif
|
2013-01-11 20:09:23 +01:00
|
|
|
return true;
|
2016-05-20 16:42:18 +02:00
|
|
|
} catch (const std::invalid_argument&) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state =
|
|
|
|
LDBCommandExecuteResult::Failed(option + " has an invalid value.");
|
2016-05-20 16:42:18 +02:00
|
|
|
} catch (const std::out_of_range&) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state = LDBCommandExecuteResult::Failed(
|
|
|
|
option + " has a value out-of-range.");
|
2012-12-06 00:37:03 +01:00
|
|
|
}
|
|
|
|
}
|
2013-01-11 20:09:23 +01:00
|
|
|
return false;
|
2012-12-06 00:37:03 +01:00
|
|
|
}
|
|
|
|
|
2013-06-21 01:02:36 +02:00
|
|
|
/**
|
|
|
|
* Parses the specified option and fills in the value.
|
|
|
|
* Returns true if the option is found.
|
|
|
|
* Returns false otherwise.
|
|
|
|
*/
|
2016-05-20 16:42:18 +02:00
|
|
|
bool LDBCommand::ParseStringOption(
|
2018-03-05 22:08:17 +01:00
|
|
|
const std::map<std::string, std::string>& /*options*/,
|
2016-05-20 16:42:18 +02:00
|
|
|
const std::string& option, std::string* value) {
|
2013-06-21 01:02:36 +02:00
|
|
|
auto itr = option_map_.find(option);
|
|
|
|
if (itr != option_map_.end()) {
|
|
|
|
*value = itr->second;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
Options LDBCommand::PrepareOptionsForOpenDB() {
|
2017-11-29 02:20:47 +01:00
|
|
|
ColumnFamilyOptions* cf_opts;
|
|
|
|
auto column_families_iter =
|
|
|
|
std::find_if(column_families_.begin(), column_families_.end(),
|
|
|
|
[this](const ColumnFamilyDescriptor& cf_desc) {
|
|
|
|
return cf_desc.name == column_family_name_;
|
|
|
|
});
|
|
|
|
if (column_families_iter != column_families_.end()) {
|
|
|
|
cf_opts = &column_families_iter->options;
|
|
|
|
} else {
|
|
|
|
cf_opts = static_cast<ColumnFamilyOptions*>(&options_);
|
|
|
|
}
|
|
|
|
DBOptions* db_opts = static_cast<DBOptions*>(&options_);
|
|
|
|
db_opts->create_if_missing = false;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
std::map<std::string, std::string>::const_iterator itr;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-08-25 23:22:05 +02:00
|
|
|
BlockBasedTableOptions table_options;
|
2014-10-14 01:08:02 +02:00
|
|
|
bool use_table_options = false;
|
2013-01-11 20:09:23 +01:00
|
|
|
int bits;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (ParseIntOption(option_map_, ARG_BLOOM_BITS, bits, exec_state_)) {
|
2013-01-11 20:09:23 +01:00
|
|
|
if (bits > 0) {
|
2014-10-14 01:08:02 +02:00
|
|
|
use_table_options = true;
|
2014-08-25 23:22:05 +02:00
|
|
|
table_options.filter_policy.reset(NewBloomFilterPolicy(bits));
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed(ARG_BLOOM_BITS + " must be > 0.");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int block_size;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (ParseIntOption(option_map_, ARG_BLOCK_SIZE, block_size, exec_state_)) {
|
2013-01-11 20:09:23 +01:00
|
|
|
if (block_size > 0) {
|
2014-10-14 01:08:02 +02:00
|
|
|
use_table_options = true;
|
2014-08-25 23:22:05 +02:00
|
|
|
table_options.block_size = block_size;
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed(ARG_BLOCK_SIZE + " must be > 0.");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-10-14 01:08:02 +02:00
|
|
|
if (use_table_options) {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->table_factory.reset(NewBlockBasedTableFactory(table_options));
|
2014-10-14 01:08:02 +02:00
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
itr = option_map_.find(ARG_AUTO_COMPACTION);
|
|
|
|
if (itr != option_map_.end()) {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->disable_auto_compactions = !StringToBool(itr->second);
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
itr = option_map_.find(ARG_COMPRESSION_TYPE);
|
|
|
|
if (itr != option_map_.end()) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string comp = itr->second;
|
2013-01-11 20:09:23 +01:00
|
|
|
if (comp == "no") {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->compression = kNoCompression;
|
2013-01-11 20:09:23 +01:00
|
|
|
} else if (comp == "snappy") {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->compression = kSnappyCompression;
|
2013-01-11 20:09:23 +01:00
|
|
|
} else if (comp == "zlib") {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->compression = kZlibCompression;
|
2013-01-11 20:09:23 +01:00
|
|
|
} else if (comp == "bzip2") {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->compression = kBZip2Compression;
|
2014-02-08 03:12:30 +01:00
|
|
|
} else if (comp == "lz4") {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->compression = kLZ4Compression;
|
2014-02-08 03:12:30 +01:00
|
|
|
} else if (comp == "lz4hc") {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->compression = kLZ4HCCompression;
|
2016-04-20 07:54:24 +02:00
|
|
|
} else if (comp == "xpress") {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->compression = kXpressCompression;
|
2015-08-28 00:40:42 +02:00
|
|
|
} else if (comp == "zstd") {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->compression = kZSTD;
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
|
|
|
// Unknown compression.
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed("Unknown compression level: " + comp);
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-11 01:33:47 +02:00
|
|
|
int compression_max_dict_bytes;
|
|
|
|
if (ParseIntOption(option_map_, ARG_COMPRESSION_MAX_DICT_BYTES,
|
|
|
|
compression_max_dict_bytes, exec_state_)) {
|
|
|
|
if (compression_max_dict_bytes >= 0) {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->compression_opts.max_dict_bytes = compression_max_dict_bytes;
|
2016-05-11 01:33:47 +02:00
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
ARG_COMPRESSION_MAX_DICT_BYTES + " must be >= 0.");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-12-02 21:09:20 +01:00
|
|
|
int db_write_buffer_size;
|
|
|
|
if (ParseIntOption(option_map_, ARG_DB_WRITE_BUFFER_SIZE,
|
|
|
|
db_write_buffer_size, exec_state_)) {
|
|
|
|
if (db_write_buffer_size >= 0) {
|
2017-11-29 02:20:47 +01:00
|
|
|
db_opts->db_write_buffer_size = db_write_buffer_size;
|
2014-12-02 21:09:20 +01:00
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(ARG_DB_WRITE_BUFFER_SIZE +
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
" must be >= 0.");
|
2014-12-02 21:09:20 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
int write_buffer_size;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (ParseIntOption(option_map_, ARG_WRITE_BUFFER_SIZE, write_buffer_size,
|
|
|
|
exec_state_)) {
|
2013-01-11 20:09:23 +01:00
|
|
|
if (write_buffer_size > 0) {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->write_buffer_size = write_buffer_size;
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(ARG_WRITE_BUFFER_SIZE +
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
" must be > 0.");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
int file_size;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (ParseIntOption(option_map_, ARG_FILE_SIZE, file_size, exec_state_)) {
|
2013-01-11 20:09:23 +01:00
|
|
|
if (file_size > 0) {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->target_file_size_base = file_size;
|
2012-12-21 07:56:58 +01:00
|
|
|
} else {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed(ARG_FILE_SIZE + " must be > 0.");
|
2012-12-06 00:37:03 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-11-29 02:20:47 +01:00
|
|
|
if (db_opts->db_paths.size() == 0) {
|
|
|
|
db_opts->db_paths.emplace_back(db_path_,
|
|
|
|
std::numeric_limits<uint64_t>::max());
|
2014-07-02 18:54:20 +02:00
|
|
|
}
|
|
|
|
|
2014-10-14 01:08:02 +02:00
|
|
|
int fix_prefix_len;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (ParseIntOption(option_map_, ARG_FIX_PREFIX_LEN, fix_prefix_len,
|
|
|
|
exec_state_)) {
|
2014-10-14 01:08:02 +02:00
|
|
|
if (fix_prefix_len > 0) {
|
2017-11-29 02:20:47 +01:00
|
|
|
cf_opts->prefix_extractor.reset(
|
2014-10-14 01:08:02 +02:00
|
|
|
NewFixedPrefixTransform(static_cast<size_t>(fix_prefix_len)));
|
|
|
|
} else {
|
2014-11-01 03:22:49 +01:00
|
|
|
exec_state_ =
|
2015-03-17 02:08:59 +01:00
|
|
|
LDBCommandExecuteResult::Failed(ARG_FIX_PREFIX_LEN + " must be > 0.");
|
2014-10-14 01:08:02 +02:00
|
|
|
}
|
|
|
|
}
|
2017-11-29 02:20:47 +01:00
|
|
|
// TODO(ajkr): this return value doesn't reflect the CF options changed, so
|
|
|
|
// subcommands that rely on this won't see the effect of CF-related CLI args.
|
|
|
|
// Such subcommands need to be changed to properly support CFs.
|
|
|
|
return options_;
|
2012-12-06 00:37:03 +01:00
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
bool LDBCommand::ParseKeyValue(const std::string& line, std::string* key,
|
|
|
|
std::string* value, bool is_key_hex,
|
|
|
|
bool is_value_hex) {
|
2013-01-11 20:09:23 +01:00
|
|
|
size_t pos = line.find(DELIM);
|
2016-05-20 16:42:18 +02:00
|
|
|
if (pos != std::string::npos) {
|
2013-01-11 20:09:23 +01:00
|
|
|
*key = line.substr(0, pos);
|
|
|
|
*value = line.substr(pos + strlen(DELIM));
|
|
|
|
if (is_key_hex) {
|
|
|
|
*key = HexToString(*key);
|
|
|
|
}
|
|
|
|
if (is_value_hex) {
|
|
|
|
*value = HexToString(*value);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2012-12-06 00:37:03 +01:00
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
/**
|
|
|
|
* Make sure that ONLY the command-line options and flags expected by this
|
|
|
|
* command are specified on the command-line. Extraneous options are usually
|
|
|
|
* the result of user error.
|
|
|
|
* Returns true if all checks pass. Else returns false, and prints an
|
|
|
|
* appropriate error msg to stderr.
|
|
|
|
*/
|
|
|
|
bool LDBCommand::ValidateCmdLineOptions() {
|
2016-05-20 16:42:18 +02:00
|
|
|
for (std::map<std::string, std::string>::const_iterator itr =
|
|
|
|
option_map_.begin();
|
|
|
|
itr != option_map_.end(); ++itr) {
|
|
|
|
if (std::find(valid_cmd_line_options_.begin(),
|
|
|
|
valid_cmd_line_options_.end(),
|
|
|
|
itr->first) == valid_cmd_line_options_.end()) {
|
2013-01-11 20:09:23 +01:00
|
|
|
fprintf(stderr, "Invalid command-line option %s\n", itr->first.c_str());
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
for (std::vector<std::string>::const_iterator itr = flags_.begin();
|
|
|
|
itr != flags_.end(); ++itr) {
|
|
|
|
if (std::find(valid_cmd_line_options_.begin(),
|
|
|
|
valid_cmd_line_options_.end(),
|
|
|
|
*itr) == valid_cmd_line_options_.end()) {
|
2013-01-11 20:09:23 +01:00
|
|
|
fprintf(stderr, "Invalid command-line flag %s\n", itr->c_str());
|
|
|
|
return false;
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-06 23:19:08 +01:00
|
|
|
if (!NoDBOpen() && option_map_.find(ARG_DB) == option_map_.end() &&
|
|
|
|
option_map_.find(ARG_PATH) == option_map_.end()) {
|
|
|
|
fprintf(stderr, "Either %s or %s must be specified.\n", ARG_DB.c_str(),
|
|
|
|
ARG_PATH.c_str());
|
2013-01-11 20:09:23 +01:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string LDBCommand::HexToString(const std::string& str) {
|
|
|
|
std::string result;
|
2016-05-07 01:09:09 +02:00
|
|
|
std::string::size_type len = str.length();
|
|
|
|
if (len < 2 || str[0] != '0' || str[1] != 'x') {
|
|
|
|
fprintf(stderr, "Invalid hex input %s. Must start with 0x\n", str.c_str());
|
|
|
|
throw "Invalid hex input";
|
|
|
|
}
|
|
|
|
if (!Slice(str.data() + 2, len - 2).DecodeHex(&result)) {
|
|
|
|
throw "Invalid hex input";
|
|
|
|
}
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string LDBCommand::StringToHex(const std::string& str) {
|
|
|
|
std::string result("0x");
|
2016-05-07 01:09:09 +02:00
|
|
|
result.append(Slice(str).ToString(true));
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string LDBCommand::PrintKeyValue(const std::string& key,
                                      const std::string& value, bool is_key_hex,
                                      bool is_value_hex) {
  // Render "key<DELIM>value", hex-encoding either side on request.
  std::string row = is_key_hex ? StringToHex(key) : key;
  row += DELIM;
  row += is_value_hex ? StringToHex(value) : value;
  return row;
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string LDBCommand::PrintKeyValue(const std::string& key,
|
|
|
|
const std::string& value, bool is_hex) {
|
2016-05-07 01:09:09 +02:00
|
|
|
return PrintKeyValue(key, value, is_hex, is_hex);
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string LDBCommand::HelpRangeCmdArgs() {
  // Usage fragment shared by all range-based subcommands:
  // " [--from] [--to] "
  std::string args(" ");
  args += "[--" + ARG_FROM + "] ";
  args += "[--" + ARG_TO + "] ";
  return args;
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
bool LDBCommand::IsKeyHex(const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags) {
|
2016-05-07 01:09:09 +02:00
|
|
|
return (IsFlagPresent(flags, ARG_HEX) || IsFlagPresent(flags, ARG_KEY_HEX) ||
|
|
|
|
ParseBooleanOption(options, ARG_HEX, false) ||
|
|
|
|
ParseBooleanOption(options, ARG_KEY_HEX, false));
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
bool LDBCommand::IsValueHex(const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags) {
|
2016-05-07 01:09:09 +02:00
|
|
|
return (IsFlagPresent(flags, ARG_HEX) ||
|
|
|
|
IsFlagPresent(flags, ARG_VALUE_HEX) ||
|
|
|
|
ParseBooleanOption(options, ARG_HEX, false) ||
|
|
|
|
ParseBooleanOption(options, ARG_VALUE_HEX, false));
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
bool LDBCommand::ParseBooleanOption(
|
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::string& option, bool default_val) {
|
|
|
|
std::map<std::string, std::string>::const_iterator itr = options.find(option);
|
2016-05-07 01:09:09 +02:00
|
|
|
if (itr != options.end()) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string option_val = itr->second;
|
2016-05-07 01:09:09 +02:00
|
|
|
return StringToBool(itr->second);
|
|
|
|
}
|
|
|
|
return default_val;
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
bool LDBCommand::StringToBool(std::string val) {
  // Case-insensitive parse of "true"/"false". Anything else is an error,
  // reported via the file's throw-a-C-string convention.
  std::transform(val.begin(), val.end(), val.begin(),
                 [](char ch) -> char { return (char)::tolower(ch); });
  if (val == "true") {
    return true;
  }
  if (val == "false") {
    return false;
  }
  throw "Invalid value for boolean argument";
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
CompactorCommand::CompactorCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_FROM, ARG_TO, ARG_HEX, ARG_KEY_HEX,
                                      ARG_VALUE_HEX, ARG_TTL})),
      null_from_(true),
      null_to_(true) {
  // Pick up the optional range endpoints; each null_* flag records whether
  // the corresponding endpoint was supplied.
  auto from_itr = options.find(ARG_FROM);
  if (from_itr != options.end()) {
    null_from_ = false;
    from_ = from_itr->second;
  }

  auto to_itr = options.find(ARG_TO);
  if (to_itr != options.end()) {
    null_to_ = false;
    to_ = to_itr->second;
  }

  // Endpoints arrive hex-encoded under --key_hex/--hex; decode them up front.
  if (is_key_hex_) {
    if (!null_from_) {
      from_ = HexToString(from_);
    }
    if (!null_to_) {
      to_ = HexToString(to_);
    }
  }
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
void CompactorCommand::Help(std::string& ret) {
  // Append the one-line usage summary for this subcommand.
  ret.append("  ").append(CompactorCommand::Name());
  ret.append(HelpRangeCmdArgs()).append("\n");
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
void CompactorCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
Slice* begin = nullptr;
|
|
|
|
Slice* end = nullptr;
|
2012-10-31 19:47:18 +01:00
|
|
|
if (!null_from_) {
|
2013-04-12 05:21:49 +02:00
|
|
|
begin = new Slice(from_);
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
if (!null_to_) {
|
2013-04-12 05:21:49 +02:00
|
|
|
end = new Slice(to_);
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
2015-11-17 23:45:26 +01:00
|
|
|
CompactRangeOptions cro;
|
|
|
|
cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
|
|
|
|
|
2016-10-17 19:40:30 +02:00
|
|
|
db_->CompactRange(cro, GetCfHandle(), begin, end);
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Succeed("");
|
2012-10-31 19:47:18 +01:00
|
|
|
|
|
|
|
delete begin;
|
|
|
|
delete end;
|
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Names of the command-line switches specific to the "load" subcommand.
const std::string DBLoaderCommand::ARG_DISABLE_WAL = "disable_wal";
const std::string DBLoaderCommand::ARG_BULK_LOAD = "bulk_load";
const std::string DBLoaderCommand::ARG_COMPACT = "compact";
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Constructs the "load" subcommand. Behavior is driven entirely by bare
// boolean flags (no valued options are read here); params is unused.
DBLoaderCommand::DBLoaderCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, false,
          BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
                               ARG_TO, ARG_CREATE_IF_MISSING, ARG_DISABLE_WAL,
                               ARG_BULK_LOAD, ARG_COMPACT})),
      disable_wal_(false),
      bulk_load_(false),
      compact_(false) {
  // Each flag independently toggles one aspect of the load:
  // create the DB if absent, skip the WAL, bulk-load tuning, final compact.
  create_if_missing_ = IsFlagPresent(flags, ARG_CREATE_IF_MISSING);
  disable_wal_ = IsFlagPresent(flags, ARG_DISABLE_WAL);
  bulk_load_ = IsFlagPresent(flags, ARG_BULK_LOAD);
  compact_ = IsFlagPresent(flags, ARG_COMPACT);
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
void DBLoaderCommand::Help(std::string& ret) {
  // Append the one-line usage summary listing the loader-specific switches.
  ret.append("  ").append(DBLoaderCommand::Name());
  ret.append(" [--" + ARG_CREATE_IF_MISSING + "]")
      .append(" [--" + ARG_DISABLE_WAL + "]");
  ret.append(" [--" + ARG_BULK_LOAD + "]")
      .append(" [--" + ARG_COMPACT + "]");
  ret.append("\n");
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
Options DBLoaderCommand::PrepareOptionsForOpenDB() {
  // Start from the shared LDB options, then layer the loader flags on top.
  Options open_options = LDBCommand::PrepareOptionsForOpenDB();
  open_options.create_if_missing = create_if_missing_;
  if (bulk_load_) {
    open_options.PrepareForBulkLoad();
  }
  return open_options;
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
void DBLoaderCommand::DoCommand() {
|
2012-12-17 02:06:51 +01:00
|
|
|
if (!db_) {
|
2016-01-23 00:46:32 +01:00
|
|
|
assert(GetExecuteState().IsFailed());
|
2012-12-17 02:06:51 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
WriteOptions write_options;
|
|
|
|
if (disable_wal_) {
|
|
|
|
write_options.disableWAL = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
int bad_lines = 0;
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string line;
|
2016-07-22 20:46:40 +02:00
|
|
|
// prefer ifstream getline performance vs that from std::cin istream
|
|
|
|
std::ifstream ifs_stdin("/dev/stdin");
|
|
|
|
std::istream* istream_p = ifs_stdin.is_open() ? &ifs_stdin : &std::cin;
|
|
|
|
while (getline(*istream_p, line, '\n')) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string key;
|
|
|
|
std::string value;
|
2013-01-11 20:09:23 +01:00
|
|
|
if (ParseKeyValue(line, &key, &value, is_key_hex_, is_value_hex_)) {
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->Put(write_options, GetCfHandle(), Slice(key), Slice(value));
|
2012-12-17 02:06:51 +01:00
|
|
|
} else if (0 == line.find("Keys in range:")) {
|
|
|
|
// ignore this line
|
|
|
|
} else if (0 == line.find("Created bg thread 0x")) {
|
|
|
|
// ignore this line
|
|
|
|
} else {
|
|
|
|
bad_lines ++;
|
|
|
|
}
|
|
|
|
}
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2012-12-17 02:06:51 +01:00
|
|
|
if (bad_lines > 0) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::cout << "Warning: " << bad_lines << " bad lines ignored." << std::endl;
|
2012-12-17 02:06:51 +01:00
|
|
|
}
|
2013-02-26 07:57:37 +01:00
|
|
|
if (compact_) {
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);
|
2013-02-26 07:57:37 +01:00
|
|
|
}
|
2012-12-17 02:06:51 +01:00
|
|
|
}
|
|
|
|
|
2013-03-22 17:17:30 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
namespace {
|
|
|
|
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage: **
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
2015-07-17 19:07:40 +02:00
|
|
|
void DumpManifestFile(std::string file, bool verbose, bool hex, bool json) {
|
2014-11-24 19:04:16 +01:00
|
|
|
Options options;
|
|
|
|
EnvOptions sopt;
|
|
|
|
std::string dbname("dummy");
|
2015-03-17 23:04:37 +01:00
|
|
|
std::shared_ptr<Cache> tc(NewLRUCache(options.max_open_files - 10,
|
|
|
|
options.table_cache_numshardbits));
|
2014-11-24 19:04:16 +01:00
|
|
|
// Notice we are using the default options not through SanitizeOptions(),
|
|
|
|
// if VersionSet::DumpManifest() depends on any option done by
|
|
|
|
// SanitizeOptions(), we need to initialize it manually.
|
|
|
|
options.db_paths.emplace_back("dummy", 0);
|
2015-08-03 20:01:24 +02:00
|
|
|
options.num_levels = 64;
|
2015-05-16 00:52:51 +02:00
|
|
|
WriteController wc(options.delayed_write_rate);
|
2016-06-21 03:01:03 +02:00
|
|
|
WriteBufferManager wb(options.db_write_buffer_size);
|
2016-09-24 01:34:04 +02:00
|
|
|
ImmutableDBOptions immutable_db_options(options);
|
|
|
|
VersionSet versions(dbname, &immutable_db_options, sopt, tc.get(), &wb, &wc);
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage: **
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
2015-07-17 19:07:40 +02:00
|
|
|
Status s = versions.DumpManifest(options, file, verbose, hex, json);
|
2014-11-24 19:04:16 +01:00
|
|
|
if (!s.ok()) {
|
|
|
|
printf("Error in processing file %s %s\n", file.c_str(),
|
|
|
|
s.ToString().c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Command-line flag names accepted by "ldb manifest_dump".
const std::string ManifestDumpCommand::ARG_VERBOSE = "verbose";
const std::string ManifestDumpCommand::ARG_JSON = "json";
const std::string ManifestDumpCommand::ARG_PATH = "path";
|
2013-03-22 17:17:30 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
void ManifestDumpCommand::Help(std::string& ret) {
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append(" ");
|
|
|
|
ret.append(ManifestDumpCommand::Name());
|
|
|
|
ret.append(" [--" + ARG_VERBOSE + "]");
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage: **
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
2015-07-17 19:07:40 +02:00
|
|
|
ret.append(" [--" + ARG_JSON + "]");
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append(" [--" + ARG_PATH + "=<path_to_manifest_file>]");
|
|
|
|
ret.append("\n");
|
2013-03-22 17:17:30 +01:00
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Parses the manifest_dump flags/options; positional params are unused.
ManifestDumpCommand::ManifestDumpCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, false,
          BuildCmdLineOptions({ARG_VERBOSE, ARG_PATH, ARG_HEX, ARG_JSON})),
      verbose_(false),
      json_(false),
      path_("") {
  // Boolean flags are simply present or absent on the command line.
  verbose_ = IsFlagPresent(flags, ARG_VERBOSE);
  json_ = IsFlagPresent(flags, ARG_JSON);

  // --path carries a value; supplying the flag with an empty value is an
  // error rather than "use the default".
  auto path_iter = options.find(ARG_PATH);
  if (path_iter != options.end()) {
    path_ = path_iter->second;
    if (path_.empty()) {
      exec_state_ = LDBCommandExecuteResult::Failed("--path: missing pathname");
    }
  }
}
|
|
|
|
|
|
|
|
void ManifestDumpCommand::DoCommand() {
|
|
|
|
|
|
|
|
std::string manifestfile;
|
|
|
|
|
|
|
|
if (!path_.empty()) {
|
|
|
|
manifestfile = path_;
|
|
|
|
} else {
|
|
|
|
bool found = false;
|
|
|
|
// We need to find the manifest file by searching the directory
|
|
|
|
// containing the db for files of the form MANIFEST_[0-9]+
|
2015-07-02 01:13:49 +02:00
|
|
|
|
|
|
|
auto CloseDir = [](DIR* p) { closedir(p); };
|
2015-07-13 21:11:05 +02:00
|
|
|
std::unique_ptr<DIR, decltype(CloseDir)> d(opendir(db_path_.c_str()),
|
|
|
|
CloseDir);
|
2015-07-02 01:13:49 +02:00
|
|
|
|
2013-03-22 17:17:30 +01:00
|
|
|
if (d == nullptr) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed(db_path_ + " is not a directory");
|
2013-03-22 17:17:30 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
struct dirent* entry;
|
2015-07-02 01:13:49 +02:00
|
|
|
while ((entry = readdir(d.get())) != nullptr) {
|
2013-03-22 17:17:30 +01:00
|
|
|
unsigned int match;
|
2015-09-08 23:23:42 +02:00
|
|
|
uint64_t num;
|
2015-09-09 00:46:16 +02:00
|
|
|
if (sscanf(entry->d_name, "MANIFEST-%" PRIu64 "%n", &num, &match) &&
|
2015-09-08 23:23:42 +02:00
|
|
|
match == strlen(entry->d_name)) {
|
2013-03-22 17:17:30 +01:00
|
|
|
if (!found) {
|
|
|
|
manifestfile = db_path_ + "/" + std::string(entry->d_name);
|
|
|
|
found = true;
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
"Multiple MANIFEST files found; use --path to select one");
|
2013-03-22 17:17:30 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (verbose_) {
|
|
|
|
printf("Processing Manifest file %s\n", manifestfile.c_str());
|
|
|
|
}
|
|
|
|
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage: **
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
2015-07-17 19:07:40 +02:00
|
|
|
DumpManifestFile(manifestfile, verbose_, is_key_hex_, json_);
|
|
|
|
|
2013-03-22 17:17:30 +01:00
|
|
|
if (verbose_) {
|
|
|
|
printf("Processing Manifest file %s done\n", manifestfile.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
2014-02-28 01:18:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the one-line usage string for list_column_families to `ret`.
void ListColumnFamiliesCommand::Help(std::string& ret) {
  std::string line(" ");
  line += ListColumnFamiliesCommand::Name();
  line += " full_path_to_db_directory ";
  line += "\n";
  ret.append(line);
}
|
|
|
|
|
|
|
|
// Requires exactly one positional parameter: the db directory to inspect.
ListColumnFamiliesCommand::ListColumnFamiliesCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false, {}) {
  if (params.size() == 1) {
    dbname_ = params[0];
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "dbname must be specified for the list_column_families command");
  }
}
|
|
|
|
|
|
|
|
void ListColumnFamiliesCommand::DoCommand() {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::vector<std::string> column_families;
|
2014-02-28 01:18:23 +01:00
|
|
|
Status s = DB::ListColumnFamilies(DBOptions(), dbname_, &column_families);
|
|
|
|
if (!s.ok()) {
|
|
|
|
printf("Error in processing db %s %s\n", dbname_.c_str(),
|
|
|
|
s.ToString().c_str());
|
|
|
|
} else {
|
|
|
|
printf("Column families in %s: \n{", dbname_.c_str());
|
|
|
|
bool first = true;
|
|
|
|
for (auto cf : column_families) {
|
|
|
|
if (!first) {
|
|
|
|
printf(", ");
|
|
|
|
}
|
|
|
|
first = false;
|
|
|
|
printf("%s", cf.c_str());
|
|
|
|
}
|
|
|
|
printf("}\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the one-line usage string for create_column_family to `ret`.
void CreateColumnFamilyCommand::Help(std::string& ret) {
  std::string line(" ");
  line += CreateColumnFamilyCommand::Name();
  line += " --db=<db_path> <new_column_family_name>";
  line += "\n";
  ret.append(line);
}
|
|
|
|
|
|
|
|
// Requires exactly one positional parameter: the new column family name.
CreateColumnFamilyCommand::CreateColumnFamilyCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true, {ARG_DB}) {
  if (params.size() == 1) {
    new_cf_name_ = params[0];
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "new column family name must be specified");
  }
}
|
|
|
|
|
|
|
|
void CreateColumnFamilyCommand::DoCommand() {
|
2017-07-07 00:50:56 +02:00
|
|
|
ColumnFamilyHandle* new_cf_handle = nullptr;
|
2016-01-23 00:46:32 +01:00
|
|
|
Status st = db_->CreateColumnFamily(options_, new_cf_name_, &new_cf_handle);
|
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Fail to create new column family: " + st.ToString());
|
|
|
|
}
|
|
|
|
delete new_cf_handle;
|
|
|
|
CloseDB();
|
|
|
|
}
|
|
|
|
|
2014-02-28 01:18:23 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-03-22 17:17:30 +01:00
|
|
|
|
2014-04-10 06:17:14 +02:00
|
|
|
namespace {
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Formats a unix timestamp as a human-readable local-time string ("%c").
std::string ReadableTime(int unixtime) {
  time_t rawtime = unixtime;
  // localtime_r keeps this thread-safe, unlike plain localtime().
  struct tm t_info;
  struct tm* timeinfo = localtime_r(&rawtime, &t_info);
  assert(timeinfo == &t_info);
  char buf[80];
  strftime(buf, sizeof(buf), "%c", timeinfo);
  return std::string(buf);
}
|
|
|
|
|
|
|
|
// This function only called when it's the sane case of >1 buckets in time-range
|
|
|
|
// Also called only when timekv falls between ttl_start and ttl_end provided
|
2016-05-20 16:42:18 +02:00
|
|
|
// Adds one to the histogram bucket that covers `timekv`.
// This function only called when it's the sane case of >1 buckets in
// time-range, and only when timekv falls between ttl_start and ttl_end.
void IncBucketCounts(std::vector<uint64_t>& bucket_counts, int ttl_start,
                     int time_range, int bucket_size, int timekv,
                     int num_buckets) {
#ifdef NDEBUG
  // These parameters are only read by the assert below.
  (void)time_range;
  (void)num_buckets;
#endif
  assert(time_range > 0 && timekv >= ttl_start && bucket_size > 0 &&
         timekv < (ttl_start + time_range) && num_buckets > 1);
  const int idx = (timekv - ttl_start) / bucket_size;
  ++bucket_counts[idx];
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
void PrintBucketCounts(const std::vector<uint64_t>& bucket_counts,
|
|
|
|
int ttl_start, int ttl_end, int bucket_size,
|
|
|
|
int num_buckets) {
|
2013-06-19 04:57:54 +02:00
|
|
|
int time_point = ttl_start;
|
2014-11-01 03:22:49 +01:00
|
|
|
for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
|
|
|
|
fprintf(stdout, "Keys in range %s to %s : %lu\n",
|
2013-06-19 04:57:54 +02:00
|
|
|
ReadableTime(time_point).c_str(),
|
2013-11-13 06:02:03 +01:00
|
|
|
ReadableTime(time_point + bucket_size).c_str(),
|
2014-11-01 03:22:49 +01:00
|
|
|
(unsigned long)bucket_counts[i]);
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
fprintf(stdout, "Keys in range %s to %s : %lu\n",
|
2013-06-19 04:57:54 +02:00
|
|
|
ReadableTime(time_point).c_str(),
|
2013-11-13 06:02:03 +01:00
|
|
|
ReadableTime(ttl_end).c_str(),
|
2014-11-01 03:22:49 +01:00
|
|
|
(unsigned long)bucket_counts[num_buckets - 1]);
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
|
2014-04-10 06:17:14 +02:00
|
|
|
} // namespace
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Flag names accepted by "ldb idump" in addition to the common options.
const std::string InternalDumpCommand::ARG_COUNT_ONLY = "count_only";
const std::string InternalDumpCommand::ARG_COUNT_DELIM = "count_delim";
const std::string InternalDumpCommand::ARG_STATS = "stats";
const std::string InternalDumpCommand::ARG_INPUT_KEY_HEX = "input_key_hex";
|
2013-06-21 01:02:36 +02:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Parses the idump flags/options; positional params are unused.
InternalDumpCommand::InternalDumpCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, true,
          BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
                               ARG_TO, ARG_MAX_KEYS, ARG_COUNT_ONLY,
                               ARG_COUNT_DELIM, ARG_STATS, ARG_INPUT_KEY_HEX})),
      has_from_(false),
      has_to_(false),
      max_keys_(-1),
      delim_("."),
      count_only_(false),
      count_delim_(false),
      print_stats_(false),
      is_input_key_hex_(false) {
  has_from_ = ParseStringOption(options, ARG_FROM, &from_);
  has_to_ = ParseStringOption(options, ARG_TO, &to_);

  ParseIntOption(options, ARG_MAX_KEYS, max_keys_, exec_state_);
  // --count_delim may be given either with a value (the delimiter) or as a
  // bare flag, in which case the delimiter defaults to ".".
  auto delim_iter = options.find(ARG_COUNT_DELIM);
  if (delim_iter != options.end()) {
    delim_ = delim_iter->second;
    count_delim_ = true;
  } else {
    count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
    delim_ = ".";
  }

  print_stats_ = IsFlagPresent(flags, ARG_STATS);
  count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
  is_input_key_hex_ = IsFlagPresent(flags, ARG_INPUT_KEY_HEX);

  // With --input_key_hex the range endpoints arrive hex-encoded; decode
  // them once up front.
  if (is_input_key_hex_) {
    if (has_from_) {
      from_ = HexToString(from_);
    }
    if (has_to_) {
      to_ = HexToString(to_);
    }
  }
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the one-line usage string for idump to `ret`.
void InternalDumpCommand::Help(std::string& ret) {
  std::string line(" ");
  line += InternalDumpCommand::Name();
  line += HelpRangeCmdArgs();
  line += " [--" + ARG_INPUT_KEY_HEX + "]";
  line += " [--" + ARG_MAX_KEYS + "=<N>]";
  line += " [--" + ARG_COUNT_ONLY + "]";
  line += " [--" + ARG_COUNT_DELIM + "=<char>]";
  line += " [--" + ARG_STATS + "]";
  line += "\n";
  ret.append(line);
}
|
|
|
|
|
|
|
|
void InternalDumpCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
2016-01-23 00:46:32 +01:00
|
|
|
assert(GetExecuteState().IsFailed());
|
2013-06-21 01:02:36 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (print_stats_) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string stats;
|
2016-01-23 00:46:32 +01:00
|
|
|
if (db_->GetProperty(GetCfHandle(), "rocksdb.stats", &stats)) {
|
2013-06-21 01:02:36 +02:00
|
|
|
fprintf(stdout, "%s\n", stats.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Cast as DBImpl to get internal iterator
|
2017-05-12 23:59:57 +02:00
|
|
|
std::vector<KeyVersion> key_versions;
|
2018-08-17 00:48:55 +02:00
|
|
|
Status st = GetAllKeyVersions(db_, from_, to_, max_keys_, &key_versions);
|
2017-05-12 23:59:57 +02:00
|
|
|
if (!st.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-06-21 01:02:36 +02:00
|
|
|
return;
|
|
|
|
}
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string rtype1, rtype2, row, val;
|
2013-11-01 21:59:14 +01:00
|
|
|
rtype2 = "";
|
2014-11-01 03:22:49 +01:00
|
|
|
uint64_t c=0;
|
|
|
|
uint64_t s1=0,s2=0;
|
2013-06-21 01:02:36 +02:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
long long count = 0;
|
2017-05-12 23:59:57 +02:00
|
|
|
for (auto& key_version : key_versions) {
|
|
|
|
InternalKey ikey(key_version.user_key, key_version.sequence,
|
|
|
|
static_cast<ValueType>(key_version.type));
|
|
|
|
if (has_to_ && ikey.user_key() == to_) {
|
|
|
|
// GetAllKeyVersions() includes keys with user key `to_`, but idump has
|
|
|
|
// traditionally excluded such keys.
|
2013-06-21 01:02:36 +02:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
++count;
|
2013-11-01 21:59:14 +01:00
|
|
|
int k;
|
|
|
|
if (count_delim_) {
|
|
|
|
rtype1 = "";
|
2014-11-01 03:22:49 +01:00
|
|
|
s1=0;
|
2017-05-12 23:59:57 +02:00
|
|
|
row = ikey.Encode().ToString();
|
|
|
|
val = key_version.value;
|
2014-11-01 03:22:49 +01:00
|
|
|
for(k=0;row[k]!='\x01' && row[k]!='\0';k++)
|
2013-11-01 21:59:14 +01:00
|
|
|
s1++;
|
2014-11-01 03:22:49 +01:00
|
|
|
for(k=0;val[k]!='\x01' && val[k]!='\0';k++)
|
2013-11-01 21:59:14 +01:00
|
|
|
s1++;
|
2014-11-01 03:22:49 +01:00
|
|
|
for(int j=0;row[j]!=delim_[0] && row[j]!='\0' && row[j]!='\x01';j++)
|
|
|
|
rtype1+=row[j];
|
|
|
|
if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
|
2018-06-07 20:34:52 +02:00
|
|
|
fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
|
|
|
|
rtype2.c_str(), c, s2);
|
2014-11-01 03:22:49 +01:00
|
|
|
c=1;
|
|
|
|
s2=s1;
|
2013-11-01 21:59:14 +01:00
|
|
|
rtype2 = rtype1;
|
|
|
|
} else {
|
|
|
|
c++;
|
2014-11-01 03:22:49 +01:00
|
|
|
s2+=s1;
|
|
|
|
rtype2=rtype1;
|
2017-05-12 23:59:57 +02:00
|
|
|
}
|
2013-11-01 21:59:14 +01:00
|
|
|
}
|
2013-06-21 01:02:36 +02:00
|
|
|
|
2013-11-01 21:59:14 +01:00
|
|
|
if (!count_only_ && !count_delim_) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string key = ikey.DebugString(is_key_hex_);
|
2017-05-12 23:59:57 +02:00
|
|
|
std::string value = Slice(key_version.value).ToString(is_value_hex_);
|
2014-11-01 03:22:49 +01:00
|
|
|
std::cout << key << " => " << value << "\n";
|
2013-06-21 01:02:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Terminate if maximum number of keys have been dumped
|
2014-11-01 03:22:49 +01:00
|
|
|
if (max_keys_ > 0 && count >= max_keys_) break;
|
2013-06-21 01:02:36 +02:00
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
if(count_delim_) {
|
2018-06-07 20:34:52 +02:00
|
|
|
fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
|
|
|
|
rtype2.c_str(), c, s2);
|
|
|
|
} else {
|
|
|
|
fprintf(stdout, "Internal keys in range: %lld\n", count);
|
|
|
|
}
|
2013-06-21 01:02:36 +02:00
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Flag names accepted by "ldb dump" in addition to the common options.
const std::string DBDumperCommand::ARG_COUNT_ONLY = "count_only";
const std::string DBDumperCommand::ARG_COUNT_DELIM = "count_delim";
const std::string DBDumperCommand::ARG_STATS = "stats";
const std::string DBDumperCommand::ARG_TTL_BUCKET = "bucket";
|
2013-06-21 01:02:36 +02:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
DBDumperCommand::DBDumperCommand(
|
2018-03-05 22:08:17 +01:00
|
|
|
const std::vector<std::string>& /*params*/,
|
2016-05-20 16:42:18 +02:00
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
2016-01-06 23:19:08 +01:00
|
|
|
: LDBCommand(options, flags, true,
|
|
|
|
BuildCmdLineOptions(
|
|
|
|
{ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
|
|
|
|
ARG_TO, ARG_MAX_KEYS, ARG_COUNT_ONLY, ARG_COUNT_DELIM,
|
|
|
|
ARG_STATS, ARG_TTL_START, ARG_TTL_END, ARG_TTL_BUCKET,
|
|
|
|
ARG_TIMESTAMP, ARG_PATH})),
|
|
|
|
null_from_(true),
|
|
|
|
null_to_(true),
|
|
|
|
max_keys_(-1),
|
|
|
|
count_only_(false),
|
|
|
|
count_delim_(false),
|
|
|
|
print_stats_(false) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::map<std::string, std::string>::const_iterator itr =
|
|
|
|
options.find(ARG_FROM);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (itr != options.end()) {
|
|
|
|
null_from_ = false;
|
|
|
|
from_ = itr->second;
|
|
|
|
}
|
|
|
|
|
|
|
|
itr = options.find(ARG_TO);
|
|
|
|
if (itr != options.end()) {
|
|
|
|
null_to_ = false;
|
|
|
|
to_ = itr->second;
|
|
|
|
}
|
|
|
|
|
|
|
|
itr = options.find(ARG_MAX_KEYS);
|
|
|
|
if (itr != options.end()) {
|
|
|
|
try {
|
2015-04-24 04:17:57 +02:00
|
|
|
#if defined(CYGWIN)
|
|
|
|
max_keys_ = strtol(itr->second.c_str(), 0, 10);
|
|
|
|
#else
|
2016-05-20 16:42:18 +02:00
|
|
|
max_keys_ = std::stoi(itr->second);
|
2015-04-24 04:17:57 +02:00
|
|
|
#endif
|
2016-05-20 16:42:18 +02:00
|
|
|
} catch (const std::invalid_argument&) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
" has an invalid value");
|
2016-05-20 16:42:18 +02:00
|
|
|
} catch (const std::out_of_range&) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
ARG_MAX_KEYS + " has a value out-of-range");
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
}
|
2013-11-01 21:59:14 +01:00
|
|
|
itr = options.find(ARG_COUNT_DELIM);
|
|
|
|
if (itr != options.end()) {
|
|
|
|
delim_ = itr->second;
|
|
|
|
count_delim_ = true;
|
|
|
|
} else {
|
|
|
|
count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
|
2014-11-01 03:22:49 +01:00
|
|
|
delim_=".";
|
2013-11-01 21:59:14 +01:00
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
print_stats_ = IsFlagPresent(flags, ARG_STATS);
|
|
|
|
count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
|
|
|
|
|
|
|
|
if (is_key_hex_) {
|
2012-10-31 19:47:18 +01:00
|
|
|
if (!null_from_) {
|
|
|
|
from_ = HexToString(from_);
|
|
|
|
}
|
|
|
|
if (!null_to_) {
|
|
|
|
to_ = HexToString(to_);
|
|
|
|
}
|
|
|
|
}
|
2016-01-06 23:19:08 +01:00
|
|
|
|
|
|
|
itr = options.find(ARG_PATH);
|
|
|
|
if (itr != options.end()) {
|
|
|
|
path_ = itr->second;
|
2017-10-02 18:40:00 +02:00
|
|
|
if (db_path_.empty()) {
|
|
|
|
db_path_ = path_;
|
|
|
|
}
|
2016-01-06 23:19:08 +01:00
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
void DBDumperCommand::Help(std::string& ret) {
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append(" ");
|
|
|
|
ret.append(DBDumperCommand::Name());
|
|
|
|
ret.append(HelpRangeCmdArgs());
|
|
|
|
ret.append(" [--" + ARG_TTL + "]");
|
|
|
|
ret.append(" [--" + ARG_MAX_KEYS + "=<N>]");
|
|
|
|
ret.append(" [--" + ARG_TIMESTAMP + "]");
|
|
|
|
ret.append(" [--" + ARG_COUNT_ONLY + "]");
|
|
|
|
ret.append(" [--" + ARG_COUNT_DELIM + "=<char>]");
|
|
|
|
ret.append(" [--" + ARG_STATS + "]");
|
|
|
|
ret.append(" [--" + ARG_TTL_BUCKET + "=<N>]");
|
|
|
|
ret.append(" [--" + ARG_TTL_START + "=<N>:- is inclusive]");
|
|
|
|
ret.append(" [--" + ARG_TTL_END + "=<N>:- is exclusive]");
|
2016-01-06 23:19:08 +01:00
|
|
|
ret.append(" [--" + ARG_PATH + "=<path_to_a_file>]");
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append("\n");
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
2016-01-06 23:19:08 +01:00
|
|
|
/**
|
|
|
|
* Handles two separate cases:
|
|
|
|
*
|
|
|
|
* 1) --db is specified - just dump the database.
|
|
|
|
*
|
|
|
|
* 2) --path is specified - determine based on file extension what dumping
|
|
|
|
* function to call. Please note that we intentionally use the extension
|
|
|
|
* and avoid probing the file contents under the assumption that renaming
|
|
|
|
* the files is not a supported scenario.
|
|
|
|
*
|
|
|
|
*/
|
2013-01-11 20:09:23 +01:00
|
|
|
void DBDumperCommand::DoCommand() {
|
2012-11-21 22:26:32 +01:00
|
|
|
if (!db_) {
|
2016-01-06 23:19:08 +01:00
|
|
|
assert(!path_.empty());
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string fileName = GetFileNameFromPath(path_);
|
2016-01-06 23:19:08 +01:00
|
|
|
uint64_t number;
|
|
|
|
FileType type;
|
|
|
|
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Succeed("");
|
|
|
|
|
|
|
|
if (!ParseFileName(fileName, &number, &type)) {
|
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed("Can't parse file type: " + path_);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case kLogFile:
|
2018-04-08 06:46:53 +02:00
|
|
|
// TODO(myabandeh): allow configuring is_write_commited
|
2016-01-06 23:19:08 +01:00
|
|
|
DumpWalFile(path_, /* print_header_ */ true, /* print_values_ */ true,
|
2018-04-08 06:46:53 +02:00
|
|
|
true /* is_write_commited */, &exec_state_);
|
2016-01-06 23:19:08 +01:00
|
|
|
break;
|
|
|
|
case kTableFile:
|
|
|
|
DumpSstFile(path_, is_key_hex_, /* show_properties */ true);
|
|
|
|
break;
|
|
|
|
case kDescriptorFile:
|
|
|
|
DumpManifestFile(path_, /* verbose_ */ false, is_key_hex_,
|
|
|
|
/* json_ */ false);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"File type not supported: " + path_);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
} else {
|
|
|
|
DoDumpCommand();
|
2012-11-21 22:26:32 +01:00
|
|
|
}
|
2016-01-06 23:19:08 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void DBDumperCommand::DoDumpCommand() {
|
|
|
|
assert(nullptr != db_);
|
|
|
|
assert(path_.empty());
|
|
|
|
|
2012-10-31 19:47:18 +01:00
|
|
|
// Parse command line args
|
|
|
|
uint64_t count = 0;
|
|
|
|
if (print_stats_) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string stats;
|
2013-10-05 07:32:05 +02:00
|
|
|
if (db_->GetProperty("rocksdb.stats", &stats)) {
|
2012-10-31 19:47:18 +01:00
|
|
|
fprintf(stdout, "%s\n", stats.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setup key iterator
|
2016-01-23 00:46:32 +01:00
|
|
|
Iterator* iter = db_->NewIterator(ReadOptions(), GetCfHandle());
|
2013-04-12 05:21:49 +02:00
|
|
|
Status st = iter->status();
|
2012-10-31 19:47:18 +01:00
|
|
|
if (!st.ok()) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed("Iterator error." + st.ToString());
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!null_from_) {
|
|
|
|
iter->Seek(from_);
|
|
|
|
} else {
|
|
|
|
iter->SeekToFirst();
|
|
|
|
}
|
|
|
|
|
|
|
|
int max_keys = max_keys_;
|
2013-06-19 04:57:54 +02:00
|
|
|
int ttl_start;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (!ParseIntOption(option_map_, ARG_TTL_START, ttl_start, exec_state_)) {
|
2014-04-29 05:34:20 +02:00
|
|
|
ttl_start = DBWithTTLImpl::kMinTimestamp; // TTL introduction time
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
int ttl_end;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (!ParseIntOption(option_map_, ARG_TTL_END, ttl_end, exec_state_)) {
|
2014-04-29 05:34:20 +02:00
|
|
|
ttl_end = DBWithTTLImpl::kMaxTimestamp; // Max time allowed by TTL feature
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
if (ttl_end < ttl_start) {
|
|
|
|
fprintf(stderr, "Error: End time can't be less than start time\n");
|
|
|
|
delete iter;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
int time_range = ttl_end - ttl_start;
|
|
|
|
int bucket_size;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (!ParseIntOption(option_map_, ARG_TTL_BUCKET, bucket_size, exec_state_) ||
|
2013-06-19 04:57:54 +02:00
|
|
|
bucket_size <= 0) {
|
|
|
|
bucket_size = time_range; // Will have just 1 bucket by default
|
|
|
|
}
|
2013-11-01 21:59:14 +01:00
|
|
|
//cretaing variables for row count of each type
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string rtype1, rtype2, row, val;
|
2013-11-01 21:59:14 +01:00
|
|
|
rtype2 = "";
|
2014-11-01 03:22:49 +01:00
|
|
|
uint64_t c=0;
|
|
|
|
uint64_t s1=0,s2=0;
|
2013-11-01 21:59:14 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
// At this point, bucket_size=0 => time_range=0
|
2014-11-11 22:47:22 +01:00
|
|
|
int num_buckets = (bucket_size >= time_range)
|
|
|
|
? 1
|
|
|
|
: ((time_range + bucket_size - 1) / bucket_size);
|
2016-05-20 16:42:18 +02:00
|
|
|
std::vector<uint64_t> bucket_counts(num_buckets, 0);
|
2013-11-01 21:59:14 +01:00
|
|
|
if (is_db_ttl_ && !count_only_ && timestamp_ && !count_delim_) {
|
2013-06-19 04:57:54 +02:00
|
|
|
fprintf(stdout, "Dumping key-values from %s to %s\n",
|
|
|
|
ReadableTime(ttl_start).c_str(), ReadableTime(ttl_end).c_str());
|
|
|
|
}
|
|
|
|
|
2017-10-02 18:40:00 +02:00
|
|
|
HistogramImpl vsize_hist;
|
|
|
|
|
2012-10-31 19:47:18 +01:00
|
|
|
for (; iter->Valid(); iter->Next()) {
|
2013-06-19 04:57:54 +02:00
|
|
|
int rawtime = 0;
|
2012-10-31 19:47:18 +01:00
|
|
|
// If end marker was specified, we stop before it
|
|
|
|
if (!null_to_ && (iter->key().ToString() >= to_))
|
|
|
|
break;
|
|
|
|
// Terminate if maximum number of keys have been dumped
|
|
|
|
if (max_keys == 0)
|
|
|
|
break;
|
2013-06-19 04:57:54 +02:00
|
|
|
if (is_db_ttl_) {
|
2017-07-29 01:23:50 +02:00
|
|
|
TtlIterator* it_ttl = static_cast_with_check<TtlIterator, Iterator>(iter);
|
2013-06-20 20:50:33 +02:00
|
|
|
rawtime = it_ttl->timestamp();
|
|
|
|
if (rawtime < ttl_start || rawtime >= ttl_end) {
|
2013-06-19 04:57:54 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
if (max_keys > 0) {
|
|
|
|
--max_keys;
|
|
|
|
}
|
2013-06-19 04:57:54 +02:00
|
|
|
if (is_db_ttl_ && num_buckets > 1) {
|
2014-11-01 03:22:49 +01:00
|
|
|
IncBucketCounts(bucket_counts, ttl_start, time_range, bucket_size,
|
2013-06-19 04:57:54 +02:00
|
|
|
rawtime, num_buckets);
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
++count;
|
2013-11-01 21:59:14 +01:00
|
|
|
if (count_delim_) {
|
|
|
|
rtype1 = "";
|
|
|
|
row = iter->key().ToString();
|
|
|
|
val = iter->value().ToString();
|
|
|
|
s1 = row.size()+val.size();
|
2014-11-01 03:22:49 +01:00
|
|
|
for(int j=0;row[j]!=delim_[0] && row[j]!='\0';j++)
|
|
|
|
rtype1+=row[j];
|
|
|
|
if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
|
2018-06-07 20:34:52 +02:00
|
|
|
fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
|
|
|
|
rtype2.c_str(), c, s2);
|
2014-11-01 03:22:49 +01:00
|
|
|
c=1;
|
|
|
|
s2=s1;
|
2013-11-01 21:59:14 +01:00
|
|
|
rtype2 = rtype1;
|
|
|
|
} else {
|
2014-11-01 03:22:49 +01:00
|
|
|
c++;
|
|
|
|
s2+=s1;
|
|
|
|
rtype2=rtype1;
|
2013-11-01 21:59:14 +01:00
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
|
2013-11-01 21:59:14 +01:00
|
|
|
}
|
|
|
|
|
2017-10-02 18:40:00 +02:00
|
|
|
if (count_only_) {
|
|
|
|
vsize_hist.Add(iter->value().size());
|
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
|
2013-11-01 21:59:14 +01:00
|
|
|
if (!count_only_ && !count_delim_) {
|
2013-06-19 04:57:54 +02:00
|
|
|
if (is_db_ttl_ && timestamp_) {
|
|
|
|
fprintf(stdout, "%s ", ReadableTime(rawtime).c_str());
|
|
|
|
}
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string str =
|
|
|
|
PrintKeyValue(iter->key().ToString(), iter->value().ToString(),
|
|
|
|
is_key_hex_, is_value_hex_);
|
2012-12-27 00:15:54 +01:00
|
|
|
fprintf(stdout, "%s\n", str.c_str());
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
}
|
2013-11-01 21:59:14 +01:00
|
|
|
|
2013-06-19 04:57:54 +02:00
|
|
|
if (num_buckets > 1 && is_db_ttl_) {
|
2013-06-20 20:50:33 +02:00
|
|
|
PrintBucketCounts(bucket_counts, ttl_start, ttl_end, bucket_size,
|
2013-06-19 04:57:54 +02:00
|
|
|
num_buckets);
|
2014-11-01 03:22:49 +01:00
|
|
|
} else if(count_delim_) {
|
2018-06-07 20:34:52 +02:00
|
|
|
fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n",
|
|
|
|
rtype2.c_str(), c, s2);
|
2013-06-19 04:57:54 +02:00
|
|
|
} else {
|
2018-06-07 20:34:52 +02:00
|
|
|
fprintf(stdout, "Keys in range: %" PRIu64 "\n", count);
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
2017-10-02 18:40:00 +02:00
|
|
|
|
|
|
|
if (count_only_) {
|
|
|
|
fprintf(stdout, "Value size distribution: \n");
|
|
|
|
fprintf(stdout, "%s\n", vsize_hist.ToString().c_str());
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
// Clean up
|
|
|
|
delete iter;
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Command-line argument names accepted by the "reduce_levels" command.
const std::string ReduceDBLevelsCommand::ARG_NEW_LEVELS = "new_levels";
const std::string ReduceDBLevelsCommand::ARG_PRINT_OLD_LEVELS =
    "print_old_levels";
|
|
|
ReduceDBLevelsCommand::ReduceDBLevelsCommand(
|
2018-03-05 22:08:17 +01:00
|
|
|
const std::vector<std::string>& /*params*/,
|
2016-05-20 16:42:18 +02:00
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(options, flags, false,
|
|
|
|
BuildCmdLineOptions({ARG_NEW_LEVELS, ARG_PRINT_OLD_LEVELS})),
|
|
|
|
old_levels_(1 << 7),
|
|
|
|
new_levels_(-1),
|
|
|
|
print_old_levels_(false) {
|
2014-11-01 03:22:49 +01:00
|
|
|
ParseIntOption(option_map_, ARG_NEW_LEVELS, new_levels_, exec_state_);
|
2013-01-11 20:09:23 +01:00
|
|
|
print_old_levels_ = IsFlagPresent(flags, ARG_PRINT_OLD_LEVELS);
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
if(new_levels_ <= 0) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
" Use --" + ARG_NEW_LEVELS + " to specify a new level number\n");
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
std::vector<std::string> ReduceDBLevelsCommand::PrepareArgs(
|
|
|
|
const std::string& db_path, int new_levels, bool print_old_level) {
|
|
|
|
std::vector<std::string> ret;
|
2013-01-11 20:09:23 +01:00
|
|
|
ret.push_back("reduce_levels");
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.push_back("--" + ARG_DB + "=" + db_path);
|
2015-04-24 04:17:57 +02:00
|
|
|
ret.push_back("--" + ARG_NEW_LEVELS + "=" + rocksdb::ToString(new_levels));
|
2014-11-01 03:22:49 +01:00
|
|
|
if(print_old_level) {
|
2013-01-11 20:09:23 +01:00
|
|
|
ret.push_back("--" + ARG_PRINT_OLD_LEVELS);
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the usage line for the "reduce_levels" command to `ret`.
void ReduceDBLevelsCommand::Help(std::string& ret) {
  ret.append("  ");
  ret.append(ReduceDBLevelsCommand::Name());
  ret.append(" --" + ARG_NEW_LEVELS + "=<New number of levels>");
  ret.append(" [--" + ARG_PRINT_OLD_LEVELS + "]");
  ret.append("\n");
}
2013-04-12 05:21:49 +02:00
|
|
|
// Returns DB-open options tuned for level reduction: plenty of levels so the
// DB opens cleanly, and size-triggered compaction effectively disabled so
// opening the DB does not reshuffle files before we compact explicitly.
Options ReduceDBLevelsCommand::PrepareOptionsForOpenDB() {
  Options opt = LDBCommand::PrepareOptionsForOpenDB();
  opt.num_levels = old_levels_;
  opt.max_bytes_for_level_multiplier_additional.resize(opt.num_levels, 1);
  // Disable size compaction
  opt.max_bytes_for_level_base = 1ULL << 50;
  opt.max_bytes_for_level_multiplier = 1;
  return opt;
}
2013-04-12 05:21:49 +02:00
|
|
|
// Recovers the manifest (read-only) to discover how many levels currently
// hold files, and stores that count (highest populated level + 1) in
// *levels. Returns the recovery status.
Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt,
                                                int* levels) {
  ImmutableDBOptions db_options(opt);
  EnvOptions soptions;
  std::shared_ptr<Cache> tc(
      NewLRUCache(opt.max_open_files - 10, opt.table_cache_numshardbits));
  const InternalKeyComparator cmp(opt.comparator);
  WriteController wc(opt.delayed_write_rate);
  WriteBufferManager wb(opt.db_write_buffer_size);
  VersionSet versions(db_path_, &db_options, soptions, tc.get(), &wb, &wc);
  std::vector<ColumnFamilyDescriptor> dummy;
  ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
                                          ColumnFamilyOptions(opt));
  dummy.push_back(dummy_descriptor);
  // We rely the VersionSet::Recover to tell us the internal data structures
  // in the db. And the Recover() should never do any change
  // (like LogAndApply) to the manifest file.
  Status st = versions.Recover(dummy);
  if (!st.ok()) {
    return st;
  }

  // Highest level index that contains at least one file.
  int max = -1;
  auto default_cfd = versions.GetColumnFamilySet()->GetDefault();
  for (int i = 0; i < default_cfd->NumberLevels(); i++) {
    if (default_cfd->current()->storage_info()->NumLevelFiles(i)) {
      max = i;
    }
  }

  *levels = max + 1;
  return st;
}
2013-01-11 20:09:23 +01:00
|
|
|
void ReduceDBLevelsCommand::DoCommand() {
|
2012-10-31 19:47:18 +01:00
|
|
|
if (new_levels_ <= 1) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed("Invalid number of levels.\n");
|
2012-10-31 19:47:18 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
Status st;
|
|
|
|
Options opt = PrepareOptionsForOpenDB();
|
2012-11-09 03:45:19 +01:00
|
|
|
int old_level_num = -1;
|
|
|
|
st = GetOldNumOfLevels(opt, &old_level_num);
|
|
|
|
if (!st.ok()) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2012-11-09 03:45:19 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-10-31 19:47:18 +01:00
|
|
|
if (print_old_levels_) {
|
2014-11-01 03:22:49 +01:00
|
|
|
fprintf(stdout, "The old number of levels in use is %d\n", old_level_num);
|
2012-11-09 03:45:19 +01:00
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2012-11-09 03:45:19 +01:00
|
|
|
if (old_level_num <= new_levels_) {
|
|
|
|
return;
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
2012-11-09 03:45:19 +01:00
|
|
|
old_levels_ = old_level_num;
|
|
|
|
|
|
|
|
OpenDB();
|
2017-04-20 19:16:13 +02:00
|
|
|
if (exec_state_.IsFailed()) {
|
2012-11-21 01:14:04 +01:00
|
|
|
return;
|
|
|
|
}
|
2017-12-07 07:54:11 +01:00
|
|
|
assert(db_ != nullptr);
|
2012-10-31 19:47:18 +01:00
|
|
|
// Compact the whole DB to put all files to the highest level.
|
2012-11-09 03:45:19 +01:00
|
|
|
fprintf(stdout, "Compacting the db...\n");
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);
|
2012-10-31 19:47:18 +01:00
|
|
|
CloseDB();
|
|
|
|
|
2013-06-08 00:35:17 +02:00
|
|
|
EnvOptions soptions;
|
2014-11-01 03:22:49 +01:00
|
|
|
st = VersionSet::ReduceNumberOfLevels(db_path_, &opt, soptions, new_levels_);
|
2012-10-31 19:47:18 +01:00
|
|
|
if (!st.ok()) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2012-10-31 19:47:18 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Command-line argument names accepted by the "change_compaction_style"
// command.
const std::string ChangeCompactionStyleCommand::ARG_OLD_COMPACTION_STYLE =
    "old_compaction_style";
const std::string ChangeCompactionStyleCommand::ARG_NEW_COMPACTION_STYLE =
    "new_compaction_style";
|
|
|
ChangeCompactionStyleCommand::ChangeCompactionStyleCommand(
|
2018-03-05 22:08:17 +01:00
|
|
|
const std::vector<std::string>& /*params*/,
|
2016-05-20 16:42:18 +02:00
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(options, flags, false,
|
|
|
|
BuildCmdLineOptions(
|
|
|
|
{ARG_OLD_COMPACTION_STYLE, ARG_NEW_COMPACTION_STYLE})),
|
|
|
|
old_compaction_style_(-1),
|
|
|
|
new_compaction_style_(-1) {
|
2014-11-01 03:22:49 +01:00
|
|
|
ParseIntOption(option_map_, ARG_OLD_COMPACTION_STYLE, old_compaction_style_,
|
|
|
|
exec_state_);
|
2013-09-04 22:13:08 +02:00
|
|
|
if (old_compaction_style_ != kCompactionStyleLevel &&
|
|
|
|
old_compaction_style_ != kCompactionStyleUniversal) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
"Use --" + ARG_OLD_COMPACTION_STYLE + " to specify old compaction " +
|
|
|
|
"style. Check ldb help for proper compaction style value.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
ParseIntOption(option_map_, ARG_NEW_COMPACTION_STYLE, new_compaction_style_,
|
|
|
|
exec_state_);
|
2013-09-04 22:13:08 +02:00
|
|
|
if (new_compaction_style_ != kCompactionStyleLevel &&
|
|
|
|
new_compaction_style_ != kCompactionStyleUniversal) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
"Use --" + ARG_NEW_COMPACTION_STYLE + " to specify new compaction " +
|
|
|
|
"style. Check ldb help for proper compaction style value.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (new_compaction_style_ == old_compaction_style_) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
"Old compaction style is the same as new compaction style. "
|
|
|
|
"Nothing to do.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (old_compaction_style_ == kCompactionStyleUniversal &&
|
|
|
|
new_compaction_style_ == kCompactionStyleLevel) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
"Convert from universal compaction to level compaction. "
|
|
|
|
"Nothing to do.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the usage line for the change_compaction_style command to `ret`.
// Both style arguments are required; 0 selects level compaction and 1
// selects universal compaction.
void ChangeCompactionStyleCommand::Help(std::string& ret) {
  ret.append(" ")
      .append(ChangeCompactionStyleCommand::Name())
      .append(" --" + ARG_OLD_COMPACTION_STYLE + "=<Old compaction style: 0 " +
              "for level compaction, 1 for universal compaction>")
      .append(" --" + ARG_NEW_COMPACTION_STYLE + "=<New compaction style: 0 " +
              "for level compaction, 1 for universal compaction>")
      .append("\n");
}
|
|
|
|
|
|
|
|
// Builds the Options used to open the DB for a compaction-style conversion.
// Starts from the base class defaults and, when converting level -> universal,
// tweaks the options so all data can be compacted into a single file.
Options ChangeCompactionStyleCommand::PrepareOptionsForOpenDB() {
  Options opt = LDBCommand::PrepareOptionsForOpenDB();

  const bool level_to_universal =
      old_compaction_style_ == kCompactionStyleLevel &&
      new_compaction_style_ == kCompactionStyleUniversal;
  if (level_to_universal) {
    // In order to convert from level compaction to universal compaction, we
    // need to compact all data into a single file and move it to level 0.
    // Disabling auto compaction and making file/level limits effectively
    // unbounded lets the manual compaction produce that single file.
    opt.disable_auto_compactions = true;
    opt.target_file_size_base = INT_MAX;
    opt.target_file_size_multiplier = 1;
    opt.max_bytes_for_level_base = INT_MAX;
    opt.max_bytes_for_level_multiplier = 1;
  }

  return opt;
}
|
|
|
|
|
|
|
|
void ChangeCompactionStyleCommand::DoCommand() {
|
|
|
|
// print db stats before we have made any change
|
|
|
|
std::string property;
|
|
|
|
std::string files_per_level;
|
2016-01-23 00:46:32 +01:00
|
|
|
for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) {
|
|
|
|
db_->GetProperty(GetCfHandle(),
|
|
|
|
"rocksdb.num-files-at-level" + NumberToString(i),
|
2013-09-04 22:13:08 +02:00
|
|
|
&property);
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
// format print string
|
2013-09-04 22:13:08 +02:00
|
|
|
char buf[100];
|
2014-11-01 03:22:49 +01:00
|
|
|
snprintf(buf, sizeof(buf), "%s%s", (i ? "," : ""), property.c_str());
|
2013-09-04 22:13:08 +02:00
|
|
|
files_per_level += buf;
|
|
|
|
}
|
|
|
|
fprintf(stdout, "files per level before compaction: %s\n",
|
|
|
|
files_per_level.c_str());
|
|
|
|
|
|
|
|
// manual compact into a single file and move the file to level 0
|
2015-06-17 23:36:14 +02:00
|
|
|
CompactRangeOptions compact_options;
|
|
|
|
compact_options.change_level = true;
|
|
|
|
compact_options.target_level = 0;
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->CompactRange(compact_options, GetCfHandle(), nullptr, nullptr);
|
2013-09-04 22:13:08 +02:00
|
|
|
|
|
|
|
// verify compaction result
|
|
|
|
files_per_level = "";
|
|
|
|
int num_files = 0;
|
2017-02-07 20:36:01 +01:00
|
|
|
for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) {
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->GetProperty(GetCfHandle(),
|
|
|
|
"rocksdb.num-files-at-level" + NumberToString(i),
|
2013-09-04 22:13:08 +02:00
|
|
|
&property);
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
// format print string
|
2013-09-04 22:13:08 +02:00
|
|
|
char buf[100];
|
2014-11-01 03:22:49 +01:00
|
|
|
snprintf(buf, sizeof(buf), "%s%s", (i ? "," : ""), property.c_str());
|
2013-09-04 22:13:08 +02:00
|
|
|
files_per_level += buf;
|
|
|
|
|
|
|
|
num_files = atoi(property.c_str());
|
|
|
|
|
|
|
|
// level 0 should have only 1 file
|
|
|
|
if (i == 0 && num_files != 1) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Number of db files at "
|
|
|
|
"level 0 after compaction is " +
|
|
|
|
ToString(num_files) + ", not 1.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
// other levels should have no file
|
|
|
|
if (i > 0 && num_files != 0) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Number of db files at "
|
|
|
|
"level " +
|
|
|
|
ToString(i) + " after compaction is " + ToString(num_files) +
|
|
|
|
", not 0.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fprintf(stdout, "files per level after compaction: %s\n",
|
|
|
|
files_per_level.c_str());
|
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
// log::Reader corruption reporter that writes a notice to stderr instead of
// failing, so a WAL dump can continue past damaged records.
struct StdErrReporter : public log::Reader::Reporter {
  virtual void Corruption(size_t /*bytes*/, const Status& s) override {
    std::cerr << "Corruption detected in log file " << s.ToString() << "\n";
  }
};
|
|
|
|
|
2013-02-20 03:12:20 +01:00
|
|
|
class InMemoryHandler : public WriteBatch::Handler {
|
|
|
|
public:
|
2018-04-08 06:46:53 +02:00
|
|
|
InMemoryHandler(std::stringstream& row, bool print_values,
|
|
|
|
bool write_after_commit = false)
|
|
|
|
: Handler(),
|
|
|
|
row_(row),
|
|
|
|
print_values_(print_values),
|
|
|
|
write_after_commit_(write_after_commit) {}
|
2013-02-20 03:12:20 +01:00
|
|
|
|
2013-12-04 08:16:36 +01:00
|
|
|
void commonPutMerge(const Slice& key, const Slice& value) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string k = LDBCommand::StringToHex(key.ToString());
|
2013-12-04 08:16:36 +01:00
|
|
|
if (print_values_) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string v = LDBCommand::StringToHex(value.ToString());
|
2013-12-04 08:16:36 +01:00
|
|
|
row_ << k << " : ";
|
|
|
|
row_ << v << " ";
|
|
|
|
} else {
|
|
|
|
row_ << k << " ";
|
|
|
|
}
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
2013-12-04 08:16:36 +01:00
|
|
|
|
2016-04-29 03:23:29 +02:00
|
|
|
virtual Status PutCF(uint32_t cf, const Slice& key,
|
|
|
|
const Slice& value) override {
|
|
|
|
row_ << "PUT(" << cf << ") : ";
|
2013-12-04 08:16:36 +01:00
|
|
|
commonPutMerge(key, value);
|
2016-04-29 03:23:29 +02:00
|
|
|
return Status::OK();
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
|
|
|
|
2016-04-29 03:23:29 +02:00
|
|
|
virtual Status MergeCF(uint32_t cf, const Slice& key,
|
|
|
|
const Slice& value) override {
|
|
|
|
row_ << "MERGE(" << cf << ") : ";
|
2013-12-04 08:16:36 +01:00
|
|
|
commonPutMerge(key, value);
|
2016-04-29 03:23:29 +02:00
|
|
|
return Status::OK();
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
2013-12-04 08:16:36 +01:00
|
|
|
|
2018-01-09 17:47:46 +01:00
|
|
|
virtual Status MarkNoop(bool)
|
|
|
|
override {
|
|
|
|
row_ << "NOOP ";
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2016-04-29 03:23:29 +02:00
|
|
|
virtual Status DeleteCF(uint32_t cf, const Slice& key) override {
|
|
|
|
row_ << "DELETE(" << cf << ") : ";
|
2013-12-04 08:16:36 +01:00
|
|
|
row_ << LDBCommand::StringToHex(key.ToString()) << " ";
|
2016-04-29 03:23:29 +02:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
|
|
|
virtual Status SingleDeleteCF(uint32_t cf, const Slice& key) override {
|
|
|
|
row_ << "SINGLE_DELETE(" << cf << ") : ";
|
|
|
|
row_ << LDBCommand::StringToHex(key.ToString()) << " ";
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2016-11-16 00:49:15 +01:00
|
|
|
virtual Status DeleteRangeCF(uint32_t cf, const Slice& begin_key,
|
|
|
|
const Slice& end_key) override {
|
|
|
|
row_ << "DELETE_RANGE(" << cf << ") : ";
|
|
|
|
row_ << LDBCommand::StringToHex(begin_key.ToString()) << " ";
|
|
|
|
row_ << LDBCommand::StringToHex(end_key.ToString()) << " ";
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
2018-07-07 02:17:36 +02:00
|
|
|
virtual Status MarkBeginPrepare(bool unprepare) override {
|
|
|
|
row_ << "BEGIN_PREPARE(";
|
|
|
|
row_ << (unprepare ? "true" : "false") << ") ";
|
2016-04-29 03:23:29 +02:00
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
Modification of WriteBatch to support two phase commit
Summary: Adds three new WriteBatch data types: Prepare(xid), Commit(xid), Rollback(xid). Prepare(xid) should precede the (single) operation to which is applies. There can obviously be multiple Prepare(xid) markers. There should only be one Rollback(xid) or Commit(xid) marker yet not both. None of this logic is currently enforced and will most likely be implemented further up such as in the memtableinserter. All three markers are similar to PutLogData in that they are writebatch meta-data, ie stored but not counted. All three markers differ from PutLogData in that they will actually be written to disk. As for WriteBatchWithIndex, Prepare, Commit, Rollback are all implemented just as PutLogData and none are tested just as PutLogData.
Test Plan: single unit test in write_batch_test.
Reviewers: hermanlee4, sdong, anthony
Subscribers: leveldb, dhruba, vasilep, andrewkr
Differential Revision: https://reviews.facebook.net/D57867
2016-04-08 08:35:51 +02:00
|
|
|
virtual Status MarkEndPrepare(const Slice& xid) override {
|
2016-04-29 03:23:29 +02:00
|
|
|
row_ << "END_PREPARE(";
|
|
|
|
row_ << LDBCommand::StringToHex(xid.ToString()) << ") ";
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
Modification of WriteBatch to support two phase commit
Summary: Adds three new WriteBatch data types: Prepare(xid), Commit(xid), Rollback(xid). Prepare(xid) should precede the (single) operation to which is applies. There can obviously be multiple Prepare(xid) markers. There should only be one Rollback(xid) or Commit(xid) marker yet not both. None of this logic is currently enforced and will most likely be implemented further up such as in the memtableinserter. All three markers are similar to PutLogData in that they are writebatch meta-data, ie stored but not counted. All three markers differ from PutLogData in that they will actually be written to disk. As for WriteBatchWithIndex, Prepare, Commit, Rollback are all implemented just as PutLogData and none are tested just as PutLogData.
Test Plan: single unit test in write_batch_test.
Reviewers: hermanlee4, sdong, anthony
Subscribers: leveldb, dhruba, vasilep, andrewkr
Differential Revision: https://reviews.facebook.net/D57867
2016-04-08 08:35:51 +02:00
|
|
|
virtual Status MarkRollback(const Slice& xid) override {
|
2016-04-29 03:23:29 +02:00
|
|
|
row_ << "ROLLBACK(";
|
|
|
|
row_ << LDBCommand::StringToHex(xid.ToString()) << ") ";
|
|
|
|
return Status::OK();
|
|
|
|
}
|
|
|
|
|
Modification of WriteBatch to support two phase commit
Summary: Adds three new WriteBatch data types: Prepare(xid), Commit(xid), Rollback(xid). Prepare(xid) should precede the (single) operation to which is applies. There can obviously be multiple Prepare(xid) markers. There should only be one Rollback(xid) or Commit(xid) marker yet not both. None of this logic is currently enforced and will most likely be implemented further up such as in the memtableinserter. All three markers are similar to PutLogData in that they are writebatch meta-data, ie stored but not counted. All three markers differ from PutLogData in that they will actually be written to disk. As for WriteBatchWithIndex, Prepare, Commit, Rollback are all implemented just as PutLogData and none are tested just as PutLogData.
Test Plan: single unit test in write_batch_test.
Reviewers: hermanlee4, sdong, anthony
Subscribers: leveldb, dhruba, vasilep, andrewkr
Differential Revision: https://reviews.facebook.net/D57867
2016-04-08 08:35:51 +02:00
|
|
|
virtual Status MarkCommit(const Slice& xid) override {
|
2016-04-29 03:23:29 +02:00
|
|
|
row_ << "COMMIT(";
|
|
|
|
row_ << LDBCommand::StringToHex(xid.ToString()) << ") ";
|
|
|
|
return Status::OK();
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
virtual ~InMemoryHandler() {}
|
2013-12-04 08:16:36 +01:00
|
|
|
|
2018-04-08 06:46:53 +02:00
|
|
|
protected:
|
|
|
|
virtual bool WriteAfterCommit() const override { return write_after_commit_; }
|
|
|
|
|
2013-02-20 03:12:20 +01:00
|
|
|
private:
|
2016-05-20 16:42:18 +02:00
|
|
|
std::stringstream& row_;
|
2013-12-04 08:16:36 +01:00
|
|
|
bool print_values_;
|
2018-04-08 06:46:53 +02:00
|
|
|
bool write_after_commit_;
|
2013-02-20 03:12:20 +01:00
|
|
|
};
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
void DumpWalFile(std::string wal_file, bool print_header, bool print_values,
|
2018-04-08 06:46:53 +02:00
|
|
|
bool is_write_committed, LDBCommandExecuteResult* exec_state) {
|
2014-11-24 19:04:16 +01:00
|
|
|
Env* env_ = Env::Default();
|
|
|
|
EnvOptions soptions;
|
2018-11-09 20:17:34 +01:00
|
|
|
std::unique_ptr<SequentialFileReader> wal_file_reader;
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
|
|
|
|
Status status;
|
|
|
|
{
|
2018-11-09 20:17:34 +01:00
|
|
|
std::unique_ptr<SequentialFile> file;
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
status = env_->NewSequentialFile(wal_file, &file, soptions);
|
|
|
|
if (status.ok()) {
|
2018-06-21 17:34:24 +02:00
|
|
|
wal_file_reader.reset(
|
|
|
|
new SequentialFileReader(std::move(file), wal_file));
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
}
|
|
|
|
}
|
2014-11-24 19:04:16 +01:00
|
|
|
if (!status.ok()) {
|
|
|
|
if (exec_state) {
|
2015-03-17 02:08:59 +01:00
|
|
|
*exec_state = LDBCommandExecuteResult::Failed("Failed to open WAL file " +
|
2014-11-24 19:04:16 +01:00
|
|
|
status.ToString());
|
|
|
|
} else {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::cerr << "Error: Failed to open WAL file " << status.ToString()
|
|
|
|
<< std::endl;
|
2014-11-24 19:04:16 +01:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
StdErrReporter reporter;
|
2015-10-08 19:06:16 +02:00
|
|
|
uint64_t log_number;
|
|
|
|
FileType type;
|
|
|
|
|
|
|
|
// we need the log number, but ParseFilename expects dbname/NNN.log.
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string sanitized = wal_file;
|
2015-10-08 19:06:16 +02:00
|
|
|
size_t lastslash = sanitized.rfind('/');
|
|
|
|
if (lastslash != std::string::npos)
|
|
|
|
sanitized = sanitized.substr(lastslash + 1);
|
|
|
|
if (!ParseFileName(sanitized, &log_number, &type)) {
|
|
|
|
// bogus input, carry on as best we can
|
|
|
|
log_number = 0;
|
|
|
|
}
|
|
|
|
DBOptions db_options;
|
2016-05-20 16:42:18 +02:00
|
|
|
log::Reader reader(db_options.info_log, std::move(wal_file_reader),
|
2018-10-19 20:51:13 +02:00
|
|
|
&reporter, true /* checksum */, log_number,
|
|
|
|
false /* retry_after_eof */);
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string scratch;
|
2014-11-24 19:04:16 +01:00
|
|
|
WriteBatch batch;
|
|
|
|
Slice record;
|
2016-05-20 16:42:18 +02:00
|
|
|
std::stringstream row;
|
2014-11-24 19:04:16 +01:00
|
|
|
if (print_header) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::cout << "Sequence,Count,ByteSize,Physical Offset,Key(s)";
|
2014-11-24 19:04:16 +01:00
|
|
|
if (print_values) {
|
2016-05-20 16:42:18 +02:00
|
|
|
std::cout << " : value ";
|
2014-11-24 19:04:16 +01:00
|
|
|
}
|
2016-05-20 16:42:18 +02:00
|
|
|
std::cout << "\n";
|
2014-11-24 19:04:16 +01:00
|
|
|
}
|
|
|
|
while (reader.ReadRecord(&record, &scratch)) {
|
|
|
|
row.str("");
|
2016-03-30 19:35:22 +02:00
|
|
|
if (record.size() < WriteBatchInternal::kHeader) {
|
2014-11-24 19:04:16 +01:00
|
|
|
reporter.Corruption(record.size(),
|
|
|
|
Status::Corruption("log record too small"));
|
|
|
|
} else {
|
|
|
|
WriteBatchInternal::SetContents(&batch, record);
|
|
|
|
row << WriteBatchInternal::Sequence(&batch) << ",";
|
|
|
|
row << WriteBatchInternal::Count(&batch) << ",";
|
|
|
|
row << WriteBatchInternal::ByteSize(&batch) << ",";
|
|
|
|
row << reader.LastRecordOffset() << ",";
|
2018-04-08 06:46:53 +02:00
|
|
|
InMemoryHandler handler(row, print_values, is_write_committed);
|
2014-11-24 19:04:16 +01:00
|
|
|
batch.Iterate(&handler);
|
|
|
|
row << "\n";
|
|
|
|
}
|
2016-05-20 16:42:18 +02:00
|
|
|
std::cout << row.str();
|
2014-11-24 19:04:16 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Command-line argument names accepted by the dump_wal command.
const std::string WALDumperCommand::ARG_WAL_FILE = "walfile";
const std::string WALDumperCommand::ARG_WRITE_COMMITTED = "write_committed";
const std::string WALDumperCommand::ARG_PRINT_VALUE = "print_value";
const std::string WALDumperCommand::ARG_PRINT_HEADER = "header";
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
WALDumperCommand::WALDumperCommand(
|
2018-03-05 22:08:17 +01:00
|
|
|
const std::vector<std::string>& /*params*/,
|
2016-05-20 16:42:18 +02:00
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(options, flags, true,
|
2018-04-08 06:46:53 +02:00
|
|
|
BuildCmdLineOptions({ARG_WAL_FILE, ARG_WRITE_COMMITTED,
|
|
|
|
ARG_PRINT_HEADER, ARG_PRINT_VALUE})),
|
2016-05-20 16:42:18 +02:00
|
|
|
print_header_(false),
|
2018-04-08 06:46:53 +02:00
|
|
|
print_values_(false),
|
|
|
|
is_write_committed_(false) {
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
wal_file_.clear();
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
std::map<std::string, std::string>::const_iterator itr =
|
|
|
|
options.find(ARG_WAL_FILE);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (itr != options.end()) {
|
|
|
|
wal_file_ = itr->second;
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
}
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
|
2013-02-20 03:12:20 +01:00
|
|
|
print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
|
|
|
|
print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);
|
2018-04-08 06:46:53 +02:00
|
|
|
is_write_committed_ = ParseBooleanOption(options, ARG_WRITE_COMMITTED, true);
|
|
|
|
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
if (wal_file_.empty()) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed("Argument " + ARG_WAL_FILE +
|
|
|
|
" must be specified.");
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
void WALDumperCommand::Help(std::string& ret) {
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append(" ");
|
|
|
|
ret.append(WALDumperCommand::Name());
|
|
|
|
ret.append(" --" + ARG_WAL_FILE + "=<write_ahead_log_file_path>");
|
|
|
|
ret.append(" [--" + ARG_PRINT_HEADER + "] ");
|
|
|
|
ret.append(" [--" + ARG_PRINT_VALUE + "] ");
|
2018-04-08 06:46:53 +02:00
|
|
|
ret.append(" [--" + ARG_WRITE_COMMITTED + "=true|false] ");
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append("\n");
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Dumps the WAL named by --walfile to stdout; any failure is recorded in
// exec_state_ by DumpWalFile rather than reported directly.
void WALDumperCommand::DoCommand() {
  DumpWalFile(wal_file_, print_header_, print_values_, is_write_committed_,
              &exec_state_);
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
GetCommand::GetCommand(const std::vector<std::string>& params,
|
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(
|
|
|
|
options, flags, true,
|
|
|
|
BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
|
2013-01-11 20:09:23 +01:00
|
|
|
if (params.size() != 1) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
"<key> must be specified for the get command");
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
|
|
|
key_ = params.at(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_key_hex_) {
|
|
|
|
key_ = HexToString(key_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the usage line for the get command to `ret`.
void GetCommand::Help(std::string& ret) {
  ret.append(" ")
      .append(GetCommand::Name())
      .append(" <key>")
      .append(" [--" + ARG_TTL + "]")
      .append("\n");
}
|
|
|
|
|
|
|
|
void GetCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string value;
|
2016-01-23 00:46:32 +01:00
|
|
|
Status st = db_->Get(ReadOptions(), GetCfHandle(), key_, &value);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "%s\n",
|
|
|
|
(is_value_hex_ ? StringToHex(value) : value).c_str());
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
ApproxSizeCommand::ApproxSizeCommand(
|
2018-03-05 22:08:17 +01:00
|
|
|
const std::vector<std::string>& /*params*/,
|
2016-05-20 16:42:18 +02:00
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(options, flags, true,
|
|
|
|
BuildCmdLineOptions(
|
|
|
|
{ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM, ARG_TO})) {
|
2013-01-11 20:09:23 +01:00
|
|
|
if (options.find(ARG_FROM) != options.end()) {
|
|
|
|
start_key_ = options.find(ARG_FROM)->second;
|
|
|
|
} else {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
ARG_FROM + " must be specified for approxsize command");
|
2013-01-11 20:09:23 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (options.find(ARG_TO) != options.end()) {
|
|
|
|
end_key_ = options.find(ARG_TO)->second;
|
|
|
|
} else {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
ARG_TO + " must be specified for approxsize command");
|
2013-01-11 20:09:23 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_key_hex_) {
|
|
|
|
start_key_ = HexToString(start_key_);
|
|
|
|
end_key_ = HexToString(end_key_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the one-line usage string for the "approxsize" command to `ret`.
void ApproxSizeCommand::Help(std::string& ret) {
  std::string usage("  ");
  usage += ApproxSizeCommand::Name();
  usage += HelpRangeCmdArgs();
  usage += "\n";
  ret.append(usage);
}
|
|
|
|
|
|
|
|
void ApproxSizeCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
2013-04-12 05:21:49 +02:00
|
|
|
Range ranges[1];
|
|
|
|
ranges[0] = Range(start_key_, end_key_);
|
2013-01-11 20:09:23 +01:00
|
|
|
uint64_t sizes[1];
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->GetApproximateSizes(GetCfHandle(), ranges, 1, sizes);
|
2013-11-13 06:02:03 +01:00
|
|
|
fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
|
2013-07-04 00:32:49 +02:00
|
|
|
/* Weird that GetApproximateSizes() returns void, although documentation
|
2013-01-11 20:09:23 +01:00
|
|
|
* says that it returns a Status object.
|
|
|
|
if (!st.ok()) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Constructs the "batchput" command from alternating <key> <value> positional
// params. Requires at least one pair and an even parameter count; keys and
// values are hex-decoded up front when the corresponding flags are set.
BatchPutCommand::BatchPutCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
                                      ARG_VALUE_HEX, ARG_CREATE_IF_MISSING})) {
  if (params.size() < 2) {
    // Fixed grammar of the user-facing message (was "specified batchput").
    exec_state_ = LDBCommandExecuteResult::Failed(
        "At least one <key> <value> pair must be specified for batchput.");
  } else if (params.size() % 2 != 0) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "Equal number of <key>s and <value>s must be specified for batchput.");
  } else {
    for (size_t i = 0; i < params.size(); i += 2) {
      std::string key = params.at(i);
      std::string value = params.at(i + 1);
      key_values_.push_back(std::pair<std::string, std::string>(
          is_key_hex_ ? HexToString(key) : key,
          is_value_hex_ ? HexToString(value) : value));
    }
  }
  create_if_missing_ = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the one-line usage string for the "batchput" command to `ret`.
void BatchPutCommand::Help(std::string& ret) {
  std::string usage("  ");
  usage += BatchPutCommand::Name();
  usage += " <key> <value> [<key> <value>] [..]";
  usage += " [--" + ARG_TTL + "]";
  usage += "\n";
  ret.append(usage);
}
|
|
|
|
|
|
|
|
void BatchPutCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
2013-04-12 05:21:49 +02:00
|
|
|
WriteBatch batch;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
for (std::vector<std::pair<std::string, std::string>>::const_iterator itr =
|
|
|
|
key_values_.begin();
|
|
|
|
itr != key_values_.end(); ++itr) {
|
2016-01-23 00:46:32 +01:00
|
|
|
batch.Put(GetCfHandle(), itr->first, itr->second);
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
2013-04-12 05:21:49 +02:00
|
|
|
Status st = db_->Write(WriteOptions(), &batch);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Extends the base open options with the --create_if_missing flag parsed in
// the constructor.
Options BatchPutCommand::PrepareOptionsForOpenDB() {
  Options options = LDBCommand::PrepareOptionsForOpenDB();
  options.create_if_missing = create_if_missing_;
  return options;
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2018-03-05 22:08:17 +01:00
|
|
|
// Constructs the "scan" command. All range/limit arguments are optional:
// --from / --to bound the scan (hex-decoded if --key_hex/--hex), --no_value
// suppresses value printing, and --max_keys caps how many entries are shown.
ScanCommand::ScanCommand(const std::vector<std::string>& /*params*/,
                         const std::map<std::string, std::string>& options,
                         const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, true,
          BuildCmdLineOptions({ARG_TTL, ARG_NO_VALUE, ARG_HEX, ARG_KEY_HEX,
                               ARG_TO, ARG_VALUE_HEX, ARG_FROM, ARG_TIMESTAMP,
                               ARG_MAX_KEYS, ARG_TTL_START, ARG_TTL_END})),
      start_key_specified_(false),
      end_key_specified_(false),
      max_keys_scanned_(-1),  // -1 == unlimited
      no_value_(false) {
  std::map<std::string, std::string>::const_iterator itr =
      options.find(ARG_FROM);
  if (itr != options.end()) {
    start_key_ = itr->second;
    if (is_key_hex_) {
      // Command-line keys arrive hex-encoded; decode to raw bytes.
      start_key_ = HexToString(start_key_);
    }
    start_key_specified_ = true;
  }
  itr = options.find(ARG_TO);
  if (itr != options.end()) {
    end_key_ = itr->second;
    if (is_key_hex_) {
      end_key_ = HexToString(end_key_);
    }
    end_key_specified_ = true;
  }

  std::vector<std::string>::const_iterator vitr =
      std::find(flags.begin(), flags.end(), ARG_NO_VALUE);
  if (vitr != flags.end()) {
    no_value_ = true;
  }

  itr = options.find(ARG_MAX_KEYS);
  if (itr != options.end()) {
    try {
#if defined(CYGWIN)
      // Cygwin's libstdc++ historically lacked std::stoi; fall back to strtol.
      max_keys_scanned_ = strtol(itr->second.c_str(), 0, 10);
#else
      max_keys_scanned_ = std::stoi(itr->second);
#endif
    } catch (const std::invalid_argument&) {
      // Non-numeric --max_keys value.
      exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
                                                    " has an invalid value");
    } catch (const std::out_of_range&) {
      // Numeric but does not fit in an int.
      exec_state_ = LDBCommandExecuteResult::Failed(
          ARG_MAX_KEYS + " has a value out-of-range");
    }
  }
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
void ScanCommand::Help(std::string& ret) {
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append(" ");
|
|
|
|
ret.append(ScanCommand::Name());
|
|
|
|
ret.append(HelpRangeCmdArgs());
|
|
|
|
ret.append(" [--" + ARG_TTL + "]");
|
|
|
|
ret.append(" [--" + ARG_TIMESTAMP + "]");
|
|
|
|
ret.append(" [--" + ARG_MAX_KEYS + "=<N>q] ");
|
|
|
|
ret.append(" [--" + ARG_TTL_START + "=<N>:- is inclusive]");
|
|
|
|
ret.append(" [--" + ARG_TTL_END + "=<N>:- is exclusive]");
|
2016-01-11 19:51:42 +01:00
|
|
|
ret.append(" [--" + ARG_NO_VALUE + "]");
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append("\n");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Iterates over [start_key_, end_key_) (or the whole keyspace when no bounds
// were given) and prints each entry, honoring TTL filtering, hex formatting,
// a user-supplied key formatter, --no_value, and --max_keys.
void ScanCommand::DoCommand() {
  if (!db_) {
    // DB open failed earlier; the execute state already records why.
    assert(GetExecuteState().IsFailed());
    return;
  }

  int num_keys_scanned = 0;
  Iterator* it = db_->NewIterator(ReadOptions(), GetCfHandle());
  if (start_key_specified_) {
    it->Seek(start_key_);
  } else {
    it->SeekToFirst();
  }
  int ttl_start;
  if (!ParseIntOption(option_map_, ARG_TTL_START, ttl_start, exec_state_)) {
    ttl_start = DBWithTTLImpl::kMinTimestamp;  // TTL introduction time
  }
  int ttl_end;
  if (!ParseIntOption(option_map_, ARG_TTL_END, ttl_end, exec_state_)) {
    ttl_end = DBWithTTLImpl::kMaxTimestamp;  // Max time allowed by TTL feature
  }
  if (ttl_end < ttl_start) {
    fprintf(stderr, "Error: End time can't be less than start time\n");
    delete it;  // must release the iterator on every exit path
    return;
  }
  if (is_db_ttl_ && timestamp_) {
    fprintf(stdout, "Scanning key-values from %s to %s\n",
            ReadableTime(ttl_start).c_str(), ReadableTime(ttl_end).c_str());
  }
  for ( ;
       it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_);
       it->Next()) {
    if (is_db_ttl_) {
      // For a TTL DB the iterator is really a TtlIterator carrying the
      // write timestamp; skip entries outside [ttl_start, ttl_end).
      TtlIterator* it_ttl = static_cast_with_check<TtlIterator, Iterator>(it);
      int rawtime = it_ttl->timestamp();
      if (rawtime < ttl_start || rawtime >= ttl_end) {
        continue;
      }
      if (timestamp_) {
        fprintf(stdout, "%s ", ReadableTime(rawtime).c_str());
      }
    }

    Slice key_slice = it->key();

    // formatted_key must outlive key_slice, which may point into it below.
    std::string formatted_key;
    if (is_key_hex_) {
      formatted_key = "0x" + key_slice.ToString(true /* hex */);
      key_slice = formatted_key;
    } else if (ldb_options_.key_formatter) {
      formatted_key = ldb_options_.key_formatter->Format(key_slice);
      key_slice = formatted_key;
    }

    if (no_value_) {
      // %.*s: slices are not NUL-terminated, so print length-bounded.
      fprintf(stdout, "%.*s\n", static_cast<int>(key_slice.size()),
              key_slice.data());
    } else {
      Slice val_slice = it->value();
      std::string formatted_value;
      if (is_value_hex_) {
        formatted_value = "0x" + val_slice.ToString(true /* hex */);
        val_slice = formatted_value;
      }
      fprintf(stdout, "%.*s : %.*s\n", static_cast<int>(key_slice.size()),
              key_slice.data(), static_cast<int>(val_slice.size()),
              val_slice.data());
    }

    num_keys_scanned++;
    if (max_keys_scanned_ >= 0 && num_keys_scanned >= max_keys_scanned_) {
      break;
    }
  }
  if (!it->status().ok()) {  // Check for any errors found during the scan
    exec_state_ = LDBCommandExecuteResult::Failed(it->status().ToString());
  }
  delete it;
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
DeleteCommand::DeleteCommand(const std::vector<std::string>& params,
|
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(options, flags, false,
|
|
|
|
BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
|
2013-01-11 20:09:23 +01:00
|
|
|
if (params.size() != 1) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
"KEY must be specified for the delete command");
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
|
|
|
key_ = params.at(0);
|
|
|
|
if (is_key_hex_) {
|
|
|
|
key_ = HexToString(key_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the one-line usage string for the "delete" command to `ret`.
void DeleteCommand::Help(std::string& ret) {
  std::string usage("  ");
  usage += DeleteCommand::Name() + " <key>";
  usage += "\n";
  ret.append(usage);
}
|
|
|
|
|
|
|
|
void DeleteCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Status st = db_->Delete(WriteOptions(), GetCfHandle(), key_);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-11-16 00:49:15 +01:00
|
|
|
// Constructs the "deleterange" command from exactly two positional params:
// <begin key> and <end key>, hex-decoded when --key_hex/--hex is in effect.
DeleteRangeCommand::DeleteRangeCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
  if (params.size() != 2) {
    // Fixed the error message: it previously referred to "the delete
    // command" although this is the delete_range command.
    exec_state_ = LDBCommandExecuteResult::Failed(
        "begin and end keys must be specified for the delete_range command");
  } else {
    begin_key_ = params.at(0);
    end_key_ = params.at(1);
    if (is_key_hex_) {
      begin_key_ = HexToString(begin_key_);
      end_key_ = HexToString(end_key_);
    }
  }
}
|
|
|
|
|
|
|
|
// Appends the one-line usage string for the "deleterange" command to `ret`.
void DeleteRangeCommand::Help(std::string& ret) {
  std::string usage("  ");
  usage += DeleteRangeCommand::Name() + " <begin key> <end key>";
  usage += "\n";
  ret.append(usage);
}
|
|
|
|
|
|
|
|
void DeleteRangeCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Status st =
|
|
|
|
db_->DeleteRange(WriteOptions(), GetCfHandle(), begin_key_, end_key_);
|
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
PutCommand::PutCommand(const std::vector<std::string>& params,
|
|
|
|
const std::map<std::string, std::string>& options,
|
|
|
|
const std::vector<std::string>& flags)
|
|
|
|
: LDBCommand(options, flags, false,
|
|
|
|
BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
|
|
|
|
ARG_VALUE_HEX, ARG_CREATE_IF_MISSING})) {
|
2013-01-11 20:09:23 +01:00
|
|
|
if (params.size() != 2) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
rocksdb: Replace ASSERT* with EXPECT* in functions that does not return void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that doesn't return void value, I change the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions, the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted back change to `ASSERT*` in `util/testharness.h`. But preserved introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once switched to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
|
|
|
"<key> and <value> must be specified for the put command");
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
|
|
|
key_ = params.at(0);
|
|
|
|
value_ = params.at(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_key_hex_) {
|
|
|
|
key_ = HexToString(key_);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_value_hex_) {
|
|
|
|
value_ = HexToString(value_);
|
|
|
|
}
|
2017-04-20 19:16:13 +02:00
|
|
|
create_if_missing_ = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the one-line usage string for the "put" command to `ret`.
void PutCommand::Help(std::string& ret) {
  std::string usage("  ");
  usage += PutCommand::Name();
  usage += " <key> <value> ";
  usage += " [--" + ARG_TTL + "]";
  usage += "\n";
  ret.append(usage);
}
|
|
|
|
|
|
|
|
void PutCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Status st = db_->Put(WriteOptions(), GetCfHandle(), key_, value_);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Extends the base open options with the --create_if_missing flag parsed in
// the constructor.
Options PutCommand::PrepareOptionsForOpenDB() {
  Options options = LDBCommand::PrepareOptionsForOpenDB();
  options.create_if_missing = create_if_missing_;
  return options;
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
// Verbs accepted by the interactive "query" REPL (see DoCommand below).
const char* DBQuerierCommand::HELP_CMD = "help";
const char* DBQuerierCommand::GET_CMD = "get";
const char* DBQuerierCommand::PUT_CMD = "put";
const char* DBQuerierCommand::DELETE_CMD = "delete";
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Constructs the interactive "query" REPL command. Takes no positional
// params; only the hex/TTL formatting flags are accepted.
DBQuerierCommand::DBQuerierCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, false,
          BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {

}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the usage text for the "query" command to `ret`.
void DBQuerierCommand::Help(std::string& ret) {
  std::string usage("  ");
  usage += DBQuerierCommand::Name();
  usage += " [--" + ARG_TTL + "]";
  usage += "\n";
  usage += "    Starts a REPL shell. Type help for list of available "
           "commands.";
  usage += "\n";
  ret.append(usage);
}
|
|
|
|
|
|
|
|
void DBQuerierCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
2016-01-23 00:46:32 +01:00
|
|
|
assert(GetExecuteState().IsFailed());
|
2013-01-11 20:09:23 +01:00
|
|
|
return;
|
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
ReadOptions read_options;
|
|
|
|
WriteOptions write_options;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string line;
|
|
|
|
std::string key;
|
|
|
|
std::string value;
|
|
|
|
while (getline(std::cin, line, '\n')) {
|
|
|
|
// Parse line into std::vector<std::string>
|
|
|
|
std::vector<std::string> tokens;
|
2013-01-11 20:09:23 +01:00
|
|
|
size_t pos = 0;
|
|
|
|
while (true) {
|
|
|
|
size_t pos2 = line.find(' ', pos);
|
2016-05-20 16:42:18 +02:00
|
|
|
if (pos2 == std::string::npos) {
|
2013-01-11 20:09:23 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
tokens.push_back(line.substr(pos, pos2-pos));
|
|
|
|
pos = pos2 + 1;
|
|
|
|
}
|
|
|
|
tokens.push_back(line.substr(pos));
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
const std::string& cmd = tokens[0];
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
if (cmd == HELP_CMD) {
|
|
|
|
fprintf(stdout,
|
|
|
|
"get <key>\n"
|
|
|
|
"put <key> <value>\n"
|
|
|
|
"delete <key>\n");
|
|
|
|
} else if (cmd == DELETE_CMD && tokens.size() == 2) {
|
|
|
|
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->Delete(write_options, GetCfHandle(), Slice(key));
|
2013-01-11 20:09:23 +01:00
|
|
|
fprintf(stdout, "Successfully deleted %s\n", tokens[1].c_str());
|
|
|
|
} else if (cmd == PUT_CMD && tokens.size() == 3) {
|
|
|
|
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
|
|
|
|
value = (is_value_hex_ ? HexToString(tokens[2]) : tokens[2]);
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->Put(write_options, GetCfHandle(), Slice(key), Slice(value));
|
2013-01-11 20:09:23 +01:00
|
|
|
fprintf(stdout, "Successfully put %s %s\n",
|
|
|
|
tokens[1].c_str(), tokens[2].c_str());
|
|
|
|
} else if (cmd == GET_CMD && tokens.size() == 2) {
|
|
|
|
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
|
2016-01-23 00:46:32 +01:00
|
|
|
if (db_->Get(read_options, GetCfHandle(), Slice(key), &value).ok()) {
|
2013-01-11 20:09:23 +01:00
|
|
|
fprintf(stdout, "%s\n", PrintKeyValue(key, value,
|
|
|
|
is_key_hex_, is_value_hex_).c_str());
|
|
|
|
} else {
|
|
|
|
fprintf(stdout, "Not found %s\n", tokens[1].c_str());
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
fprintf(stdout, "Unknown command %s\n", line.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Constructs the "checkconsistency" command. Takes no positional params and
// no extra flags; the real work (a paranoid read-only open) happens in
// DoCommand.
CheckConsistencyCommand::CheckConsistencyCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false, BuildCmdLineOptions({})) {}
|
2014-03-20 21:42:45 +01:00
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends the one-line usage string for the "checkconsistency" command.
void CheckConsistencyCommand::Help(std::string& ret) {
  std::string usage("  ");
  usage += CheckConsistencyCommand::Name();
  usage += "\n";
  ret.append(usage);
}
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-03-20 21:42:45 +01:00
|
|
|
void CheckConsistencyCommand::DoCommand() {
|
|
|
|
Options opt = PrepareOptionsForOpenDB();
|
2014-03-20 22:18:29 +01:00
|
|
|
opt.paranoid_checks = true;
|
2014-03-20 21:42:45 +01:00
|
|
|
if (!exec_state_.IsNotStarted()) {
|
|
|
|
return;
|
|
|
|
}
|
2014-03-20 22:18:29 +01:00
|
|
|
DB* db;
|
|
|
|
Status st = DB::OpenForReadOnly(opt, db_path_, &db, false);
|
|
|
|
delete db;
|
2014-03-20 21:42:45 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2014-03-20 21:42:45 +01:00
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
2014-03-20 21:42:45 +01:00
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2016-03-12 22:50:20 +01:00
|
|
|
|
2017-03-21 19:49:08 +01:00
|
|
|
// Option name selecting the directory where the checkpoint is created
// (used as --checkpoint_dir=<path>).
const std::string CheckPointCommand::ARG_CHECKPOINT_DIR = "checkpoint_dir";
|
|
|
|
|
|
|
|
// Accepts one optional option, --checkpoint_dir, naming the checkpoint
// target directory; everything else is generic LDBCommand handling.
CheckPointCommand::CheckPointCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false /* is_read_only */,
                 BuildCmdLineOptions({ARG_CHECKPOINT_DIR})) {
  const auto dir_it = options.find(ARG_CHECKPOINT_DIR);
  if (dir_it != options.end()) {
    checkpoint_dir_ = dir_it->second;
  }
}
|
|
|
|
|
|
|
|
// Appends this command's usage entry, listing its single optional flag.
void CheckPointCommand::Help(std::string& ret) {
  ret.append("  ")
      .append(CheckPointCommand::Name())
      .append(" [--" + ARG_CHECKPOINT_DIR + "] ")
      .append("\n");
}
|
|
|
|
|
|
|
|
void CheckPointCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Checkpoint* checkpoint;
|
|
|
|
Status status = Checkpoint::Create(db_, &checkpoint);
|
|
|
|
status = checkpoint->CreateCheckpoint(checkpoint_dir_);
|
|
|
|
if (status.ok()) {
|
|
|
|
printf("OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
|
2018-03-05 22:08:17 +01:00
|
|
|
// repair takes no command-specific arguments.
RepairCommand::RepairCommand(const std::vector<std::string>& /*params*/,
                             const std::map<std::string, std::string>& options,
                             const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false /* is_read_only */,
                 BuildCmdLineOptions({})) {}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends this command's one-line usage entry (no arguments) to `ret`.
void RepairCommand::Help(std::string& ret) {
  ret.append("  ").append(RepairCommand::Name()).append("\n");
}
|
|
|
|
|
|
|
|
void RepairCommand::DoCommand() {
|
|
|
|
Options options = PrepareOptionsForOpenDB();
|
2016-04-01 20:06:06 +02:00
|
|
|
options.info_log.reset(new StderrLogger(InfoLogLevel::WARN_LEVEL));
|
2016-03-12 22:50:20 +01:00
|
|
|
Status status = RepairDB(db_path_, options);
|
|
|
|
if (status.ok()) {
|
|
|
|
printf("OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
2016-07-14 23:09:31 +02:00
|
|
|
|
2017-03-03 22:17:39 +01:00
|
|
|
// Option names shared by the backup and restore commands.
// Number of concurrent background operations (default 1, see constructor).
const std::string BackupableCommand::ARG_NUM_THREADS = "num_threads";
// URI handed to NewCustomObject<Env> to build the backup-side Env.
const std::string BackupableCommand::ARG_BACKUP_ENV_URI = "backup_env_uri";
// Directory holding the backups; required (constructor fails without it).
const std::string BackupableCommand::ARG_BACKUP_DIR = "backup_dir";
// Integer InfoLogLevel for an optional StderrLogger attached to the engine.
const std::string BackupableCommand::ARG_STDERR_LOG_LEVEL = "stderr_log_level";
|
2016-07-14 23:09:31 +02:00
|
|
|
|
2017-03-03 22:17:39 +01:00
|
|
|
// Shared constructor for backup/restore: parses --num_threads,
// --backup_env_uri, --backup_dir (required) and --stderr_log_level.
// Any parse failure is recorded in exec_state_ instead of crashing.
BackupableCommand::BackupableCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, false /* is_read_only */,
                 BuildCmdLineOptions({ARG_BACKUP_ENV_URI, ARG_BACKUP_DIR,
                                      ARG_NUM_THREADS, ARG_STDERR_LOG_LEVEL})),
      num_threads_(1) {
  auto itr = options.find(ARG_NUM_THREADS);
  if (itr != options.end()) {
    // std::stoi throws on non-numeric input; turn that into a command
    // failure instead of an uncaught exception terminating ldb.
    try {
      num_threads_ = std::stoi(itr->second);
    } catch (const std::exception&) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          "--" + ARG_NUM_THREADS + ": invalid integer " + itr->second);
      return;
    }
  }
  itr = options.find(ARG_BACKUP_ENV_URI);
  if (itr != options.end()) {
    backup_env_uri_ = itr->second;
  }
  itr = options.find(ARG_BACKUP_DIR);
  if (itr == options.end()) {
    exec_state_ = LDBCommandExecuteResult::Failed("--" + ARG_BACKUP_DIR +
                                                  ": missing backup directory");
  } else {
    backup_dir_ = itr->second;
  }

  itr = options.find(ARG_STDERR_LOG_LEVEL);
  if (itr != options.end()) {
    int stderr_log_level;
    try {
      stderr_log_level = std::stoi(itr->second);
    } catch (const std::exception&) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          "--" + ARG_STDERR_LOG_LEVEL + ": invalid integer " + itr->second);
      return;
    }
    if (stderr_log_level < 0 ||
        stderr_log_level >= InfoLogLevel::NUM_INFO_LOG_LEVELS) {
      exec_state_ = LDBCommandExecuteResult::Failed(
          ARG_STDERR_LOG_LEVEL + " must be >= 0 and < " +
          std::to_string(InfoLogLevel::NUM_INFO_LOG_LEVELS) + ".");
    } else {
      logger_.reset(
          new StderrLogger(static_cast<InfoLogLevel>(stderr_log_level)));
    }
  }
}
|
|
|
|
|
2017-03-03 22:17:39 +01:00
|
|
|
void BackupableCommand::Help(const std::string& name, std::string& ret) {
|
2016-07-14 23:09:31 +02:00
|
|
|
ret.append(" ");
|
2017-03-03 22:17:39 +01:00
|
|
|
ret.append(name);
|
|
|
|
ret.append(" [--" + ARG_BACKUP_ENV_URI + "] ");
|
2016-07-14 23:09:31 +02:00
|
|
|
ret.append(" [--" + ARG_BACKUP_DIR + "] ");
|
2017-03-03 22:17:39 +01:00
|
|
|
ret.append(" [--" + ARG_NUM_THREADS + "] ");
|
|
|
|
ret.append(" [--" + ARG_STDERR_LOG_LEVEL + "=<int (InfoLogLevel)>] ");
|
2016-07-14 23:09:31 +02:00
|
|
|
ret.append("\n");
|
|
|
|
}
|
|
|
|
|
2017-03-03 22:17:39 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
// backup shares all of its argument handling with BackupableCommand.
BackupCommand::BackupCommand(const std::vector<std::string>& params,
                             const std::map<std::string, std::string>& options,
                             const std::vector<std::string>& flags)
    : BackupableCommand(params, options, flags) {}
|
|
|
|
|
|
|
|
// Delegates to the shared backup/restore usage text.
void BackupCommand::Help(std::string& ret) {
  BackupableCommand::Help(Name(), ret);
}
|
|
|
|
|
2016-07-14 23:09:31 +02:00
|
|
|
void BackupCommand::DoCommand() {
|
|
|
|
BackupEngine* backup_engine;
|
|
|
|
Status status;
|
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
printf("open db OK\n");
|
|
|
|
std::unique_ptr<Env> custom_env_guard;
|
2017-03-03 22:17:39 +01:00
|
|
|
Env* custom_env = NewCustomObject<Env>(backup_env_uri_, &custom_env_guard);
|
2016-07-14 23:09:31 +02:00
|
|
|
BackupableDBOptions backup_options =
|
2017-03-03 22:17:39 +01:00
|
|
|
BackupableDBOptions(backup_dir_, custom_env);
|
|
|
|
backup_options.info_log = logger_.get();
|
|
|
|
backup_options.max_background_operations = num_threads_;
|
2016-07-14 23:09:31 +02:00
|
|
|
status = BackupEngine::Open(Env::Default(), backup_options, &backup_engine);
|
|
|
|
if (status.ok()) {
|
|
|
|
printf("open backup engine OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
status = backup_engine->CreateNewBackup(db_);
|
|
|
|
if (status.ok()) {
|
|
|
|
printf("create new backup OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
2016-07-26 20:13:26 +02:00
|
|
|
|
|
|
|
// restore shares all of its argument handling with BackupableCommand.
RestoreCommand::RestoreCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : BackupableCommand(params, options, flags) {}
|
2016-07-26 20:13:26 +02:00
|
|
|
|
|
|
|
void RestoreCommand::Help(std::string& ret) {
|
2017-03-03 22:17:39 +01:00
|
|
|
BackupableCommand::Help(Name(), ret);
|
2016-07-26 20:13:26 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void RestoreCommand::DoCommand() {
|
|
|
|
std::unique_ptr<Env> custom_env_guard;
|
2017-01-26 00:54:09 +01:00
|
|
|
Env* custom_env = NewCustomObject<Env>(backup_env_uri_, &custom_env_guard);
|
2016-07-26 20:13:26 +02:00
|
|
|
std::unique_ptr<BackupEngineReadOnly> restore_engine;
|
|
|
|
Status status;
|
|
|
|
{
|
|
|
|
BackupableDBOptions opts(backup_dir_, custom_env);
|
2017-03-03 22:17:39 +01:00
|
|
|
opts.info_log = logger_.get();
|
2016-07-26 20:13:26 +02:00
|
|
|
opts.max_background_operations = num_threads_;
|
|
|
|
BackupEngineReadOnly* raw_restore_engine_ptr;
|
|
|
|
status = BackupEngineReadOnly::Open(Env::Default(), opts,
|
|
|
|
&raw_restore_engine_ptr);
|
|
|
|
if (status.ok()) {
|
|
|
|
restore_engine.reset(raw_restore_engine_ptr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (status.ok()) {
|
|
|
|
printf("open restore engine OK\n");
|
|
|
|
status = restore_engine->RestoreDBFromLatestBackup(db_path_, db_path_);
|
|
|
|
}
|
|
|
|
if (status.ok()) {
|
|
|
|
printf("restore from backup OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
2014-11-24 19:04:16 +01:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
void DumpSstFile(std::string filename, bool output_hex, bool show_properties) {
|
|
|
|
std::string from_key;
|
|
|
|
std::string to_key;
|
|
|
|
if (filename.length() <= 4 ||
|
|
|
|
filename.rfind(".sst") != filename.length() - 4) {
|
|
|
|
std::cout << "Invalid sst file name." << std::endl;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// no verification
|
2018-11-27 21:59:27 +01:00
|
|
|
rocksdb::SstFileDumper dumper(filename, false, output_hex);
|
2018-12-13 23:12:02 +01:00
|
|
|
Status st = dumper.ReadSequential(true, std::numeric_limits<uint64_t>::max(),
|
|
|
|
false, // has_from
|
2014-11-24 19:04:16 +01:00
|
|
|
from_key, false, // has_to
|
|
|
|
to_key);
|
|
|
|
if (!st.ok()) {
|
|
|
|
std::cerr << "Error in reading SST file " << filename << st.ToString()
|
|
|
|
<< std::endl;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (show_properties) {
|
|
|
|
const rocksdb::TableProperties* table_properties;
|
|
|
|
|
|
|
|
std::shared_ptr<const rocksdb::TableProperties>
|
|
|
|
table_properties_from_reader;
|
2018-11-27 21:59:27 +01:00
|
|
|
st = dumper.ReadTableProperties(&table_properties_from_reader);
|
2014-11-24 19:04:16 +01:00
|
|
|
if (!st.ok()) {
|
|
|
|
std::cerr << filename << ": " << st.ToString()
|
|
|
|
<< ". Try to use initial table properties" << std::endl;
|
2018-11-27 21:59:27 +01:00
|
|
|
table_properties = dumper.GetInitTableProperties();
|
2014-11-24 19:04:16 +01:00
|
|
|
} else {
|
|
|
|
table_properties = table_properties_from_reader.get();
|
|
|
|
}
|
|
|
|
if (table_properties != nullptr) {
|
|
|
|
std::cout << std::endl << "Table Properties:" << std::endl;
|
|
|
|
std::cout << table_properties->ToString("\n") << std::endl;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// dump_live_files takes no command-specific arguments and opens the DB
// read-only.
DBFileDumperCommand::DBFileDumperCommand(
    const std::vector<std::string>& /*params*/,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(options, flags, true /* is_read_only */,
                 BuildCmdLineOptions({})) {}
|
|
|
|
|
2016-05-20 16:42:18 +02:00
|
|
|
// Appends this command's one-line usage entry (no arguments) to `ret`.
void DBFileDumperCommand::Help(std::string& ret) {
  ret.append("  ").append(DBFileDumperCommand::Name()).append("\n");
}
|
|
|
|
|
|
|
|
void DBFileDumperCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
2016-01-23 00:46:32 +01:00
|
|
|
assert(GetExecuteState().IsFailed());
|
2014-11-24 19:04:16 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
Status s;
|
|
|
|
|
|
|
|
std::cout << "Manifest File" << std::endl;
|
|
|
|
std::cout << "==============================" << std::endl;
|
|
|
|
std::string manifest_filename;
|
|
|
|
s = ReadFileToString(db_->GetEnv(), CurrentFileName(db_->GetName()),
|
|
|
|
&manifest_filename);
|
|
|
|
if (!s.ok() || manifest_filename.empty() ||
|
|
|
|
manifest_filename.back() != '\n') {
|
|
|
|
std::cerr << "Error when reading CURRENT file "
|
|
|
|
<< CurrentFileName(db_->GetName()) << std::endl;
|
|
|
|
}
|
|
|
|
// remove the trailing '\n'
|
|
|
|
manifest_filename.resize(manifest_filename.size() - 1);
|
2016-05-20 16:42:18 +02:00
|
|
|
std::string manifest_filepath = db_->GetName() + "/" + manifest_filename;
|
2014-11-24 19:04:16 +01:00
|
|
|
std::cout << manifest_filepath << std::endl;
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage: **
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
2015-07-17 19:07:40 +02:00
|
|
|
DumpManifestFile(manifest_filepath, false, false, false);
|
2014-11-24 19:04:16 +01:00
|
|
|
std::cout << std::endl;
|
|
|
|
|
|
|
|
std::cout << "SST Files" << std::endl;
|
|
|
|
std::cout << "==============================" << std::endl;
|
|
|
|
std::vector<LiveFileMetaData> metadata;
|
|
|
|
db_->GetLiveFilesMetaData(&metadata);
|
|
|
|
for (auto& fileMetadata : metadata) {
|
|
|
|
std::string filename = fileMetadata.db_path + fileMetadata.name;
|
|
|
|
std::cout << filename << " level:" << fileMetadata.level << std::endl;
|
|
|
|
std::cout << "------------------------------" << std::endl;
|
|
|
|
DumpSstFile(filename, false, true);
|
|
|
|
std::cout << std::endl;
|
|
|
|
}
|
|
|
|
std::cout << std::endl;
|
|
|
|
|
|
|
|
std::cout << "Write Ahead Log Files" << std::endl;
|
|
|
|
std::cout << "==============================" << std::endl;
|
|
|
|
rocksdb::VectorLogPtr wal_files;
|
|
|
|
s = db_->GetSortedWalFiles(wal_files);
|
|
|
|
if (!s.ok()) {
|
|
|
|
std::cerr << "Error when getting WAL files" << std::endl;
|
|
|
|
} else {
|
|
|
|
for (auto& wal : wal_files) {
|
|
|
|
// TODO(qyang): option.wal_dir should be passed into ldb command
|
|
|
|
std::string filename = db_->GetOptions().wal_dir + wal->PathName();
|
|
|
|
std::cout << filename << std::endl;
|
2018-04-08 06:46:53 +02:00
|
|
|
// TODO(myabandeh): allow configuring is_write_commited
|
|
|
|
DumpWalFile(filename, true, true, true /* is_write_commited */,
|
|
|
|
&exec_state_);
|
2014-11-24 19:04:16 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-08-09 23:18:59 +02:00
|
|
|
// Appends this command's usage entry; it takes the output path as its
// single positional argument.
void WriteExternalSstFilesCommand::Help(std::string& ret) {
  ret.append("  ")
      .append(WriteExternalSstFilesCommand::Name())
      .append(" <output_sst_path>")
      .append("\n");
}
|
|
|
|
|
|
|
|
// Parses --create_if_missing (flag or boolean option) and the required
// positional output path for the external SST file.
WriteExternalSstFilesCommand::WriteExternalSstFilesCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, false /* is_read_only */,
          BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
                               ARG_TO, ARG_CREATE_IF_MISSING})) {
  create_if_missing_ =
      IsFlagPresent(flags, ARG_CREATE_IF_MISSING) ||
      ParseBooleanOption(options, ARG_CREATE_IF_MISSING, false);
  if (params.size() == 1) {
    output_sst_path_ = params.at(0);
  } else {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "output SST file path must be specified");
  }
}
|
|
|
|
|
|
|
|
void WriteExternalSstFilesCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
ColumnFamilyHandle* cfh = GetCfHandle();
|
|
|
|
SstFileWriter sst_file_writer(EnvOptions(), db_->GetOptions(), cfh);
|
|
|
|
Status status = sst_file_writer.Open(output_sst_path_);
|
|
|
|
if (!status.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed("failed to open SST file: " +
|
|
|
|
status.ToString());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
int bad_lines = 0;
|
|
|
|
std::string line;
|
|
|
|
std::ifstream ifs_stdin("/dev/stdin");
|
|
|
|
std::istream* istream_p = ifs_stdin.is_open() ? &ifs_stdin : &std::cin;
|
|
|
|
while (getline(*istream_p, line, '\n')) {
|
|
|
|
std::string key;
|
|
|
|
std::string value;
|
|
|
|
if (ParseKeyValue(line, &key, &value, is_key_hex_, is_value_hex_)) {
|
|
|
|
status = sst_file_writer.Put(key, value);
|
|
|
|
if (!status.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"failed to write record to file: " + status.ToString());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
} else if (0 == line.find("Keys in range:")) {
|
|
|
|
// ignore this line
|
|
|
|
} else if (0 == line.find("Created bg thread 0x")) {
|
|
|
|
// ignore this line
|
|
|
|
} else {
|
|
|
|
bad_lines++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
status = sst_file_writer.Finish();
|
|
|
|
if (!status.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Failed to finish writing to file: " + status.ToString());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (bad_lines > 0) {
|
|
|
|
fprintf(stderr, "Warning: %d bad lines ignored.\n", bad_lines);
|
|
|
|
}
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Succeed(
|
|
|
|
"external SST file written to " + output_sst_path_);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Starts from the generic ldb open options and applies --create_if_missing.
Options WriteExternalSstFilesCommand::PrepareOptionsForOpenDB() {
  Options options = LDBCommand::PrepareOptionsForOpenDB();
  options.create_if_missing = create_if_missing_;
  return options;
}
|
|
|
|
|
|
|
|
// Option names for the ingest command; each maps onto the
// IngestExternalFileOptions field of the same name (see DoCommand).
const std::string IngestExternalSstFilesCommand::ARG_MOVE_FILES = "move_files";
const std::string IngestExternalSstFilesCommand::ARG_SNAPSHOT_CONSISTENCY =
    "snapshot_consistency";
const std::string IngestExternalSstFilesCommand::ARG_ALLOW_GLOBAL_SEQNO =
    "allow_global_seqno";
const std::string IngestExternalSstFilesCommand::ARG_ALLOW_BLOCKING_FLUSH =
    "allow_blocking_flush";
const std::string IngestExternalSstFilesCommand::ARG_INGEST_BEHIND =
    "ingest_behind";
const std::string IngestExternalSstFilesCommand::ARG_WRITE_GLOBAL_SEQNO =
    "write_global_seqno";
|
|
|
|
|
|
|
|
// Appends this command's usage entry: the input SST path plus the six
// optional ingestion flags.
void IngestExternalSstFilesCommand::Help(std::string& ret) {
  ret.append("  ")
      .append(IngestExternalSstFilesCommand::Name())
      .append(" <input_sst_path>")
      .append(" [--" + ARG_MOVE_FILES + "] ")
      .append(" [--" + ARG_SNAPSHOT_CONSISTENCY + "] ")
      .append(" [--" + ARG_ALLOW_GLOBAL_SEQNO + "] ")
      .append(" [--" + ARG_ALLOW_BLOCKING_FLUSH + "] ")
      .append(" [--" + ARG_INGEST_BEHIND + "] ")
      .append(" [--" + ARG_WRITE_GLOBAL_SEQNO + "] ")
      .append("\n");
}
|
|
|
|
|
|
|
|
// Parses the ingestion flags (each acceptable as a bare flag or as a
// boolean option), validates the global_seqno combination, and records the
// required positional input SST path.
IngestExternalSstFilesCommand::IngestExternalSstFilesCommand(
    const std::vector<std::string>& params,
    const std::map<std::string, std::string>& options,
    const std::vector<std::string>& flags)
    : LDBCommand(
          options, flags, false /* is_read_only */,
          BuildCmdLineOptions({ARG_MOVE_FILES, ARG_SNAPSHOT_CONSISTENCY,
                               ARG_ALLOW_GLOBAL_SEQNO, ARG_CREATE_IF_MISSING,
                               ARG_ALLOW_BLOCKING_FLUSH, ARG_INGEST_BEHIND,
                               ARG_WRITE_GLOBAL_SEQNO})),
      move_files_(false),
      snapshot_consistency_(true),
      allow_global_seqno_(true),
      allow_blocking_flush_(true),
      ingest_behind_(false),
      write_global_seqno_(true) {
  // An option is enabled when given as a bare flag, otherwise parsed as a
  // boolean option with the stated default.
  auto flag_or_bool_option = [&](const std::string& arg, bool default_val) {
    return IsFlagPresent(flags, arg) ||
           ParseBooleanOption(options, arg, default_val);
  };
  create_if_missing_ = flag_or_bool_option(ARG_CREATE_IF_MISSING, false);
  move_files_ = flag_or_bool_option(ARG_MOVE_FILES, false);
  snapshot_consistency_ = flag_or_bool_option(ARG_SNAPSHOT_CONSISTENCY, true);
  allow_global_seqno_ = flag_or_bool_option(ARG_ALLOW_GLOBAL_SEQNO, true);
  allow_blocking_flush_ = flag_or_bool_option(ARG_ALLOW_BLOCKING_FLUSH, true);
  ingest_behind_ = flag_or_bool_option(ARG_INGEST_BEHIND, false);
  write_global_seqno_ = flag_or_bool_option(ARG_WRITE_GLOBAL_SEQNO, true);

  if (allow_global_seqno_) {
    if (!write_global_seqno_) {
      fprintf(stderr,
              "Warning: not writing global_seqno to the ingested SST can\n"
              "prevent older versions of RocksDB from being able to open it\n");
    }
  } else if (write_global_seqno_) {
    exec_state_ = LDBCommandExecuteResult::Failed(
        "ldb cannot write global_seqno to the ingested SST when global_seqno "
        "is not allowed");
  }

  if (params.size() == 1) {
    input_sst_path_ = params.at(0);
  } else {
    exec_state_ =
        LDBCommandExecuteResult::Failed("input SST path must be specified");
  }
}
|
|
|
|
|
|
|
|
void IngestExternalSstFilesCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (GetExecuteState().IsFailed()) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
ColumnFamilyHandle* cfh = GetCfHandle();
|
|
|
|
IngestExternalFileOptions ifo;
|
|
|
|
ifo.move_files = move_files_;
|
|
|
|
ifo.snapshot_consistency = snapshot_consistency_;
|
|
|
|
ifo.allow_global_seqno = allow_global_seqno_;
|
|
|
|
ifo.allow_blocking_flush = allow_blocking_flush_;
|
|
|
|
ifo.ingest_behind = ingest_behind_;
|
|
|
|
ifo.write_global_seqno = write_global_seqno_;
|
|
|
|
Status status = db_->IngestExternalFile(cfh, {input_sst_path_}, ifo);
|
|
|
|
if (!status.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"failed to ingest external SST: " + status.ToString());
|
|
|
|
} else {
|
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Succeed("external SST files ingested");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Starts from the generic ldb open options and applies --create_if_missing.
Options IngestExternalSstFilesCommand::PrepareOptionsForOpenDB() {
  Options options = LDBCommand::PrepareOptionsForOpenDB();
  options.create_if_missing = create_if_missing_;
  return options;
}
|
|
|
|
|
2014-03-20 21:42:45 +01:00
|
|
|
} // namespace rocksdb
|
2014-04-15 22:39:26 +02:00
|
|
|
#endif // ROCKSDB_LITE
|