2013-10-16 23:59:46 +02:00
|
|
|
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
|
|
|
|
// This source code is licensed under the BSD-style license found in the
|
|
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
|
|
//
|
2014-04-15 22:39:26 +02:00
|
|
|
#ifndef ROCKSDB_LITE
|
2014-04-15 20:29:02 +02:00
|
|
|
#include "util/ldb_cmd.h"
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
|
|
|
|
#include "db/dbformat.h"
|
2013-06-21 01:02:36 +02:00
|
|
|
#include "db/db_impl.h"
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
#include "db/log_reader.h"
|
2013-03-22 17:17:30 +01:00
|
|
|
#include "db/filename.h"
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
#include "db/write_batch_internal.h"
|
2013-08-23 17:38:13 +02:00
|
|
|
#include "rocksdb/write_batch.h"
|
[CF] Rethink table cache
Summary:
Adapting table cache to column families is interesting. We want table cache to be global LRU, so if some column families are use not as often as others, we want them to be evicted from cache. However, current TableCache object also constructs tables on its own. If table is not found in the cache, TableCache automatically creates new table. We want each column family to be able to specify different table factory.
To solve the problem, we still have a single LRU, but we provide the LRUCache object to TableCache on construction. We have one TableCache per column family, but the underyling cache is shared by all TableCache objects.
This allows us to have a global LRU, but still be able to support different table factories for different column families. Also, in the future it will also be able to support different directories for different column families.
Test Plan: make check
Reviewers: dhruba, haobo, kailiu, sdong
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15915
2014-02-05 18:07:55 +01:00
|
|
|
#include "rocksdb/cache.h"
|
2013-06-19 04:57:54 +02:00
|
|
|
#include "util/coding.h"
|
2014-09-05 02:40:41 +02:00
|
|
|
#include "util/scoped_arena_iterator.h"
|
2014-04-29 05:34:20 +02:00
|
|
|
#include "utilities/ttl/db_ttl_impl.h"
|
2013-06-19 04:57:54 +02:00
|
|
|
|
|
|
|
#include <ctime>
|
|
|
|
#include <dirent.h>
|
2014-07-15 00:34:30 +02:00
|
|
|
#include <limits>
|
2013-06-19 04:57:54 +02:00
|
|
|
#include <sstream>
|
|
|
|
#include <string>
|
|
|
|
#include <stdexcept>
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
namespace rocksdb {
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2013-03-20 01:28:30 +01:00
|
|
|
using namespace std;

// Names of the command-line arguments understood by the ldb subcommands.
// Each subcommand declares which subset it accepts via BuildCmdLineOptions();
// unrecognized arguments are rejected by ValidateCmdLineOptions().
const string LDBCommand::ARG_DB = "db";
// Hex encoding flags: ARG_HEX implies both key and value are hex-encoded.
const string LDBCommand::ARG_HEX = "hex";
const string LDBCommand::ARG_KEY_HEX = "key_hex";
const string LDBCommand::ARG_VALUE_HEX = "value_hex";
// TTL-database support: open the DB through the TTL wrapper, optionally
// restricting output to entries written inside [start_time, end_time).
const string LDBCommand::ARG_TTL = "ttl";
const string LDBCommand::ARG_TTL_START = "start_time";
const string LDBCommand::ARG_TTL_END = "end_time";
const string LDBCommand::ARG_TIMESTAMP = "timestamp";
// Key-range bounds for range-based commands (scan, compact, dump, ...).
const string LDBCommand::ARG_FROM = "from";
const string LDBCommand::ARG_TO = "to";
const string LDBCommand::ARG_MAX_KEYS = "max_keys";
// DB-tuning options applied in PrepareOptionsForOpenDB().
const string LDBCommand::ARG_BLOOM_BITS = "bloom_bits";
const string LDBCommand::ARG_COMPRESSION_TYPE = "compression_type";
const string LDBCommand::ARG_BLOCK_SIZE = "block_size";
const string LDBCommand::ARG_AUTO_COMPACTION = "auto_compaction";
const string LDBCommand::ARG_WRITE_BUFFER_SIZE = "write_buffer_size";
const string LDBCommand::ARG_FILE_SIZE = "file_size";
const string LDBCommand::ARG_CREATE_IF_MISSING = "create_if_missing";

// Delimiter between key and value in the textual dump/load format
// (see ParseKeyValue() and DBLoaderCommand).
const char* LDBCommand::DELIM = " ==> ";
|
2012-12-06 00:37:03 +01:00
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Convenience overload: converts raw (argc, argv) into a vector of
// arguments (dropping argv[0], the program name) and delegates to the
// vector-based InitFromCmdLineArgs().
LDBCommand* LDBCommand::InitFromCmdLineArgs(
    int argc,
    char** argv,
    const Options& options,
    const LDBOptions& ldb_options
) {
  vector<string> args(argv + 1, argv + argc);
  return InitFromCmdLineArgs(args, options, ldb_options);
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Parse the command-line arguments and create the appropriate LDBCommand2
|
|
|
|
* instance.
|
|
|
|
* The command line arguments must be in the following format:
|
|
|
|
* ./ldb --db=PATH_TO_DB [--commonOpt1=commonOpt1Val] ..
|
|
|
|
* COMMAND <PARAM1> <PARAM2> ... [-cmdSpecificOpt1=cmdSpecificOpt1Val] ..
|
|
|
|
* This is similar to the command line format used by HBaseClientTool.
|
|
|
|
* Command name is not included in args.
|
2013-02-20 03:12:20 +01:00
|
|
|
* Returns nullptr if the command-line cannot be parsed.
|
2013-01-11 20:09:23 +01:00
|
|
|
*/
|
2013-04-12 05:21:49 +02:00
|
|
|
LDBCommand* LDBCommand::InitFromCmdLineArgs(
|
|
|
|
const vector<string>& args,
|
2014-06-20 08:54:13 +02:00
|
|
|
const Options& options,
|
|
|
|
const LDBOptions& ldb_options
|
2013-04-12 05:21:49 +02:00
|
|
|
) {
|
2013-01-11 20:09:23 +01:00
|
|
|
// --x=y command line arguments are added as x->y map entries.
|
2013-04-12 05:21:49 +02:00
|
|
|
map<string, string> option_map;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
// Command-line arguments of the form --hex end up in this array as hex
|
|
|
|
vector<string> flags;
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Everything other than option_map and flags. Represents commands
|
2013-01-11 20:09:23 +01:00
|
|
|
// and their parameters. For eg: put key1 value1 go into this vector.
|
|
|
|
vector<string> cmdTokens;
|
|
|
|
|
|
|
|
const string OPTION_PREFIX = "--";
|
|
|
|
|
2013-06-21 01:02:36 +02:00
|
|
|
for (const auto& arg : args) {
|
2013-03-20 01:28:30 +01:00
|
|
|
if (arg[0] == '-' && arg[1] == '-'){
|
|
|
|
vector<string> splits = stringSplit(arg, '=');
|
2013-01-11 20:09:23 +01:00
|
|
|
if (splits.size() == 2) {
|
|
|
|
string optionKey = splits[0].substr(OPTION_PREFIX.size());
|
2013-04-12 05:21:49 +02:00
|
|
|
option_map[optionKey] = splits[1];
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
|
|
|
string optionKey = splits[0].substr(OPTION_PREFIX.size());
|
|
|
|
flags.push_back(optionKey);
|
|
|
|
}
|
2012-12-06 00:37:03 +01:00
|
|
|
} else {
|
2013-06-21 01:02:36 +02:00
|
|
|
cmdTokens.push_back(arg);
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (cmdTokens.size() < 1) {
|
|
|
|
fprintf(stderr, "Command not specified!");
|
2013-02-20 03:12:20 +01:00
|
|
|
return nullptr;
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
string cmd = cmdTokens[0];
|
|
|
|
vector<string> cmdParams(cmdTokens.begin()+1, cmdTokens.end());
|
2013-04-12 05:21:49 +02:00
|
|
|
LDBCommand* command = LDBCommand::SelectCommand(
|
|
|
|
cmd,
|
|
|
|
cmdParams,
|
|
|
|
option_map,
|
|
|
|
flags
|
|
|
|
);
|
|
|
|
|
|
|
|
if (command) {
|
2014-06-20 08:54:13 +02:00
|
|
|
command->SetDBOptions(options);
|
|
|
|
command->SetLDBOptions(ldb_options);
|
2013-04-12 05:21:49 +02:00
|
|
|
}
|
|
|
|
return command;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * Maps a command name to a freshly-allocated command object, forwarding the
 * parsed parameters, options and flags to its constructor.  The caller owns
 * the returned object.  Returns nullptr for an unrecognized command name.
 */
LDBCommand* LDBCommand::SelectCommand(
    const std::string& cmd,
    const vector<string>& cmdParams,
    const map<string, string>& option_map,
    const vector<string>& flags
  ) {
  // Guard-clause style: each match returns immediately.
  if (cmd == GetCommand::Name()) {
    return new GetCommand(cmdParams, option_map, flags);
  }
  if (cmd == PutCommand::Name()) {
    return new PutCommand(cmdParams, option_map, flags);
  }
  if (cmd == BatchPutCommand::Name()) {
    return new BatchPutCommand(cmdParams, option_map, flags);
  }
  if (cmd == ScanCommand::Name()) {
    return new ScanCommand(cmdParams, option_map, flags);
  }
  if (cmd == DeleteCommand::Name()) {
    return new DeleteCommand(cmdParams, option_map, flags);
  }
  if (cmd == ApproxSizeCommand::Name()) {
    return new ApproxSizeCommand(cmdParams, option_map, flags);
  }
  if (cmd == DBQuerierCommand::Name()) {
    return new DBQuerierCommand(cmdParams, option_map, flags);
  }
  if (cmd == CompactorCommand::Name()) {
    return new CompactorCommand(cmdParams, option_map, flags);
  }
  if (cmd == WALDumperCommand::Name()) {
    return new WALDumperCommand(cmdParams, option_map, flags);
  }
  if (cmd == ReduceDBLevelsCommand::Name()) {
    return new ReduceDBLevelsCommand(cmdParams, option_map, flags);
  }
  if (cmd == ChangeCompactionStyleCommand::Name()) {
    return new ChangeCompactionStyleCommand(cmdParams, option_map, flags);
  }
  if (cmd == DBDumperCommand::Name()) {
    return new DBDumperCommand(cmdParams, option_map, flags);
  }
  if (cmd == DBLoaderCommand::Name()) {
    return new DBLoaderCommand(cmdParams, option_map, flags);
  }
  if (cmd == ManifestDumpCommand::Name()) {
    return new ManifestDumpCommand(cmdParams, option_map, flags);
  }
  if (cmd == ListColumnFamiliesCommand::Name()) {
    return new ListColumnFamiliesCommand(cmdParams, option_map, flags);
  }
  if (cmd == InternalDumpCommand::Name()) {
    return new InternalDumpCommand(cmdParams, option_map, flags);
  }
  if (cmd == CheckConsistencyCommand::Name()) {
    return new CheckConsistencyCommand(cmdParams, option_map, flags);
  }
  return nullptr;
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
/**
 * Parses the specific integer option and fills in the value.
 * Returns true if the option is found.
 * Returns false if the option is not found or if there is an error parsing the
 * value.  If there is an error, the specified exec_state is also
 * updated.
 */
bool LDBCommand::ParseIntOption(const map<string, string>& options,
  const string& option, int& value,
  LDBCommandExecuteResult& exec_state) {

  // NOTE(review): the 'options' parameter is never used -- the lookup always
  // goes through the option_map_ member.  Every call site in this file passes
  // option_map_, so behavior happens to be identical, but the unused
  // parameter is misleading; confirm intent before relying on it elsewhere.
  map<string, string>::const_iterator itr = option_map_.find(option);
  if (itr != option_map_.end()) {
    try {
      // stoi() throws invalid_argument for non-numeric text and
      // out_of_range for values that do not fit in an int.
      value = stoi(itr->second);
      return true;
    } catch(const invalid_argument&) {
      exec_state = LDBCommandExecuteResult::FAILED(option +
                      " has an invalid value.");
    } catch(const out_of_range&) {
      exec_state = LDBCommandExecuteResult::FAILED(option +
                      " has a value out-of-range.");
    }
  }
  return false;
}
|
|
|
|
|
2013-06-21 01:02:36 +02:00
|
|
|
/**
 * Parses the specified option and fills in the value.
 * Returns true if the option is found.
 * Returns false otherwise.
 */
bool LDBCommand::ParseStringOption(const map<string, string>& options,
  const string& option, string* value) {
  // NOTE(review): like ParseIntOption(), this ignores the 'options'
  // parameter and always reads the option_map_ member -- confirm callers
  // always pass option_map_ before treating the parameter as meaningful.
  auto itr = option_map_.find(option);
  if (itr != option_map_.end()) {
    *value = itr->second;
    return true;
  }
  return false;
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
/**
 * Builds the Options used to open the DB, starting from options_ and
 * overriding with the values supplied on the command line
 * (--bloom_bits, --block_size, --auto_compaction, --compression_type,
 * --write_buffer_size, --file_size).  Invalid values record a FAILED
 * exec_state_ but parsing continues.
 */
Options LDBCommand::PrepareOptionsForOpenDB() {

  Options opt = options_;
  opt.create_if_missing = false;

  map<string, string>::const_iterator itr;

  BlockBasedTableOptions table_options;
  // BUG FIX: the original only installed a new table factory inside the
  // --block_size branch, so a --bloom_bits filter policy set without
  // --block_size was silently discarded.  Track whether either table
  // option was configured and install the factory once, afterwards.
  // (We deliberately do not reset the factory when neither option is
  // given, so a caller-supplied table factory is preserved.)
  bool use_table_options = false;
  int bits;
  if (ParseIntOption(option_map_, ARG_BLOOM_BITS, bits, exec_state_)) {
    if (bits > 0) {
      use_table_options = true;
      table_options.filter_policy.reset(NewBloomFilterPolicy(bits));
    } else {
      exec_state_ = LDBCommandExecuteResult::FAILED(ARG_BLOOM_BITS +
                      " must be > 0.");
    }
  }

  int block_size;
  if (ParseIntOption(option_map_, ARG_BLOCK_SIZE, block_size, exec_state_)) {
    if (block_size > 0) {
      use_table_options = true;
      table_options.block_size = block_size;
    } else {
      exec_state_ = LDBCommandExecuteResult::FAILED(ARG_BLOCK_SIZE +
                      " must be > 0.");
    }
  }

  if (use_table_options) {
    opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
  }

  itr = option_map_.find(ARG_AUTO_COMPACTION);
  if (itr != option_map_.end()) {
    opt.disable_auto_compactions = ! StringToBool(itr->second);
  }

  itr = option_map_.find(ARG_COMPRESSION_TYPE);
  if (itr != option_map_.end()) {
    string comp = itr->second;
    if (comp == "no") {
      opt.compression = kNoCompression;
    } else if (comp == "snappy") {
      opt.compression = kSnappyCompression;
    } else if (comp == "zlib") {
      opt.compression = kZlibCompression;
    } else if (comp == "bzip2") {
      opt.compression = kBZip2Compression;
    } else if (comp == "lz4") {
      opt.compression = kLZ4Compression;
    } else if (comp == "lz4hc") {
      opt.compression = kLZ4HCCompression;
    } else {
      // Unknown compression.
      exec_state_ = LDBCommandExecuteResult::FAILED(
                      "Unknown compression level: " + comp);
    }
  }

  int write_buffer_size;
  if (ParseIntOption(option_map_, ARG_WRITE_BUFFER_SIZE, write_buffer_size,
        exec_state_)) {
    if (write_buffer_size > 0) {
      opt.write_buffer_size = write_buffer_size;
    } else {
      exec_state_ = LDBCommandExecuteResult::FAILED(ARG_WRITE_BUFFER_SIZE +
                      " must be > 0.");
    }
  }

  int file_size;
  if (ParseIntOption(option_map_, ARG_FILE_SIZE, file_size, exec_state_)) {
    if (file_size > 0) {
      opt.target_file_size_base = file_size;
    } else {
      exec_state_ = LDBCommandExecuteResult::FAILED(ARG_FILE_SIZE +
                      " must be > 0.");
    }
  }

  if (opt.db_paths.size() == 0) {
    opt.db_paths.emplace_back(db_path_, std::numeric_limits<uint64_t>::max());
  }

  return opt;
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
bool LDBCommand::ParseKeyValue(const string& line, string* key, string* value,
|
|
|
|
bool is_key_hex, bool is_value_hex) {
|
|
|
|
size_t pos = line.find(DELIM);
|
2013-03-20 01:28:30 +01:00
|
|
|
if (pos != string::npos) {
|
2013-01-11 20:09:23 +01:00
|
|
|
*key = line.substr(0, pos);
|
|
|
|
*value = line.substr(pos + strlen(DELIM));
|
|
|
|
if (is_key_hex) {
|
|
|
|
*key = HexToString(*key);
|
|
|
|
}
|
|
|
|
if (is_value_hex) {
|
|
|
|
*value = HexToString(*value);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2012-12-06 00:37:03 +01:00
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
/**
|
|
|
|
* Make sure that ONLY the command-line options and flags expected by this
|
|
|
|
* command are specified on the command-line. Extraneous options are usually
|
|
|
|
* the result of user error.
|
|
|
|
* Returns true if all checks pass. Else returns false, and prints an
|
|
|
|
* appropriate error msg to stderr.
|
|
|
|
*/
|
|
|
|
bool LDBCommand::ValidateCmdLineOptions() {
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
for (map<string, string>::const_iterator itr = option_map_.begin();
|
2014-09-29 23:05:12 +02:00
|
|
|
itr != option_map_.end(); ++itr) {
|
2013-03-20 01:28:30 +01:00
|
|
|
if (find(valid_cmd_line_options_.begin(),
|
2013-01-11 20:09:23 +01:00
|
|
|
valid_cmd_line_options_.end(), itr->first) ==
|
|
|
|
valid_cmd_line_options_.end()) {
|
|
|
|
fprintf(stderr, "Invalid command-line option %s\n", itr->first.c_str());
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
for (vector<string>::const_iterator itr = flags_.begin();
|
2014-09-29 23:05:12 +02:00
|
|
|
itr != flags_.end(); ++itr) {
|
2013-03-20 01:28:30 +01:00
|
|
|
if (find(valid_cmd_line_options_.begin(),
|
2013-01-11 20:09:23 +01:00
|
|
|
valid_cmd_line_options_.end(), *itr) ==
|
|
|
|
valid_cmd_line_options_.end()) {
|
|
|
|
fprintf(stderr, "Invalid command-line flag %s\n", itr->c_str());
|
|
|
|
return false;
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
if (!NoDBOpen() && option_map_.find(ARG_DB) == option_map_.end()) {
|
2013-01-11 20:09:23 +01:00
|
|
|
fprintf(stderr, "%s must be specified\n", ARG_DB.c_str());
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parses optional --from/--to bounds for the "compact" command; hex-decodes
// them when --key_hex/--hex was given.  A missing bound is tracked via
// null_from_/null_to_ and later means "unbounded" in DoCommand().
CompactorCommand::CompactorCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, false,
             BuildCmdLineOptions({ARG_FROM, ARG_TO, ARG_HEX, ARG_KEY_HEX,
                                  ARG_VALUE_HEX, ARG_TTL})),
  null_from_(true), null_to_(true) {

  auto from_itr = options.find(ARG_FROM);
  if (from_itr != options.end()) {
    null_from_ = false;
    from_ = from_itr->second;
  }

  auto to_itr = options.find(ARG_TO);
  if (to_itr != options.end()) {
    null_to_ = false;
    to_ = to_itr->second;
  }

  if (is_key_hex_) {
    if (!null_from_) {
      from_ = HexToString(from_);
    }
    if (!null_to_) {
      to_ = HexToString(to_);
    }
  }
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Appends the usage line for the "compact" command to 'ret'.
void CompactorCommand::Help(string& ret) {
  ret.append(" " + CompactorCommand::Name() + HelpRangeCmdArgs() + "\n");
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
void CompactorCommand::DoCommand() {
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
Slice* begin = nullptr;
|
|
|
|
Slice* end = nullptr;
|
2012-10-31 19:47:18 +01:00
|
|
|
if (!null_from_) {
|
2013-04-12 05:21:49 +02:00
|
|
|
begin = new Slice(from_);
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
if (!null_to_) {
|
2013-04-12 05:21:49 +02:00
|
|
|
end = new Slice(to_);
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
db_->CompactRange(begin, end);
|
|
|
|
exec_state_ = LDBCommandExecuteResult::SUCCEED("");
|
|
|
|
|
|
|
|
delete begin;
|
|
|
|
delete end;
|
|
|
|
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Flags accepted by the "load" command (see DBLoaderCommand ctor).
const string DBLoaderCommand::ARG_DISABLE_WAL = "disable_wal";
const string DBLoaderCommand::ARG_BULK_LOAD = "bulk_load";
const string DBLoaderCommand::ARG_COMPACT = "compact";
|
2012-12-17 02:06:51 +01:00
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Reads the load-command flags straight into the members; all four default
// to false when the corresponding flag is absent.
DBLoaderCommand::DBLoaderCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, false,
             BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX,
                                  ARG_FROM, ARG_TO, ARG_CREATE_IF_MISSING,
                                  ARG_DISABLE_WAL, ARG_BULK_LOAD,
                                  ARG_COMPACT})),
  create_if_missing_(IsFlagPresent(flags, ARG_CREATE_IF_MISSING)),
  disable_wal_(IsFlagPresent(flags, ARG_DISABLE_WAL)),
  bulk_load_(IsFlagPresent(flags, ARG_BULK_LOAD)),
  compact_(IsFlagPresent(flags, ARG_COMPACT)) {
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Appends the usage line for the "load" command to 'ret'.
void DBLoaderCommand::Help(string& ret) {
  ret.append(" " + DBLoaderCommand::Name() +
             " [--" + ARG_CREATE_IF_MISSING + "]" +
             " [--" + ARG_DISABLE_WAL + "]" +
             " [--" + ARG_BULK_LOAD + "]" +
             " [--" + ARG_COMPACT + "]" + "\n");
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Extends the base open options for loading: honors --create_if_missing and,
// for --bulk_load, switches the DB into bulk-load mode via
// Options::PrepareForBulkLoad().
Options DBLoaderCommand::PrepareOptionsForOpenDB() {
  Options opt = LDBCommand::PrepareOptionsForOpenDB();
  opt.create_if_missing = create_if_missing_;
  if (bulk_load_) {
    opt.PrepareForBulkLoad();
  }
  return opt;
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
void DBLoaderCommand::DoCommand() {
|
2012-12-17 02:06:51 +01:00
|
|
|
if (!db_) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
WriteOptions write_options;
|
|
|
|
if (disable_wal_) {
|
|
|
|
write_options.disableWAL = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
int bad_lines = 0;
|
2013-01-11 20:09:23 +01:00
|
|
|
string line;
|
2013-03-20 01:28:30 +01:00
|
|
|
while (getline(cin, line, '\n')) {
|
2013-01-11 20:09:23 +01:00
|
|
|
string key;
|
|
|
|
string value;
|
|
|
|
if (ParseKeyValue(line, &key, &value, is_key_hex_, is_value_hex_)) {
|
2012-12-17 02:06:51 +01:00
|
|
|
db_->Put(write_options, Slice(key), Slice(value));
|
|
|
|
} else if (0 == line.find("Keys in range:")) {
|
|
|
|
// ignore this line
|
|
|
|
} else if (0 == line.find("Created bg thread 0x")) {
|
|
|
|
// ignore this line
|
|
|
|
} else {
|
|
|
|
bad_lines ++;
|
|
|
|
}
|
|
|
|
}
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2012-12-17 02:06:51 +01:00
|
|
|
if (bad_lines > 0) {
|
2013-03-20 01:28:30 +01:00
|
|
|
cout << "Warning: " << bad_lines << " bad lines ignored." << endl;
|
2012-12-17 02:06:51 +01:00
|
|
|
}
|
2013-02-26 07:57:37 +01:00
|
|
|
if (compact_) {
|
2013-03-08 21:29:19 +01:00
|
|
|
db_->CompactRange(nullptr, nullptr);
|
2013-02-26 07:57:37 +01:00
|
|
|
}
|
2012-12-17 02:06:51 +01:00
|
|
|
}
|
|
|
|
|
2013-03-22 17:17:30 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
// Options accepted by the "manifest_dump" command (see ManifestDumpCommand).
const string ManifestDumpCommand::ARG_VERBOSE = "verbose";
const string ManifestDumpCommand::ARG_PATH = "path";
|
|
|
|
|
|
|
|
// Appends the usage line for the "manifest_dump" command to 'ret'.
void ManifestDumpCommand::Help(string& ret) {
  ret.append(" " + ManifestDumpCommand::Name() +
             " [--" + ARG_VERBOSE + "]" +
             " [--" + ARG_PATH + "=<path_to_manifest_file>]" + "\n");
}
|
|
|
|
|
|
|
|
// Parses --verbose and --path for the "manifest_dump" command.  An explicitly
// empty --path= is rejected; an absent --path means DoCommand() will search
// the DB directory for the MANIFEST file itself.
ManifestDumpCommand::ManifestDumpCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, false,
             BuildCmdLineOptions({ARG_VERBOSE, ARG_PATH, ARG_HEX})),
  verbose_(false),
  path_("")
{
  verbose_ = IsFlagPresent(flags, ARG_VERBOSE);

  auto path_itr = options.find(ARG_PATH);
  if (path_itr != options.end()) {
    path_ = path_itr->second;
    if (path_.empty()) {
      exec_state_ = LDBCommandExecuteResult::FAILED("--path: missing pathname");
    }
  }
}
|
|
|
|
|
|
|
|
void ManifestDumpCommand::DoCommand() {
|
|
|
|
|
|
|
|
std::string manifestfile;
|
|
|
|
|
|
|
|
if (!path_.empty()) {
|
|
|
|
manifestfile = path_;
|
|
|
|
} else {
|
|
|
|
bool found = false;
|
|
|
|
// We need to find the manifest file by searching the directory
|
|
|
|
// containing the db for files of the form MANIFEST_[0-9]+
|
|
|
|
DIR* d = opendir(db_path_.c_str());
|
|
|
|
if (d == nullptr) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED(
|
|
|
|
db_path_ + " is not a directory");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
struct dirent* entry;
|
|
|
|
while ((entry = readdir(d)) != nullptr) {
|
|
|
|
unsigned int match;
|
|
|
|
unsigned long long num;
|
2013-11-13 06:02:03 +01:00
|
|
|
if (sscanf(entry->d_name,
|
|
|
|
"MANIFEST-%ln%ln",
|
|
|
|
(unsigned long*)&num,
|
|
|
|
(unsigned long*)&match)
|
2013-03-22 17:17:30 +01:00
|
|
|
&& match == strlen(entry->d_name)) {
|
|
|
|
if (!found) {
|
|
|
|
manifestfile = db_path_ + "/" + std::string(entry->d_name);
|
|
|
|
found = true;
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED(
|
|
|
|
"Multiple MANIFEST files found; use --path to select one");
|
2014-09-06 05:47:57 +02:00
|
|
|
closedir(d);
|
2013-03-22 17:17:30 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
closedir(d);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (verbose_) {
|
|
|
|
printf("Processing Manifest file %s\n", manifestfile.c_str());
|
|
|
|
}
|
|
|
|
|
|
|
|
Options options;
|
2013-06-08 00:35:17 +02:00
|
|
|
EnvOptions sopt;
|
2013-03-22 17:17:30 +01:00
|
|
|
std::string file(manifestfile);
|
|
|
|
std::string dbname("dummy");
|
[CF] Rethink table cache
Summary:
Adapting table cache to column families is interesting. We want table cache to be global LRU, so if some column families are use not as often as others, we want them to be evicted from cache. However, current TableCache object also constructs tables on its own. If table is not found in the cache, TableCache automatically creates new table. We want each column family to be able to specify different table factory.
To solve the problem, we still have a single LRU, but we provide the LRUCache object to TableCache on construction. We have one TableCache per column family, but the underyling cache is shared by all TableCache objects.
This allows us to have a global LRU, but still be able to support different table factories for different column families. Also, in the future it will also be able to support different directories for different column families.
Test Plan: make check
Reviewers: dhruba, haobo, kailiu, sdong
CC: leveldb
Differential Revision: https://reviews.facebook.net/D15915
2014-02-05 18:07:55 +01:00
|
|
|
std::shared_ptr<Cache> tc(NewLRUCache(
|
|
|
|
options.max_open_files - 10, options.table_cache_numshardbits,
|
|
|
|
options.table_cache_remove_scan_count_limit));
|
2014-07-30 03:37:00 +02:00
|
|
|
// Notice we are using the default options not through SanitizeOptions(),
|
|
|
|
// if VersionSet::DumpManifest() depends on any option done by
|
|
|
|
// SanitizeOptions(), we need to initialize it manually.
|
|
|
|
options.db_paths.emplace_back("dummy", 0);
|
Push- instead of pull-model for managing Write stalls
Summary:
Introducing WriteController, which is a source of truth about per-DB write delays. Let's define an DB epoch as a period where there are no flushes and compactions (i.e. new epoch is started when flush or compaction finishes). Each epoch can either:
* proceed with all writes without delay
* delay all writes by fixed time
* stop all writes
The three modes are recomputed at each epoch change (flush, compaction), rather than on every write (which is currently the case).
When we have a lot of column families, our current pull behavior adds a big overhead, since we need to loop over every column family for every write. With new push model, overhead on Write code-path is minimal.
This is just the start. Next step is to also take care of stalls introduced by slow memtable flushes. The final goal is to eliminate function MakeRoomForWrite(), which currently needs to be called for every column family by every write.
Test Plan: make check for now. I'll add some unit tests later. Also, perf test.
Reviewers: dhruba, yhchiang, MarkCallaghan, sdong, ljin
Reviewed By: ljin
Subscribers: leveldb
Differential Revision: https://reviews.facebook.net/D22791
2014-09-08 20:20:25 +02:00
|
|
|
WriteController wc;
|
|
|
|
VersionSet versions(dbname, &options, sopt, tc.get(), &wc);
|
2014-08-20 22:52:03 +02:00
|
|
|
Status s = versions.DumpManifest(options, file, verbose_, is_key_hex_);
|
2013-03-22 17:17:30 +01:00
|
|
|
if (!s.ok()) {
|
|
|
|
printf("Error in processing file %s %s\n", manifestfile.c_str(),
|
|
|
|
s.ToString().c_str());
|
|
|
|
}
|
|
|
|
if (verbose_) {
|
|
|
|
printf("Processing Manifest file %s done\n", manifestfile.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
2014-02-28 01:18:23 +01:00
|
|
|
|
|
|
|
// Appends the one-line usage text for list_column_families to `ret`.
void ListColumnFamiliesCommand::Help(string& ret) {
  ret += " ";
  ret += ListColumnFamiliesCommand::Name();
  ret += " full_path_to_db_directory ";
  ret += "\n";
}
|
|
|
|
|
|
|
|
// Parses the positional arguments for list_column_families. Exactly one
// positional parameter (the db directory) is required; anything else is
// reported through exec_state_.
ListColumnFamiliesCommand::ListColumnFamiliesCommand(
    const vector<string>& params, const map<string, string>& options,
    const vector<string>& flags)
    : LDBCommand(options, flags, false, {}) {

  if (params.size() == 1) {
    dbname_ = params.front();
  } else {
    exec_state_ = LDBCommandExecuteResult::FAILED(
        "dbname must be specified for the list_column_families command");
  }
}
|
|
|
|
|
|
|
|
void ListColumnFamiliesCommand::DoCommand() {
|
|
|
|
vector<string> column_families;
|
|
|
|
Status s = DB::ListColumnFamilies(DBOptions(), dbname_, &column_families);
|
|
|
|
if (!s.ok()) {
|
|
|
|
printf("Error in processing db %s %s\n", dbname_.c_str(),
|
|
|
|
s.ToString().c_str());
|
|
|
|
} else {
|
|
|
|
printf("Column families in %s: \n{", dbname_.c_str());
|
|
|
|
bool first = true;
|
|
|
|
for (auto cf : column_families) {
|
|
|
|
if (!first) {
|
|
|
|
printf(", ");
|
|
|
|
}
|
|
|
|
first = false;
|
|
|
|
printf("%s", cf.c_str());
|
|
|
|
}
|
|
|
|
printf("}\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
2013-03-22 17:17:30 +01:00
|
|
|
|
2014-04-10 06:17:14 +02:00
|
|
|
namespace {
|
|
|
|
|
2013-06-19 04:57:54 +02:00
|
|
|
// Formats a unix timestamp as a human-readable local-time string ("%c").
// Returns an empty string if the timestamp cannot be represented as a
// local time or formatting fails.
//
// Fix over the original: localtime() may return nullptr (e.g. for values
// not representable as a local calendar time), which the old code
// dereferenced unconditionally; strftime() may return 0, leaving the
// buffer contents unspecified. Both cases are now handled explicitly.
// NOTE(review): localtime() uses shared static storage and is not
// thread-safe; the ldb tool appears to call this single-threaded.
string ReadableTime(int unixtime) {
  char time_buffer [80];
  time_t rawtime = unixtime;
  struct tm * timeinfo = localtime(&rawtime);
  if (timeinfo == nullptr ||
      strftime(time_buffer, sizeof(time_buffer), "%c", timeinfo) == 0) {
    return string();
  }
  return string(time_buffer);
}
|
|
|
|
|
|
|
|
// Credits one key, stamped with time `timekv`, to the histogram bucket it
// falls into. Only called in the sane case of >1 buckets in the time-range,
// and only when timekv falls between ttl_start and ttl_end (asserted).
void IncBucketCounts(vector<uint64_t>& bucket_counts, int ttl_start,
      int time_range, int bucket_size, int timekv, int num_buckets) {
  assert(time_range > 0 && timekv >= ttl_start && bucket_size > 0 &&
    timekv < (ttl_start + time_range) && num_buckets > 1);
  const int idx = (timekv - ttl_start) / bucket_size;
  ++bucket_counts[idx];
}
|
|
|
|
|
2013-06-20 20:50:33 +02:00
|
|
|
void PrintBucketCounts(const vector<uint64_t>& bucket_counts, int ttl_start,
|
|
|
|
int ttl_end, int bucket_size, int num_buckets) {
|
2013-06-19 04:57:54 +02:00
|
|
|
int time_point = ttl_start;
|
|
|
|
for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
|
2013-11-13 06:02:03 +01:00
|
|
|
fprintf(stdout, "Keys in range %s to %s : %lu\n",
|
2013-06-19 04:57:54 +02:00
|
|
|
ReadableTime(time_point).c_str(),
|
2013-11-13 06:02:03 +01:00
|
|
|
ReadableTime(time_point + bucket_size).c_str(),
|
|
|
|
(unsigned long)bucket_counts[i]);
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
2013-11-13 06:02:03 +01:00
|
|
|
fprintf(stdout, "Keys in range %s to %s : %lu\n",
|
2013-06-19 04:57:54 +02:00
|
|
|
ReadableTime(time_point).c_str(),
|
2013-11-13 06:02:03 +01:00
|
|
|
ReadableTime(ttl_end).c_str(),
|
|
|
|
(unsigned long)bucket_counts[num_buckets - 1]);
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
|
2014-04-10 06:17:14 +02:00
|
|
|
} // namespace
|
|
|
|
|
2013-06-21 01:02:36 +02:00
|
|
|
// Command-line option names accepted by the idump (internal dump) command.
const string InternalDumpCommand::ARG_COUNT_ONLY = "count_only";   // only print the total key count
const string InternalDumpCommand::ARG_COUNT_DELIM = "count_delim"; // group counts by key prefix up to a delimiter
const string InternalDumpCommand::ARG_STATS = "stats";             // dump "rocksdb.stats" before iterating
const string InternalDumpCommand::ARG_INPUT_KEY_HEX = "input_key_hex"; // --from/--to are hex-encoded
|
2013-06-21 01:02:36 +02:00
|
|
|
|
|
|
|
// Parses options/flags for the idump command. Recognized options:
//   --from/--to  : key range bounds (hex-decoded if --input_key_hex is set)
//   --max_keys   : stop after this many keys (-1 = unlimited)
//   --count_only : print only the key count
//   --count_delim[=<char>] : group count/size stats by key prefix up to
//                  the delimiter character (default ".")
//   --stats      : print "rocksdb.stats" before dumping
InternalDumpCommand::InternalDumpCommand(const vector<string>& params,
                                         const map<string, string>& options,
                                         const vector<string>& flags) :
    LDBCommand(options, flags, true,
               BuildCmdLineOptions({ ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX,
                                     ARG_FROM, ARG_TO, ARG_MAX_KEYS,
                                     ARG_COUNT_ONLY, ARG_COUNT_DELIM, ARG_STATS,
                                     ARG_INPUT_KEY_HEX})),
    has_from_(false),
    has_to_(false),
    max_keys_(-1),
    delim_("."),
    count_only_(false),
    count_delim_(false),
    print_stats_(false),
    is_input_key_hex_(false) {

  has_from_ = ParseStringOption(options, ARG_FROM, &from_);
  has_to_ = ParseStringOption(options, ARG_TO, &to_);

  ParseIntOption(options, ARG_MAX_KEYS, max_keys_, exec_state_);
  // --count_delim given with a value sets the delimiter; given as a bare
  // flag it keeps the "." default.
  map<string, string>::const_iterator itr = options.find(ARG_COUNT_DELIM);
  if (itr != options.end()) {
    delim_ = itr->second;
    count_delim_ = true;
    // fprintf(stdout,"delim = %c\n",delim_[0]);
  } else {
    count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
    delim_=".";
  }

  print_stats_ = IsFlagPresent(flags, ARG_STATS);
  count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
  is_input_key_hex_ = IsFlagPresent(flags, ARG_INPUT_KEY_HEX);

  // Range bounds arrive hex-encoded when --input_key_hex is set; decode
  // them once up front so the comparisons below work on raw bytes.
  if (is_input_key_hex_) {
    if (has_from_) {
      from_ = HexToString(from_);
    }
    if (has_to_) {
      to_ = HexToString(to_);
    }
  }
}
|
|
|
|
|
|
|
|
void InternalDumpCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(InternalDumpCommand::Name());
|
|
|
|
ret.append(HelpRangeCmdArgs());
|
2013-08-09 00:51:16 +02:00
|
|
|
ret.append(" [--" + ARG_INPUT_KEY_HEX + "]");
|
2013-06-21 01:02:36 +02:00
|
|
|
ret.append(" [--" + ARG_MAX_KEYS + "=<N>]");
|
|
|
|
ret.append(" [--" + ARG_COUNT_ONLY + "]");
|
2013-11-01 21:59:14 +01:00
|
|
|
ret.append(" [--" + ARG_COUNT_DELIM + "=<char>]");
|
2013-06-21 01:02:36 +02:00
|
|
|
ret.append(" [--" + ARG_STATS + "]");
|
|
|
|
ret.append("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
// Iterates the DB's *internal* key space (all versions, all value types)
// and either dumps each entry, counts entries, or aggregates count/size
// per key-prefix group (--count_delim mode).
void InternalDumpCommand::DoCommand() {
  if (!db_) {
    return;
  }

  // Optionally dump DB statistics first.
  if (print_stats_) {
    string stats;
    if (db_->GetProperty("rocksdb.stats", &stats)) {
      fprintf(stdout, "%s\n", stats.c_str());
    }
  }

  // Cast as DBImpl to get internal iterator
  DBImpl* idb = dynamic_cast<DBImpl*>(db_);
  if (!idb) {
    exec_state_ = LDBCommandExecuteResult::FAILED("DB is not DBImpl");
    return;
  }
  // count_delim accumulators: rtype1/rtype2 are the current and previous
  // key-prefix groups, c is the entry count and s2 the accumulated size
  // for the group currently being folded.
  string rtype1,rtype2,row,val;
  rtype2 = "";
  uint64_t c=0;
  uint64_t s1=0,s2=0;
  // Setup internal key iterator
  Arena arena;
  ScopedArenaIterator iter(idb->TEST_NewInternalIterator(&arena));
  Status st = iter->status();
  if (!st.ok()) {
    // NOTE(review): the error is recorded but iteration proceeds anyway;
    // an invalid iterator simply yields no entries below.
    exec_state_ = LDBCommandExecuteResult::FAILED("Iterator error:"
                                                  + st.ToString());
  }

  // Position at --from (as an internal key seek) or at the first entry.
  if (has_from_) {
    InternalKey ikey(from_, kMaxSequenceNumber, kValueTypeForSeek);
    iter->Seek(ikey.Encode());
  } else {
    iter->SeekToFirst();
  }

  long long count = 0;
  for (; iter->Valid(); iter->Next()) {
    ParsedInternalKey ikey;
    if (!ParseInternalKey(iter->key(), &ikey)) {
      fprintf(stderr, "Internal Key [%s] parse error!\n",
              iter->key().ToString(true /* in hex*/).data());
      // TODO: add error counter
      continue;
    }

    // If end marker was specified, we stop before it
    if (has_to_ && options_.comparator->Compare(ikey.user_key, to_) >= 0) {
      break;
    }

    ++count;
    int k;
    if (count_delim_) {
      // Group stats mode: the group name is the key prefix up to the first
      // delimiter character. s1 is the byte size of this entry's key+value,
      // counting each up to the first '\x01' or NUL byte — presumably
      // '\x01' acts as a field terminator in the stored format (TODO:
      // confirm against the writers of this data).
      rtype1 = "";
      s1=0;
      row = iter->key().ToString();
      val = iter->value().ToString();
      for(k=0;row[k]!='\x01' && row[k]!='\0';k++)
        s1++;
      for(k=0;val[k]!='\x01' && val[k]!='\0';k++)
        s1++;
      for(int j=0;row[j]!=delim_[0] && row[j]!='\0' && row[j]!='\x01';j++)
        rtype1+=row[j];
      // On a group change, flush the previous group's totals and start a
      // fresh tally; otherwise keep accumulating.
      if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
        fprintf(stdout,"%s => count:%lld\tsize:%lld\n",rtype2.c_str(),
                (long long)c,(long long)s2);
        c=1;
        s2=s1;
        rtype2 = rtype1;
      } else {
        c++;
        s2+=s1;
        rtype2=rtype1;
      }
    }

    // Plain dump mode: print "key => value" for every entry.
    if (!count_only_ && !count_delim_) {
      string key = ikey.DebugString(is_key_hex_);
      string value = iter->value().ToString(is_value_hex_);
      std::cout << key << " => " << value << "\n";
    }

    // Terminate if maximum number of keys have been dumped
    if (max_keys_ > 0 && count >= max_keys_) break;
  }
  // Flush the final group (count_delim mode) or print the grand total.
  if(count_delim_) {
    fprintf(stdout,"%s => count:%lld\tsize:%lld\n", rtype2.c_str(),
            (long long)c,(long long)s2);
  } else
    fprintf(stdout, "Internal keys in range: %lld\n", (long long) count);
}
|
|
|
|
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Command-line option names accepted by the dump command.
const string DBDumperCommand::ARG_COUNT_ONLY = "count_only";   // only print the total key count
const string DBDumperCommand::ARG_COUNT_DELIM = "count_delim"; // group counts by key prefix up to a delimiter
const string DBDumperCommand::ARG_STATS = "stats";             // dump "rocksdb.stats" before iterating
const string DBDumperCommand::ARG_TTL_BUCKET = "bucket";       // bucket width (seconds) for TTL histograms
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Parses options/flags for the dump command. Recognized options:
//   --from/--to     : key range bounds (hex-decoded if key-hex mode is on)
//   --max_keys      : stop after this many keys (-1 = unlimited)
//   --count_only    : print only the key count
//   --count_delim[=<char>] : group count/size stats by key prefix
//   --stats         : print "rocksdb.stats" before dumping
//   (TTL options --ttl_start/--ttl_end/--bucket are parsed later, in
//   DoCommand, from option_map_.)
DBDumperCommand::DBDumperCommand(const vector<string>& params,
      const map<string, string>& options, const vector<string>& flags) :
    LDBCommand(options, flags, true,
               BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
                                    ARG_VALUE_HEX, ARG_FROM, ARG_TO,
                                    ARG_MAX_KEYS, ARG_COUNT_ONLY,
                                    ARG_COUNT_DELIM, ARG_STATS, ARG_TTL_START,
                                    ARG_TTL_END, ARG_TTL_BUCKET,
                                    ARG_TIMESTAMP})),
    null_from_(true),
    null_to_(true),
    max_keys_(-1),
    count_only_(false),
    count_delim_(false),
    print_stats_(false) {

  map<string, string>::const_iterator itr = options.find(ARG_FROM);
  if (itr != options.end()) {
    null_from_ = false;
    from_ = itr->second;
  }

  itr = options.find(ARG_TO);
  if (itr != options.end()) {
    null_to_ = false;
    to_ = itr->second;
  }

  // --max_keys is parsed by hand (rather than via ParseIntOption) so the
  // two stoi failure modes get distinct error messages.
  itr = options.find(ARG_MAX_KEYS);
  if (itr != options.end()) {
    try {
      max_keys_ = stoi(itr->second);
    } catch(const invalid_argument&) {
      exec_state_ = LDBCommandExecuteResult::FAILED(ARG_MAX_KEYS +
                        " has an invalid value");
    } catch(const out_of_range&) {
      exec_state_ = LDBCommandExecuteResult::FAILED(ARG_MAX_KEYS +
                        " has a value out-of-range");
    }
  }
  // --count_delim given with a value sets the delimiter; given as a bare
  // flag it keeps the "." default.
  itr = options.find(ARG_COUNT_DELIM);
  if (itr != options.end()) {
    delim_ = itr->second;
    count_delim_ = true;
  } else {
    count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
    delim_=".";
  }

  print_stats_ = IsFlagPresent(flags, ARG_STATS);
  count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);

  // Range bounds arrive hex-encoded when key-hex mode is on; decode once.
  if (is_key_hex_) {
    if (!null_from_) {
      from_ = HexToString(from_);
    }
    if (!null_to_) {
      to_ = HexToString(to_);
    }
  }
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
void DBDumperCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(DBDumperCommand::Name());
|
|
|
|
ret.append(HelpRangeCmdArgs());
|
2013-06-19 04:57:54 +02:00
|
|
|
ret.append(" [--" + ARG_TTL + "]");
|
2013-01-11 20:09:23 +01:00
|
|
|
ret.append(" [--" + ARG_MAX_KEYS + "=<N>]");
|
2013-06-19 04:57:54 +02:00
|
|
|
ret.append(" [--" + ARG_TIMESTAMP + "]");
|
2013-01-11 20:09:23 +01:00
|
|
|
ret.append(" [--" + ARG_COUNT_ONLY + "]");
|
2013-11-01 21:59:14 +01:00
|
|
|
ret.append(" [--" + ARG_COUNT_DELIM + "=<char>]");
|
2013-01-11 20:09:23 +01:00
|
|
|
ret.append(" [--" + ARG_STATS + "]");
|
2013-06-19 04:57:54 +02:00
|
|
|
ret.append(" [--" + ARG_TTL_BUCKET + "=<N>]");
|
2013-06-20 20:50:33 +02:00
|
|
|
ret.append(" [--" + ARG_TTL_START + "=<N>:- is inclusive]");
|
|
|
|
ret.append(" [--" + ARG_TTL_END + "=<N>:- is exclusive]");
|
2013-01-11 20:09:23 +01:00
|
|
|
ret.append("\n");
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Iterates the user-visible key space and either dumps each key/value,
// counts keys, aggregates count/size per key-prefix group (--count_delim),
// or — for TTL databases — histograms keys by timestamp bucket.
void DBDumperCommand::DoCommand() {
  if (!db_) {
    return;
  }
  // Parse command line args
  uint64_t count = 0;
  // Optionally dump DB statistics first.
  if (print_stats_) {
    string stats;
    if (db_->GetProperty("rocksdb.stats", &stats)) {
      fprintf(stdout, "%s\n", stats.c_str());
    }
  }

  // Setup key iterator
  Iterator* iter = db_->NewIterator(ReadOptions());
  Status st = iter->status();
  if (!st.ok()) {
    // NOTE(review): the error is recorded but iteration proceeds anyway;
    // an invalid iterator simply yields no entries below.
    exec_state_ = LDBCommandExecuteResult::FAILED("Iterator error."
        + st.ToString());
  }

  if (!null_from_) {
    iter->Seek(from_);
  } else {
    iter->SeekToFirst();
  }

  int max_keys = max_keys_;
  // TTL window: keys with timestamps outside [ttl_start, ttl_end) are
  // skipped. Defaults cover the whole representable TTL range.
  int ttl_start;
  if (!ParseIntOption(option_map_, ARG_TTL_START, ttl_start, exec_state_)) {
    ttl_start = DBWithTTLImpl::kMinTimestamp;  // TTL introduction time
  }
  int ttl_end;
  if (!ParseIntOption(option_map_, ARG_TTL_END, ttl_end, exec_state_)) {
    ttl_end = DBWithTTLImpl::kMaxTimestamp;  // Max time allowed by TTL feature
  }
  if (ttl_end < ttl_start) {
    fprintf(stderr, "Error: End time can't be less than start time\n");
    delete iter;
    return;
  }
  int time_range = ttl_end - ttl_start;
  int bucket_size;
  if (!ParseIntOption(option_map_, ARG_TTL_BUCKET, bucket_size, exec_state_) ||
      bucket_size <= 0) {
    bucket_size = time_range; // Will have just 1 bucket by default
  }
  // Creating variables for row count of each type (count_delim mode):
  // rtype1/rtype2 are the current and previous key-prefix groups, c is the
  // entry count and s2 the accumulated size for the group being folded.
  string rtype1,rtype2,row,val;
  rtype2 = "";
  uint64_t c=0;
  uint64_t s1=0,s2=0;

  // At this point, bucket_size=0 => time_range=0
  uint64_t num_buckets = (bucket_size >= time_range) ? 1 :
    ((time_range + bucket_size - 1) / bucket_size);
  vector<uint64_t> bucket_counts(num_buckets, 0);
  if (is_db_ttl_ && !count_only_ && timestamp_ && !count_delim_) {
    fprintf(stdout, "Dumping key-values from %s to %s\n",
            ReadableTime(ttl_start).c_str(), ReadableTime(ttl_end).c_str());
  }

  for (; iter->Valid(); iter->Next()) {
    int rawtime = 0;
    // If end marker was specified, we stop before it
    if (!null_to_ && (iter->key().ToString() >= to_))
      break;
    // Terminate if maximum number of keys have been dumped
    if (max_keys == 0)
      break;
    // For TTL DBs, read the entry's timestamp and enforce the TTL window.
    if (is_db_ttl_) {
      TtlIterator* it_ttl = dynamic_cast<TtlIterator*>(iter);
      assert(it_ttl);
      rawtime = it_ttl->timestamp();
      if (rawtime < ttl_start || rawtime >= ttl_end) {
        continue;
      }
    }
    if (max_keys > 0) {
      --max_keys;
    }
    // Histogram the timestamp when more than one bucket was requested.
    if (is_db_ttl_ && num_buckets > 1) {
      IncBucketCounts(bucket_counts, ttl_start, time_range, bucket_size,
                      rawtime, num_buckets);
    }
    ++count;
    if (count_delim_) {
      // Group stats mode: group name is the key prefix up to the first
      // delimiter character; size is the full key+value byte size.
      rtype1 = "";
      row = iter->key().ToString();
      val = iter->value().ToString();
      s1 = row.size()+val.size();
      for(int j=0;row[j]!=delim_[0] && row[j]!='\0';j++)
        rtype1+=row[j];
      // On a group change, flush the previous group's totals and start a
      // fresh tally; otherwise keep accumulating.
      if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
        fprintf(stdout,"%s => count:%lld\tsize:%lld\n",rtype2.c_str(),
                (long long )c,(long long)s2);
        c=1;
        s2=s1;
        rtype2 = rtype1;
      } else {
        c++;
        s2+=s1;
        rtype2=rtype1;
      }

    }

    // Plain dump mode: optional timestamp prefix, then "key ==> value".
    if (!count_only_ && !count_delim_) {
      if (is_db_ttl_ && timestamp_) {
        fprintf(stdout, "%s ", ReadableTime(rawtime).c_str());
      }
      string str = PrintKeyValue(iter->key().ToString(),
                                 iter->value().ToString(), is_key_hex_,
                                 is_value_hex_);
      fprintf(stdout, "%s\n", str.c_str());
    }
  }

  // Emit the final summary: TTL histogram, last delim group, or total.
  if (num_buckets > 1 && is_db_ttl_) {
    PrintBucketCounts(bucket_counts, ttl_start, ttl_end, bucket_size,
                      num_buckets);
  } else if(count_delim_) {
    fprintf(stdout,"%s => count:%lld\tsize:%lld\n",rtype2.c_str(),
            (long long )c,(long long)s2);
  } else {
    fprintf(stdout, "Keys in range: %lld\n", (long long) count);
  }
  // Clean up
  delete iter;
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Command-line option names accepted by the reduce_levels command.
const string ReduceDBLevelsCommand::ARG_NEW_LEVELS = "new_levels";           // target number of levels
const string ReduceDBLevelsCommand::ARG_PRINT_OLD_LEVELS = "print_old_levels"; // report the current level count
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Parses options for reduce_levels. --new_levels is mandatory and must be
// positive; --print_old_levels additionally reports the level count in use
// before the reduction.
ReduceDBLevelsCommand::ReduceDBLevelsCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, false,
             BuildCmdLineOptions({ARG_NEW_LEVELS, ARG_PRINT_OLD_LEVELS})),
  old_levels_(1 << 16),
  new_levels_(-1),
  print_old_levels_(false) {

  print_old_levels_ = IsFlagPresent(flags, ARG_PRINT_OLD_LEVELS);
  ParseIntOption(option_map_, ARG_NEW_LEVELS, new_levels_, exec_state_);

  // A missing or non-positive --new_levels leaves new_levels_ <= 0.
  if (new_levels_ <= 0) {
    exec_state_ = LDBCommandExecuteResult::FAILED(
        " Use --" + ARG_NEW_LEVELS + " to specify a new level number\n");
  }
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
vector<string> ReduceDBLevelsCommand::PrepareArgs(const string& db_path,
|
|
|
|
int new_levels, bool print_old_level) {
|
|
|
|
vector<string> ret;
|
|
|
|
ret.push_back("reduce_levels");
|
|
|
|
ret.push_back("--" + ARG_DB + "=" + db_path);
|
2013-03-20 01:28:30 +01:00
|
|
|
ret.push_back("--" + ARG_NEW_LEVELS + "=" + to_string(new_levels));
|
2012-10-31 19:47:18 +01:00
|
|
|
if(print_old_level) {
|
2013-01-11 20:09:23 +01:00
|
|
|
ret.push_back("--" + ARG_PRINT_OLD_LEVELS);
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Appends the one-line usage text for reduce_levels to `ret`.
void ReduceDBLevelsCommand::Help(string& ret) {
  ret += " ";
  ret += ReduceDBLevelsCommand::Name();
  ret += " --" + ARG_NEW_LEVELS + "=<New number of levels>";
  ret += " [--" + ARG_PRINT_OLD_LEVELS + "]";
  ret += "\n";
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Returns the open options for the level-reduction pass: the DB is opened
// with the old level count and with size-triggered compaction effectively
// disabled, so only the explicit CompactRange moves data.
Options ReduceDBLevelsCommand::PrepareOptionsForOpenDB() {
  Options options = LDBCommand::PrepareOptionsForOpenDB();
  options.num_levels = old_levels_;
  options.max_bytes_for_level_multiplier_additional.resize(options.num_levels,
                                                           1);
  // Disable size compaction
  options.max_bytes_for_level_base = 1ULL << 50;
  options.max_bytes_for_level_multiplier = 1;
  options.max_mem_compaction_level = 0;
  return options;
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Determines how many levels the DB at db_path_ currently uses, without
// opening it as a DB: recovers a VersionSet from the manifest and finds the
// deepest level of the default column family that holds any files.
// On success, *levels is set to (deepest non-empty level + 1).
Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt,
    int* levels) {
  EnvOptions soptions;
  // VersionSet requires a table cache; sized the same way DBImpl does
  // (leaving a few file descriptors spare).
  std::shared_ptr<Cache> tc(
      NewLRUCache(opt.max_open_files - 10, opt.table_cache_numshardbits,
                  opt.table_cache_remove_scan_count_limit));
  const InternalKeyComparator cmp(opt.comparator);
  WriteController wc;
  VersionSet versions(db_path_, &opt, soptions, tc.get(), &wc);
  // Recover only the default column family.
  std::vector<ColumnFamilyDescriptor> dummy;
  ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
                                          ColumnFamilyOptions(opt));
  dummy.push_back(dummy_descriptor);
  // We rely the VersionSet::Recover to tell us the internal data structures
  // in the db. And the Recover() should never do any change
  // (like LogAndApply) to the manifest file.
  Status st = versions.Recover(dummy);
  if (!st.ok()) {
    return st;
  }
  // Scan all levels of the default CF; remember the deepest one with files.
  int max = -1;
  auto default_cfd = versions.GetColumnFamilySet()->GetDefault();
  for (int i = 0; i < default_cfd->NumberLevels(); i++) {
    if (default_cfd->current()->NumLevelFiles(i)) {
      max = i;
    }
  }

  *levels = max + 1;
  return st;
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Reduces the number of levels of the DB to new_levels_:
//   1. read the current level count from the manifest,
//   2. open the DB and compact everything to the bottom level,
//   3. rewrite the manifest with the reduced level count.
// No-op (success) if the DB already uses <= new_levels_ levels.
void ReduceDBLevelsCommand::DoCommand() {
  // new_levels_ == 1 is rejected here even though the ctor only requires
  // > 0; reducing to a single level is not supported.
  if (new_levels_ <= 1) {
    exec_state_ = LDBCommandExecuteResult::FAILED(
        "Invalid number of levels.\n");
    return;
  }

  Status st;
  Options opt = PrepareOptionsForOpenDB();
  int old_level_num = -1;
  st = GetOldNumOfLevels(opt, &old_level_num);
  if (!st.ok()) {
    exec_state_ = LDBCommandExecuteResult::FAILED(st.ToString());
    return;
  }

  if (print_old_levels_) {
    fprintf(stdout, "The old number of levels in use is %d\n", old_level_num);
  }

  // Already at or below the target — nothing to do.
  if (old_level_num <= new_levels_) {
    return;
  }

  // Open with the real (old) level count so all existing files are visible.
  old_levels_ = old_level_num;

  OpenDB();
  if (!db_) {
    return;
  }
  // Compact the whole DB to put all files to the highest level.
  fprintf(stdout, "Compacting the db...\n");
  db_->CompactRange(nullptr, nullptr);
  CloseDB();

  // Rewrite the manifest with the new, smaller level count.
  EnvOptions soptions;
  st = VersionSet::ReduceNumberOfLevels(db_path_, &opt, soptions, new_levels_);
  if (!st.ok()) {
    exec_state_ = LDBCommandExecuteResult::FAILED(st.ToString());
    return;
  }
}
|
|
|
|
|
2013-09-04 22:13:08 +02:00
|
|
|
// Command-line option names accepted by the change_compaction_style command.
const string ChangeCompactionStyleCommand::ARG_OLD_COMPACTION_STYLE =
    "old_compaction_style";  // current style: 0 = level, 1 = universal
const string ChangeCompactionStyleCommand::ARG_NEW_COMPACTION_STYLE =
    "new_compaction_style";  // target style: 0 = level, 1 = universal
|
|
|
|
|
|
|
|
// Parses and validates --old_compaction_style / --new_compaction_style.
// Both must be a supported style (level or universal), they must differ,
// and only the level -> universal direction is supported; every violation
// is reported through exec_state_.
ChangeCompactionStyleCommand::ChangeCompactionStyleCommand(
    const vector<string>& params, const map<string, string>& options,
    const vector<string>& flags) :
  LDBCommand(options, flags, false,
             BuildCmdLineOptions({ARG_OLD_COMPACTION_STYLE,
                                  ARG_NEW_COMPACTION_STYLE})),
  old_compaction_style_(-1),
  new_compaction_style_(-1) {

  ParseIntOption(option_map_, ARG_OLD_COMPACTION_STYLE, old_compaction_style_,
    exec_state_);
  if (old_compaction_style_ != kCompactionStyleLevel &&
     old_compaction_style_ != kCompactionStyleUniversal) {
    exec_state_ = LDBCommandExecuteResult::FAILED(
      "Use --" + ARG_OLD_COMPACTION_STYLE + " to specify old compaction " +
      "style. Check ldb help for proper compaction style value.\n");
    return;
  }

  ParseIntOption(option_map_, ARG_NEW_COMPACTION_STYLE, new_compaction_style_,
    exec_state_);
  if (new_compaction_style_ != kCompactionStyleLevel &&
     new_compaction_style_ != kCompactionStyleUniversal) {
    exec_state_ = LDBCommandExecuteResult::FAILED(
      "Use --" + ARG_NEW_COMPACTION_STYLE + " to specify new compaction " +
      "style. Check ldb help for proper compaction style value.\n");
    return;
  }

  // Identical styles: nothing to convert.
  if (new_compaction_style_ == old_compaction_style_) {
    exec_state_ = LDBCommandExecuteResult::FAILED(
      "Old compaction style is the same as new compaction style. "
      "Nothing to do.\n");
    return;
  }

  // Universal -> level is not supported by this command.
  if (old_compaction_style_ == kCompactionStyleUniversal &&
      new_compaction_style_ == kCompactionStyleLevel) {
    exec_state_ = LDBCommandExecuteResult::FAILED(
      "Convert from universal compaction to level compaction. "
      "Nothing to do.\n");
    return;
  }
}
|
|
|
|
|
|
|
|
// Appends the one-line usage text for change_compaction_style to `ret`.
void ChangeCompactionStyleCommand::Help(string& ret) {
  ret += " ";
  ret += ChangeCompactionStyleCommand::Name();
  ret += " --" + ARG_OLD_COMPACTION_STYLE + "=<Old compaction style: 0 " +
         "for level compaction, 1 for universal compaction>";
  ret += " --" + ARG_NEW_COMPACTION_STYLE + "=<New compaction style: 0 " +
         "for level compaction, 1 for universal compaction>";
  ret += "\n";
}
|
|
|
|
|
|
|
|
// Returns the open options for the style conversion.
Options ChangeCompactionStyleCommand::PrepareOptionsForOpenDB() {
  Options options = LDBCommand::PrepareOptionsForOpenDB();

  const bool level_to_universal =
      old_compaction_style_ == kCompactionStyleLevel &&
      new_compaction_style_ == kCompactionStyleUniversal;
  if (level_to_universal) {
    // In order to convert from level compaction to universal compaction, we
    // need to compact all data into a single file and move it to level 0:
    // disable automatic compactions and lift all size limits so the manual
    // compaction can produce that single file.
    options.disable_auto_compactions = true;
    options.target_file_size_base = INT_MAX;
    options.target_file_size_multiplier = 1;
    options.max_bytes_for_level_base = INT_MAX;
    options.max_bytes_for_level_multiplier = 1;
  }

  return options;
}
|
|
|
|
|
|
|
|
void ChangeCompactionStyleCommand::DoCommand() {
|
|
|
|
// print db stats before we have made any change
|
|
|
|
std::string property;
|
|
|
|
std::string files_per_level;
|
|
|
|
for (int i = 0; i < db_->NumberLevels(); i++) {
|
2013-10-05 07:32:05 +02:00
|
|
|
db_->GetProperty("rocksdb.num-files-at-level" + NumberToString(i),
|
2013-09-04 22:13:08 +02:00
|
|
|
&property);
|
|
|
|
|
|
|
|
// format print string
|
|
|
|
char buf[100];
|
|
|
|
snprintf(buf, sizeof(buf), "%s%s", (i ? "," : ""), property.c_str());
|
|
|
|
files_per_level += buf;
|
|
|
|
}
|
|
|
|
fprintf(stdout, "files per level before compaction: %s\n",
|
|
|
|
files_per_level.c_str());
|
|
|
|
|
|
|
|
// manual compact into a single file and move the file to level 0
|
|
|
|
db_->CompactRange(nullptr, nullptr,
|
|
|
|
true /* reduce level */,
|
|
|
|
0 /* reduce to level 0 */);
|
|
|
|
|
|
|
|
// verify compaction result
|
|
|
|
files_per_level = "";
|
|
|
|
int num_files = 0;
|
|
|
|
for (int i = 0; i < db_->NumberLevels(); i++) {
|
2013-10-05 07:32:05 +02:00
|
|
|
db_->GetProperty("rocksdb.num-files-at-level" + NumberToString(i),
|
2013-09-04 22:13:08 +02:00
|
|
|
&property);
|
|
|
|
|
|
|
|
// format print string
|
|
|
|
char buf[100];
|
|
|
|
snprintf(buf, sizeof(buf), "%s%s", (i ? "," : ""), property.c_str());
|
|
|
|
files_per_level += buf;
|
|
|
|
|
|
|
|
num_files = atoi(property.c_str());
|
|
|
|
|
|
|
|
// level 0 should have only 1 file
|
|
|
|
if (i == 0 && num_files != 1) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED("Number of db files at "
|
|
|
|
"level 0 after compaction is " + std::to_string(num_files) +
|
|
|
|
", not 1.\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// other levels should have no file
|
|
|
|
if (i > 0 && num_files != 0) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED("Number of db files at "
|
|
|
|
"level " + std::to_string(i) + " after compaction is " +
|
|
|
|
std::to_string(num_files) + ", not 0.\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fprintf(stdout, "files per level after compaction: %s\n",
|
|
|
|
files_per_level.c_str());
|
|
|
|
}
|
|
|
|
|
2013-02-20 03:12:20 +01:00
|
|
|
class InMemoryHandler : public WriteBatch::Handler {
|
|
|
|
public:
|
2013-12-04 08:16:36 +01:00
|
|
|
InMemoryHandler(stringstream& row, bool print_values) : Handler(),row_(row) {
|
|
|
|
print_values_ = print_values;
|
|
|
|
}
|
2013-02-20 03:12:20 +01:00
|
|
|
|
2013-12-04 08:16:36 +01:00
|
|
|
void commonPutMerge(const Slice& key, const Slice& value) {
|
|
|
|
string k = LDBCommand::StringToHex(key.ToString());
|
|
|
|
if (print_values_) {
|
|
|
|
string v = LDBCommand::StringToHex(value.ToString());
|
|
|
|
row_ << k << " : ";
|
|
|
|
row_ << v << " ";
|
|
|
|
} else {
|
|
|
|
row_ << k << " ";
|
|
|
|
}
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
2013-12-04 08:16:36 +01:00
|
|
|
|
|
|
|
virtual void Put(const Slice& key, const Slice& value) {
|
|
|
|
row_ << "PUT : ";
|
|
|
|
commonPutMerge(key, value);
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
|
|
|
|
2013-12-04 08:16:36 +01:00
|
|
|
virtual void Merge(const Slice& key, const Slice& value) {
|
|
|
|
row_ << "MERGE : ";
|
|
|
|
commonPutMerge(key, value);
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
2013-12-04 08:16:36 +01:00
|
|
|
|
|
|
|
virtual void Delete(const Slice& key) {
|
|
|
|
row_ <<",DELETE : ";
|
|
|
|
row_ << LDBCommand::StringToHex(key.ToString()) << " ";
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
|
|
|
|
2013-12-04 08:16:36 +01:00
|
|
|
virtual ~InMemoryHandler() { };
|
|
|
|
|
2013-02-20 03:12:20 +01:00
|
|
|
private:
|
2013-12-04 08:16:36 +01:00
|
|
|
stringstream & row_;
|
|
|
|
bool print_values_;
|
2013-02-20 03:12:20 +01:00
|
|
|
};
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Command-line option names accepted by WALDumperCommand (dump_wal).
const string WALDumperCommand::ARG_WAL_FILE = "walfile";        // path of the WAL file to dump (required)
const string WALDumperCommand::ARG_PRINT_VALUE = "print_value"; // flag: also print values, not only keys
const string WALDumperCommand::ARG_PRINT_HEADER = "header";     // flag: print a column-header line first
|
|
|
|
|
|
|
|
// Parses the dump_wal options: the WAL file path (required) and the
// header/value printing flags. Marks the command failed when no WAL file
// path was supplied.
WALDumperCommand::WALDumperCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, true,
             BuildCmdLineOptions(
                {ARG_WAL_FILE, ARG_PRINT_HEADER, ARG_PRINT_VALUE})),
    print_header_(false), print_values_(false) {

  wal_file_.clear();

  auto wal_it = options.find(ARG_WAL_FILE);
  if (wal_it != options.end()) {
    wal_file_ = wal_it->second;
  }

  print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
  print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);

  if (wal_file_.empty()) {
    exec_state_ = LDBCommandExecuteResult::FAILED(
        "Argument " + ARG_WAL_FILE + " must be specified.");
  }
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
// Appends the one-line usage summary for the dump_wal command.
void WALDumperCommand::Help(string& ret) {
  ret += " ";
  ret += WALDumperCommand::Name();
  ret += " --" + ARG_WAL_FILE + "=<write_ahead_log_file_path>";
  ret += " [--" + ARG_PRINT_HEADER + "] ";
  ret += " [--" + ARG_PRINT_VALUE + "] ";
  ret += "\n";
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
void WALDumperCommand::DoCommand() {
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
struct StdErrReporter : public log::Reader::Reporter {
|
|
|
|
virtual void Corruption(size_t bytes, const Status& s) {
|
2013-03-20 01:28:30 +01:00
|
|
|
cerr<<"Corruption detected in log file "<<s.ToString()<<"\n";
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2013-01-20 11:07:13 +01:00
|
|
|
unique_ptr<SequentialFile> file;
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
Env* env_ = Env::Default();
|
2013-06-08 00:35:17 +02:00
|
|
|
EnvOptions soptions;
|
2013-03-15 01:00:04 +01:00
|
|
|
Status status = env_->NewSequentialFile(wal_file_, &file, soptions);
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
if (!status.ok()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED("Failed to open WAL file " +
|
|
|
|
status.ToString());
|
|
|
|
} else {
|
|
|
|
StdErrReporter reporter;
|
2013-03-20 01:28:30 +01:00
|
|
|
log::Reader reader(move(file), &reporter, true, 0);
|
2013-01-11 20:09:23 +01:00
|
|
|
string scratch;
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
WriteBatch batch;
|
|
|
|
Slice record;
|
2013-03-20 01:28:30 +01:00
|
|
|
stringstream row;
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
if (print_header_) {
|
2013-03-20 01:28:30 +01:00
|
|
|
cout<<"Sequence,Count,ByteSize,Physical Offset,Key(s)";
|
2013-02-20 03:12:20 +01:00
|
|
|
if (print_values_) {
|
2013-03-20 01:28:30 +01:00
|
|
|
cout << " : value ";
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
2013-03-20 01:28:30 +01:00
|
|
|
cout << "\n";
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
}
|
|
|
|
while(reader.ReadRecord(&record, &scratch)) {
|
2012-11-19 19:46:36 +01:00
|
|
|
row.str("");
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
if (record.size() < 12) {
|
|
|
|
reporter.Corruption(
|
|
|
|
record.size(), Status::Corruption("log record too small"));
|
|
|
|
} else {
|
|
|
|
WriteBatchInternal::SetContents(&batch, record);
|
|
|
|
row<<WriteBatchInternal::Sequence(&batch)<<",";
|
|
|
|
row<<WriteBatchInternal::Count(&batch)<<",";
|
2012-11-19 19:46:36 +01:00
|
|
|
row<<WriteBatchInternal::ByteSize(&batch)<<",";
|
2013-02-20 03:12:20 +01:00
|
|
|
row<<reader.LastRecordOffset()<<",";
|
2013-12-04 08:16:36 +01:00
|
|
|
InMemoryHandler handler(row, print_values_);
|
2013-02-20 03:12:20 +01:00
|
|
|
batch.Iterate(&handler);
|
|
|
|
row<<"\n";
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
}
|
2013-03-20 01:28:30 +01:00
|
|
|
cout<<row.str();
|
LDB can read WAL.
Summary:
Add option to read WAL and print a summary for each record.
facebook task => #1885013
E.G. Output :
./ldb dump_wal --walfile=/tmp/leveldbtest-5907/dbbench/026122.log --header
Sequence,Count,ByteSize
49981,1,100033
49981,1,100033
49982,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49981,1,100033
49982,1,100033
49983,1,100033
49984,1,100033
49981,1,100033
49982,1,100033
Test Plan:
Works run
./ldb read_wal --wal-file=/tmp/leveldbtest-5907/dbbench/000078.log --header
Reviewers: dhruba, heyongqiang
Reviewed By: dhruba
CC: emayanke, leveldb, zshao
Differential Revision: https://reviews.facebook.net/D6675
2012-11-13 01:45:45 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
// Parses the single <key> positional parameter of the get command,
// decoding it from hex when --key_hex/--hex is in effect.
GetCommand::GetCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, true, BuildCmdLineOptions({ARG_TTL, ARG_HEX,
                                                        ARG_KEY_HEX,
                                                        ARG_VALUE_HEX})) {

  if (params.size() == 1) {
    key_ = params.at(0);
  } else {
    exec_state_ = LDBCommandExecuteResult::FAILED(
        "<key> must be specified for the get command");
  }

  if (is_key_hex_) {
    key_ = HexToString(key_);
  }
}
|
|
|
|
|
|
|
|
// Appends the one-line usage summary for the get command.
void GetCommand::Help(string& ret) {
  ret += " ";
  ret += GetCommand::Name();
  ret += " <key>";
  ret += " [--" + ARG_TTL + "]";
  ret += "\n";
}
|
|
|
|
|
|
|
|
void GetCommand::DoCommand() {
|
|
|
|
string value;
|
2013-04-12 05:21:49 +02:00
|
|
|
Status st = db_->Get(ReadOptions(), key_, &value);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "%s\n",
|
|
|
|
(is_value_hex_ ? StringToHex(value) : value).c_str());
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED(st.ToString());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Parses the required --from/--to range options for the approxsize command,
// decoding both endpoints from hex when --key_hex/--hex is in effect.
ApproxSizeCommand::ApproxSizeCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, true,
             BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX,
                                  ARG_FROM, ARG_TO})) {

  auto from_it = options.find(ARG_FROM);
  if (from_it == options.end()) {
    exec_state_ = LDBCommandExecuteResult::FAILED(ARG_FROM +
        " must be specified for approxsize command");
    return;
  }
  start_key_ = from_it->second;

  auto to_it = options.find(ARG_TO);
  if (to_it == options.end()) {
    exec_state_ = LDBCommandExecuteResult::FAILED(ARG_TO +
        " must be specified for approxsize command");
    return;
  }
  end_key_ = to_it->second;

  if (is_key_hex_) {
    start_key_ = HexToString(start_key_);
    end_key_ = HexToString(end_key_);
  }
}
|
|
|
|
|
|
|
|
// Appends the one-line usage summary for the approxsize command.
void ApproxSizeCommand::Help(string& ret) {
  ret += " ";
  ret += ApproxSizeCommand::Name();
  ret += HelpRangeCmdArgs();
  ret += "\n";
}
|
|
|
|
|
|
|
|
// Prints the approximate on-disk size of the [start_key_, end_key_) range.
void ApproxSizeCommand::DoCommand() {
  Range ranges[1] = {Range(start_key_, end_key_)};
  uint64_t sizes[1];
  db_->GetApproximateSizes(ranges, 1, sizes);
  fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
  /* Weird that GetApproximateSizes() returns void, although documentation
   * says that it returns a Status object.
  if (!st.ok()) {
    exec_state_ = LDBCommandExecuteResult::FAILED(st.ToString());
  }
  */
}
|
|
|
|
|
|
|
|
|
|
|
|
// Parses the positional <key> <value> pairs of the batchput command.
// Requires at least one pair and an even parameter count; keys and values
// are hex-decoded according to the --key_hex/--value_hex/--hex flags.
BatchPutCommand::BatchPutCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, false,
             BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
                                  ARG_VALUE_HEX, ARG_CREATE_IF_MISSING})) {

  if (params.size() < 2) {
    exec_state_ = LDBCommandExecuteResult::FAILED(
        "At least one <key> <value> pair must be specified batchput.");
  } else if (params.size() % 2 != 0) {
    exec_state_ = LDBCommandExecuteResult::FAILED(
        "Equal number of <key>s and <value>s must be specified for batchput.");
  } else {
    for (size_t i = 0; i < params.size(); i += 2) {
      string key = params.at(i);
      string value = params.at(i + 1);
      key_values_.emplace_back(is_key_hex_ ? HexToString(key) : key,
                               is_value_hex_ ? HexToString(value) : value);
    }
  }
}
|
|
|
|
|
|
|
|
// Appends the one-line usage summary for the batchput command.
void BatchPutCommand::Help(string& ret) {
  ret += " ";
  ret += BatchPutCommand::Name();
  ret += " <key> <value> [<key> <value>] [..]";
  ret += " [--" + ARG_TTL + "]";
  ret += "\n";
}
|
|
|
|
|
|
|
|
void BatchPutCommand::DoCommand() {
|
2013-04-12 05:21:49 +02:00
|
|
|
WriteBatch batch;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2013-03-20 01:28:30 +01:00
|
|
|
for (vector<pair<string, string>>::const_iterator itr
|
2014-09-29 23:05:12 +02:00
|
|
|
= key_values_.begin(); itr != key_values_.end(); ++itr) {
|
2013-01-11 20:09:23 +01:00
|
|
|
batch.Put(itr->first, itr->second);
|
|
|
|
}
|
2013-04-12 05:21:49 +02:00
|
|
|
Status st = db_->Write(WriteOptions(), &batch);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED(st.ToString());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Extends the base open options: batchput may create the DB when the
// --create_if_missing flag is given.
Options BatchPutCommand::PrepareOptionsForOpenDB() {
  Options options = LDBCommand::PrepareOptionsForOpenDB();
  options.create_if_missing = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
  return options;
}
|
|
|
|
|
|
|
|
|
|
|
|
// Parses the optional --from/--to range bounds (hex-decoded when requested)
// and the --max_keys limit for the scan command. stoi() failures on
// --max_keys are converted into a failed execution state.
ScanCommand::ScanCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, true,
             BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_TO,
                                  ARG_VALUE_HEX, ARG_FROM, ARG_TIMESTAMP,
                                  ARG_MAX_KEYS, ARG_TTL_START, ARG_TTL_END})),
    start_key_specified_(false),
    end_key_specified_(false),
    max_keys_scanned_(-1) {

  auto opt = options.find(ARG_FROM);
  if (opt != options.end()) {
    start_key_ = opt->second;
    if (is_key_hex_) {
      start_key_ = HexToString(start_key_);
    }
    start_key_specified_ = true;
  }

  opt = options.find(ARG_TO);
  if (opt != options.end()) {
    end_key_ = opt->second;
    if (is_key_hex_) {
      end_key_ = HexToString(end_key_);
    }
    end_key_specified_ = true;
  }

  opt = options.find(ARG_MAX_KEYS);
  if (opt != options.end()) {
    try {
      max_keys_scanned_ = stoi(opt->second);
    } catch (const invalid_argument&) {
      exec_state_ = LDBCommandExecuteResult::FAILED(ARG_MAX_KEYS +
          " has an invalid value");
    } catch (const out_of_range&) {
      exec_state_ = LDBCommandExecuteResult::FAILED(ARG_MAX_KEYS +
          " has a value out-of-range");
    }
  }
}
|
|
|
|
|
|
|
|
// Appends the one-line usage summary for the scan command, listing the
// optional TTL, timestamp, key-limit, and TTL time-range arguments.
//
// Fix: the --max_keys placeholder previously rendered as "=<N>q]" — the
// stray 'q' was a typo in the user-facing help text.
void ScanCommand::Help(string& ret) {
  ret.append(" ");
  ret.append(ScanCommand::Name());
  ret.append(HelpRangeCmdArgs());
  ret.append(" [--" + ARG_TTL + "]");
  ret.append(" [--" + ARG_TIMESTAMP + "]");
  ret.append(" [--" + ARG_MAX_KEYS + "=<N>] ");
  ret.append(" [--" + ARG_TTL_START + "=<N>:- is inclusive]");
  ret.append(" [--" + ARG_TTL_END + "=<N>:- is exclusive]");
  ret.append("\n");
}
|
|
|
|
|
|
|
|
void ScanCommand::DoCommand() {
|
|
|
|
|
|
|
|
int num_keys_scanned = 0;
|
2013-04-12 05:21:49 +02:00
|
|
|
Iterator* it = db_->NewIterator(ReadOptions());
|
2013-01-11 20:09:23 +01:00
|
|
|
if (start_key_specified_) {
|
|
|
|
it->Seek(start_key_);
|
|
|
|
} else {
|
|
|
|
it->SeekToFirst();
|
|
|
|
}
|
2013-06-19 04:57:54 +02:00
|
|
|
int ttl_start;
|
|
|
|
if (!ParseIntOption(option_map_, ARG_TTL_START, ttl_start, exec_state_)) {
|
2014-04-29 05:34:20 +02:00
|
|
|
ttl_start = DBWithTTLImpl::kMinTimestamp; // TTL introduction time
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
int ttl_end;
|
|
|
|
if (!ParseIntOption(option_map_, ARG_TTL_END, ttl_end, exec_state_)) {
|
2014-04-29 05:34:20 +02:00
|
|
|
ttl_end = DBWithTTLImpl::kMaxTimestamp; // Max time allowed by TTL feature
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
if (ttl_end < ttl_start) {
|
|
|
|
fprintf(stderr, "Error: End time can't be less than start time\n");
|
|
|
|
delete it;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (is_db_ttl_ && timestamp_) {
|
|
|
|
fprintf(stdout, "Scanning key-values from %s to %s\n",
|
|
|
|
ReadableTime(ttl_start).c_str(), ReadableTime(ttl_end).c_str());
|
|
|
|
}
|
2013-01-11 20:09:23 +01:00
|
|
|
for ( ;
|
|
|
|
it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_);
|
|
|
|
it->Next()) {
|
2014-06-20 08:54:13 +02:00
|
|
|
string key = ldb_options_.key_formatter->Format(it->key());
|
2013-06-19 04:57:54 +02:00
|
|
|
if (is_db_ttl_) {
|
2013-08-06 02:55:44 +02:00
|
|
|
TtlIterator* it_ttl = dynamic_cast<TtlIterator*>(it);
|
|
|
|
assert(it_ttl);
|
2013-06-20 20:50:33 +02:00
|
|
|
int rawtime = it_ttl->timestamp();
|
|
|
|
if (rawtime < ttl_start || rawtime >= ttl_end) {
|
2013-06-19 04:57:54 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (timestamp_) {
|
|
|
|
fprintf(stdout, "%s ", ReadableTime(rawtime).c_str());
|
|
|
|
}
|
|
|
|
}
|
2013-06-20 20:50:33 +02:00
|
|
|
string value = it->value().ToString();
|
2013-01-11 20:09:23 +01:00
|
|
|
fprintf(stdout, "%s : %s\n",
|
2014-06-20 08:54:13 +02:00
|
|
|
(is_key_hex_ ? "0x" + it->key().ToString(true) : key).c_str(),
|
|
|
|
(is_value_hex_ ? StringToHex(value) : value).c_str()
|
2013-01-11 20:09:23 +01:00
|
|
|
);
|
|
|
|
num_keys_scanned++;
|
|
|
|
if (max_keys_scanned_ >= 0 && num_keys_scanned >= max_keys_scanned_) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!it->status().ok()) { // Check for any errors found during the scan
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED(it->status().ToString());
|
|
|
|
}
|
|
|
|
delete it;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Parses the single <key> positional parameter of the delete command,
// hex-decoding it when --key_hex/--hex is in effect.
DeleteCommand::DeleteCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, false,
             BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {

  if (params.size() != 1) {
    exec_state_ = LDBCommandExecuteResult::FAILED(
        "KEY must be specified for the delete command");
    return;
  }
  key_ = params.at(0);
  if (is_key_hex_) {
    key_ = HexToString(key_);
  }
}
|
|
|
|
|
|
|
|
// Appends the one-line usage summary for the delete command.
void DeleteCommand::Help(string& ret) {
  ret += " ";
  ret += DeleteCommand::Name() + " <key>";
  ret += "\n";
}
|
|
|
|
|
|
|
|
void DeleteCommand::DoCommand() {
|
2013-04-12 05:21:49 +02:00
|
|
|
Status st = db_->Delete(WriteOptions(), key_);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED(st.ToString());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
// Parses the <key> <value> positional parameters of the put command,
// hex-decoding each according to the --key_hex/--value_hex/--hex flags.
PutCommand::PutCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, false,
             BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
                                  ARG_VALUE_HEX, ARG_CREATE_IF_MISSING})) {

  if (params.size() == 2) {
    key_ = params.at(0);
    value_ = params.at(1);
  } else {
    exec_state_ = LDBCommandExecuteResult::FAILED(
        "<key> and <value> must be specified for the put command");
  }

  if (is_key_hex_) {
    key_ = HexToString(key_);
  }
  if (is_value_hex_) {
    value_ = HexToString(value_);
  }
}
|
|
|
|
|
|
|
|
// Appends the one-line usage summary for the put command.
void PutCommand::Help(string& ret) {
  ret += " ";
  ret += PutCommand::Name();
  ret += " <key> <value> ";
  ret += " [--" + ARG_TTL + "]";
  ret += "\n";
}
|
|
|
|
|
|
|
|
void PutCommand::DoCommand() {
|
2013-04-12 05:21:49 +02:00
|
|
|
Status st = db_->Put(WriteOptions(), key_, value_);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED(st.ToString());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
// Extends the base open options: put may create the DB when the
// --create_if_missing flag is given.
Options PutCommand::PrepareOptionsForOpenDB() {
  Options options = LDBCommand::PrepareOptionsForOpenDB();
  options.create_if_missing = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
  return options;
}
|
|
|
|
|
|
|
|
|
|
|
|
// Keywords recognized by the interactive query shell (DBQuerierCommand).
const char* DBQuerierCommand::HELP_CMD = "help";
const char* DBQuerierCommand::GET_CMD = "get";
const char* DBQuerierCommand::PUT_CMD = "put";
const char* DBQuerierCommand::DELETE_CMD = "delete";
|
|
|
|
|
|
|
|
// The query shell takes no positional parameters; only the hex-related
// and TTL flags are accepted. All parsing happens in the base class.
DBQuerierCommand::DBQuerierCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags)
    : LDBCommand(options, flags, false,
                 BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
                                      ARG_VALUE_HEX})) {}
|
|
|
|
|
|
|
|
// Appends the usage summary and a short description of the query shell.
void DBQuerierCommand::Help(string& ret) {
  ret += " ";
  ret += DBQuerierCommand::Name();
  ret += " [--" + ARG_TTL + "]";
  ret += "\n";
  ret += "    Starts a REPL shell. Type help for list of available "
         "commands.";
  ret += "\n";
}
|
|
|
|
|
|
|
|
void DBQuerierCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
ReadOptions read_options;
|
|
|
|
WriteOptions write_options;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
string line;
|
|
|
|
string key;
|
|
|
|
string value;
|
2013-03-20 01:28:30 +01:00
|
|
|
while (getline(cin, line, '\n')) {
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
// Parse line into vector<string>
|
|
|
|
vector<string> tokens;
|
|
|
|
size_t pos = 0;
|
|
|
|
while (true) {
|
|
|
|
size_t pos2 = line.find(' ', pos);
|
|
|
|
if (pos2 == string::npos) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
tokens.push_back(line.substr(pos, pos2-pos));
|
|
|
|
pos = pos2 + 1;
|
|
|
|
}
|
|
|
|
tokens.push_back(line.substr(pos));
|
|
|
|
|
|
|
|
const string& cmd = tokens[0];
|
|
|
|
|
|
|
|
if (cmd == HELP_CMD) {
|
|
|
|
fprintf(stdout,
|
|
|
|
"get <key>\n"
|
|
|
|
"put <key> <value>\n"
|
|
|
|
"delete <key>\n");
|
|
|
|
} else if (cmd == DELETE_CMD && tokens.size() == 2) {
|
|
|
|
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
|
|
|
|
db_->Delete(write_options, Slice(key));
|
|
|
|
fprintf(stdout, "Successfully deleted %s\n", tokens[1].c_str());
|
|
|
|
} else if (cmd == PUT_CMD && tokens.size() == 3) {
|
|
|
|
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
|
|
|
|
value = (is_value_hex_ ? HexToString(tokens[2]) : tokens[2]);
|
|
|
|
db_->Put(write_options, Slice(key), Slice(value));
|
|
|
|
fprintf(stdout, "Successfully put %s %s\n",
|
|
|
|
tokens[1].c_str(), tokens[2].c_str());
|
|
|
|
} else if (cmd == GET_CMD && tokens.size() == 2) {
|
|
|
|
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
|
|
|
|
if (db_->Get(read_options, Slice(key), &value).ok()) {
|
|
|
|
fprintf(stdout, "%s\n", PrintKeyValue(key, value,
|
|
|
|
is_key_hex_, is_value_hex_).c_str());
|
|
|
|
} else {
|
|
|
|
fprintf(stdout, "Not found %s\n", tokens[1].c_str());
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
fprintf(stdout, "Unknown command %s\n", line.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-03-20 21:42:45 +01:00
|
|
|
// Consistency checker. Accepts positional `params` for interface
// uniformity but ignores them; it registers no command-line options.
CheckConsistencyCommand::CheckConsistencyCommand(const vector<string>& params,
    const map<string, string>& options, const vector<string>& flags) :
  LDBCommand(options, flags, false, BuildCmdLineOptions({})) {}
|
|
|
|
|
|
|
|
// Appends this command's usage line to the aggregated help text.
void CheckConsistencyCommand::Help(string& ret) {
  ret.append("  ").append(CheckConsistencyCommand::Name()).append("\n");
}
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-03-20 21:42:45 +01:00
|
|
|
void CheckConsistencyCommand::DoCommand() {
|
|
|
|
Options opt = PrepareOptionsForOpenDB();
|
2014-03-20 22:18:29 +01:00
|
|
|
opt.paranoid_checks = true;
|
2014-03-20 21:42:45 +01:00
|
|
|
if (!exec_state_.IsNotStarted()) {
|
|
|
|
return;
|
|
|
|
}
|
2014-03-20 22:18:29 +01:00
|
|
|
DB* db;
|
|
|
|
Status st = DB::OpenForReadOnly(opt, db_path_, &db, false);
|
|
|
|
delete db;
|
2014-03-20 21:42:45 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::FAILED(st.ToString());
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
2014-03-20 21:42:45 +01:00
|
|
|
|
|
|
|
} // namespace rocksdb
|
2014-04-15 22:39:26 +02:00
|
|
|
#endif // ROCKSDB_LITE
|