// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//

#ifndef ROCKSDB_LITE
#include "tools/ldb_cmd.h"

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <inttypes.h>

#include "db/db_impl.h"
#include "db/dbformat.h"
#include "db/filename.h"
#include "db/log_reader.h"
#include "db/write_batch_internal.h"
#include "db/writebuffer.h"
#include "port/dirent.h"
#include "rocksdb/cache.h"
#include "rocksdb/table_properties.h"
#include "rocksdb/write_batch.h"
#include "table/scoped_arena_iterator.h"
#include "tools/sst_dump_tool_imp.h"
#include "util/coding.h"
#include "util/stderr_logger.h"
#include "util/string_util.h"
#include "utilities/ttl/db_ttl_impl.h"

#include <cstdlib>
#include <ctime>
#include <limits>
#include <sstream>
#include <string>
#include <stdexcept>

namespace rocksdb {

using namespace std;

const string LDBCommand::ARG_DB = "db";
const string LDBCommand::ARG_PATH = "path";
const string LDBCommand::ARG_HEX = "hex";
const string LDBCommand::ARG_KEY_HEX = "key_hex";
const string LDBCommand::ARG_VALUE_HEX = "value_hex";
const string LDBCommand::ARG_CF_NAME = "column_family";
const string LDBCommand::ARG_TTL = "ttl";
const string LDBCommand::ARG_TTL_START = "start_time";
const string LDBCommand::ARG_TTL_END = "end_time";
const string LDBCommand::ARG_TIMESTAMP = "timestamp";
const string LDBCommand::ARG_FROM = "from";
const string LDBCommand::ARG_TO = "to";
const string LDBCommand::ARG_MAX_KEYS = "max_keys";
const string LDBCommand::ARG_BLOOM_BITS = "bloom_bits";
const string LDBCommand::ARG_FIX_PREFIX_LEN = "fix_prefix_len";
const string LDBCommand::ARG_COMPRESSION_TYPE = "compression_type";
const string LDBCommand::ARG_BLOCK_SIZE = "block_size";
const string LDBCommand::ARG_AUTO_COMPACTION = "auto_compaction";
const string LDBCommand::ARG_DB_WRITE_BUFFER_SIZE = "db_write_buffer_size";
const string LDBCommand::ARG_WRITE_BUFFER_SIZE = "write_buffer_size";
const string LDBCommand::ARG_FILE_SIZE = "file_size";
const string LDBCommand::ARG_CREATE_IF_MISSING = "create_if_missing";
const string LDBCommand::ARG_NO_VALUE = "no_value";

const char* LDBCommand::DELIM = " ==> ";

namespace {

void DumpWalFile(std::string wal_file, bool print_header, bool print_values,
                 LDBCommandExecuteResult* exec_state);

void DumpSstFile(std::string filename, bool output_hex, bool show_properties);
};

LDBCommand* LDBCommand::InitFromCmdLineArgs(
    int argc, char** argv, const Options& options,
    const LDBOptions& ldb_options,
    const std::vector<ColumnFamilyDescriptor>* column_families) {
  vector<string> args;
  for (int i = 1; i < argc; i++) {
    args.push_back(argv[i]);
  }
  return InitFromCmdLineArgs(args, options, ldb_options, column_families);
}

/**
 * Parse the command-line arguments and create the appropriate LDBCommand
 * instance.
 * The command line arguments must be in the following format:
 * ./ldb --db=PATH_TO_DB [--commonOpt1=commonOpt1Val] ..
 *        COMMAND <PARAM1> <PARAM2> ... [-cmdSpecificOpt1=cmdSpecificOpt1Val] ..
 * This is similar to the command line format used by HBaseClientTool.
 * Command name is not included in args.
 * Returns nullptr if the command-line cannot be parsed.
 */
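// For instance (paths and values here are purely illustrative), an invocation
// might look like:
//   ./ldb --db=/tmp/test_db scan --hex --max_keys=10
// where "scan" is the command and the remaining tokens are the common and
// command-specific options/flags in the format described above.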
LDBCommand* LDBCommand::InitFromCmdLineArgs(
    const vector<string>& args, const Options& options,
    const LDBOptions& ldb_options,
    const std::vector<ColumnFamilyDescriptor>* column_families) {
  // --x=y command line arguments are added as x->y map entries.
  map<string, string> option_map;

  // Command-line arguments of the form --hex (i.e. with no value) end up in
  // this array as "hex".
  vector<string> flags;

  // Everything other than option_map and flags. Represents commands
  // and their parameters. For eg: put key1 value1 go into this vector.
  vector<string> cmdTokens;

  const string OPTION_PREFIX = "--";

  for (const auto& arg : args) {
    if (arg[0] == '-' && arg[1] == '-') {
      vector<string> splits = StringSplit(arg, '=');
      if (splits.size() == 2) {
        string optionKey = splits[0].substr(OPTION_PREFIX.size());
        option_map[optionKey] = splits[1];
      } else {
        string optionKey = splits[0].substr(OPTION_PREFIX.size());
        flags.push_back(optionKey);
      }
    } else {
      cmdTokens.push_back(arg);
    }
  }

  if (cmdTokens.size() < 1) {
    fprintf(stderr, "Command not specified!");
    return nullptr;
  }

  string cmd = cmdTokens[0];
  vector<string> cmdParams(cmdTokens.begin()+1, cmdTokens.end());
  LDBCommand* command = LDBCommand::SelectCommand(
      cmd,
      cmdParams,
      option_map,
      flags
  );

  if (command) {
    command->SetDBOptions(options);
    command->SetLDBOptions(ldb_options);
  }
  return command;
}

LDBCommand* LDBCommand::SelectCommand(
    const std::string& cmd,
    const vector<string>& cmdParams,
    const map<string, string>& option_map,
    const vector<string>& flags
  ) {
  if (cmd == GetCommand::Name()) {
    return new GetCommand(cmdParams, option_map, flags);
  } else if (cmd == PutCommand::Name()) {
    return new PutCommand(cmdParams, option_map, flags);
  } else if (cmd == BatchPutCommand::Name()) {
    return new BatchPutCommand(cmdParams, option_map, flags);
  } else if (cmd == ScanCommand::Name()) {
    return new ScanCommand(cmdParams, option_map, flags);
  } else if (cmd == DeleteCommand::Name()) {
    return new DeleteCommand(cmdParams, option_map, flags);
  } else if (cmd == ApproxSizeCommand::Name()) {
    return new ApproxSizeCommand(cmdParams, option_map, flags);
  } else if (cmd == DBQuerierCommand::Name()) {
    return new DBQuerierCommand(cmdParams, option_map, flags);
  } else if (cmd == CompactorCommand::Name()) {
    return new CompactorCommand(cmdParams, option_map, flags);
  } else if (cmd == WALDumperCommand::Name()) {
    return new WALDumperCommand(cmdParams, option_map, flags);
  } else if (cmd == ReduceDBLevelsCommand::Name()) {
    return new ReduceDBLevelsCommand(cmdParams, option_map, flags);
  } else if (cmd == ChangeCompactionStyleCommand::Name()) {
    return new ChangeCompactionStyleCommand(cmdParams, option_map, flags);
  } else if (cmd == DBDumperCommand::Name()) {
    return new DBDumperCommand(cmdParams, option_map, flags);
  } else if (cmd == DBLoaderCommand::Name()) {
    return new DBLoaderCommand(cmdParams, option_map, flags);
  } else if (cmd == ManifestDumpCommand::Name()) {
    return new ManifestDumpCommand(cmdParams, option_map, flags);
  } else if (cmd == ListColumnFamiliesCommand::Name()) {
    return new ListColumnFamiliesCommand(cmdParams, option_map, flags);
  } else if (cmd == CreateColumnFamilyCommand::Name()) {
    return new CreateColumnFamilyCommand(cmdParams, option_map, flags);
  } else if (cmd == DBFileDumperCommand::Name()) {
    return new DBFileDumperCommand(cmdParams, option_map, flags);
  } else if (cmd == InternalDumpCommand::Name()) {
    return new InternalDumpCommand(cmdParams, option_map, flags);
  } else if (cmd == CheckConsistencyCommand::Name()) {
    return new CheckConsistencyCommand(cmdParams, option_map, flags);
  } else if (cmd == RepairCommand::Name()) {
    return new RepairCommand(cmdParams, option_map, flags);
  }
  return nullptr;
}

/**
 * Parses the specified integer option and fills in the value.
 * Returns true if the option is found.
 * Returns false if the option is not found or if there is an error parsing the
 * value. If there is an error, the specified exec_state is also updated.
 */
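// As a sketch of the expected flow (values illustrative): an argument such as
// "--bloom_bits=14" is first stored as option_map_["bloom_bits"] = "14" by
// InitFromCmdLineArgs(), and a later call like
//   ParseIntOption(option_map_, ARG_BLOOM_BITS, bits, exec_state_)
// would set bits to 14 and return true.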
bool LDBCommand::ParseIntOption(const map<string, string>& options,
                                const string& option, int& value,
                                LDBCommandExecuteResult& exec_state) {
  map<string, string>::const_iterator itr = option_map_.find(option);
  if (itr != option_map_.end()) {
    try {
#if defined(CYGWIN)
      value = strtol(itr->second.c_str(), 0, 10);
#else
      value = stoi(itr->second);
#endif
      return true;
    } catch (const invalid_argument&) {
      exec_state =
          LDBCommandExecuteResult::Failed(option + " has an invalid value.");
    } catch (const out_of_range&) {
      exec_state = LDBCommandExecuteResult::Failed(
          option + " has a value out-of-range.");
    }
  }
  return false;
}

/**
 * Parses the specified option and fills in the value.
 * Returns true if the option is found.
 * Returns false otherwise.
 */
bool LDBCommand::ParseStringOption(const map<string, string>& options,
                                   const string& option, string* value) {
  auto itr = option_map_.find(option);
  if (itr != option_map_.end()) {
    *value = itr->second;
    return true;
  }
  return false;
}

Options LDBCommand::PrepareOptionsForOpenDB() {
  Options opt = options_;
  opt.create_if_missing = false;

  map<string, string>::const_iterator itr;

  BlockBasedTableOptions table_options;
  bool use_table_options = false;
  int bits;
  if (ParseIntOption(option_map_, ARG_BLOOM_BITS, bits, exec_state_)) {
    if (bits > 0) {
      use_table_options = true;
      table_options.filter_policy.reset(NewBloomFilterPolicy(bits));
    } else {
      exec_state_ =
          LDBCommandExecuteResult::Failed(ARG_BLOOM_BITS + " must be > 0.");
    }
  }

  int block_size;
  if (ParseIntOption(option_map_, ARG_BLOCK_SIZE, block_size, exec_state_)) {
    if (block_size > 0) {
      use_table_options = true;
      table_options.block_size = block_size;
    } else {
      exec_state_ =
          LDBCommandExecuteResult::Failed(ARG_BLOCK_SIZE + " must be > 0.");
    }
  }

  if (use_table_options) {
    opt.table_factory.reset(NewBlockBasedTableFactory(table_options));
  }

  itr = option_map_.find(ARG_AUTO_COMPACTION);
  if (itr != option_map_.end()) {
    opt.disable_auto_compactions = !StringToBool(itr->second);
  }

  itr = option_map_.find(ARG_COMPRESSION_TYPE);
  if (itr != option_map_.end()) {
    string comp = itr->second;
    if (comp == "no") {
      opt.compression = kNoCompression;
    } else if (comp == "snappy") {
      opt.compression = kSnappyCompression;
    } else if (comp == "zlib") {
      opt.compression = kZlibCompression;
    } else if (comp == "bzip2") {
      opt.compression = kBZip2Compression;
    } else if (comp == "lz4") {
      opt.compression = kLZ4Compression;
    } else if (comp == "lz4hc") {
      opt.compression = kLZ4HCCompression;
    } else if (comp == "xpress") {
      opt.compression = kXpressCompression;
    } else if (comp == "zstd") {
      opt.compression = kZSTDNotFinalCompression;
    } else {
      // Unknown compression.
      exec_state_ =
          LDBCommandExecuteResult::Failed("Unknown compression type: " + comp);
    }
  }
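  // For example (illustrative values), passing "--compression_type=lz4" on
  // the command line selects kLZ4Compression above, while an unrecognized
  // name such as "--compression_type=foo" fails the command via exec_state_.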

  int db_write_buffer_size;
  if (ParseIntOption(option_map_, ARG_DB_WRITE_BUFFER_SIZE,
                     db_write_buffer_size, exec_state_)) {
    if (db_write_buffer_size >= 0) {
      opt.db_write_buffer_size = db_write_buffer_size;
    } else {
      exec_state_ = LDBCommandExecuteResult::Failed(ARG_DB_WRITE_BUFFER_SIZE +
                                                    " must be >= 0.");
    }
  }

  int write_buffer_size;
  if (ParseIntOption(option_map_, ARG_WRITE_BUFFER_SIZE, write_buffer_size,
                     exec_state_)) {
    if (write_buffer_size > 0) {
      opt.write_buffer_size = write_buffer_size;
    } else {
      exec_state_ = LDBCommandExecuteResult::Failed(ARG_WRITE_BUFFER_SIZE +
                                                    " must be > 0.");
    }
  }

  int file_size;
  if (ParseIntOption(option_map_, ARG_FILE_SIZE, file_size, exec_state_)) {
    if (file_size > 0) {
      opt.target_file_size_base = file_size;
    } else {
      exec_state_ =
          LDBCommandExecuteResult::Failed(ARG_FILE_SIZE + " must be > 0.");
    }
  }

  if (opt.db_paths.size() == 0) {
    opt.db_paths.emplace_back(db_path_, std::numeric_limits<uint64_t>::max());
  }

  int fix_prefix_len;
  if (ParseIntOption(option_map_, ARG_FIX_PREFIX_LEN, fix_prefix_len,
                     exec_state_)) {
    if (fix_prefix_len > 0) {
      opt.prefix_extractor.reset(
          NewFixedPrefixTransform(static_cast<size_t>(fix_prefix_len)));
    } else {
      exec_state_ =
          LDBCommandExecuteResult::Failed(ARG_FIX_PREFIX_LEN + " must be > 0.");
    }
  }

  return opt;
}

bool LDBCommand::ParseKeyValue(const string& line, string* key, string* value,
                               bool is_key_hex, bool is_value_hex) {
  size_t pos = line.find(DELIM);
  if (pos != string::npos) {
    *key = line.substr(0, pos);
    *value = line.substr(pos + strlen(DELIM));
    if (is_key_hex) {
      *key = HexToString(*key);
    }
    if (is_value_hex) {
      *value = HexToString(*value);
    }
    return true;
  } else {
    return false;
  }
}
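// For example (illustrative input): with DELIM of " ==> ", the line
//   "key1 ==> value1"
// is split into key "key1" and value "value1"; when is_key_hex/is_value_hex
// are set, both parts are additionally decoded with HexToString() first.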

/**
 * Make sure that ONLY the command-line options and flags expected by this
 * command are specified on the command-line. Extraneous options are usually
 * the result of user error.
 * Returns true if all checks pass. Else returns false, and prints an
 * appropriate error msg to stderr.
 */
bool LDBCommand::ValidateCmdLineOptions() {
  for (map<string, string>::const_iterator itr = option_map_.begin();
       itr != option_map_.end(); ++itr) {
    if (find(valid_cmd_line_options_.begin(),
             valid_cmd_line_options_.end(), itr->first) ==
        valid_cmd_line_options_.end()) {
      fprintf(stderr, "Invalid command-line option %s\n", itr->first.c_str());
      return false;
    }
  }

  for (vector<string>::const_iterator itr = flags_.begin();
       itr != flags_.end(); ++itr) {
    if (find(valid_cmd_line_options_.begin(),
             valid_cmd_line_options_.end(), *itr) ==
        valid_cmd_line_options_.end()) {
      fprintf(stderr, "Invalid command-line flag %s\n", itr->c_str());
      return false;
    }
  }

  if (!NoDBOpen() && option_map_.find(ARG_DB) == option_map_.end() &&
      option_map_.find(ARG_PATH) == option_map_.end()) {
    fprintf(stderr, "Either %s or %s must be specified.\n", ARG_DB.c_str(),
            ARG_PATH.c_str());
    return false;
  }

  return true;
}

CompactorCommand::CompactorCommand(const vector<string>& params,
      const map<string, string>& options, const vector<string>& flags) :
    LDBCommand(options, flags, false,
               BuildCmdLineOptions({ARG_FROM, ARG_TO, ARG_HEX, ARG_KEY_HEX,
                                    ARG_VALUE_HEX, ARG_TTL})),
    null_from_(true), null_to_(true) {

  map<string, string>::const_iterator itr = options.find(ARG_FROM);
  if (itr != options.end()) {
    null_from_ = false;
    from_ = itr->second;
  }

  itr = options.find(ARG_TO);
  if (itr != options.end()) {
    null_to_ = false;
    to_ = itr->second;
  }

  if (is_key_hex_) {
    if (!null_from_) {
      from_ = HexToString(from_);
    }
    if (!null_to_) {
      to_ = HexToString(to_);
    }
  }
}

void CompactorCommand::Help(string& ret) {
  ret.append(" ");
  ret.append(CompactorCommand::Name());
  ret.append(HelpRangeCmdArgs());
  ret.append("\n");
}
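// An invocation of this command might look like the following (path and keys
// illustrative, and assuming CompactorCommand::Name() is "compact"):
//   ./ldb --db=/tmp/test_db compact --from=key1 --to=key2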

void CompactorCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }

  Slice* begin = nullptr;
  Slice* end = nullptr;
  if (!null_from_) {
    begin = new Slice(from_);
  }
  if (!null_to_) {
    end = new Slice(to_);
  }

  CompactRangeOptions cro;
  cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;

  db_->CompactRange(cro, begin, end);
  exec_state_ = LDBCommandExecuteResult::Succeed("");

  delete begin;
  delete end;
}

// ----------------------------------------------------------------------------

const string DBLoaderCommand::ARG_DISABLE_WAL = "disable_wal";
const string DBLoaderCommand::ARG_BULK_LOAD = "bulk_load";
const string DBLoaderCommand::ARG_COMPACT = "compact";

DBLoaderCommand::DBLoaderCommand(const vector<string>& params,
      const map<string, string>& options, const vector<string>& flags) :
    LDBCommand(options, flags, false,
               BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX,
                                    ARG_FROM, ARG_TO, ARG_CREATE_IF_MISSING,
                                    ARG_DISABLE_WAL, ARG_BULK_LOAD,
                                    ARG_COMPACT})),
    create_if_missing_(false), disable_wal_(false), bulk_load_(false),
    compact_(false) {

  create_if_missing_ = IsFlagPresent(flags, ARG_CREATE_IF_MISSING);
  disable_wal_ = IsFlagPresent(flags, ARG_DISABLE_WAL);
  bulk_load_ = IsFlagPresent(flags, ARG_BULK_LOAD);
  compact_ = IsFlagPresent(flags, ARG_COMPACT);
}

void DBLoaderCommand::Help(string& ret) {
  ret.append(" ");
  ret.append(DBLoaderCommand::Name());
  ret.append(" [--" + ARG_CREATE_IF_MISSING + "]");
  ret.append(" [--" + ARG_DISABLE_WAL + "]");
  ret.append(" [--" + ARG_BULK_LOAD + "]");
  ret.append(" [--" + ARG_COMPACT + "]");
  ret.append("\n");
}

Options DBLoaderCommand::PrepareOptionsForOpenDB() {
  Options opt = LDBCommand::PrepareOptionsForOpenDB();
  opt.create_if_missing = create_if_missing_;
  if (bulk_load_) {
    opt.PrepareForBulkLoad();
  }
  return opt;
}

void DBLoaderCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }

  WriteOptions write_options;
  if (disable_wal_) {
    write_options.disableWAL = true;
  }

  int bad_lines = 0;
  string line;
  while (getline(cin, line, '\n')) {
    string key;
    string value;
    if (ParseKeyValue(line, &key, &value, is_key_hex_, is_value_hex_)) {
      db_->Put(write_options, GetCfHandle(), Slice(key), Slice(value));
    } else if (0 == line.find("Keys in range:")) {
      // ignore this line
    } else if (0 == line.find("Created bg thread 0x")) {
      // ignore this line
    } else {
      bad_lines++;
    }
  }

  if (bad_lines > 0) {
    cout << "Warning: " << bad_lines << " bad lines ignored." << endl;
  }
  if (compact_) {
    db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);
  }
}
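// The loader reads key/value pairs from stdin in the DELIM format accepted by
// ParseKeyValue(). A run might look like (path and file illustrative, and
// assuming DBLoaderCommand::Name() is "load"):
//   ./ldb --db=/tmp/test_db --create_if_missing load < dump.txt
// where each line of dump.txt has the form "key1 ==> value1".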

// ----------------------------------------------------------------------------

namespace {

void DumpManifestFile(std::string file, bool verbose, bool hex, bool json) {
  Options options;
  EnvOptions sopt;
  std::string dbname("dummy");
  std::shared_ptr<Cache> tc(NewLRUCache(options.max_open_files - 10,
                                        options.table_cache_numshardbits));
  // Notice we are using the default options, not those processed by
  // SanitizeOptions(); if VersionSet::DumpManifest() depends on any option
  // set by SanitizeOptions(), we need to initialize it manually here.
  options.db_paths.emplace_back("dummy", 0);
  options.num_levels = 64;
  WriteController wc(options.delayed_write_rate);
  WriteBuffer wb(options.db_write_buffer_size);
  VersionSet versions(dbname, &options, sopt, tc.get(), &wb, &wc);
  Status s = versions.DumpManifest(options, file, verbose, hex, json);
  if (!s.ok()) {
    printf("Error in processing file %s %s\n", file.c_str(),
           s.ToString().c_str());
  }
}

}  // namespace

const string ManifestDumpCommand::ARG_VERBOSE = "verbose";
const string ManifestDumpCommand::ARG_JSON = "json";
const string ManifestDumpCommand::ARG_PATH = "path";

void ManifestDumpCommand::Help(string& ret) {
  ret.append(" ");
  ret.append(ManifestDumpCommand::Name());
  ret.append(" [--" + ARG_VERBOSE + "]");
  ret.append(" [--" + ARG_JSON + "]");
  ret.append(" [--" + ARG_PATH + "=<path_to_manifest_file>]");
  ret.append("\n");
}
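// A typical invocation, printing each version edit as a JSON object, might
// look like (path illustrative):
//   ./ldb manifest_dump --json --path=path/to/manifest/file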

ManifestDumpCommand::ManifestDumpCommand(const vector<string>& params,
      const map<string, string>& options, const vector<string>& flags) :
    LDBCommand(options, flags, false,
               BuildCmdLineOptions({ARG_VERBOSE, ARG_PATH, ARG_HEX, ARG_JSON})),
    verbose_(false),
    json_(false),
    path_("")
{
  verbose_ = IsFlagPresent(flags, ARG_VERBOSE);
  json_ = IsFlagPresent(flags, ARG_JSON);

  map<string, string>::const_iterator itr = options.find(ARG_PATH);
  if (itr != options.end()) {
    path_ = itr->second;
    if (path_.empty()) {
      exec_state_ = LDBCommandExecuteResult::Failed("--path: missing pathname");
    }
  }
}
|
|
|
|
|
|
|
|
void ManifestDumpCommand::DoCommand() {
|
|
|
|
|
|
|
|
std::string manifestfile;
|
|
|
|
|
|
|
|
if (!path_.empty()) {
|
|
|
|
manifestfile = path_;
|
|
|
|
} else {
|
|
|
|
bool found = false;
|
|
|
|
// We need to find the manifest file by searching the directory
|
|
|
|
// containing the db for files of the form MANIFEST_[0-9]+
|
2015-07-02 01:13:49 +02:00
|
|
|
|
|
|
|
auto CloseDir = [](DIR* p) { closedir(p); };
|
2015-07-13 21:11:05 +02:00
|
|
|
std::unique_ptr<DIR, decltype(CloseDir)> d(opendir(db_path_.c_str()),
|
|
|
|
CloseDir);
|
2015-07-02 01:13:49 +02:00
|
|
|
|
2013-03-22 17:17:30 +01:00
|
|
|
if (d == nullptr) {
|
rocksdb: Replace ASSERT* with EXPECT* in functions that do not return a void value
Summary:
gtest does not use exceptions to fail a unit test by design, and `ASSERT*`s are implemented using `return`. As a consequence we cannot use `ASSERT*` in a function that does not return a `void` value ([[ https://code.google.com/p/googletest/wiki/AdvancedGuide#Assertion_Placement | 1]]), and we have to fix our existing code. This diff does this in a generic way, with no manual changes.
In order to detect all existing `ASSERT*` that are used in functions that do not return a void value, I changed the code to generate compile errors for such cases.
In `util/testharness.h` I defined `EXPECT*` assertions the same way as `ASSERT*`, and redefined `ASSERT*` to return `void`. Then I executed:
```lang=bash
% USE_CLANG=1 make all -j55 -k 2> build.log
% perl -naF: -e 'print "-- -number=".$F[1]." ".$F[0]."\n" if /: error:/' \
build.log | xargs -L 1 perl -spi -e 's/ASSERT/EXPECT/g if $. == $number'
% make format
```
After that I reverted the change to `ASSERT*` in `util/testharness.h`, but preserved the introduced `EXPECT*`, which is the same as `ASSERT*`. This will be deleted once we switch to gtest.
This diff is independent and contains manual changes only in `util/testharness.h`.
Test Plan:
Make sure all tests are passing.
```lang=bash
% USE_CLANG=1 make check
```
Reviewers: igor, lgalanis, sdong, yufei.zhu, rven, meyering
Reviewed By: meyering
Subscribers: dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D33333
2015-03-17 04:52:32 +01:00
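As a minimal sketch of the constraint this diff works around (a hypothetical test helper, not part of ldb_cmd.cc): gtest's `ASSERT_*` expands to a bare `return;` on failure, so it only compiles inside `void` functions, while `EXPECT_*` records the failure and keeps executing.
```lang=cpp
#include <gtest/gtest.h>
#include <string>

// Hypothetical value-returning test helper. ASSERT_* cannot be used here
// because its failure path is a bare "return;", which does not compile in a
// function that must return std::string; EXPECT_* works in any function.
static std::string BuildKey(int i) {
  EXPECT_GE(i, 0);      // records a failure but keeps executing
  // ASSERT_GE(i, 0);   // compile error: return-statement with no value
  return "key" + std::to_string(i);
}

TEST(AssertPlacementSketch, ValueReturningHelper) {
  ASSERT_EQ("key7", BuildKey(7));  // ASSERT_* is fine in the void TEST body
}
```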
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed(db_path_ + " is not a directory");
|
2013-03-22 17:17:30 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
struct dirent* entry;
|
2015-07-02 01:13:49 +02:00
|
|
|
while ((entry = readdir(d.get())) != nullptr) {
|
2013-03-22 17:17:30 +01:00
|
|
|
unsigned int match;
|
2015-09-08 23:23:42 +02:00
|
|
|
uint64_t num;
|
2015-09-09 00:46:16 +02:00
|
|
|
if (sscanf(entry->d_name, "MANIFEST-%" PRIu64 "%n", &num, &match) &&
|
2015-09-08 23:23:42 +02:00
|
|
|
match == strlen(entry->d_name)) {
|
2013-03-22 17:17:30 +01:00
|
|
|
if (!found) {
|
|
|
|
manifestfile = db_path_ + "/" + std::string(entry->d_name);
|
|
|
|
found = true;
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
2015-03-17 04:52:32 +01:00
|
|
|
"Multiple MANIFEST files found; use --path to select one");
|
2013-03-22 17:17:30 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (verbose_) {
|
|
|
|
printf("Processing Manifest file %s\n", manifestfile.c_str());
|
|
|
|
}
|
|
|
|
|
2015-07-17 19:07:40 +02:00
|
|
|
DumpManifestFile(manifestfile, verbose_, is_key_hex_, json_);
|
|
|
|
|
2013-03-22 17:17:30 +01:00
|
|
|
if (verbose_) {
|
|
|
|
printf("Processing Manifest file %s done\n", manifestfile.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
2014-02-28 01:18:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void ListColumnFamiliesCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(ListColumnFamiliesCommand::Name());
|
|
|
|
ret.append(" full_path_to_db_directory ");
|
|
|
|
ret.append("\n");
|
2014-02-28 01:18:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
ListColumnFamiliesCommand::ListColumnFamiliesCommand(
|
2014-11-01 03:22:49 +01:00
|
|
|
const vector<string>& params, const map<string, string>& options,
|
|
|
|
const vector<string>& flags)
|
2014-02-28 01:18:23 +01:00
|
|
|
: LDBCommand(options, flags, false, {}) {
|
|
|
|
|
|
|
|
if (params.size() != 1) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
2014-02-28 01:18:23 +01:00
|
|
|
"dbname must be specified for the list_column_families command");
|
|
|
|
} else {
|
|
|
|
dbname_ = params[0];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void ListColumnFamiliesCommand::DoCommand() {
|
2014-11-01 03:22:49 +01:00
|
|
|
vector<string> column_families;
|
2014-02-28 01:18:23 +01:00
|
|
|
Status s = DB::ListColumnFamilies(DBOptions(), dbname_, &column_families);
|
|
|
|
if (!s.ok()) {
|
|
|
|
printf("Error in processing db %s %s\n", dbname_.c_str(),
|
|
|
|
s.ToString().c_str());
|
|
|
|
} else {
|
|
|
|
printf("Column families in %s: \n{", dbname_.c_str());
|
|
|
|
bool first = true;
|
|
|
|
for (auto cf : column_families) {
|
|
|
|
if (!first) {
|
|
|
|
printf(", ");
|
|
|
|
}
|
|
|
|
first = false;
|
|
|
|
printf("%s", cf.c_str());
|
|
|
|
}
|
|
|
|
printf("}\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-01-23 00:46:32 +01:00
|
|
|
void CreateColumnFamilyCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(CreateColumnFamilyCommand::Name());
|
|
|
|
ret.append(" --db=<db_path> <new_column_family_name>");
|
|
|
|
ret.append("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
CreateColumnFamilyCommand::CreateColumnFamilyCommand(
|
|
|
|
const vector<string>& params, const map<string, string>& options,
|
|
|
|
const vector<string>& flags)
|
|
|
|
: LDBCommand(options, flags, true, {ARG_DB}) {
|
|
|
|
if (params.size() != 1) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"new column family name must be specified");
|
|
|
|
} else {
|
|
|
|
new_cf_name_ = params[0];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void CreateColumnFamilyCommand::DoCommand() {
|
|
|
|
ColumnFamilyHandle* new_cf_handle;
|
|
|
|
Status st = db_->CreateColumnFamily(options_, new_cf_name_, &new_cf_handle);
|
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Fail to create new column family: " + st.ToString());
|
|
|
|
}
|
|
|
|
delete new_cf_handle;
|
|
|
|
CloseDB();
|
|
|
|
}
|
|
|
|
|
2014-02-28 01:18:23 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-03-22 17:17:30 +01:00
|
|
|
|
2014-04-10 06:17:14 +02:00
|
|
|
namespace {
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
string ReadableTime(int unixtime) {
|
2013-06-19 04:57:54 +02:00
|
|
|
char time_buffer [80];
|
|
|
|
time_t rawtime = unixtime;
|
2015-09-08 23:23:42 +02:00
|
|
|
struct tm tInfo;
|
|
|
|
struct tm* timeinfo = localtime_r(&rawtime, &tInfo);
|
|
|
|
assert(timeinfo == &tInfo);
|
2013-06-19 04:57:54 +02:00
|
|
|
strftime(time_buffer, 80, "%c", timeinfo);
|
2014-11-01 03:22:49 +01:00
|
|
|
return string(time_buffer);
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// This function is only called in the sane case of >1 buckets in the time range.
|
|
|
|
// It is also called only when timekv falls between the provided ttl_start and ttl_end.
|
2014-11-01 03:22:49 +01:00
|
|
|
void IncBucketCounts(vector<uint64_t>& bucket_counts, int ttl_start,
|
2013-06-20 20:50:33 +02:00
|
|
|
int time_range, int bucket_size, int timekv, int num_buckets) {
|
|
|
|
assert(time_range > 0 && timekv >= ttl_start && bucket_size > 0 &&
|
|
|
|
timekv < (ttl_start + time_range) && num_buckets > 1);
|
|
|
|
int bucket = (timekv - ttl_start) / bucket_size;
|
2014-11-01 03:22:49 +01:00
|
|
|
bucket_counts[bucket]++;
|
2013-06-19 04:57:54 +02:00
|
|
|
}
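A worked illustration of the bucketing arithmetic, using made-up values rather than output from any real dump:
```lang=cpp
// Hypothetical parameters: --ttl_start=1000, --ttl_end=1100, --bucket=30.
// time_range = 100, so num_buckets = (100 + 30 - 1) / 30 = 4, and a key whose
// timestamp is 1065 lands in bucket (1065 - 1000) / 30 = 2.
std::vector<uint64_t> bucket_counts(4, 0);
IncBucketCounts(bucket_counts, /*ttl_start=*/1000, /*time_range=*/100,
                /*bucket_size=*/30, /*timekv=*/1065, /*num_buckets=*/4);
// bucket_counts is now {0, 0, 1, 0}; PrintBucketCounts would then report one
// key in the range [1060, 1090).
```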
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void PrintBucketCounts(const vector<uint64_t>& bucket_counts, int ttl_start,
|
|
|
|
int ttl_end, int bucket_size, int num_buckets) {
|
2013-06-19 04:57:54 +02:00
|
|
|
int time_point = ttl_start;
|
2014-11-01 03:22:49 +01:00
|
|
|
for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
|
|
|
|
fprintf(stdout, "Keys in range %s to %s : %lu\n",
|
2013-06-19 04:57:54 +02:00
|
|
|
ReadableTime(time_point).c_str(),
|
2013-11-13 06:02:03 +01:00
|
|
|
ReadableTime(time_point + bucket_size).c_str(),
|
2014-11-01 03:22:49 +01:00
|
|
|
(unsigned long)bucket_counts[i]);
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
fprintf(stdout, "Keys in range %s to %s : %lu\n",
|
2013-06-19 04:57:54 +02:00
|
|
|
ReadableTime(time_point).c_str(),
|
2013-11-13 06:02:03 +01:00
|
|
|
ReadableTime(ttl_end).c_str(),
|
2014-11-01 03:22:49 +01:00
|
|
|
(unsigned long)bucket_counts[num_buckets - 1]);
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
|
2014-04-10 06:17:14 +02:00
|
|
|
} // namespace
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
const string InternalDumpCommand::ARG_COUNT_ONLY = "count_only";
|
|
|
|
const string InternalDumpCommand::ARG_COUNT_DELIM = "count_delim";
|
|
|
|
const string InternalDumpCommand::ARG_STATS = "stats";
|
|
|
|
const string InternalDumpCommand::ARG_INPUT_KEY_HEX = "input_key_hex";
|
2013-06-21 01:02:36 +02:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
InternalDumpCommand::InternalDumpCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options,
|
2016-01-06 23:19:08 +01:00
|
|
|
const vector<string>& flags)
|
|
|
|
: LDBCommand(
|
|
|
|
options, flags, true,
|
|
|
|
BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
|
|
|
|
ARG_TO, ARG_MAX_KEYS, ARG_COUNT_ONLY,
|
|
|
|
ARG_COUNT_DELIM, ARG_STATS, ARG_INPUT_KEY_HEX})),
|
|
|
|
has_from_(false),
|
|
|
|
has_to_(false),
|
|
|
|
max_keys_(-1),
|
|
|
|
delim_("."),
|
|
|
|
count_only_(false),
|
|
|
|
count_delim_(false),
|
|
|
|
print_stats_(false),
|
|
|
|
is_input_key_hex_(false) {
|
2013-06-21 01:02:36 +02:00
|
|
|
has_from_ = ParseStringOption(options, ARG_FROM, &from_);
|
|
|
|
has_to_ = ParseStringOption(options, ARG_TO, &to_);
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
ParseIntOption(options, ARG_MAX_KEYS, max_keys_, exec_state_);
|
|
|
|
map<string, string>::const_iterator itr = options.find(ARG_COUNT_DELIM);
|
2013-11-01 21:59:14 +01:00
|
|
|
if (itr != options.end()) {
|
|
|
|
delim_ = itr->second;
|
|
|
|
count_delim_ = true;
|
2014-11-01 03:22:49 +01:00
|
|
|
// fprintf(stdout,"delim = %c\n",delim_[0]);
|
2013-11-01 21:59:14 +01:00
|
|
|
} else {
|
|
|
|
count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
|
2014-11-01 03:22:49 +01:00
|
|
|
delim_=".";
|
2013-11-01 21:59:14 +01:00
|
|
|
}
|
2013-06-21 01:02:36 +02:00
|
|
|
|
|
|
|
print_stats_ = IsFlagPresent(flags, ARG_STATS);
|
|
|
|
count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
|
2013-08-09 00:51:16 +02:00
|
|
|
is_input_key_hex_ = IsFlagPresent(flags, ARG_INPUT_KEY_HEX);
|
2013-06-21 01:02:36 +02:00
|
|
|
|
2013-08-09 00:51:16 +02:00
|
|
|
if (is_input_key_hex_) {
|
2013-06-21 01:02:36 +02:00
|
|
|
if (has_from_) {
|
|
|
|
from_ = HexToString(from_);
|
|
|
|
}
|
|
|
|
if (has_to_) {
|
|
|
|
to_ = HexToString(to_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void InternalDumpCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(InternalDumpCommand::Name());
|
|
|
|
ret.append(HelpRangeCmdArgs());
|
|
|
|
ret.append(" [--" + ARG_INPUT_KEY_HEX + "]");
|
|
|
|
ret.append(" [--" + ARG_MAX_KEYS + "=<N>]");
|
|
|
|
ret.append(" [--" + ARG_COUNT_ONLY + "]");
|
|
|
|
ret.append(" [--" + ARG_COUNT_DELIM + "=<char>]");
|
|
|
|
ret.append(" [--" + ARG_STATS + "]");
|
|
|
|
ret.append("\n");
|
2013-06-21 01:02:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
void InternalDumpCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
2016-01-23 00:46:32 +01:00
|
|
|
assert(GetExecuteState().IsFailed());
|
2013-06-21 01:02:36 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (print_stats_) {
|
2014-11-01 03:22:49 +01:00
|
|
|
string stats;
|
2016-01-23 00:46:32 +01:00
|
|
|
if (db_->GetProperty(GetCfHandle(), "rocksdb.stats", &stats)) {
|
2013-06-21 01:02:36 +02:00
|
|
|
fprintf(stdout, "%s\n", stats.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Cast as DBImpl to get internal iterator
|
|
|
|
DBImpl* idb = dynamic_cast<DBImpl*>(db_);
|
|
|
|
if (!idb) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed("DB is not DBImpl");
|
2013-06-21 01:02:36 +02:00
|
|
|
return;
|
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
string rtype1,rtype2,row,val;
|
2013-11-01 21:59:14 +01:00
|
|
|
rtype2 = "";
|
2014-11-01 03:22:49 +01:00
|
|
|
uint64_t c=0;
|
|
|
|
uint64_t s1=0,s2=0;
|
2013-06-21 01:02:36 +02:00
|
|
|
// Setup internal key iterator
|
2014-09-05 02:40:41 +02:00
|
|
|
Arena arena;
|
2015-10-13 02:22:37 +02:00
|
|
|
ScopedArenaIterator iter(idb->NewInternalIterator(&arena));
|
2013-06-21 01:02:36 +02:00
|
|
|
Status st = iter->status();
|
|
|
|
if (!st.ok()) {
|
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed("Iterator error:" + st.ToString());
|
2013-06-21 01:02:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if (has_from_) {
|
2015-04-24 03:08:37 +02:00
|
|
|
InternalKey ikey;
|
|
|
|
ikey.SetMaxPossibleForUserKey(from_);
|
2013-06-21 01:02:36 +02:00
|
|
|
iter->Seek(ikey.Encode());
|
|
|
|
} else {
|
|
|
|
iter->SeekToFirst();
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
long long count = 0;
|
2013-06-21 01:02:36 +02:00
|
|
|
for (; iter->Valid(); iter->Next()) {
|
|
|
|
ParsedInternalKey ikey;
|
|
|
|
if (!ParseInternalKey(iter->key(), &ikey)) {
|
|
|
|
fprintf(stderr, "Internal Key [%s] parse error!\n",
|
|
|
|
iter->key().ToString(true /* in hex*/).data());
|
|
|
|
// TODO: add error counter
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
// If end marker was specified, we stop before it
|
|
|
|
if (has_to_ && options_.comparator->Compare(ikey.user_key, to_) >= 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
++count;
|
2013-11-01 21:59:14 +01:00
|
|
|
int k;
|
|
|
|
if (count_delim_) {
|
|
|
|
rtype1 = "";
|
2014-11-01 03:22:49 +01:00
|
|
|
s1=0;
|
2013-11-01 21:59:14 +01:00
|
|
|
row = iter->key().ToString();
|
|
|
|
val = iter->value().ToString();
|
2014-11-01 03:22:49 +01:00
|
|
|
for(k=0;row[k]!='\x01' && row[k]!='\0';k++)
|
2013-11-01 21:59:14 +01:00
|
|
|
s1++;
|
2014-11-01 03:22:49 +01:00
|
|
|
for(k=0;val[k]!='\x01' && val[k]!='\0';k++)
|
2013-11-01 21:59:14 +01:00
|
|
|
s1++;
|
2014-11-01 03:22:49 +01:00
|
|
|
for(int j=0;row[j]!=delim_[0] && row[j]!='\0' && row[j]!='\x01';j++)
|
|
|
|
rtype1+=row[j];
|
|
|
|
if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
|
|
|
|
fprintf(stdout,"%s => count:%lld\tsize:%lld\n",rtype2.c_str(),
|
|
|
|
(long long)c,(long long)s2);
|
|
|
|
c=1;
|
|
|
|
s2=s1;
|
2013-11-01 21:59:14 +01:00
|
|
|
rtype2 = rtype1;
|
|
|
|
} else {
|
|
|
|
c++;
|
2014-11-01 03:22:49 +01:00
|
|
|
s2+=s1;
|
|
|
|
rtype2=rtype1;
|
2013-11-01 21:59:14 +01:00
|
|
|
}
|
|
|
|
}
|
2013-06-21 01:02:36 +02:00
|
|
|
|
2013-11-01 21:59:14 +01:00
|
|
|
if (!count_only_ && !count_delim_) {
|
2014-11-01 03:22:49 +01:00
|
|
|
string key = ikey.DebugString(is_key_hex_);
|
|
|
|
string value = iter->value().ToString(is_value_hex_);
|
|
|
|
std::cout << key << " => " << value << "\n";
|
2013-06-21 01:02:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// Terminate if the maximum number of keys has been dumped
|
2014-11-01 03:22:49 +01:00
|
|
|
if (max_keys_ > 0 && count >= max_keys_) break;
|
2013-06-21 01:02:36 +02:00
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
if(count_delim_) {
|
|
|
|
fprintf(stdout,"%s => count:%lld\tsize:%lld\n", rtype2.c_str(),
|
|
|
|
(long long)c,(long long)s2);
|
2013-11-01 21:59:14 +01:00
|
|
|
} else
|
2014-11-01 03:22:49 +01:00
|
|
|
fprintf(stdout, "Internal keys in range: %lld\n", (long long) count);
|
2013-06-21 01:02:36 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
const string DBDumperCommand::ARG_COUNT_ONLY = "count_only";
|
|
|
|
const string DBDumperCommand::ARG_COUNT_DELIM = "count_delim";
|
|
|
|
const string DBDumperCommand::ARG_STATS = "stats";
|
|
|
|
const string DBDumperCommand::ARG_TTL_BUCKET = "bucket";
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
DBDumperCommand::DBDumperCommand(const vector<string>& params,
|
2016-01-06 23:19:08 +01:00
|
|
|
const map<string, string>& options,
|
|
|
|
const vector<string>& flags)
|
|
|
|
: LDBCommand(options, flags, true,
|
|
|
|
BuildCmdLineOptions(
|
|
|
|
{ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX, ARG_FROM,
|
|
|
|
ARG_TO, ARG_MAX_KEYS, ARG_COUNT_ONLY, ARG_COUNT_DELIM,
|
|
|
|
ARG_STATS, ARG_TTL_START, ARG_TTL_END, ARG_TTL_BUCKET,
|
|
|
|
ARG_TIMESTAMP, ARG_PATH})),
|
|
|
|
null_from_(true),
|
|
|
|
null_to_(true),
|
|
|
|
max_keys_(-1),
|
|
|
|
count_only_(false),
|
|
|
|
count_delim_(false),
|
|
|
|
print_stats_(false) {
|
2014-11-01 03:22:49 +01:00
|
|
|
map<string, string>::const_iterator itr = options.find(ARG_FROM);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (itr != options.end()) {
|
|
|
|
null_from_ = false;
|
|
|
|
from_ = itr->second;
|
|
|
|
}
|
|
|
|
|
|
|
|
itr = options.find(ARG_TO);
|
|
|
|
if (itr != options.end()) {
|
|
|
|
null_to_ = false;
|
|
|
|
to_ = itr->second;
|
|
|
|
}
|
|
|
|
|
|
|
|
itr = options.find(ARG_MAX_KEYS);
|
|
|
|
if (itr != options.end()) {
|
|
|
|
try {
|
2015-04-24 04:17:57 +02:00
|
|
|
#if defined(CYGWIN)
|
|
|
|
max_keys_ = strtol(itr->second.c_str(), 0, 10);
|
|
|
|
#else
|
2013-03-20 01:28:30 +01:00
|
|
|
max_keys_ = stoi(itr->second);
|
2015-04-24 04:17:57 +02:00
|
|
|
#endif
|
2014-11-01 03:22:49 +01:00
|
|
|
} catch(const invalid_argument&) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
|
2015-03-17 04:52:32 +01:00
|
|
|
" has an invalid value");
|
2014-11-01 03:22:49 +01:00
|
|
|
} catch(const out_of_range&) {
|
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
ARG_MAX_KEYS + " has a value out-of-range");
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
}
|
2013-11-01 21:59:14 +01:00
|
|
|
itr = options.find(ARG_COUNT_DELIM);
|
|
|
|
if (itr != options.end()) {
|
|
|
|
delim_ = itr->second;
|
|
|
|
count_delim_ = true;
|
|
|
|
} else {
|
|
|
|
count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM);
|
2014-11-01 03:22:49 +01:00
|
|
|
delim_=".";
|
2013-11-01 21:59:14 +01:00
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
print_stats_ = IsFlagPresent(flags, ARG_STATS);
|
|
|
|
count_only_ = IsFlagPresent(flags, ARG_COUNT_ONLY);
|
|
|
|
|
|
|
|
if (is_key_hex_) {
|
2012-10-31 19:47:18 +01:00
|
|
|
if (!null_from_) {
|
|
|
|
from_ = HexToString(from_);
|
|
|
|
}
|
|
|
|
if (!null_to_) {
|
|
|
|
to_ = HexToString(to_);
|
|
|
|
}
|
|
|
|
}
|
2016-01-06 23:19:08 +01:00
|
|
|
|
|
|
|
itr = options.find(ARG_PATH);
|
|
|
|
if (itr != options.end()) {
|
|
|
|
path_ = itr->second;
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void DBDumperCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(DBDumperCommand::Name());
|
|
|
|
ret.append(HelpRangeCmdArgs());
|
|
|
|
ret.append(" [--" + ARG_TTL + "]");
|
|
|
|
ret.append(" [--" + ARG_MAX_KEYS + "=<N>]");
|
|
|
|
ret.append(" [--" + ARG_TIMESTAMP + "]");
|
|
|
|
ret.append(" [--" + ARG_COUNT_ONLY + "]");
|
|
|
|
ret.append(" [--" + ARG_COUNT_DELIM + "=<char>]");
|
|
|
|
ret.append(" [--" + ARG_STATS + "]");
|
|
|
|
ret.append(" [--" + ARG_TTL_BUCKET + "=<N>]");
|
|
|
|
ret.append(" [--" + ARG_TTL_START + "=<N>:- is inclusive]");
|
|
|
|
ret.append(" [--" + ARG_TTL_END + "=<N>:- is exclusive]");
|
2016-01-06 23:19:08 +01:00
|
|
|
ret.append(" [--" + ARG_PATH + "=<path_to_a_file>]");
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append("\n");
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
2016-01-06 23:19:08 +01:00
|
|
|
/**
|
|
|
|
* Handles two separate cases:
|
|
|
|
*
|
|
|
|
* 1) --db is specified - just dump the database.
|
|
|
|
*
|
|
|
|
* 2) --path is specified - determine based on file extension what dumping
|
|
|
|
* function to call. Please note that we intentionally use the extension
|
|
|
|
* and avoid probing the file contents under the assumption that renaming
|
|
|
|
* the files is not a supported scenario.
|
|
|
|
*
|
|
|
|
*/
|
2013-01-11 20:09:23 +01:00
|
|
|
void DBDumperCommand::DoCommand() {
|
2012-11-21 22:26:32 +01:00
|
|
|
if (!db_) {
|
2016-01-06 23:19:08 +01:00
|
|
|
assert(!path_.empty());
|
|
|
|
string fileName = GetFileNameFromPath(path_);
|
|
|
|
uint64_t number;
|
|
|
|
FileType type;
|
|
|
|
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Succeed("");
|
|
|
|
|
|
|
|
if (!ParseFileName(fileName, &number, &type)) {
|
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed("Can't parse file type: " + path_);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (type) {
|
|
|
|
case kLogFile:
|
|
|
|
DumpWalFile(path_, /* print_header_ */ true, /* print_values_ */ true,
|
|
|
|
&exec_state_);
|
|
|
|
break;
|
|
|
|
case kTableFile:
|
|
|
|
DumpSstFile(path_, is_key_hex_, /* show_properties */ true);
|
|
|
|
break;
|
|
|
|
case kDescriptorFile:
|
|
|
|
DumpManifestFile(path_, /* verbose_ */ false, is_key_hex_,
|
|
|
|
/* json_ */ false);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"File type not supported: " + path_);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
} else {
|
|
|
|
DoDumpCommand();
|
2012-11-21 22:26:32 +01:00
|
|
|
}
|
2016-01-06 23:19:08 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void DBDumperCommand::DoDumpCommand() {
|
|
|
|
assert(nullptr != db_);
|
|
|
|
assert(path_.empty());
|
|
|
|
|
2012-10-31 19:47:18 +01:00
|
|
|
// Parse command line args
|
|
|
|
uint64_t count = 0;
|
|
|
|
if (print_stats_) {
|
2014-11-01 03:22:49 +01:00
|
|
|
string stats;
|
2013-10-05 07:32:05 +02:00
|
|
|
if (db_->GetProperty("rocksdb.stats", &stats)) {
|
2012-10-31 19:47:18 +01:00
|
|
|
fprintf(stdout, "%s\n", stats.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Setup key iterator
|
2016-01-23 00:46:32 +01:00
|
|
|
Iterator* iter = db_->NewIterator(ReadOptions(), GetCfHandle());
|
2013-04-12 05:21:49 +02:00
|
|
|
Status st = iter->status();
|
2012-10-31 19:47:18 +01:00
|
|
|
if (!st.ok()) {
|
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed("Iterator error." + st.ToString());
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
if (!null_from_) {
|
|
|
|
iter->Seek(from_);
|
|
|
|
} else {
|
|
|
|
iter->SeekToFirst();
|
|
|
|
}
|
|
|
|
|
|
|
|
int max_keys = max_keys_;
|
2013-06-19 04:57:54 +02:00
|
|
|
int ttl_start;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (!ParseIntOption(option_map_, ARG_TTL_START, ttl_start, exec_state_)) {
|
2014-04-29 05:34:20 +02:00
|
|
|
ttl_start = DBWithTTLImpl::kMinTimestamp; // TTL introduction time
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
int ttl_end;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (!ParseIntOption(option_map_, ARG_TTL_END, ttl_end, exec_state_)) {
|
2014-04-29 05:34:20 +02:00
|
|
|
ttl_end = DBWithTTLImpl::kMaxTimestamp; // Max time allowed by TTL feature
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
if (ttl_end < ttl_start) {
|
|
|
|
fprintf(stderr, "Error: End time can't be less than start time\n");
|
|
|
|
delete iter;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
int time_range = ttl_end - ttl_start;
|
|
|
|
int bucket_size;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (!ParseIntOption(option_map_, ARG_TTL_BUCKET, bucket_size, exec_state_) ||
|
2013-06-19 04:57:54 +02:00
|
|
|
bucket_size <= 0) {
|
|
|
|
bucket_size = time_range; // Will have just 1 bucket by default
|
|
|
|
}
|
2013-11-01 21:59:14 +01:00
|
|
|
// creating variables for row count of each type
|
2014-11-01 03:22:49 +01:00
|
|
|
string rtype1,rtype2,row,val;
|
2013-11-01 21:59:14 +01:00
|
|
|
rtype2 = "";
|
2014-11-01 03:22:49 +01:00
|
|
|
uint64_t c=0;
|
|
|
|
uint64_t s1=0,s2=0;
|
2013-11-01 21:59:14 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
// At this point, bucket_size=0 => time_range=0
|
2014-11-11 22:47:22 +01:00
|
|
|
int num_buckets = (bucket_size >= time_range)
|
|
|
|
? 1
|
|
|
|
: ((time_range + bucket_size - 1) / bucket_size);
|
2014-11-01 03:22:49 +01:00
|
|
|
vector<uint64_t> bucket_counts(num_buckets, 0);
|
2013-11-01 21:59:14 +01:00
|
|
|
if (is_db_ttl_ && !count_only_ && timestamp_ && !count_delim_) {
|
2013-06-19 04:57:54 +02:00
|
|
|
fprintf(stdout, "Dumping key-values from %s to %s\n",
|
|
|
|
ReadableTime(ttl_start).c_str(), ReadableTime(ttl_end).c_str());
|
|
|
|
}
|
|
|
|
|
2012-10-31 19:47:18 +01:00
|
|
|
for (; iter->Valid(); iter->Next()) {
|
2013-06-19 04:57:54 +02:00
|
|
|
int rawtime = 0;
|
2012-10-31 19:47:18 +01:00
|
|
|
// If end marker was specified, we stop before it
|
|
|
|
if (!null_to_ && (iter->key().ToString() >= to_))
|
|
|
|
break;
|
|
|
|
// Terminate if the maximum number of keys has been dumped
|
|
|
|
if (max_keys == 0)
|
|
|
|
break;
|
2013-06-19 04:57:54 +02:00
|
|
|
if (is_db_ttl_) {
|
2013-08-06 02:55:44 +02:00
|
|
|
TtlIterator* it_ttl = dynamic_cast<TtlIterator*>(iter);
|
|
|
|
assert(it_ttl);
|
2013-06-20 20:50:33 +02:00
|
|
|
rawtime = it_ttl->timestamp();
|
|
|
|
if (rawtime < ttl_start || rawtime >= ttl_end) {
|
2013-06-19 04:57:54 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
if (max_keys > 0) {
|
|
|
|
--max_keys;
|
|
|
|
}
|
2013-06-19 04:57:54 +02:00
|
|
|
if (is_db_ttl_ && num_buckets > 1) {
|
2014-11-01 03:22:49 +01:00
|
|
|
IncBucketCounts(bucket_counts, ttl_start, time_range, bucket_size,
|
2013-06-19 04:57:54 +02:00
|
|
|
rawtime, num_buckets);
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
++count;
|
2013-11-01 21:59:14 +01:00
|
|
|
if (count_delim_) {
|
|
|
|
rtype1 = "";
|
|
|
|
row = iter->key().ToString();
|
|
|
|
val = iter->value().ToString();
|
|
|
|
s1 = row.size()+val.size();
|
2014-11-01 03:22:49 +01:00
|
|
|
for(int j=0;row[j]!=delim_[0] && row[j]!='\0';j++)
|
|
|
|
rtype1+=row[j];
|
|
|
|
if(rtype2.compare("") && rtype2.compare(rtype1)!=0) {
|
|
|
|
fprintf(stdout,"%s => count:%lld\tsize:%lld\n",rtype2.c_str(),
|
|
|
|
(long long )c,(long long)s2);
|
|
|
|
c=1;
|
|
|
|
s2=s1;
|
2013-11-01 21:59:14 +01:00
|
|
|
rtype2 = rtype1;
|
|
|
|
} else {
|
2014-11-01 03:22:49 +01:00
|
|
|
c++;
|
|
|
|
s2+=s1;
|
|
|
|
rtype2=rtype1;
|
2013-11-01 21:59:14 +01:00
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
|
2013-11-01 21:59:14 +01:00
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
|
|
|
|
|
2013-11-01 21:59:14 +01:00
|
|
|
if (!count_only_ && !count_delim_) {
|
2013-06-19 04:57:54 +02:00
|
|
|
if (is_db_ttl_ && timestamp_) {
|
|
|
|
fprintf(stdout, "%s ", ReadableTime(rawtime).c_str());
|
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
string str = PrintKeyValue(iter->key().ToString(),
|
2013-06-20 20:50:33 +02:00
|
|
|
iter->value().ToString(), is_key_hex_,
|
|
|
|
is_value_hex_);
|
2012-12-27 00:15:54 +01:00
|
|
|
fprintf(stdout, "%s\n", str.c_str());
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
}
|
2013-11-01 21:59:14 +01:00
|
|
|
|
2013-06-19 04:57:54 +02:00
|
|
|
if (num_buckets > 1 && is_db_ttl_) {
|
2013-06-20 20:50:33 +02:00
|
|
|
PrintBucketCounts(bucket_counts, ttl_start, ttl_end, bucket_size,
|
2013-06-19 04:57:54 +02:00
|
|
|
num_buckets);
|
2014-11-01 03:22:49 +01:00
|
|
|
} else if(count_delim_) {
|
|
|
|
fprintf(stdout,"%s => count:%lld\tsize:%lld\n",rtype2.c_str(),
|
|
|
|
(long long )c,(long long)s2);
|
2013-06-19 04:57:54 +02:00
|
|
|
} else {
|
2014-11-01 03:22:49 +01:00
|
|
|
fprintf(stdout, "Keys in range: %lld\n", (long long) count);
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
// Clean up
|
|
|
|
delete iter;
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
const string ReduceDBLevelsCommand::ARG_NEW_LEVELS = "new_levels";
|
|
|
|
const string ReduceDBLevelsCommand::ARG_PRINT_OLD_LEVELS = "print_old_levels";
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
ReduceDBLevelsCommand::ReduceDBLevelsCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options, const vector<string>& flags) :
|
2013-01-11 20:09:23 +01:00
|
|
|
LDBCommand(options, flags, false,
|
|
|
|
BuildCmdLineOptions({ARG_NEW_LEVELS, ARG_PRINT_OLD_LEVELS})),
|
2015-04-17 04:31:34 +02:00
|
|
|
old_levels_(1 << 7),
|
2013-01-11 20:09:23 +01:00
|
|
|
new_levels_(-1),
|
|
|
|
print_old_levels_(false) {
|
2012-12-27 00:15:54 +01:00
|
|
|
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
ParseIntOption(option_map_, ARG_NEW_LEVELS, new_levels_, exec_state_);
|
2013-01-11 20:09:23 +01:00
|
|
|
print_old_levels_ = IsFlagPresent(flags, ARG_PRINT_OLD_LEVELS);
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
if(new_levels_ <= 0) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
2015-03-17 04:52:32 +01:00
|
|
|
" Use --" + ARG_NEW_LEVELS + " to specify a new level number\n");
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
vector<string> ReduceDBLevelsCommand::PrepareArgs(const string& db_path,
|
2013-01-11 20:09:23 +01:00
|
|
|
int new_levels, bool print_old_level) {
|
2014-11-01 03:22:49 +01:00
|
|
|
vector<string> ret;
|
2013-01-11 20:09:23 +01:00
|
|
|
ret.push_back("reduce_levels");
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.push_back("--" + ARG_DB + "=" + db_path);
|
2015-04-24 04:17:57 +02:00
|
|
|
ret.push_back("--" + ARG_NEW_LEVELS + "=" + rocksdb::ToString(new_levels));
|
2014-11-01 03:22:49 +01:00
|
|
|
if(print_old_level) {
|
2013-01-11 20:09:23 +01:00
|
|
|
ret.push_back("--" + ARG_PRINT_OLD_LEVELS);
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void ReduceDBLevelsCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(ReduceDBLevelsCommand::Name());
|
|
|
|
ret.append(" --" + ARG_NEW_LEVELS + "=<New number of levels>");
|
|
|
|
ret.append(" [--" + ARG_PRINT_OLD_LEVELS + "]");
|
|
|
|
ret.append("\n");
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
Options ReduceDBLevelsCommand::PrepareOptionsForOpenDB() {
|
|
|
|
Options opt = LDBCommand::PrepareOptionsForOpenDB();
|
2012-11-09 03:45:19 +01:00
|
|
|
opt.num_levels = old_levels_;
|
2013-05-23 19:56:36 +02:00
|
|
|
opt.max_bytes_for_level_multiplier_additional.resize(opt.num_levels, 1);
|
2012-11-09 03:45:19 +01:00
|
|
|
// Disable size compaction
|
2014-02-03 22:48:30 +01:00
|
|
|
opt.max_bytes_for_level_base = 1ULL << 50;
|
2012-11-09 03:45:19 +01:00
|
|
|
opt.max_bytes_for_level_multiplier = 1;
|
2012-10-31 19:47:18 +01:00
|
|
|
return opt;
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt,
|
2013-01-11 20:09:23 +01:00
|
|
|
int* levels) {
|
2013-06-08 00:35:17 +02:00
|
|
|
EnvOptions soptions;
|
2014-02-05 18:07:55 +01:00
|
|
|
std::shared_ptr<Cache> tc(
|
2015-03-17 23:04:37 +01:00
|
|
|
NewLRUCache(opt.max_open_files - 10, opt.table_cache_numshardbits));
|
2013-03-08 21:29:19 +01:00
|
|
|
const InternalKeyComparator cmp(opt.comparator);
|
2015-05-16 00:52:51 +02:00
|
|
|
WriteController wc(opt.delayed_write_rate);
|
2014-12-02 21:09:20 +01:00
|
|
|
WriteBuffer wb(opt.db_write_buffer_size);
|
|
|
|
VersionSet versions(db_path_, &opt, soptions, tc.get(), &wb, &wc);
|
2014-01-22 20:44:53 +01:00
|
|
|
std::vector<ColumnFamilyDescriptor> dummy;
|
2014-04-09 18:56:17 +02:00
|
|
|
ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
|
2014-02-01 04:44:48 +01:00
|
|
|
ColumnFamilyOptions(opt));
|
|
|
|
dummy.push_back(dummy_descriptor);
|
2012-11-09 03:45:19 +01:00
|
|
|
// We rely on VersionSet::Recover to tell us the internal data structures
|
|
|
|
// in the db, and Recover() should never make any change
|
|
|
|
// (like LogAndApply) to the manifest file.
|
2014-01-22 20:44:53 +01:00
|
|
|
Status st = versions.Recover(dummy);
|
2012-11-09 03:45:19 +01:00
|
|
|
if (!st.ok()) {
|
|
|
|
return st;
|
|
|
|
}
|
|
|
|
int max = -1;
|
2014-01-27 23:33:50 +01:00
|
|
|
auto default_cfd = versions.GetColumnFamilySet()->GetDefault();
|
2014-02-03 21:08:33 +01:00
|
|
|
for (int i = 0; i < default_cfd->NumberLevels(); i++) {
|
2014-10-31 16:48:19 +01:00
|
|
|
if (default_cfd->current()->storage_info()->NumLevelFiles(i)) {
|
2012-11-09 03:45:19 +01:00
|
|
|
max = i;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
*levels = max + 1;
|
|
|
|
return st;
|
|
|
|
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
void ReduceDBLevelsCommand::DoCommand() {
|
2012-10-31 19:47:18 +01:00
|
|
|
if (new_levels_ <= 1) {
|
2015-03-17 04:52:32 +01:00
|
|
|
exec_state_ =
|
|
|
|
LDBCommandExecuteResult::Failed("Invalid number of levels.\n");
|
2012-10-31 19:47:18 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
Status st;
|
|
|
|
Options opt = PrepareOptionsForOpenDB();
|
2012-11-09 03:45:19 +01:00
|
|
|
int old_level_num = -1;
|
|
|
|
st = GetOldNumOfLevels(opt, &old_level_num);
|
|
|
|
if (!st.ok()) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2012-11-09 03:45:19 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2012-10-31 19:47:18 +01:00
|
|
|
if (print_old_levels_) {
|
2014-11-01 03:22:49 +01:00
|
|
|
fprintf(stdout, "The old number of levels in use is %d\n", old_level_num);
|
2012-11-09 03:45:19 +01:00
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
|
2012-11-09 03:45:19 +01:00
|
|
|
if (old_level_num <= new_levels_) {
|
|
|
|
return;
|
2012-10-31 19:47:18 +01:00
|
|
|
}
|
|
|
|
|
2012-11-09 03:45:19 +01:00
|
|
|
old_levels_ = old_level_num;
|
|
|
|
|
|
|
|
OpenDB();
|
2012-11-21 01:14:04 +01:00
|
|
|
if (!db_) {
|
|
|
|
return;
|
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
// Compact the whole DB to put all files to the highest level.
|
2012-11-09 03:45:19 +01:00
|
|
|
fprintf(stdout, "Compacting the db...\n");
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);
|
2012-10-31 19:47:18 +01:00
|
|
|
CloseDB();
|
|
|
|
|
2013-06-08 00:35:17 +02:00
|
|
|
EnvOptions soptions;
|
2014-11-01 03:22:49 +01:00
|
|
|
st = VersionSet::ReduceNumberOfLevels(db_path_, &opt, soptions, new_levels_);
|
2012-10-31 19:47:18 +01:00
|
|
|
if (!st.ok()) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2012-10-31 19:47:18 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
const string ChangeCompactionStyleCommand::ARG_OLD_COMPACTION_STYLE =
|
2013-09-04 22:13:08 +02:00
|
|
|
"old_compaction_style";
|
2014-11-01 03:22:49 +01:00
|
|
|
const string ChangeCompactionStyleCommand::ARG_NEW_COMPACTION_STYLE =
|
2013-09-04 22:13:08 +02:00
|
|
|
"new_compaction_style";
|
|
|
|
|
|
|
|
ChangeCompactionStyleCommand::ChangeCompactionStyleCommand(
|
2014-11-01 03:22:49 +01:00
|
|
|
const vector<string>& params, const map<string, string>& options,
|
|
|
|
const vector<string>& flags) :
|
2013-09-04 22:13:08 +02:00
|
|
|
LDBCommand(options, flags, false,
|
|
|
|
BuildCmdLineOptions({ARG_OLD_COMPACTION_STYLE,
|
|
|
|
ARG_NEW_COMPACTION_STYLE})),
|
|
|
|
old_compaction_style_(-1),
|
|
|
|
new_compaction_style_(-1) {
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
ParseIntOption(option_map_, ARG_OLD_COMPACTION_STYLE, old_compaction_style_,
|
|
|
|
exec_state_);
|
2013-09-04 22:13:08 +02:00
|
|
|
if (old_compaction_style_ != kCompactionStyleLevel &&
|
|
|
|
old_compaction_style_ != kCompactionStyleUniversal) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
2015-03-17 04:52:32 +01:00
|
|
|
"Use --" + ARG_OLD_COMPACTION_STYLE + " to specify old compaction " +
|
|
|
|
"style. Check ldb help for proper compaction style value.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
ParseIntOption(option_map_, ARG_NEW_COMPACTION_STYLE, new_compaction_style_,
|
|
|
|
exec_state_);
|
2013-09-04 22:13:08 +02:00
|
|
|
if (new_compaction_style_ != kCompactionStyleLevel &&
|
|
|
|
new_compaction_style_ != kCompactionStyleUniversal) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
2015-03-17 04:52:32 +01:00
|
|
|
"Use --" + ARG_NEW_COMPACTION_STYLE + " to specify new compaction " +
|
|
|
|
"style. Check ldb help for proper compaction style value.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (new_compaction_style_ == old_compaction_style_) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
2015-03-17 04:52:32 +01:00
|
|
|
"Old compaction style is the same as new compaction style. "
|
|
|
|
"Nothing to do.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (old_compaction_style_ == kCompactionStyleUniversal &&
|
|
|
|
new_compaction_style_ == kCompactionStyleLevel) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Convert from universal compaction to level compaction. "
|
|
|
|
"Nothing to do.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void ChangeCompactionStyleCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(ChangeCompactionStyleCommand::Name());
|
|
|
|
ret.append(" --" + ARG_OLD_COMPACTION_STYLE + "=<Old compaction style: 0 " +
|
|
|
|
"for level compaction, 1 for universal compaction>");
|
|
|
|
ret.append(" --" + ARG_NEW_COMPACTION_STYLE + "=<New compaction style: 0 " +
|
|
|
|
"for level compaction, 1 for universal compaction>");
|
|
|
|
ret.append("\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
Options ChangeCompactionStyleCommand::PrepareOptionsForOpenDB() {
|
|
|
|
Options opt = LDBCommand::PrepareOptionsForOpenDB();
|
|
|
|
|
|
|
|
if (old_compaction_style_ == kCompactionStyleLevel &&
|
|
|
|
new_compaction_style_ == kCompactionStyleUniversal) {
|
|
|
|
// In order to convert from level compaction to universal compaction, we
|
|
|
|
// need to compact all data into a single file and move it to level 0.
|
|
|
|
opt.disable_auto_compactions = true;
|
|
|
|
opt.target_file_size_base = INT_MAX;
|
|
|
|
opt.target_file_size_multiplier = 1;
|
|
|
|
opt.max_bytes_for_level_base = INT_MAX;
|
|
|
|
opt.max_bytes_for_level_multiplier = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
return opt;
|
|
|
|
}
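// Note: with auto compactions disabled and the file-size / level-size limits
// raised to INT_MAX above, the manual CompactRange() issued in DoCommand()
// below should produce a single output file, which is then moved to level 0.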
|
|
|
|
|
|
|
|
void ChangeCompactionStyleCommand::DoCommand() {
|
|
|
|
// print db stats before we have made any change
|
|
|
|
std::string property;
|
|
|
|
std::string files_per_level;
|
2016-01-23 00:46:32 +01:00
|
|
|
for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) {
|
|
|
|
db_->GetProperty(GetCfHandle(),
|
|
|
|
"rocksdb.num-files-at-level" + NumberToString(i),
|
2013-09-04 22:13:08 +02:00
|
|
|
&property);
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
// format print string
|
2013-09-04 22:13:08 +02:00
|
|
|
char buf[100];
|
2014-11-01 03:22:49 +01:00
|
|
|
snprintf(buf, sizeof(buf), "%s%s", (i ? "," : ""), property.c_str());
|
2013-09-04 22:13:08 +02:00
|
|
|
files_per_level += buf;
|
|
|
|
}
|
|
|
|
fprintf(stdout, "files per level before compaction: %s\n",
|
|
|
|
files_per_level.c_str());
|
|
|
|
|
|
|
|
// manual compact into a single file and move the file to level 0
|
2015-06-17 23:36:14 +02:00
|
|
|
CompactRangeOptions compact_options;
|
|
|
|
compact_options.change_level = true;
|
|
|
|
compact_options.target_level = 0;
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->CompactRange(compact_options, GetCfHandle(), nullptr, nullptr);
|
2013-09-04 22:13:08 +02:00
|
|
|
|
|
|
|
// verify compaction result
|
|
|
|
files_per_level = "";
|
|
|
|
int num_files = 0;
|
|
|
|
for (int i = 0; i < db_->NumberLevels(GetCfHandle()); i++) {
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->GetProperty(GetCfHandle(),
|
|
|
|
"rocksdb.num-files-at-level" + NumberToString(i),
|
2013-09-04 22:13:08 +02:00
|
|
|
&property);
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
// format print string
|
2013-09-04 22:13:08 +02:00
|
|
|
char buf[100];
|
2014-11-01 03:22:49 +01:00
|
|
|
snprintf(buf, sizeof(buf), "%s%s", (i ? "," : ""), property.c_str());
|
2013-09-04 22:13:08 +02:00
|
|
|
files_per_level += buf;
|
|
|
|
|
|
|
|
num_files = atoi(property.c_str());
|
|
|
|
|
|
|
|
// level 0 should have only 1 file
|
|
|
|
if (i == 0 && num_files != 1) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Number of db files at "
|
|
|
|
"level 0 after compaction is " +
|
|
|
|
ToString(num_files) + ", not 1.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
// other levels should have no file
|
|
|
|
if (i > 0 && num_files != 0) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"Number of db files at "
|
|
|
|
"level " +
|
|
|
|
ToString(i) + " after compaction is " + ToString(num_files) +
|
|
|
|
", not 0.\n");
|
2013-09-04 22:13:08 +02:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fprintf(stdout, "files per level after compaction: %s\n",
|
|
|
|
files_per_level.c_str());
|
|
|
|
}
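// Illustrative invocation (hypothetical: assumes the command registers under
// the name "change_compaction_style" and that ARG_OLD_COMPACTION_STYLE /
// ARG_NEW_COMPACTION_STYLE expand to the matching flag strings):
//   ldb --db=/tmp/testdb change_compaction_style \
//       --old_compaction_style=0 --new_compaction_style=1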
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
struct StdErrReporter : public log::Reader::Reporter {
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual void Corruption(size_t bytes, const Status& s) override {
|
2014-11-24 19:04:16 +01:00
|
|
|
cerr << "Corruption detected in log file " << s.ToString() << "\n";
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2013-02-20 03:12:20 +01:00
|
|
|
class InMemoryHandler : public WriteBatch::Handler {
|
|
|
|
public:
|
2014-11-24 19:04:16 +01:00
|
|
|
InMemoryHandler(stringstream& row, bool print_values) : Handler(), row_(row) {
|
2013-12-04 08:16:36 +01:00
|
|
|
print_values_ = print_values;
|
|
|
|
}
|
2013-02-20 03:12:20 +01:00
|
|
|
|
2013-12-04 08:16:36 +01:00
|
|
|
void commonPutMerge(const Slice& key, const Slice& value) {
|
2014-11-01 03:22:49 +01:00
|
|
|
string k = LDBCommand::StringToHex(key.ToString());
|
2013-12-04 08:16:36 +01:00
|
|
|
if (print_values_) {
|
2014-11-01 03:22:49 +01:00
|
|
|
string v = LDBCommand::StringToHex(value.ToString());
|
2013-12-04 08:16:36 +01:00
|
|
|
row_ << k << " : ";
|
|
|
|
row_ << v << " ";
|
|
|
|
} else {
|
|
|
|
row_ << k << " ";
|
|
|
|
}
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
2013-12-04 08:16:36 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual void Put(const Slice& key, const Slice& value) override {
|
2013-12-04 08:16:36 +01:00
|
|
|
row_ << "PUT : ";
|
|
|
|
commonPutMerge(key, value);
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual void Merge(const Slice& key, const Slice& value) override {
|
2013-12-04 08:16:36 +01:00
|
|
|
row_ << "MERGE : ";
|
|
|
|
commonPutMerge(key, value);
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
2013-12-04 08:16:36 +01:00
|
|
|
|
2015-02-26 20:28:41 +01:00
|
|
|
virtual void Delete(const Slice& key) override {
|
2014-11-01 03:22:49 +01:00
|
|
|
row_ <<",DELETE : ";
|
2013-12-04 08:16:36 +01:00
|
|
|
row_ << LDBCommand::StringToHex(key.ToString()) << " ";
|
2013-02-20 03:12:20 +01:00
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
virtual ~InMemoryHandler() {}
|
2013-12-04 08:16:36 +01:00
|
|
|
|
2013-02-20 03:12:20 +01:00
|
|
|
private:
|
2014-11-01 03:22:49 +01:00
|
|
|
stringstream& row_;
|
2013-12-04 08:16:36 +01:00
|
|
|
bool print_values_;
|
2013-02-20 03:12:20 +01:00
|
|
|
};
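// InMemoryHandler is a WriteBatch::Handler: DumpWalFile() below hands an
// instance to WriteBatch::Iterate(), which replays every record in the batch
// through Put()/Merge()/Delete() so the handler can render it into row_.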
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
void DumpWalFile(std::string wal_file, bool print_header, bool print_values,
|
|
|
|
LDBCommandExecuteResult* exec_state) {
|
|
|
|
Env* env_ = Env::Default();
|
|
|
|
EnvOptions soptions;
|
Move rate_limiter, write buffering, most perf context instrumentation and most random kill out of Env
Summary: We want to keep Env a think layer for better portability. Less platform dependent codes should be moved out of Env. In this patch, I create a wrapper of file readers and writers, and put rate limiting, write buffering, as well as most perf context instrumentation and random kill out of Env. It will make it easier to maintain multiple Env in the future.
Test Plan: Run all existing unit tests.
Reviewers: anthony, kradhakrishnan, IslamAbdelRahman, yhchiang, igor
Reviewed By: igor
Subscribers: leveldb, dhruba
Differential Revision: https://reviews.facebook.net/D42321
2015-07-18 01:16:11 +02:00
|
|
|
unique_ptr<SequentialFileReader> wal_file_reader;
|
|
|
|
|
|
|
|
Status status;
|
|
|
|
{
|
|
|
|
unique_ptr<SequentialFile> file;
|
|
|
|
status = env_->NewSequentialFile(wal_file, &file, soptions);
|
|
|
|
if (status.ok()) {
|
|
|
|
wal_file_reader.reset(new SequentialFileReader(std::move(file)));
|
|
|
|
}
|
|
|
|
}
|
2014-11-24 19:04:16 +01:00
|
|
|
if (!status.ok()) {
|
|
|
|
if (exec_state) {
|
2015-03-17 02:08:59 +01:00
|
|
|
*exec_state = LDBCommandExecuteResult::Failed("Failed to open WAL file " +
|
2014-11-24 19:04:16 +01:00
|
|
|
status.ToString());
|
|
|
|
} else {
|
|
|
|
cerr << "Error: Failed to open WAL file " << status.ToString()
|
|
|
|
<< std::endl;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
StdErrReporter reporter;
|
2015-10-08 19:06:16 +02:00
|
|
|
uint64_t log_number;
|
|
|
|
FileType type;
|
|
|
|
|
|
|
|
// we need the log number, but ParseFileName expects dbname/NNN.log.
|
|
|
|
string sanitized = wal_file;
|
|
|
|
size_t lastslash = sanitized.rfind('/');
|
|
|
|
if (lastslash != std::string::npos)
|
|
|
|
sanitized = sanitized.substr(lastslash + 1);
|
|
|
|
if (!ParseFileName(sanitized, &log_number, &type)) {
|
|
|
|
// bogus input, carry on as best we can
|
|
|
|
log_number = 0;
|
|
|
|
}
|
|
|
|
DBOptions db_options;
|
|
|
|
log::Reader reader(db_options.info_log, move(wal_file_reader), &reporter,
|
|
|
|
true, 0, log_number);
|
2014-11-24 19:04:16 +01:00
|
|
|
string scratch;
|
|
|
|
WriteBatch batch;
|
|
|
|
Slice record;
|
|
|
|
stringstream row;
|
|
|
|
if (print_header) {
|
|
|
|
cout << "Sequence,Count,ByteSize,Physical Offset,Key(s)";
|
|
|
|
if (print_values) {
|
|
|
|
cout << " : value ";
|
|
|
|
}
|
|
|
|
cout << "\n";
|
|
|
|
}
|
|
|
|
while (reader.ReadRecord(&record, &scratch)) {
|
|
|
|
row.str("");
|
2016-03-30 19:35:22 +02:00
|
|
|
if (record.size() < WriteBatchInternal::kHeader) {
|
2014-11-24 19:04:16 +01:00
|
|
|
reporter.Corruption(record.size(),
|
|
|
|
Status::Corruption("log record too small"));
|
|
|
|
} else {
|
|
|
|
WriteBatchInternal::SetContents(&batch, record);
|
|
|
|
row << WriteBatchInternal::Sequence(&batch) << ",";
|
|
|
|
row << WriteBatchInternal::Count(&batch) << ",";
|
|
|
|
row << WriteBatchInternal::ByteSize(&batch) << ",";
|
|
|
|
row << reader.LastRecordOffset() << ",";
|
|
|
|
InMemoryHandler handler(row, print_values);
|
|
|
|
batch.Iterate(&handler);
|
|
|
|
row << "\n";
|
|
|
|
}
|
|
|
|
cout << row.str();
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
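// DumpWalFile() is the shared WAL-dumping helper: WALDumperCommand::DoCommand()
// below calls it directly, and it prints one comma-separated row per WAL record
// (Sequence,Count,ByteSize,Physical Offset,Key(s) [: value]).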
|
|
|
|
|
|
|
|
} // namespace
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
const string WALDumperCommand::ARG_WAL_FILE = "walfile";
|
|
|
|
const string WALDumperCommand::ARG_PRINT_VALUE = "print_value";
|
|
|
|
const string WALDumperCommand::ARG_PRINT_HEADER = "header";
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
WALDumperCommand::WALDumperCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options, const vector<string>& flags) :
|
2013-01-11 20:09:23 +01:00
|
|
|
LDBCommand(options, flags, true,
|
2013-02-20 03:12:20 +01:00
|
|
|
BuildCmdLineOptions(
|
|
|
|
{ARG_WAL_FILE, ARG_PRINT_HEADER, ARG_PRINT_VALUE})),
|
|
|
|
print_header_(false), print_values_(false) {
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
wal_file_.clear();
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
map<string, string>::const_iterator itr = options.find(ARG_WAL_FILE);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (itr != options.end()) {
|
|
|
|
wal_file_ = itr->second;
|
|
|
|
}
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
|
2013-02-20 03:12:20 +01:00
|
|
|
print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER);
|
|
|
|
print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE);
|
|
|
|
if (wal_file_.empty()) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed("Argument " + ARG_WAL_FILE +
|
|
|
|
" must be specified.");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void WALDumperCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(WALDumperCommand::Name());
|
|
|
|
ret.append(" --" + ARG_WAL_FILE + "=<write_ahead_log_file_path>");
|
|
|
|
ret.append(" [--" + ARG_PRINT_HEADER + "] ");
|
|
|
|
ret.append(" [--" + ARG_PRINT_VALUE + "] ");
|
|
|
|
ret.append("\n");
|
|
|
|
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
void WALDumperCommand::DoCommand() {
|
2014-11-24 19:04:16 +01:00
|
|
|
DumpWalFile(wal_file_, print_header_, print_values_, &exec_state_);
|
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
GetCommand::GetCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options, const vector<string>& flags) :
|
2013-05-14 04:11:56 +02:00
|
|
|
LDBCommand(options, flags, true, BuildCmdLineOptions({ARG_TTL, ARG_HEX,
|
|
|
|
ARG_KEY_HEX,
|
|
|
|
ARG_VALUE_HEX})) {
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
if (params.size() != 1) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"<key> must be specified for the get command");
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
|
|
|
key_ = params.at(0);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_key_hex_) {
|
|
|
|
key_ = HexToString(key_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void GetCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(GetCommand::Name());
|
|
|
|
ret.append(" <key>");
|
|
|
|
ret.append(" [--" + ARG_TTL + "]");
|
|
|
|
ret.append("\n");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void GetCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
string value;
|
2016-01-23 00:46:32 +01:00
|
|
|
Status st = db_->Get(ReadOptions(), GetCfHandle(), key_, &value);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "%s\n",
|
|
|
|
(is_value_hex_ ? StringToHex(value) : value).c_str());
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
ApproxSizeCommand::ApproxSizeCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options, const vector<string>& flags) :
|
2013-01-11 20:09:23 +01:00
|
|
|
LDBCommand(options, flags, true,
|
|
|
|
BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX,
|
|
|
|
ARG_FROM, ARG_TO})) {
|
|
|
|
|
|
|
|
if (options.find(ARG_FROM) != options.end()) {
|
|
|
|
start_key_ = options.find(ARG_FROM)->second;
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
ARG_FROM + " must be specified for approxsize command");
|
2013-01-11 20:09:23 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (options.find(ARG_TO) != options.end()) {
|
|
|
|
end_key_ = options.find(ARG_TO)->second;
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
ARG_TO + " must be specified for approxsize command");
|
2013-01-11 20:09:23 +01:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_key_hex_) {
|
|
|
|
start_key_ = HexToString(start_key_);
|
|
|
|
end_key_ = HexToString(end_key_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void ApproxSizeCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(ApproxSizeCommand::Name());
|
|
|
|
ret.append(HelpRangeCmdArgs());
|
|
|
|
ret.append("\n");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void ApproxSizeCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
2013-04-12 05:21:49 +02:00
|
|
|
Range ranges[1];
|
|
|
|
ranges[0] = Range(start_key_, end_key_);
|
2013-01-11 20:09:23 +01:00
|
|
|
uint64_t sizes[1];
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->GetApproximateSizes(GetCfHandle(), ranges, 1, sizes);
|
2013-11-13 06:02:03 +01:00
|
|
|
fprintf(stdout, "%lu\n", (unsigned long)sizes[0]);
|
2013-07-04 00:32:49 +02:00
|
|
|
/* Weird that GetApproximateSizes() returns void, although documentation
|
2013-01-11 20:09:23 +01:00
|
|
|
* says that it returns a Status object.
|
|
|
|
if (!st.ok()) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
*/
|
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
BatchPutCommand::BatchPutCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options, const vector<string>& flags) :
|
|
|
|
LDBCommand(options, flags, false,
|
|
|
|
BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX,
|
|
|
|
ARG_CREATE_IF_MISSING})) {
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
if (params.size() < 2) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
2014-11-01 03:22:49 +01:00
|
|
|
"At least one <key> <value> pair must be specified batchput.");
|
2013-01-11 20:09:23 +01:00
|
|
|
} else if (params.size() % 2 != 0) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
2013-01-11 20:09:23 +01:00
|
|
|
"Equal number of <key>s and <value>s must be specified for batchput.");
|
|
|
|
} else {
|
|
|
|
for (size_t i = 0; i < params.size(); i += 2) {
|
2014-11-01 03:22:49 +01:00
|
|
|
string key = params.at(i);
|
|
|
|
string value = params.at(i+1);
|
|
|
|
key_values_.push_back(pair<string, string>(
|
2013-01-11 20:09:23 +01:00
|
|
|
is_key_hex_ ? HexToString(key) : key,
|
|
|
|
is_value_hex_ ? HexToString(value) : value));
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void BatchPutCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(BatchPutCommand::Name());
|
|
|
|
ret.append(" <key> <value> [<key> <value>] [..]");
|
|
|
|
ret.append(" [--" + ARG_TTL + "]");
|
|
|
|
ret.append("\n");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void BatchPutCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
2013-04-12 05:21:49 +02:00
|
|
|
WriteBatch batch;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
for (vector<pair<string, string>>::const_iterator itr
|
2014-09-29 23:05:12 +02:00
|
|
|
= key_values_.begin(); itr != key_values_.end(); ++itr) {
|
2016-01-23 00:46:32 +01:00
|
|
|
batch.Put(GetCfHandle(), itr->first, itr->second);
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
2013-04-12 05:21:49 +02:00
|
|
|
Status st = db_->Write(WriteOptions(), &batch);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
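// Illustrative invocation (hypothetical: assumes the command registers under
// the name "batchput", matching the error message above):
//   ldb --db=/tmp/testdb batchput key1 value1 key2 value2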
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
Options BatchPutCommand::PrepareOptionsForOpenDB() {
|
|
|
|
Options opt = LDBCommand::PrepareOptionsForOpenDB();
|
2013-01-11 20:09:23 +01:00
|
|
|
opt.create_if_missing = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
|
|
|
|
return opt;
|
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
ScanCommand::ScanCommand(const vector<string>& params,
|
2016-03-10 22:34:42 +01:00
|
|
|
const map<string, string>& options,
|
|
|
|
const vector<string>& flags)
|
|
|
|
: LDBCommand(options, flags, true,
|
|
|
|
BuildCmdLineOptions(
|
|
|
|
{ARG_TTL, ARG_NO_VALUE, ARG_HEX, ARG_KEY_HEX,
|
|
|
|
ARG_TO, ARG_VALUE_HEX, ARG_FROM, ARG_TIMESTAMP,
|
|
|
|
ARG_MAX_KEYS, ARG_TTL_START, ARG_TTL_END})),
|
|
|
|
start_key_specified_(false),
|
|
|
|
end_key_specified_(false),
|
|
|
|
max_keys_scanned_(-1),
|
|
|
|
no_value_(false) {
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
map<string, string>::const_iterator itr = options.find(ARG_FROM);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (itr != options.end()) {
|
|
|
|
start_key_ = itr->second;
|
|
|
|
if (is_key_hex_) {
|
|
|
|
start_key_ = HexToString(start_key_);
|
|
|
|
}
|
|
|
|
start_key_specified_ = true;
|
|
|
|
}
|
|
|
|
itr = options.find(ARG_TO);
|
|
|
|
if (itr != options.end()) {
|
|
|
|
end_key_ = itr->second;
|
|
|
|
if (is_key_hex_) {
|
|
|
|
end_key_ = HexToString(end_key_);
|
|
|
|
}
|
|
|
|
end_key_specified_ = true;
|
|
|
|
}
|
|
|
|
|
2016-03-10 22:34:42 +01:00
|
|
|
vector<string>::const_iterator vitr =
|
|
|
|
std::find(flags.begin(), flags.end(), ARG_NO_VALUE);
|
2016-01-11 19:51:42 +01:00
|
|
|
if (vitr != flags.end()) {
|
|
|
|
no_value_ = true;
|
|
|
|
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
itr = options.find(ARG_MAX_KEYS);
|
|
|
|
if (itr != options.end()) {
|
|
|
|
try {
|
2015-04-24 04:17:57 +02:00
|
|
|
#if defined(CYGWIN)
|
|
|
|
max_keys_scanned_ = strtol(itr->second.c_str(), 0, 10);
|
|
|
|
#else
|
2013-03-20 01:28:30 +01:00
|
|
|
max_keys_scanned_ = stoi(itr->second);
|
2015-04-24 04:17:57 +02:00
|
|
|
#endif
|
2014-11-01 03:22:49 +01:00
|
|
|
} catch(const invalid_argument&) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(ARG_MAX_KEYS +
|
|
|
|
" has an invalid value");
|
2014-11-01 03:22:49 +01:00
|
|
|
} catch(const out_of_range&) {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
ARG_MAX_KEYS + " has a value out-of-range");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void ScanCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(ScanCommand::Name());
|
|
|
|
ret.append(HelpRangeCmdArgs());
|
|
|
|
ret.append(" [--" + ARG_TTL + "]");
|
|
|
|
ret.append(" [--" + ARG_TIMESTAMP + "]");
|
|
|
|
ret.append(" [--" + ARG_MAX_KEYS + "=<N>q] ");
|
|
|
|
ret.append(" [--" + ARG_TTL_START + "=<N>:- is inclusive]");
|
|
|
|
ret.append(" [--" + ARG_TTL_END + "=<N>:- is exclusive]");
|
2016-01-11 19:51:42 +01:00
|
|
|
ret.append(" [--" + ARG_NO_VALUE + "]");
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append("\n");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void ScanCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
int num_keys_scanned = 0;
|
2016-01-23 00:46:32 +01:00
|
|
|
Iterator* it = db_->NewIterator(ReadOptions(), GetCfHandle());
|
2013-01-11 20:09:23 +01:00
|
|
|
if (start_key_specified_) {
|
|
|
|
it->Seek(start_key_);
|
|
|
|
} else {
|
|
|
|
it->SeekToFirst();
|
|
|
|
}
|
2013-06-19 04:57:54 +02:00
|
|
|
int ttl_start;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (!ParseIntOption(option_map_, ARG_TTL_START, ttl_start, exec_state_)) {
|
2014-04-29 05:34:20 +02:00
|
|
|
ttl_start = DBWithTTLImpl::kMinTimestamp; // TTL introduction time
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
int ttl_end;
|
2014-11-01 03:22:49 +01:00
|
|
|
if (!ParseIntOption(option_map_, ARG_TTL_END, ttl_end, exec_state_)) {
|
2014-04-29 05:34:20 +02:00
|
|
|
ttl_end = DBWithTTLImpl::kMaxTimestamp; // Max time allowed by TTL feature
|
2013-06-19 04:57:54 +02:00
|
|
|
}
|
|
|
|
if (ttl_end < ttl_start) {
|
|
|
|
fprintf(stderr, "Error: End time can't be less than start time\n");
|
|
|
|
delete it;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if (is_db_ttl_ && timestamp_) {
|
|
|
|
fprintf(stdout, "Scanning key-values from %s to %s\n",
|
|
|
|
ReadableTime(ttl_start).c_str(), ReadableTime(ttl_end).c_str());
|
|
|
|
}
|
2013-01-11 20:09:23 +01:00
|
|
|
for ( ;
|
2014-11-01 03:22:49 +01:00
|
|
|
it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_);
|
|
|
|
it->Next()) {
|
2013-06-19 04:57:54 +02:00
|
|
|
if (is_db_ttl_) {
|
2013-08-06 02:55:44 +02:00
|
|
|
TtlIterator* it_ttl = dynamic_cast<TtlIterator*>(it);
|
|
|
|
assert(it_ttl);
|
2013-06-20 20:50:33 +02:00
|
|
|
int rawtime = it_ttl->timestamp();
|
|
|
|
if (rawtime < ttl_start || rawtime >= ttl_end) {
|
2013-06-19 04:57:54 +02:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
if (timestamp_) {
|
|
|
|
fprintf(stdout, "%s ", ReadableTime(rawtime).c_str());
|
|
|
|
}
|
|
|
|
}
|
2015-11-20 07:26:37 +01:00
|
|
|
|
|
|
|
Slice key_slice = it->key();
|
|
|
|
|
|
|
|
std::string formatted_key;
|
|
|
|
if (is_key_hex_) {
|
|
|
|
formatted_key = "0x" + key_slice.ToString(true /* hex */);
|
|
|
|
key_slice = formatted_key;
|
|
|
|
} else if (ldb_options_.key_formatter) {
|
|
|
|
formatted_key = ldb_options_.key_formatter->Format(key_slice);
|
|
|
|
key_slice = formatted_key;
|
|
|
|
}
|
|
|
|
|
2016-01-11 19:51:42 +01:00
|
|
|
if (no_value_) {
|
2016-03-10 22:34:42 +01:00
|
|
|
fprintf(stdout, "%.*s\n", static_cast<int>(key_slice.size()),
|
|
|
|
key_slice.data());
|
2016-01-11 19:51:42 +01:00
|
|
|
} else {
|
2016-03-10 22:34:42 +01:00
|
|
|
Slice val_slice = it->value();
|
|
|
|
std::string formatted_value;
|
|
|
|
if (is_value_hex_) {
|
|
|
|
formatted_value = "0x" + val_slice.ToString(true /* hex */);
|
|
|
|
val_slice = formatted_value;
|
|
|
|
}
|
|
|
|
fprintf(stdout, "%.*s : %.*s\n", static_cast<int>(key_slice.size()),
|
|
|
|
key_slice.data(), static_cast<int>(val_slice.size()),
|
|
|
|
val_slice.data());
|
2015-11-20 07:26:37 +01:00
|
|
|
}
|
|
|
|
|
2013-01-11 20:09:23 +01:00
|
|
|
num_keys_scanned++;
|
|
|
|
if (max_keys_scanned_ >= 0 && num_keys_scanned >= max_keys_scanned_) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!it->status().ok()) { // Check for any errors found during the scan
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(it->status().ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
delete it;
|
|
|
|
}
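// Illustrative invocation (hypothetical: assumes the command registers under
// the name "scan" and that ARG_FROM/ARG_TO/ARG_MAX_KEYS/ARG_NO_VALUE expand to
// the matching flag strings):
//   ldb --db=/tmp/testdb scan --from=key000 --to=key999 --max_keys=10 --no_value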
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
DeleteCommand::DeleteCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options, const vector<string>& flags) :
|
2013-01-11 20:09:23 +01:00
|
|
|
LDBCommand(options, flags, false,
|
|
|
|
BuildCmdLineOptions({ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX})) {
|
|
|
|
|
|
|
|
if (params.size() != 1) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"KEY must be specified for the delete command");
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
|
|
|
key_ = params.at(0);
|
|
|
|
if (is_key_hex_) {
|
|
|
|
key_ = HexToString(key_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void DeleteCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(DeleteCommand::Name() + " <key>");
|
|
|
|
ret.append("\n");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void DeleteCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Status st = db_->Delete(WriteOptions(), GetCfHandle(), key_);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
PutCommand::PutCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options, const vector<string>& flags) :
|
2013-01-11 20:09:23 +01:00
|
|
|
LDBCommand(options, flags, false,
|
2013-05-14 04:11:56 +02:00
|
|
|
BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX, ARG_VALUE_HEX,
|
2013-01-11 20:09:23 +01:00
|
|
|
ARG_CREATE_IF_MISSING})) {
|
|
|
|
|
|
|
|
if (params.size() != 2) {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(
|
|
|
|
"<key> and <value> must be specified for the put command");
|
2013-01-11 20:09:23 +01:00
|
|
|
} else {
|
|
|
|
key_ = params.at(0);
|
|
|
|
value_ = params.at(1);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_key_hex_) {
|
|
|
|
key_ = HexToString(key_);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (is_value_hex_) {
|
|
|
|
value_ = HexToString(value_);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void PutCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(PutCommand::Name());
|
|
|
|
ret.append(" <key> <value> ");
|
|
|
|
ret.append(" [--" + ARG_TTL + "]");
|
|
|
|
ret.append("\n");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void PutCommand::DoCommand() {
|
2016-01-23 00:46:32 +01:00
|
|
|
if (!db_) {
|
|
|
|
assert(GetExecuteState().IsFailed());
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
Status st = db_->Put(WriteOptions(), GetCfHandle(), key_, value_);
|
2013-01-11 20:09:23 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
Options PutCommand::PrepareOptionsForOpenDB() {
|
|
|
|
Options opt = LDBCommand::PrepareOptionsForOpenDB();
|
2013-01-11 20:09:23 +01:00
|
|
|
opt.create_if_missing = IsFlagPresent(flags_, ARG_CREATE_IF_MISSING);
|
|
|
|
return opt;
|
|
|
|
}
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
const char* DBQuerierCommand::HELP_CMD = "help";
|
|
|
|
const char* DBQuerierCommand::GET_CMD = "get";
|
|
|
|
const char* DBQuerierCommand::PUT_CMD = "put";
|
|
|
|
const char* DBQuerierCommand::DELETE_CMD = "delete";
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
DBQuerierCommand::DBQuerierCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options, const vector<string>& flags) :
|
2013-01-11 20:09:23 +01:00
|
|
|
LDBCommand(options, flags, false,
|
2013-05-14 04:11:56 +02:00
|
|
|
BuildCmdLineOptions({ARG_TTL, ARG_HEX, ARG_KEY_HEX,
|
|
|
|
ARG_VALUE_HEX})) {
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void DBQuerierCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(DBQuerierCommand::Name());
|
|
|
|
ret.append(" [--" + ARG_TTL + "]");
|
|
|
|
ret.append("\n");
|
|
|
|
ret.append(" Starts a REPL shell. Type help for list of available "
|
2013-01-11 20:09:23 +01:00
|
|
|
"commands.");
|
2014-11-01 03:22:49 +01:00
|
|
|
ret.append("\n");
|
2013-01-11 20:09:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
void DBQuerierCommand::DoCommand() {
|
|
|
|
if (!db_) {
|
2016-01-23 00:46:32 +01:00
|
|
|
assert(GetExecuteState().IsFailed());
|
2013-01-11 20:09:23 +01:00
|
|
|
return;
|
|
|
|
}
|
2014-11-01 03:22:49 +01:00
|
|
|
|
2013-04-12 05:21:49 +02:00
|
|
|
ReadOptions read_options;
|
|
|
|
WriteOptions write_options;
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
string line;
|
|
|
|
string key;
|
|
|
|
string value;
|
|
|
|
while (getline(cin, line, '\n')) {
|
|
|
|
|
|
|
|
// Parse line into vector<string>
|
|
|
|
vector<string> tokens;
|
2013-01-11 20:09:23 +01:00
|
|
|
size_t pos = 0;
|
|
|
|
while (true) {
|
|
|
|
size_t pos2 = line.find(' ', pos);
|
2014-11-01 03:22:49 +01:00
|
|
|
if (pos2 == string::npos) {
|
2013-01-11 20:09:23 +01:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
tokens.push_back(line.substr(pos, pos2-pos));
|
|
|
|
pos = pos2 + 1;
|
|
|
|
}
|
|
|
|
tokens.push_back(line.substr(pos));
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
const string& cmd = tokens[0];
|
2013-01-11 20:09:23 +01:00
|
|
|
|
|
|
|
if (cmd == HELP_CMD) {
|
|
|
|
fprintf(stdout,
|
|
|
|
"get <key>\n"
|
|
|
|
"put <key> <value>\n"
|
|
|
|
"delete <key>\n");
|
|
|
|
} else if (cmd == DELETE_CMD && tokens.size() == 2) {
|
|
|
|
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->Delete(write_options, GetCfHandle(), Slice(key));
|
2013-01-11 20:09:23 +01:00
|
|
|
fprintf(stdout, "Successfully deleted %s\n", tokens[1].c_str());
|
|
|
|
} else if (cmd == PUT_CMD && tokens.size() == 3) {
|
|
|
|
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
|
|
|
|
value = (is_value_hex_ ? HexToString(tokens[2]) : tokens[2]);
|
2016-01-23 00:46:32 +01:00
|
|
|
db_->Put(write_options, GetCfHandle(), Slice(key), Slice(value));
|
2013-01-11 20:09:23 +01:00
|
|
|
fprintf(stdout, "Successfully put %s %s\n",
|
|
|
|
tokens[1].c_str(), tokens[2].c_str());
|
|
|
|
} else if (cmd == GET_CMD && tokens.size() == 2) {
|
|
|
|
key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]);
|
2016-01-23 00:46:32 +01:00
|
|
|
if (db_->Get(read_options, GetCfHandle(), Slice(key), &value).ok()) {
|
2013-01-11 20:09:23 +01:00
|
|
|
fprintf(stdout, "%s\n", PrintKeyValue(key, value,
|
|
|
|
is_key_hex_, is_value_hex_).c_str());
|
|
|
|
} else {
|
|
|
|
fprintf(stdout, "Not found %s\n", tokens[1].c_str());
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
fprintf(stdout, "Unknown command %s\n", line.c_str());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
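// Example REPL session (output follows the fprintf formats above):
//   put foo bar   -> Successfully put foo bar
//   get foo       -> prints the pair via PrintKeyValue()
//   delete foo    -> Successfully deleted foo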
|
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
CheckConsistencyCommand::CheckConsistencyCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options, const vector<string>& flags) :
|
2014-03-20 21:42:45 +01:00
|
|
|
LDBCommand(options, flags, false,
|
|
|
|
BuildCmdLineOptions({})) {
|
|
|
|
}
|
|
|
|
|
2014-11-01 03:22:49 +01:00
|
|
|
void CheckConsistencyCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(CheckConsistencyCommand::Name());
|
|
|
|
ret.append("\n");
|
2014-03-20 21:42:45 +01:00
|
|
|
}
|
2013-01-11 20:09:23 +01:00
|
|
|
|
2014-03-20 21:42:45 +01:00
|
|
|
void CheckConsistencyCommand::DoCommand() {
|
|
|
|
Options opt = PrepareOptionsForOpenDB();
|
2014-03-20 22:18:29 +01:00
|
|
|
opt.paranoid_checks = true;
|
2014-03-20 21:42:45 +01:00
|
|
|
if (!exec_state_.IsNotStarted()) {
|
|
|
|
return;
|
|
|
|
}
|
2014-03-20 22:18:29 +01:00
|
|
|
DB* db;
|
|
|
|
Status st = DB::OpenForReadOnly(opt, db_path_, &db, false);
|
|
|
|
delete db;
|
2014-03-20 21:42:45 +01:00
|
|
|
if (st.ok()) {
|
|
|
|
fprintf(stdout, "OK\n");
|
|
|
|
} else {
|
2015-03-17 02:08:59 +01:00
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
|
2014-03-20 21:42:45 +01:00
|
|
|
}
|
2012-10-31 19:47:18 +01:00
|
|
|
}
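// CheckConsistency relies on DB::OpenForReadOnly() with paranoid_checks
// enabled: if the database opens cleanly under paranoid checking it is
// reported as "OK", otherwise the failing open status is surfaced.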
|
2014-03-20 21:42:45 +01:00
|
|
|
|
2014-11-24 19:04:16 +01:00
|
|
|
// ----------------------------------------------------------------------------
|
2016-03-12 22:50:20 +01:00
|
|
|
|
|
|
|
RepairCommand::RepairCommand(const vector<string>& params,
|
|
|
|
const map<string, string>& options,
|
|
|
|
const vector<string>& flags)
|
|
|
|
: LDBCommand(options, flags, false, BuildCmdLineOptions({})) {}
|
|
|
|
|
|
|
|
void RepairCommand::Help(string& ret) {
|
|
|
|
ret.append(" ");
|
|
|
|
ret.append(RepairCommand::Name());
|
|
|
|
ret.append("\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
void RepairCommand::DoCommand() {
|
|
|
|
Options options = PrepareOptionsForOpenDB();
|
2016-04-01 20:06:06 +02:00
|
|
|
options.info_log.reset(new StderrLogger(InfoLogLevel::WARN_LEVEL));
|
2016-03-12 22:50:20 +01:00
|
|
|
Status status = RepairDB(db_path_, options);
|
|
|
|
if (status.ok()) {
|
|
|
|
printf("OK\n");
|
|
|
|
} else {
|
|
|
|
exec_state_ = LDBCommandExecuteResult::Failed(status.ToString());
|
|
|
|
}
|
|
|
|
}
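// A StderrLogger at WARN level is installed above so that RepairDB()'s
// progress and warnings remain visible on the console while the repair runs.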
|
|
|
|
|
|
|
|
// ----------------------------------------------------------------------------
|
2014-11-24 19:04:16 +01:00
|
|
|
|
|
|
|
namespace {
|
|
|
|
|
|
|
|
void DumpSstFile(std::string filename, bool output_hex, bool show_properties) {
|
|
|
|
std::string from_key;
|
|
|
|
std::string to_key;
|
|
|
|
if (filename.length() <= 4 ||
|
|
|
|
filename.rfind(".sst") != filename.length() - 4) {
|
|
|
|
std::cout << "Invalid sst file name." << std::endl;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
// no verification
|
|
|
|
rocksdb::SstFileReader reader(filename, false, output_hex);
|
|
|
|
Status st = reader.ReadSequential(true, -1, false, // has_from
|
|
|
|
from_key, false, // has_to
|
|
|
|
to_key);
|
|
|
|
if (!st.ok()) {
|
|
|
|
std::cerr << "Error in reading SST file " << filename << st.ToString()
|
|
|
|
<< std::endl;
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (show_properties) {
|
|
|
|
const rocksdb::TableProperties* table_properties;
|
|
|
|
|
|
|
|
std::shared_ptr<const rocksdb::TableProperties>
|
|
|
|
table_properties_from_reader;
|
|
|
|
st = reader.ReadTableProperties(&table_properties_from_reader);
|
|
|
|
if (!st.ok()) {
|
|
|
|
std::cerr << filename << ": " << st.ToString()
|
|
|
|
<< ". Try to use initial table properties" << std::endl;
|
|
|
|
table_properties = reader.GetInitTableProperties();
|
|
|
|
} else {
|
|
|
|
table_properties = table_properties_from_reader.get();
|
|
|
|
}
|
|
|
|
if (table_properties != nullptr) {
|
|
|
|
std::cout << std::endl << "Table Properties:" << std::endl;
|
|
|
|
std::cout << table_properties->ToString("\n") << std::endl;
|
|
|
|
std::cout << "# deleted keys: "
|
|
|
|
<< rocksdb::GetDeletedKeys(
|
|
|
|
table_properties->user_collected_properties)
|
|
|
|
<< std::endl;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}

}  // namespace
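
// DBFileDumperCommand prints, in order, the active MANIFEST file (as named by
// CURRENT), every live SST file together with its table properties, and the
// sorted WAL files of the open database. For example, assuming the command is
// registered under the name "dump_live_files":
//
//   ldb --db=/path/to/db dump_live_files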
DBFileDumperCommand::DBFileDumperCommand(const vector<string>& params,
                                         const map<string, string>& options,
                                         const vector<string>& flags)
    : LDBCommand(options, flags, true, BuildCmdLineOptions({})) {}

void DBFileDumperCommand::Help(string& ret) {
  ret.append(" ");
  ret.append(DBFileDumperCommand::Name());
  ret.append("\n");
}

void DBFileDumperCommand::DoCommand() {
  if (!db_) {
    assert(GetExecuteState().IsFailed());
    return;
  }
  Status s;

  std::cout << "Manifest File" << std::endl;
  std::cout << "==============================" << std::endl;
  std::string manifest_filename;
  // CURRENT holds the name of the active MANIFEST file, terminated by '\n'.
  s = ReadFileToString(db_->GetEnv(), CurrentFileName(db_->GetName()),
                       &manifest_filename);
  if (!s.ok() || manifest_filename.empty() ||
      manifest_filename.back() != '\n') {
    std::cerr << "Error when reading CURRENT file "
              << CurrentFileName(db_->GetName()) << std::endl;
  }
  // remove the trailing '\n'
  manifest_filename.resize(manifest_filename.size() - 1);
  string manifest_filepath = db_->GetName() + "/" + manifest_filename;
  std::cout << manifest_filepath << std::endl;
  DumpManifestFile(manifest_filepath, false, false, false);
  std::cout << std::endl;

  std::cout << "SST Files" << std::endl;
  std::cout << "==============================" << std::endl;
  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);
  // Each live SST file is dumped in full, together with its table properties.
  for (auto& fileMetadata : metadata) {
    std::string filename = fileMetadata.db_path + fileMetadata.name;
    std::cout << filename << " level:" << fileMetadata.level << std::endl;
    std::cout << "------------------------------" << std::endl;
    DumpSstFile(filename, false, true);
    std::cout << std::endl;
  }
  std::cout << std::endl;

  std::cout << "Write Ahead Log Files" << std::endl;
  std::cout << "==============================" << std::endl;
  rocksdb::VectorLogPtr wal_files;
  s = db_->GetSortedWalFiles(wal_files);
  if (!s.ok()) {
    std::cerr << "Error when getting WAL files" << std::endl;
  } else {
    for (auto& wal : wal_files) {
      // TODO(qyang): option.wal_dir should be passed into ldb command
      std::string filename = db_->GetOptions().wal_dir + wal->PathName();
      std::cout << filename << std::endl;
      DumpWalFile(filename, true, true, &exec_state_);
    }
  }
}

}  // namespace rocksdb
#endif // ROCKSDB_LITE