Turn on -Wmissing-prototypes
Summary: Compiling for iOS turns on -Wmissing-prototypes by default, which causes the rocksdb build to fail. This diff turns on -Wmissing-prototypes in our compile options and cleans up all functions with missing prototypes.

Test Plan: compiles

Reviewers: dhruba, haobo, ljin, sdong

Reviewed By: ljin

CC: leveldb

Differential Revision: https://reviews.facebook.net/D17649
parent df2a8b6a1a
commit 4daea66343
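The pattern applied throughout the diff below is mechanical: free functions that are only used inside one translation unit are either marked static or moved into an anonymous namespace, so they get internal linkage and no longer need a prior prototype. As a rough illustration (this snippet is not part of the commit; the file and helper names are made up), clang's -Wmissing-prototypes flags the first definition below but leaves the other two alone; combined with -Werror, that is the failure the iOS build hit:

// example.cc -- illustrative sketch only; helper names are hypothetical.
#include <cstdio>

// Warned: external linkage with no previous prototype, roughly
//   "no previous prototype for function 'LeakyHelper' [-Wmissing-prototypes]"
int LeakyHelper(int x) { return x + 1; }

// Fix 1: internal linkage via static.
static int StaticHelper(int x) { return x + 2; }

// Fix 2: internal linkage via an anonymous namespace (the idiom used for the
// test helpers in this diff).
namespace {
int NamespacedHelper(int x) { return x + 3; }
}  // namespace

int main() {
  std::printf("%d %d %d\n", LeakyHelper(1), StaticHelper(1), NamespacedHelper(1));
  return 0;
}

Building something like this with clang++ -Wmissing-prototypes -Werror -c example.cc should reproduce the warning (and the hard error) on LeakyHelper only.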
@@ -145,7 +145,7 @@ static char* FilterCreate(
memcpy(result, "fake", 4);
return result;
}
unsigned char FilterKeyMatch(
static unsigned char FilterKeyMatch(
void* arg,
const char* key, size_t length,
const char* filter, size_t filter_length) {

@@ -325,6 +325,7 @@ DEFINE_uint64(delete_obsolete_files_period_micros, 0, "Option to delete "
"obsolete files periodically. 0 means that obsolete files are"
" deleted after every compaction run.");
namespace {
enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
assert(ctype);

@@ -344,6 +345,8 @@ enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
fprintf(stdout, "Cannot parse compression type '%s'\n", ctype);
return rocksdb::kSnappyCompression; //default value
}
} // namespace
DEFINE_string(compression_type, "snappy",
"Algorithm to use to compress the database");
static enum rocksdb::CompressionType FLAGS_compression_type_e =

@@ -482,6 +485,8 @@ enum RepFactory {
kVectorRep,
kHashLinkedList
};
namespace {
enum RepFactory StringToRepFactory(const char* ctype) {
assert(ctype);

@@ -497,6 +502,8 @@ enum RepFactory StringToRepFactory(const char* ctype) {
fprintf(stdout, "Cannot parse memreptable %s\n", ctype);
return kSkipList;
}
} // namespace
static enum RepFactory FLAGS_rep_factory;
DEFINE_string(memtablerep, "skip_list", "");
DEFINE_int64(hash_bucket_count, 1024 * 1024, "hash bucket count");

@@ -959,6 +959,7 @@ static long TestGetTickerCount(const Options& options, Tickers ticker_type) {
// A helper function that ensures the table properties returned in
// `GetPropertiesOfAllTablesTest` is correct.
// This test assumes entries size is differnt for each of the tables.
namespace {
void VerifyTableProperties(DB* db, uint64_t expected_entries_size) {
TablePropertiesCollection props;
ASSERT_OK(db->GetPropertiesOfAllTables(&props));

@@ -977,28 +978,7 @@ void VerifyTableProperties(DB* db, uint64_t expected_entries_size) {
ASSERT_EQ(props.size(), unique_entries.size());
ASSERT_EQ(expected_entries_size, sum);
}
std::unordered_map<std::string, size_t> GetMemoryUsage(MemTable* memtable) {
const auto& arena = memtable->TEST_GetArena();
return {{"memtable.approximate.usage", memtable->ApproximateMemoryUsage()},
{"arena.approximate.usage", arena.ApproximateMemoryUsage()},
{"arena.allocated.memory", arena.MemoryAllocatedBytes()},
{"arena.unused.bytes", arena.AllocatedAndUnused()},
{"irregular.blocks", arena.IrregularBlockNum()}};
}
void PrintMemoryUsage(const std::unordered_map<std::string, size_t>& usage) {
for (const auto& item : usage) {
std::cout << "\t" << item.first << ": " << item.second << std::endl;
}
}
void AddRandomKV(MemTable* memtable, Random* rnd, size_t arena_block_size) {
memtable->Add(0, kTypeValue, RandomString(rnd, 20) /* key */,
// make sure we will be able to generate some over sized entries
RandomString(rnd, rnd->Uniform(arena_block_size / 4) * 1.15 +
10) /* value */);
}
} // namespace
TEST(DBTest, Empty) {
do {

@@ -1505,9 +1485,11 @@ TEST(DBTest, IterSeekBeforePrev) {
delete iter;
}
namespace {
std::string MakeLongKey(size_t length, char c) {
return std::string(length, c);
}
} // namespace
TEST(DBTest, IterLongKeys) {
ASSERT_OK(Put(MakeLongKey(20, 0), "0"));

@@ -3272,6 +3254,7 @@ TEST(DBTest, ConvertCompactionStyle) {
ASSERT_EQ(keys_in_db, expected_keys);
}
namespace {
void MinLevelHelper(DBTest* self, Options& options) {
Random rnd(301);

@@ -3345,6 +3328,7 @@ bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
}
return true;
}
} // namespace
TEST(DBTest, MinLevelToCompress1) {
Options options = CurrentOptions();

@@ -5242,6 +5226,7 @@ TEST(DBTest, CompactOnFlush) {
} while (ChangeCompactOptions());
}
namespace {
std::vector<std::uint64_t> ListLogFiles(Env* env, const std::string& path) {
std::vector<std::string> files;
std::vector<uint64_t> log_files;

@@ -5257,6 +5242,7 @@ std::vector<std::uint64_t> ListLogFiles(Env* env, const std::string& path) {
}
return std::move(log_files);
}
} // namespace
TEST(DBTest, WALArchivalTtl) {
do {

@@ -5304,6 +5290,7 @@ TEST(DBTest, WALArchivalTtl) {
} while (ChangeCompactOptions());
}
namespace {
uint64_t GetLogDirSize(std::string dir_path, SpecialEnv* env) {
uint64_t dir_size = 0;
std::vector<std::string> files;

@@ -5320,6 +5307,7 @@ uint64_t GetLogDirSize(std::string dir_path, SpecialEnv* env) {
}
return dir_size;
}
} // namespace
TEST(DBTest, WALArchivalSizeLimit) {
do {

@@ -5364,6 +5352,7 @@ TEST(DBTest, WALArchivalSizeLimit) {
} while (ChangeCompactOptions());
}
namespace {
SequenceNumber ReadRecords(
std::unique_ptr<TransactionLogIterator>& iter,
int& count) {

@@ -5388,6 +5377,7 @@ void ExpectRecords(
ReadRecords(iter, num_records);
ASSERT_EQ(num_records, expected_no_records);
}
} // namespace
TEST(DBTest, TransactionLogIterator) {
do {

@@ -6314,6 +6304,7 @@ TEST(DBTest, MultiGetEmpty) {
} while (ChangeCompactOptions());
}
namespace {
void PrefixScanInit(DBTest *dbtest) {
char buf[100];
std::string keystr;

@@ -6363,6 +6354,7 @@ void PrefixScanInit(DBTest *dbtest) {
dbtest->Flush();
}
}
} // namespace
TEST(DBTest, PrefixScan) {
ReadOptions ro = ReadOptions();

@@ -6444,6 +6436,7 @@ TEST(DBTest, PrefixScan) {
delete options.filter_policy;
}
namespace {
std::string MakeKey(unsigned int num) {
char buf[30];
snprintf(buf, sizeof(buf), "%016u", num);

@@ -6503,6 +6496,7 @@ void BM_LogAndApply(int iters, int num_base_files) {
"BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n",
buf, iters, us, ((float)us) / iters);
}
} // namespace
TEST(DBTest, TailingIteratorSingle) {
ReadOptions read_options;
@@ -44,10 +44,6 @@ static int FlattenPath(const std::string& path, char* dest, int len) {
return write_idx;
}
// A utility routine: write "data" to the named file and Sync() it.
extern Status WriteStringToFileSync(Env* env, const Slice& data,
const std::string& fname);
static std::string MakeFileName(const std::string& name, uint64_t number,
const char* suffix) {
char buf[100];

@@ -238,7 +234,7 @@ Status SetCurrentFile(Env* env, const std::string& dbname,
assert(contents.starts_with(dbname + "/"));
contents.remove_prefix(dbname.size() + 1);
std::string tmp = TempFileName(dbname, descriptor_number);
Status s = WriteStringToFileSync(env, contents.ToString() + "\n", tmp);
Status s = WriteStringToFile(env, contents.ToString() + "\n", tmp, true);
if (s.ok()) {
s = env->RenameFile(tmp, CurrentFileName(dbname));
}

@@ -253,7 +249,7 @@ Status SetIdentityFile(Env* env, const std::string& dbname) {
assert(!id.empty());
// Reserve the filename dbname/000000.dbtmp for the temporary identity file
std::string tmp = TempFileName(dbname, 0);
Status s = WriteStringToFileSync(env, id, tmp);
Status s = WriteStringToFile(env, id, tmp, true);
if (s.ok()) {
s = env->RenameFile(tmp, IdentityFileName(dbname));
}

@@ -75,6 +75,7 @@ class CountMergeOperator : public AssociativeMergeOperator {
std::shared_ptr<MergeOperator> mergeOperator_;
};
namespace {
std::shared_ptr<DB> OpenDb(const string& dbname, const bool ttl = false,
const size_t max_successive_merges = 0,
const uint32_t min_partial_merge_operands = 2) {

@@ -100,6 +101,7 @@ std::shared_ptr<DB> OpenDb(const string& dbname, const bool ttl = false,
}
return std::shared_ptr<DB>(db);
}
} // namespace
// Imagine we are maintaining a set of uint64 counters.
// Each counter has a distinct name. And we would like

@@ -237,6 +239,7 @@ class MergeBasedCounters : public Counters {
}
};
namespace {
void dumpDb(DB* db) {
auto it = unique_ptr<Iterator>(db->NewIterator(ReadOptions()));
for (it->SeekToFirst(); it->Valid(); it->Next()) {

@@ -454,6 +457,7 @@ void runTest(int argc, const string& dbname, const bool use_ttl = false) {
}
}
}
} // namespace
int main(int argc, char *argv[]) {
//TODO: Make this test like a general rocksdb unit-test

@@ -429,9 +429,11 @@ TEST(PlainTableDBTest, Iterator) {
}
}
namespace {
std::string MakeLongKey(size_t length, char c) {
return std::string(length, c);
}
} // namespace
TEST(PlainTableDBTest, IteratorLargeKeys) {
Options options = CurrentOptions();

@@ -104,6 +104,7 @@ class TestKeyComparator : public Comparator {
};
namespace {
void PutKey(DB* db, WriteOptions write_options, uint64_t prefix,
uint64_t suffix, const Slice& value) {
TestKey test_key(prefix, suffix);

@@ -133,6 +134,7 @@ std::string Get(DB* db, const ReadOptions& read_options, uint64_t prefix,
}
return result;
}
} // namespace
class PrefixTest {
public:

@@ -83,6 +83,7 @@ class DumbLogger : public Logger {
};
// Utilities test functions
namespace {
void MakeBuilder(const Options& options,
const InternalKeyComparator& internal_comparator,
std::unique_ptr<FakeWritableFile>* writable,

@@ -91,6 +92,7 @@ void MakeBuilder(const Options& options,
builder->reset(options.table_factory->NewTableBuilder(
options, internal_comparator, writable->get(), options.compression));
}
} // namespace
// Collects keys that starts with "A" in a table.
class RegularKeysStartWithA: public TablePropertiesCollector {

@@ -126,6 +128,7 @@ class RegularKeysStartWithA: public TablePropertiesCollector {
extern uint64_t kBlockBasedTableMagicNumber;
extern uint64_t kPlainTableMagicNumber;
namespace {
void TestCustomizedTablePropertiesCollector(
uint64_t magic_number, bool encode_as_internal, const Options& options,
const InternalKeyComparator& internal_comparator) {

@@ -178,6 +181,7 @@ void TestCustomizedTablePropertiesCollector(
ASSERT_TRUE(GetVarint32(&key, &starts_with_A));
ASSERT_EQ(3u, starts_with_A);
}
} // namespace
TEST(TablePropertiesTest, CustomizedTablePropertiesCollector) {
// Test properties collectors with internal keys or regular keys

@@ -209,6 +213,7 @@ TEST(TablePropertiesTest, CustomizedTablePropertiesCollector) {
ikc);
}
namespace {
void TestInternalKeyPropertiesCollector(
uint64_t magic_number,
bool sanitized,

@@ -280,6 +285,7 @@ void TestInternalKeyPropertiesCollector(
ASSERT_EQ(1u, starts_with_A);
}
}
} // namespace
TEST(TablePropertiesTest, InternalKeyPropertiesCollector) {
TestInternalKeyPropertiesCollector(
@@ -649,7 +649,8 @@ extern void Fatal(Logger* info_log, const char* format, ...);
// A utility routine: write "data" to the named file.
extern Status WriteStringToFile(Env* env, const Slice& data,
const std::string& fname);
const std::string& fname,
bool should_sync = false);
// A utility routine: read contents of named file into *data
extern Status ReadFileToString(Env* env, const std::string& fname,

@@ -954,11 +954,14 @@ Status BlockBasedTable::Get(
return s;
}
namespace {
bool SaveDidIO(void* arg, const ParsedInternalKey& key, const Slice& value,
bool didIO) {
*reinterpret_cast<bool*>(arg) = didIO;
return false;
}
} // namespace
bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
const Slice& key) {
// We use Get() as it has logic that checks whether we read the

@@ -41,24 +41,4 @@ class MinIteratorComparator {
const Comparator* comparator_;
};
typedef std::priority_queue<
IteratorWrapper*,
std::vector<IteratorWrapper*>,
MaxIteratorComparator> MaxIterHeap;
typedef std::priority_queue<
IteratorWrapper*,
std::vector<IteratorWrapper*>,
MinIteratorComparator> MinIterHeap;
// Return's a new MaxHeap of IteratorWrapper's using the provided Comparator.
MaxIterHeap NewMaxIterHeap(const Comparator* comparator) {
return MaxIterHeap(MaxIteratorComparator(comparator));
}
// Return's a new MinHeap of IteratorWrapper's using the provided Comparator.
MinIterHeap NewMinIterHeap(const Comparator* comparator) {
return MinIterHeap(MinIteratorComparator(comparator));
}
} // namespace rocksdb

@@ -9,6 +9,9 @@
#include "table/merger.h"
#include <vector>
#include <queue>
#include "rocksdb/comparator.h"
#include "rocksdb/iterator.h"
#include "rocksdb/options.h"

@@ -17,12 +20,29 @@
#include "util/stop_watch.h"
#include "util/perf_context_imp.h"
#include <vector>
namespace rocksdb {
namespace {
typedef std::priority_queue<
IteratorWrapper*,
std::vector<IteratorWrapper*>,
MaxIteratorComparator> MaxIterHeap;
typedef std::priority_queue<
IteratorWrapper*,
std::vector<IteratorWrapper*>,
MinIteratorComparator> MinIterHeap;
// Return's a new MaxHeap of IteratorWrapper's using the provided Comparator.
MaxIterHeap NewMaxIterHeap(const Comparator* comparator) {
return MaxIterHeap(MaxIteratorComparator(comparator));
}
// Return's a new MinHeap of IteratorWrapper's using the provided Comparator.
MinIterHeap NewMinIterHeap(const Comparator* comparator) {
return MinIterHeap(MinIteratorComparator(comparator));
}
class MergingIterator : public Iterator {
public:
MergingIterator(const Comparator* comparator, Iterator** children, int n)
@@ -19,6 +19,8 @@
#include "util/testutil.h"
namespace rocksdb {
namespace {
// Make a key that i determines the first 4 characters and j determines the
// last 4 characters.
static std::string MakeKey(int i, int j, bool through_db) {

@@ -43,6 +45,7 @@ static bool DummySaveValue(void* arg, const ParsedInternalKey& ikey,
uint64_t Now(Env* env, bool measured_by_nanosecond) {
return measured_by_nanosecond ? env->NowNanos() : env->NowMicros();
}
} // namespace
// A very simple benchmark that.
// Create a table with roughly numKey1 * numKey2 keys,

@@ -57,6 +60,7 @@ uint64_t Now(Env* env, bool measured_by_nanosecond) {
//
// If for_terator=true, instead of just query one key each time, it queries
// a range sharing the same prefix.
namespace {
void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
ReadOptions& read_options, int num_keys1,
int num_keys2, int num_iter, int prefix_len,

@@ -215,6 +219,7 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
DestroyDB(dbname, opts);
}
}
} // namespace
} // namespace rocksdb
DEFINE_bool(query_empty, false, "query non-existing keys instead of existing "

@@ -264,10 +269,10 @@ int main(int argc, char** argv) {
options.table_factory =
std::shared_ptr<rocksdb::TableFactory>(tf);
TableReaderBenchmark(options, env_options, ro, FLAGS_num_keys1,
FLAGS_num_keys2, FLAGS_iter, FLAGS_prefix_len,
FLAGS_query_empty, FLAGS_iterator, FLAGS_through_db,
measured_by_nanosecond);
rocksdb::TableReaderBenchmark(options, env_options, ro, FLAGS_num_keys1,
FLAGS_num_keys2, FLAGS_iter, FLAGS_prefix_len,
FLAGS_query_empty, FLAGS_iterator,
FLAGS_through_db, measured_by_nanosecond);
delete tf;
return 0;
}
@@ -1,3 +1,8 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
#include <cstdio>
#include <vector>
#include <atomic>

@@ -23,11 +28,13 @@ uint64_t timeout_sec;
Env *env;
BlobStore* bs;
static std::string RandomString(Random* rnd, uint64_t len) {
namespace {
std::string RandomString(Random* rnd, uint64_t len) {
std::string r;
test::RandomString(rnd, len, &r);
return r;
}
} // namespace
struct Result {
uint32_t writes;

@@ -59,11 +66,13 @@ struct Result {
};
namespace {
Result operator + (const Result &a, const Result &b) {
return Result(a.writes + b.writes, a.reads + b.reads,
a.deletes + b.deletes, a.data_written + b.data_written,
a.data_read + b.data_read);
}
} // namespace
struct WorkerThread {
uint64_t data_size_from, data_size_to;

@@ -131,6 +140,7 @@ static void WorkerThreadBody(void* arg) {
t->stopped.store(true);
}
namespace {
Result StartBenchmark(vector<WorkerThread*>& config) {
for (auto w : config) {
env->StartThread(WorkerThreadBody, w);

@@ -241,6 +251,7 @@ vector<WorkerThread*> SetupBenchmarkReadHeavy() {
return config;
}
} // namespace
int main(int argc, const char** argv) {
srand(33);
@@ -145,6 +145,7 @@ class SanityTestPlainTableFactory : public SanityTest {
Options options_;
};
namespace {
bool RunSanityTests(const std::string& command, const std::string& path) {
std::vector<SanityTest*> sanity_tests = {
new SanityTestBasic(path),

@@ -176,6 +177,7 @@ bool RunSanityTests(const std::string& command, const std::string& path) {
}
return true;
}
} // namespace
} // namespace rocksdb

@@ -271,6 +271,7 @@ static const bool FLAGS_num_iterations_dummy __attribute__((unused)) =
DEFINE_bool(disable_seek_compaction, false,
"Option to disable compation triggered by read.");
namespace {
enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
assert(ctype);

@@ -290,6 +291,8 @@ enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
fprintf(stdout, "Cannot parse compression type '%s'\n", ctype);
return rocksdb::kSnappyCompression; //default value
}
} // namespace
DEFINE_string(compression_type, "snappy",
"Algorithm to use to compress the database");
static enum rocksdb::CompressionType FLAGS_compression_type_e =

@@ -323,6 +326,8 @@ enum RepFactory {
kHashSkipList,
kVectorRep
};
namespace {
enum RepFactory StringToRepFactory(const char* ctype) {
assert(ctype);

@@ -336,6 +341,8 @@ enum RepFactory StringToRepFactory(const char* ctype) {
fprintf(stdout, "Cannot parse memreptable %s\n", ctype);
return kSkipList;
}
} // namespace
static enum RepFactory FLAGS_rep_factory;
DEFINE_string(memtablerep, "prefix_hash", "");
@@ -220,6 +220,7 @@ static void print_help() {
" [--show_properties]\n");
}
namespace {
string HexToString(const string& str) {
string parsed;
if (str[0] != '0' || str[1] != 'x') {

@@ -236,6 +237,7 @@ string HexToString(const string& str) {
}
return parsed;
}
} // namespace
int main(int argc, char** argv) {
const char* dir_or_file = nullptr;

@@ -50,6 +50,7 @@ Env* AutoRollLoggerTest::env = Env::Default();
// no format. LogMessage() provides such a simple interface and
// avoids the [format-security] warning which occurs when you
// call Log(logger, log_message) directly.
namespace {
void LogMessage(Logger* logger, const char* message) {
Log(logger, "%s", message);
}

@@ -58,7 +59,9 @@ void LogMessage(const InfoLogLevel log_level, Logger* logger,
const char* message) {
Log(log_level, logger, "%s", message);
}
} // namespace
namespace {
void GetFileCreateTime(const std::string& fname, uint64_t* file_ctime) {
struct stat s;
if (stat(fname.c_str(), &s) != 0) {

@@ -66,6 +69,7 @@ void GetFileCreateTime(const std::string& fname, uint64_t* file_ctime) {
}
*file_ctime = static_cast<uint64_t>(s.st_ctime);
}
} // namespace
void AutoRollLoggerTest::RollLogFileBySizeTest(AutoRollLogger* logger,
size_t log_max_size,

@@ -281,26 +285,6 @@ TEST(AutoRollLoggerTest, InfoLogLevel) {
inFile.close();
}
int OldLogFileCount(const string& dir) {
std::vector<std::string> files;
Env::Default()->GetChildren(dir, &files);
int log_file_count = 0;
for (std::vector<std::string>::iterator it = files.begin();
it != files.end(); ++it) {
uint64_t create_time;
FileType type;
if (!ParseFileName(*it, &create_time, &type)) {
continue;
}
if (type == kInfoLogFile && create_time > 0) {
++log_file_count;
}
}
return log_file_count;
}
} // namespace rocksdb
int main(int argc, char** argv) {

@@ -70,6 +70,7 @@ TEST(AutoVectorTest, EmplaceBack) {
ASSERT_TRUE(!vec.only_in_stack());
}
namespace {
void AssertEqual(
const autovector<size_t, kSize>& a, const autovector<size_t, kSize>& b) {
ASSERT_EQ(a.size(), b.size());

@@ -79,6 +80,7 @@ void AssertEqual(
ASSERT_EQ(a[i], b[i]);
}
}
} // namespace
TEST(AutoVectorTest, CopyAndAssignment) {
// Test both heap-allocated and stack-allocated cases.

@@ -159,6 +161,7 @@ TEST(AutoVectorTest, Iterators) {
}
}
namespace {
vector<string> GetTestKeys(size_t size) {
vector<string> keys;
keys.resize(size);

@@ -169,6 +172,7 @@ vector<string> GetTestKeys(size_t size) {
}
return keys;
}
} // namespace
template<class TVector>
void BenchmarkVectorCreationAndInsertion(

@@ -107,7 +107,9 @@ class CacheTest {
};
CacheTest* CacheTest::current_;
namespace {
void dumbDeleter(const Slice& key, void* value) { }
} // namespace
TEST(CacheTest, UsageTest) {
// cache is shared_ptr and will be automatically cleaned up.

@@ -382,9 +384,11 @@ class Value {
~Value() { std::cout << v_ << " is destructed\n"; }
};
namespace {
void deleter(const Slice& key, void* value) {
delete (Value *)value;
}
} // namespace
TEST(CacheTest, BadEviction) {
int n = 10;
util/env.cc

@@ -172,9 +172,8 @@ void Log(const shared_ptr<Logger>& info_log, const char* format, ...) {
}
}
static Status DoWriteStringToFile(Env* env, const Slice& data,
const std::string& fname,
bool should_sync) {
Status WriteStringToFile(Env* env, const Slice& data, const std::string& fname,
bool should_sync) {
unique_ptr<WritableFile> file;
EnvOptions soptions;
Status s = env->NewWritableFile(fname, &file, soptions);

@@ -191,16 +190,6 @@ static Status DoWriteStringToFile(Env* env, const Slice& data,
return s;
}
Status WriteStringToFile(Env* env, const Slice& data,
const std::string& fname) {
return DoWriteStringToFile(env, data, fname, false);
}
Status WriteStringToFileSync(Env* env, const Slice& data,
const std::string& fname) {
return DoWriteStringToFile(env, data, fname, true);
}
Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
EnvOptions soptions;
data->clear();

@@ -200,6 +200,17 @@ TEST(EnvPosixTest, TwoPools) {
ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
}
#ifdef OS_LINUX
// To make sure the Env::GetUniqueId() related tests work correctly, The files
// should be stored in regular storage like "hard disk" or "flash device".
// Otherwise we cannot get the correct id.
//
// The following function act as the replacement of test::TmpDir() that may be
// customized by user to be on a storage that doesn't work with GetUniqueId().
//
// TODO(kailiu) This function still assumes /tmp/<test-dir> reside in regular
// storage system.
namespace {
bool IsSingleVarint(const std::string& s) {
Slice slice(s);

@@ -211,16 +222,6 @@ bool IsSingleVarint(const std::string& s) {
return slice.size() == 0;
}
#ifdef OS_LINUX
// To make sure the Env::GetUniqueId() related tests work correctly, The files
// should be stored in regular storage like "hard disk" or "flash device".
// Otherwise we cannot get the correct id.
//
// The following function act as the replacement of test::TmpDir() that may be
// customized by user to be on a storage that doesn't work with GetUniqueId().
//
// TODO(kailiu) This function still assumes /tmp/<test-dir> reside in regular
// storage system.
bool IsUniqueIDValid(const std::string& s) {
return !s.empty() && !IsSingleVarint(s);
}

@@ -237,6 +238,7 @@ std::string GetOnDiskTestDir() {
return base;
}
} // namespace
// Only works in linux platforms
TEST(EnvPosixTest, RandomAccessUniqueID) {
@@ -601,6 +601,8 @@ void ListColumnFamiliesCommand::DoCommand() {
// ----------------------------------------------------------------------------
namespace {
string ReadableTime(int unixtime) {
char time_buffer [80];
time_t rawtime = unixtime;

@@ -634,6 +636,8 @@ void PrintBucketCounts(const vector<uint64_t>& bucket_counts, int ttl_start,
(unsigned long)bucket_counts[num_buckets - 1]);
}
} // namespace
const string InternalDumpCommand::ARG_COUNT_ONLY = "count_only";
const string InternalDumpCommand::ARG_COUNT_DELIM = "count_delim";
const string InternalDumpCommand::ARG_STATS = "stats";

@@ -6,6 +6,7 @@
#include "util/stack_trace.h"
#include <assert.h>
namespace {
void f0() {
char *p = nullptr;
*p = 10; /* SIGSEGV here!! */

@@ -22,6 +23,7 @@ void f2() {
void f3() {
f2();
}
} // namespace
int main() {
rocksdb::InstallStackTraceHandler();

@@ -6,21 +6,18 @@
#include <sstream>
#include <string>
#include <vector>
#include "util/string_util.h"
namespace rocksdb {
using namespace std;
using std::string;
using std::vector;
using std::stringstream;
vector<string> stringSplit(string arg, char delim) {
vector<string> splits;
stringstream ss(arg);
string item;
while(getline(ss, item, delim)) {
std::vector<std::string> stringSplit(std::string arg, char delim) {
std::vector<std::string> splits;
std::stringstream ss(arg);
std::string item;
while (std::getline(ss, item, delim)) {
splits.push_back(item);
}
return splits;
}
}
} // namespace rocksdb

@@ -3,9 +3,13 @@
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
#include <sstream>
#include <string>
#include <vector>
#pragma once
namespace rocksdb {
extern std::vector<std::string> stringSplit(std::string arg, char delim);
}
} // namespace rocksdb

@@ -25,6 +25,7 @@ namespace rocksdb {
// Path to the database on file system
const std::string kDbName = "/tmp/mergetestdb";
namespace {
// OpenDb opens a (possibly new) rocksdb database with a StringAppendOperator
std::shared_ptr<DB> OpenNormalDb(char delim_char) {
DB* db;

@@ -44,6 +45,7 @@ std::shared_ptr<DB> OpenTtlDb(char delim_char) {
ASSERT_OK(UtilityDB::OpenTtlDB(options, kDbName, &db, 123456));
return std::shared_ptr<DB>(db);
}
} // namespace
/// StringLists represents a set of string-lists, each with a key-index.
/// Supports Append(list, string) and Get(list)

@@ -1,3 +1,7 @@
// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
/**
* A test harness for the Redis API built on rocksdb.
*

@@ -9,7 +13,6 @@
* TODO: Add LARGE random test cases to verify efficiency and scalability
*
* @author Deon Nicholas (dnicholas@fb.com)
* Copyright 2013 Facebook
*/

@@ -41,6 +44,7 @@ Options RedisListsTest::options = Options();
// operator== and operator<< are defined below for vectors (lists)
// Needed for ASSERT_EQ
namespace {
void AssertListEq(const std::vector<std::string>& result,
const std::vector<std::string>& expected_result) {
ASSERT_EQ(result.size(), expected_result.size());

@@ -48,6 +52,7 @@ void AssertListEq(const std::vector<std::string>& result,
ASSERT_EQ(result[i], expected_result[i]);
}
}
} // namespace
// PushRight, Length, Index, Range
TEST(RedisListsTest, SimpleTest) {

@@ -738,6 +743,7 @@ TEST(RedisListsTest, PersistenceMultiKeyTest) {
/// THE manual REDIS TEST begins here
/// THIS WILL ONLY OCCUR IF YOU RUN: ./redis_test -m
namespace {
void MakeUpper(std::string* const s) {
int len = s->length();
for(int i=0; i<len; ++i) {

@@ -842,6 +848,7 @@ int manual_redis_test(bool destructive){
}
}
}
} // namespace
} // namespace rocksdb

@@ -851,6 +858,7 @@ int manual_redis_test(bool destructive){
// "./redis_test -m -d" for destructive manual test (erase db before use)
namespace {
// Check for "want" argument in the argument list
bool found_arg(int argc, char* argv[], const char* want){
for(int i=1; i<argc; ++i){

@@ -860,6 +868,7 @@ bool found_arg(int argc, char* argv[], const char* want){
}
return false;
}
} // namespace
// Will run unit tests.
// However, if -m is specified, it will do user manual/interactive testing