diff --git a/db/c_test.c b/db/c_test.c
index e6c5a9e67..cd9299bec 100644
--- a/db/c_test.c
+++ b/db/c_test.c
@@ -145,7 +145,7 @@ static char* FilterCreate(
   memcpy(result, "fake", 4);
   return result;
 }
-unsigned char FilterKeyMatch(
+static unsigned char FilterKeyMatch(
     void* arg,
     const char* key, size_t length,
     const char* filter, size_t filter_length) {
diff --git a/db/db_bench.cc b/db/db_bench.cc
index d6993c6d4..74af88b77 100644
--- a/db/db_bench.cc
+++ b/db/db_bench.cc
@@ -325,6 +325,7 @@ DEFINE_uint64(delete_obsolete_files_period_micros, 0, "Option to delete "
               "obsolete files periodically. 0 means that obsolete files are"
               " deleted after every compaction run.");
 
+namespace {
 enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
   assert(ctype);
 
@@ -344,6 +345,8 @@ enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
   fprintf(stdout, "Cannot parse compression type '%s'\n", ctype);
   return rocksdb::kSnappyCompression; //default value
 }
+}  // namespace
+
 DEFINE_string(compression_type, "snappy",
               "Algorithm to use to compress the database");
 static enum rocksdb::CompressionType FLAGS_compression_type_e =
@@ -482,6 +485,8 @@ enum RepFactory {
   kVectorRep,
   kHashLinkedList
 };
+
+namespace {
 enum RepFactory StringToRepFactory(const char* ctype) {
   assert(ctype);
 
@@ -497,6 +502,8 @@ enum RepFactory StringToRepFactory(const char* ctype) {
   fprintf(stdout, "Cannot parse memreptable %s\n", ctype);
   return kSkipList;
 }
+}  // namespace
+
 static enum RepFactory FLAGS_rep_factory;
 DEFINE_string(memtablerep, "skip_list", "");
 DEFINE_int64(hash_bucket_count, 1024 * 1024, "hash bucket count");
diff --git a/db/db_test.cc b/db/db_test.cc
index 59e94bea8..0285905f2 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -959,6 +959,7 @@ static long TestGetTickerCount(const Options& options, Tickers ticker_type) {
 // A helper function that ensures the table properties returned in
 // `GetPropertiesOfAllTablesTest` is correct.
 // This test assumes entries size is differnt for each of the tables.
+namespace {
 void VerifyTableProperties(DB* db, uint64_t expected_entries_size) {
   TablePropertiesCollection props;
   ASSERT_OK(db->GetPropertiesOfAllTables(&props));
@@ -977,28 +978,7 @@ void VerifyTableProperties(DB* db, uint64_t expected_entries_size) {
   ASSERT_EQ(props.size(), unique_entries.size());
   ASSERT_EQ(expected_entries_size, sum);
 }
-
-std::unordered_map GetMemoryUsage(MemTable* memtable) {
-  const auto& arena = memtable->TEST_GetArena();
-  return {{"memtable.approximate.usage", memtable->ApproximateMemoryUsage()},
-          {"arena.approximate.usage", arena.ApproximateMemoryUsage()},
-          {"arena.allocated.memory", arena.MemoryAllocatedBytes()},
-          {"arena.unused.bytes", arena.AllocatedAndUnused()},
-          {"irregular.blocks", arena.IrregularBlockNum()}};
-}
-
-void PrintMemoryUsage(const std::unordered_map& usage) {
-  for (const auto& item : usage) {
-    std::cout << "\t" << item.first << ": " << item.second << std::endl;
-  }
-}
-
-void AddRandomKV(MemTable* memtable, Random* rnd, size_t arena_block_size) {
-  memtable->Add(0, kTypeValue, RandomString(rnd, 20) /* key */,
-                // make sure we will be able to generate some over sized entries
-                RandomString(rnd, rnd->Uniform(arena_block_size / 4) * 1.15 +
-                                      10) /* value */);
-}
+}  // namespace
 
 TEST(DBTest, Empty) {
   do {
@@ -1505,9 +1485,11 @@ TEST(DBTest, IterSeekBeforePrev) {
   delete iter;
 }
 
+namespace {
 std::string MakeLongKey(size_t length, char c) {
   return std::string(length, c);
 }
+}  // namespace
 
 TEST(DBTest, IterLongKeys) {
   ASSERT_OK(Put(MakeLongKey(20, 0), "0"));
@@ -3272,6 +3254,7 @@ TEST(DBTest, ConvertCompactionStyle) {
   ASSERT_EQ(keys_in_db, expected_keys);
 }
 
+namespace {
 void MinLevelHelper(DBTest* self, Options& options) {
   Random rnd(301);
 
@@ -3345,6 +3328,7 @@ bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
   }
   return true;
 }
+}  // namespace
 
 TEST(DBTest, MinLevelToCompress1) {
   Options options = CurrentOptions();
@@ -5242,6 +5226,7 @@ TEST(DBTest, CompactOnFlush) {
   } while (ChangeCompactOptions());
 }
 
+namespace {
 std::vector ListLogFiles(Env* env, const std::string& path) {
   std::vector files;
   std::vector log_files;
@@ -5257,6 +5242,7 @@ std::vector ListLogFiles(Env* env, const std::string& path) {
   }
   return std::move(log_files);
 }
+}  // namespace
 
 TEST(DBTest, WALArchivalTtl) {
   do {
@@ -5304,6 +5290,7 @@ TEST(DBTest, WALArchivalTtl) {
   } while (ChangeCompactOptions());
 }
 
+namespace {
 uint64_t GetLogDirSize(std::string dir_path, SpecialEnv* env) {
   uint64_t dir_size = 0;
   std::vector files;
@@ -5320,6 +5307,7 @@ uint64_t GetLogDirSize(std::string dir_path, SpecialEnv* env) {
   }
   return dir_size;
 }
+}  // namespace
 
 TEST(DBTest, WALArchivalSizeLimit) {
   do {
@@ -5364,6 +5352,7 @@ TEST(DBTest, WALArchivalSizeLimit) {
   } while (ChangeCompactOptions());
 }
 
+namespace {
 SequenceNumber ReadRecords(
     std::unique_ptr& iter,
     int& count) {
@@ -5388,6 +5377,7 @@ void ExpectRecords(
   ReadRecords(iter, num_records);
   ASSERT_EQ(num_records, expected_no_records);
 }
+}  // namespace
 
 TEST(DBTest, TransactionLogIterator) {
   do {
@@ -6314,6 +6304,7 @@ TEST(DBTest, MultiGetEmpty) {
   } while (ChangeCompactOptions());
 }
 
+namespace {
 void PrefixScanInit(DBTest *dbtest) {
   char buf[100];
   std::string keystr;
@@ -6363,6 +6354,7 @@ void PrefixScanInit(DBTest *dbtest) {
     dbtest->Flush();
   }
 }
+}  // namespace
 
 TEST(DBTest, PrefixScan) {
   ReadOptions ro = ReadOptions();
@@ -6444,6 +6436,7 @@ TEST(DBTest, PrefixScan) {
   delete options.filter_policy;
 }
 
+namespace {
 std::string MakeKey(unsigned int num) {
   char buf[30];
   snprintf(buf, sizeof(buf), "%016u", num);
@@ -6503,6 +6496,7 @@ void BM_LogAndApply(int iters, int num_base_files) {
           "BM_LogAndApply/%-6s %8d iters : %9u us (%7.0f us / iter)\n",
           buf, iters, us, ((float)us) / iters);
 }
+}  // namespace
 
 TEST(DBTest, TailingIteratorSingle) {
   ReadOptions read_options;
diff --git a/db/filename.cc b/db/filename.cc
index cdbd1bc7a..e2ab0956a 100644
--- a/db/filename.cc
+++ b/db/filename.cc
@@ -44,10 +44,6 @@ static int FlattenPath(const std::string& path, char* dest, int len) {
   return write_idx;
 }
 
-// A utility routine: write "data" to the named file and Sync() it.
-extern Status WriteStringToFileSync(Env* env, const Slice& data,
-                                    const std::string& fname);
-
 static std::string MakeFileName(const std::string& name, uint64_t number,
                                 const char* suffix) {
   char buf[100];
@@ -238,7 +234,7 @@ Status SetCurrentFile(Env* env, const std::string& dbname,
   assert(contents.starts_with(dbname + "/"));
   contents.remove_prefix(dbname.size() + 1);
   std::string tmp = TempFileName(dbname, descriptor_number);
-  Status s = WriteStringToFileSync(env, contents.ToString() + "\n", tmp);
+  Status s = WriteStringToFile(env, contents.ToString() + "\n", tmp, true);
   if (s.ok()) {
     s = env->RenameFile(tmp, CurrentFileName(dbname));
   }
@@ -253,7 +249,7 @@ Status SetIdentityFile(Env* env, const std::string& dbname) {
   assert(!id.empty());
   // Reserve the filename dbname/000000.dbtmp for the temporary identity file
   std::string tmp = TempFileName(dbname, 0);
-  Status s = WriteStringToFileSync(env, id, tmp);
+  Status s = WriteStringToFile(env, id, tmp, true);
   if (s.ok()) {
     s = env->RenameFile(tmp, IdentityFileName(dbname));
   }
diff --git a/db/merge_test.cc b/db/merge_test.cc
index 4b98f0581..71254f51a 100644
--- a/db/merge_test.cc
+++ b/db/merge_test.cc
@@ -75,6 +75,7 @@ class CountMergeOperator : public AssociativeMergeOperator {
   std::shared_ptr mergeOperator_;
 };
 
+namespace {
 std::shared_ptr OpenDb(const string& dbname, const bool ttl = false,
                        const size_t max_successive_merges = 0,
                        const uint32_t min_partial_merge_operands = 2) {
@@ -100,6 +101,7 @@ std::shared_ptr OpenDb(const string& dbname, const bool ttl = false,
   }
   return std::shared_ptr(db);
 }
+}  // namespace
 
 // Imagine we are maintaining a set of uint64 counters.
 // Each counter has a distinct name. And we would like
@@ -237,6 +239,7 @@ class MergeBasedCounters : public Counters {
   }
 };
 
+namespace {
 void dumpDb(DB* db) {
   auto it = unique_ptr(db->NewIterator(ReadOptions()));
   for (it->SeekToFirst(); it->Valid(); it->Next()) {
@@ -454,6 +457,7 @@ void runTest(int argc, const string& dbname, const bool use_ttl = false) {
     }
   }
 }
+}  // namespace
 
 int main(int argc, char *argv[]) {
   //TODO: Make this test like a general rocksdb unit-test
diff --git a/db/plain_table_db_test.cc b/db/plain_table_db_test.cc
index 6a95a2585..9f836b76e 100644
--- a/db/plain_table_db_test.cc
+++ b/db/plain_table_db_test.cc
@@ -429,9 +429,11 @@ TEST(PlainTableDBTest, Iterator) {
   }
 }
 
+namespace {
 std::string MakeLongKey(size_t length, char c) {
   return std::string(length, c);
 }
+}  // namespace
 
 TEST(PlainTableDBTest, IteratorLargeKeys) {
   Options options = CurrentOptions();
diff --git a/db/prefix_test.cc b/db/prefix_test.cc
index 89e31b60c..c73cf00a6 100644
--- a/db/prefix_test.cc
+++ b/db/prefix_test.cc
@@ -104,6 +104,7 @@ class TestKeyComparator : public Comparator {
 
 };
 
+namespace {
 void PutKey(DB* db, WriteOptions write_options, uint64_t prefix,
             uint64_t suffix, const Slice& value) {
   TestKey test_key(prefix, suffix);
@@ -133,6 +134,7 @@ std::string Get(DB* db, const ReadOptions& read_options, uint64_t prefix,
   }
   return result;
 }
+}  // namespace
 
 class PrefixTest {
  public:
diff --git a/db/table_properties_collector_test.cc b/db/table_properties_collector_test.cc
index a9f770ca5..ea15260b3 100644
--- a/db/table_properties_collector_test.cc
+++ b/db/table_properties_collector_test.cc
@@ -83,6 +83,7 @@ class DumbLogger : public Logger {
 };
 
 // Utilities test functions
+namespace {
 void MakeBuilder(const Options& options,
                  const InternalKeyComparator& internal_comparator,
                  std::unique_ptr* writable,
@@ -91,6 +92,7 @@ void MakeBuilder(const Options& options,
   builder->reset(options.table_factory->NewTableBuilder(
       options, internal_comparator, writable->get(), options.compression));
 }
+}  // namespace
 
 // Collects keys that starts with "A" in a table.
 class RegularKeysStartWithA: public TablePropertiesCollector {
@@ -126,6 +128,7 @@ class RegularKeysStartWithA: public TablePropertiesCollector {
 extern uint64_t kBlockBasedTableMagicNumber;
 extern uint64_t kPlainTableMagicNumber;
 
+namespace {
 void TestCustomizedTablePropertiesCollector(
     uint64_t magic_number, bool encode_as_internal, const Options& options,
     const InternalKeyComparator& internal_comparator) {
@@ -178,6 +181,7 @@ void TestCustomizedTablePropertiesCollector(
   ASSERT_TRUE(GetVarint32(&key, &starts_with_A));
   ASSERT_EQ(3u, starts_with_A);
 }
+}  // namespace
 
 TEST(TablePropertiesTest, CustomizedTablePropertiesCollector) {
   // Test properties collectors with internal keys or regular keys
@@ -209,6 +213,7 @@ TEST(TablePropertiesTest, CustomizedTablePropertiesCollector) {
       ikc);
 }
 
+namespace {
 void TestInternalKeyPropertiesCollector(
     uint64_t magic_number,
     bool sanitized,
@@ -280,6 +285,7 @@ void TestInternalKeyPropertiesCollector(
     ASSERT_EQ(1u, starts_with_A);
   }
 }
+}  // namespace
 
 TEST(TablePropertiesTest, InternalKeyPropertiesCollector) {
   TestInternalKeyPropertiesCollector(
diff --git a/include/rocksdb/env.h b/include/rocksdb/env.h
index 7a989d29c..b2fe00d25 100644
--- a/include/rocksdb/env.h
+++ b/include/rocksdb/env.h
@@ -649,7 +649,8 @@ extern void Fatal(Logger* info_log, const char* format, ...);
 
 // A utility routine: write "data" to the named file.
 extern Status WriteStringToFile(Env* env, const Slice& data,
-                                const std::string& fname);
+                                const std::string& fname,
+                                bool should_sync = false);
 
 // A utility routine: read contents of named file into *data
 extern Status ReadFileToString(Env* env, const std::string& fname,
diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc
index a8d8695b9..c1555747a 100644
--- a/table/block_based_table_reader.cc
+++ b/table/block_based_table_reader.cc
@@ -954,11 +954,14 @@ Status BlockBasedTable::Get(
   return s;
 }
 
+namespace {
 bool SaveDidIO(void* arg, const ParsedInternalKey& key, const Slice& value,
                bool didIO) {
   *reinterpret_cast(arg) = didIO;
   return false;
 }
+}  // namespace
+
 bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options,
                                       const Slice& key) {
   // We use Get() as it has logic that checks whether we read the
diff --git a/table/iter_heap.h b/table/iter_heap.h
index af8834e38..9569d3638 100644
--- a/table/iter_heap.h
+++ b/table/iter_heap.h
@@ -41,24 +41,4 @@ class MinIteratorComparator {
   const Comparator* comparator_;
 };
 
-typedef std::priority_queue<
-    IteratorWrapper*,
-    std::vector,
-    MaxIteratorComparator> MaxIterHeap;
-
-typedef std::priority_queue<
-    IteratorWrapper*,
-    std::vector,
-    MinIteratorComparator> MinIterHeap;
-
-// Return's a new MaxHeap of IteratorWrapper's using the provided Comparator.
-MaxIterHeap NewMaxIterHeap(const Comparator* comparator) {
-  return MaxIterHeap(MaxIteratorComparator(comparator));
-}
-
-// Return's a new MinHeap of IteratorWrapper's using the provided Comparator.
-MinIterHeap NewMinIterHeap(const Comparator* comparator) {
-  return MinIterHeap(MinIteratorComparator(comparator));
-}
-
 }  // namespace rocksdb
diff --git a/table/merger.cc b/table/merger.cc
index 03d177a6a..b829f7133 100644
--- a/table/merger.cc
+++ b/table/merger.cc
@@ -9,6 +9,9 @@
 
 #include "table/merger.h"
 
+#include
+#include
+
 #include "rocksdb/comparator.h"
 #include "rocksdb/iterator.h"
 #include "rocksdb/options.h"
@@ -17,12 +20,29 @@
 #include "util/stop_watch.h"
 #include "util/perf_context_imp.h"
 
-#include
-
 namespace rocksdb {
-
 namespace {
+typedef std::priority_queue<
+    IteratorWrapper*,
+    std::vector,
+    MaxIteratorComparator> MaxIterHeap;
+
+typedef std::priority_queue<
+    IteratorWrapper*,
+    std::vector,
+    MinIteratorComparator> MinIterHeap;
+
+// Return's a new MaxHeap of IteratorWrapper's using the provided Comparator.
+MaxIterHeap NewMaxIterHeap(const Comparator* comparator) {
+  return MaxIterHeap(MaxIteratorComparator(comparator));
+}
+
+// Return's a new MinHeap of IteratorWrapper's using the provided Comparator.
+MinIterHeap NewMinIterHeap(const Comparator* comparator) {
+  return MinIterHeap(MinIteratorComparator(comparator));
+}
+
 class MergingIterator : public Iterator {
  public:
   MergingIterator(const Comparator* comparator, Iterator** children, int n)
diff --git a/table/table_reader_bench.cc b/table/table_reader_bench.cc
index ab86521f2..32f6ee618 100644
--- a/table/table_reader_bench.cc
+++ b/table/table_reader_bench.cc
@@ -19,6 +19,8 @@
 #include "util/testutil.h"
 
 namespace rocksdb {
+
+namespace {
 // Make a key that i determines the first 4 characters and j determines the
 // last 4 characters.
 static std::string MakeKey(int i, int j, bool through_db) {
@@ -43,6 +45,7 @@ static bool DummySaveValue(void* arg, const ParsedInternalKey& ikey,
 uint64_t Now(Env* env, bool measured_by_nanosecond) {
   return measured_by_nanosecond ? env->NowNanos() : env->NowMicros();
 }
+}  // namespace
 
 // A very simple benchmark that.
 // Create a table with roughly numKey1 * numKey2 keys,
@@ -57,6 +60,7 @@ uint64_t Now(Env* env, bool measured_by_nanosecond) {
 //
 // If for_terator=true, instead of just query one key each time, it queries
 // a range sharing the same prefix.
+namespace {
 void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
                           ReadOptions& read_options, int num_keys1,
                           int num_keys2, int num_iter, int prefix_len,
@@ -215,6 +219,7 @@ void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
     DestroyDB(dbname, opts);
   }
 }
+}  // namespace
 }  // namespace rocksdb
 
 DEFINE_bool(query_empty, false, "query non-existing keys instead of existing "
@@ -264,10 +269,10 @@ int main(int argc, char** argv) {
 
   options.table_factory = std::shared_ptr(tf);
 
-  TableReaderBenchmark(options, env_options, ro, FLAGS_num_keys1,
-                       FLAGS_num_keys2, FLAGS_iter, FLAGS_prefix_len,
-                       FLAGS_query_empty, FLAGS_iterator, FLAGS_through_db,
-                       measured_by_nanosecond);
+  rocksdb::TableReaderBenchmark(options, env_options, ro, FLAGS_num_keys1,
+                                FLAGS_num_keys2, FLAGS_iter, FLAGS_prefix_len,
+                                FLAGS_query_empty, FLAGS_iterator,
+                                FLAGS_through_db, measured_by_nanosecond);
   delete tf;
   return 0;
 }
diff --git a/tools/blob_store_bench.cc b/tools/blob_store_bench.cc
index 70ece2c5f..60a0b84a6 100644
--- a/tools/blob_store_bench.cc
+++ b/tools/blob_store_bench.cc
@@ -1,3 +1,8 @@
+// Copyright (c) 2013, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
 #include
 #include
 #include
@@ -23,11 +28,13 @@ uint64_t timeout_sec;
 Env *env;
 BlobStore* bs;
 
-static std::string RandomString(Random* rnd, uint64_t len) {
+namespace {
+std::string RandomString(Random* rnd, uint64_t len) {
   std::string r;
   test::RandomString(rnd, len, &r);
   return r;
 }
+}  // namespace
 
 struct Result {
   uint32_t writes;
@@ -59,11 +66,13 @@ struct Result {
 
 };
 
+namespace {
 Result operator + (const Result &a, const Result &b) {
   return Result(a.writes + b.writes, a.reads + b.reads,
                 a.deletes + b.deletes, a.data_written + b.data_written,
                 a.data_read + b.data_read);
 }
+}  // namespace
 
 struct WorkerThread {
   uint64_t data_size_from, data_size_to;
@@ -131,6 +140,7 @@ static void WorkerThreadBody(void* arg) {
   t->stopped.store(true);
 }
 
+namespace {
 Result StartBenchmark(vector& config) {
   for (auto w : config) {
     env->StartThread(WorkerThreadBody, w);
@@ -241,6 +251,7 @@ vector SetupBenchmarkReadHeavy() {
   return config;
 }
+}  // namespace
 
 int main(int argc, const char** argv) {
   srand(33);
diff --git a/tools/db_sanity_test.cc b/tools/db_sanity_test.cc
index 9642e9e5f..e970f5e91 100644
--- a/tools/db_sanity_test.cc
+++ b/tools/db_sanity_test.cc
@@ -145,6 +145,7 @@ class SanityTestPlainTableFactory : public SanityTest {
   Options options_;
 };
 
+namespace {
 bool RunSanityTests(const std::string& command, const std::string& path) {
   std::vector sanity_tests = {
     new SanityTestBasic(path),
@@ -176,6 +177,7 @@ bool RunSanityTests(const std::string& command, const std::string& path) {
   }
   return true;
 }
+}  // namespace
 
 }  // namespace rocksdb
 
diff --git a/tools/db_stress.cc b/tools/db_stress.cc
index 5503c36f0..0b4938f25 100644
--- a/tools/db_stress.cc
+++ b/tools/db_stress.cc
@@ -271,6 +271,7 @@ static const bool FLAGS_num_iterations_dummy __attribute__((unused)) =
 DEFINE_bool(disable_seek_compaction, false,
             "Option to disable compation triggered by read.");
 
+namespace {
 enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
   assert(ctype);
 
@@ -290,6 +291,8 @@ enum rocksdb::CompressionType StringToCompressionType(const char* ctype) {
   fprintf(stdout, "Cannot parse compression type '%s'\n", ctype);
   return rocksdb::kSnappyCompression; //default value
 }
+}  // namespace
+
 DEFINE_string(compression_type, "snappy",
               "Algorithm to use to compress the database");
 static enum rocksdb::CompressionType FLAGS_compression_type_e =
@@ -323,6 +326,8 @@ enum RepFactory {
   kHashSkipList,
   kVectorRep
 };
+
+namespace {
 enum RepFactory StringToRepFactory(const char* ctype) {
   assert(ctype);
 
@@ -336,6 +341,8 @@ enum RepFactory StringToRepFactory(const char* ctype) {
   fprintf(stdout, "Cannot parse memreptable %s\n", ctype);
   return kSkipList;
 }
+}  // namespace
+
 static enum RepFactory FLAGS_rep_factory;
 DEFINE_string(memtablerep, "prefix_hash", "");
 
diff --git a/tools/sst_dump.cc b/tools/sst_dump.cc
index b34b7fa82..b8c470fe7 100644
--- a/tools/sst_dump.cc
+++ b/tools/sst_dump.cc
@@ -220,6 +220,7 @@ static void print_help() {
           " [--show_properties]\n");
 }
 
+namespace {
 string HexToString(const string& str) {
   string parsed;
   if (str[0] != '0' || str[1] != 'x') {
@@ -236,6 +237,7 @@ string HexToString(const string& str) {
   }
   return parsed;
 }
+}  // namespace
 
 int main(int argc, char** argv) {
   const char* dir_or_file = nullptr;
diff --git a/util/auto_roll_logger_test.cc b/util/auto_roll_logger_test.cc
index 742713e9d..cbc84d9d0 100755
--- a/util/auto_roll_logger_test.cc
+++ b/util/auto_roll_logger_test.cc
@@ -50,6 +50,7 @@ Env* AutoRollLoggerTest::env = Env::Default();
 // no format. LogMessage() provides such a simple interface and
 // avoids the [format-security] warning which occurs when you
 // call Log(logger, log_message) directly.
+namespace {
 void LogMessage(Logger* logger, const char* message) {
   Log(logger, "%s", message);
 }
@@ -58,7 +59,9 @@ void LogMessage(const InfoLogLevel log_level, Logger* logger,
                 const char* message) {
   Log(log_level, logger, "%s", message);
 }
+}  // namespace
 
+namespace {
 void GetFileCreateTime(const std::string& fname, uint64_t* file_ctime) {
   struct stat s;
   if (stat(fname.c_str(), &s) != 0) {
@@ -66,6 +69,7 @@ void GetFileCreateTime(const std::string& fname, uint64_t* file_ctime) {
   }
   *file_ctime = static_cast(s.st_ctime);
 }
+}  // namespace
 
 void AutoRollLoggerTest::RollLogFileBySizeTest(AutoRollLogger* logger,
                                                size_t log_max_size,
@@ -281,26 +285,6 @@ TEST(AutoRollLoggerTest, InfoLogLevel) {
   inFile.close();
 }
 
-int OldLogFileCount(const string& dir) {
-  std::vector files;
-  Env::Default()->GetChildren(dir, &files);
-  int log_file_count = 0;
-
-  for (std::vector::iterator it = files.begin();
-       it != files.end(); ++it) {
-    uint64_t create_time;
-    FileType type;
-    if (!ParseFileName(*it, &create_time, &type)) {
-      continue;
-    }
-    if (type == kInfoLogFile && create_time > 0) {
-      ++log_file_count;
-    }
-  }
-
-  return log_file_count;
-}
-
 }  // namespace rocksdb
 
 int main(int argc, char** argv) {
diff --git a/util/autovector_test.cc b/util/autovector_test.cc
index eb244aabf..88744cf17 100644
--- a/util/autovector_test.cc
+++ b/util/autovector_test.cc
@@ -70,6 +70,7 @@ TEST(AutoVectorTest, EmplaceBack) {
   ASSERT_TRUE(!vec.only_in_stack());
 }
 
+namespace {
 void AssertEqual(
     const autovector& a, const autovector& b) {
   ASSERT_EQ(a.size(), b.size());
@@ -79,6 +80,7 @@ void AssertEqual(
     ASSERT_EQ(a[i], b[i]);
   }
 }
+}  // namespace
 
 TEST(AutoVectorTest, CopyAndAssignment) {
   // Test both heap-allocated and stack-allocated cases.
@@ -159,6 +161,7 @@ TEST(AutoVectorTest, Iterators) {
   }
 }
 
+namespace {
 vector GetTestKeys(size_t size) {
   vector keys;
   keys.resize(size);
@@ -169,6 +172,7 @@ vector GetTestKeys(size_t size) {
   }
   return keys;
 }
+}  // namespace
 
 template
 void BenchmarkVectorCreationAndInsertion(
diff --git a/util/cache_test.cc b/util/cache_test.cc
index b99f47b38..61732fbfa 100644
--- a/util/cache_test.cc
+++ b/util/cache_test.cc
@@ -107,7 +107,9 @@ class CacheTest {
 };
 CacheTest* CacheTest::current_;
 
+namespace {
 void dumbDeleter(const Slice& key, void* value) { }
+}  // namespace
 
 TEST(CacheTest, UsageTest) {
   // cache is shared_ptr and will be automatically cleaned up.
@@ -382,9 +384,11 @@ class Value {
   ~Value() { std::cout << v_ << " is destructed\n"; }
 };
 
+namespace {
 void deleter(const Slice& key, void* value) {
   delete (Value *)value;
 }
+}  // namespace
 
 TEST(CacheTest, BadEviction) {
   int n = 10;
diff --git a/util/env.cc b/util/env.cc
index 5ae6fdf0b..b7a2a18a2 100644
--- a/util/env.cc
+++ b/util/env.cc
@@ -172,9 +172,8 @@ void Log(const shared_ptr& info_log, const char* format, ...) {
   }
 }
 
-static Status DoWriteStringToFile(Env* env, const Slice& data,
-                                  const std::string& fname,
-                                  bool should_sync) {
+Status WriteStringToFile(Env* env, const Slice& data, const std::string& fname,
+                         bool should_sync) {
   unique_ptr file;
   EnvOptions soptions;
   Status s = env->NewWritableFile(fname, &file, soptions);
@@ -191,16 +190,6 @@ static Status DoWriteStringToFile(Env* env, const Slice& data,
   return s;
 }
 
-Status WriteStringToFile(Env* env, const Slice& data,
-                         const std::string& fname) {
-  return DoWriteStringToFile(env, data, fname, false);
-}
-
-Status WriteStringToFileSync(Env* env, const Slice& data,
-                             const std::string& fname) {
-  return DoWriteStringToFile(env, data, fname, true);
-}
-
 Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
   EnvOptions soptions;
   data->clear();
diff --git a/util/env_test.cc b/util/env_test.cc
index 0a83037c3..96c44a1d4 100644
--- a/util/env_test.cc
+++ b/util/env_test.cc
@@ -200,6 +200,17 @@ TEST(EnvPosixTest, TwoPools) {
   ASSERT_EQ(0U, env_->GetThreadPoolQueueLen(Env::Priority::HIGH));
 }
 
+#ifdef OS_LINUX
+// To make sure the Env::GetUniqueId() related tests work correctly, The files
+// should be stored in regular storage like "hard disk" or "flash device".
+// Otherwise we cannot get the correct id.
+//
+// The following function act as the replacement of test::TmpDir() that may be
+// customized by user to be on a storage that doesn't work with GetUniqueId().
+//
+// TODO(kailiu) This function still assumes /tmp/ reside in regular
+// storage system.
+namespace {
 bool IsSingleVarint(const std::string& s) {
   Slice slice(s);
 
@@ -211,16 +222,6 @@ bool IsSingleVarint(const std::string& s) {
   return slice.size() == 0;
 }
 
-#ifdef OS_LINUX
-// To make sure the Env::GetUniqueId() related tests work correctly, The files
-// should be stored in regular storage like "hard disk" or "flash device".
-// Otherwise we cannot get the correct id.
-//
-// The following function act as the replacement of test::TmpDir() that may be
-// customized by user to be on a storage that doesn't work with GetUniqueId().
-//
-// TODO(kailiu) This function still assumes /tmp/ reside in regular
-// storage system.
 bool IsUniqueIDValid(const std::string& s) {
   return !s.empty() && !IsSingleVarint(s);
 }
@@ -237,6 +238,7 @@ std::string GetOnDiskTestDir() {
 
   return base;
 }
+}  // namespace
 
 // Only works in linux platforms
 TEST(EnvPosixTest, RandomAccessUniqueID) {
diff --git a/util/ldb_cmd.cc b/util/ldb_cmd.cc
index 738a5c081..8ed8014f2 100644
--- a/util/ldb_cmd.cc
+++ b/util/ldb_cmd.cc
@@ -601,6 +601,8 @@ void ListColumnFamiliesCommand::DoCommand() {
 
 // ----------------------------------------------------------------------------
 
+namespace {
+
 string ReadableTime(int unixtime) {
   char time_buffer [80];
   time_t rawtime = unixtime;
@@ -634,6 +636,8 @@ void PrintBucketCounts(const vector& bucket_counts, int ttl_start,
       (unsigned long)bucket_counts[num_buckets - 1]);
 }
 
+}  // namespace
+
 const string InternalDumpCommand::ARG_COUNT_ONLY = "count_only";
 const string InternalDumpCommand::ARG_COUNT_DELIM = "count_delim";
 const string InternalDumpCommand::ARG_STATS = "stats";
diff --git a/util/signal_test.cc b/util/signal_test.cc
index bffc298d3..d3446818d 100644
--- a/util/signal_test.cc
+++ b/util/signal_test.cc
@@ -6,6 +6,7 @@
 #include "util/stack_trace.h"
 #include
 
+namespace {
 void f0() {
   char *p = nullptr;
   *p = 10; /* SIGSEGV here!! */
@@ -22,6 +23,7 @@ void f2() {
 void f3() {
   f2();
 }
+}  // namespace
 
 int main() {
   rocksdb::InstallStackTraceHandler();
diff --git a/util/string_util.cc b/util/string_util.cc
index 33f84d9b4..97b7f9de9 100644
--- a/util/string_util.cc
+++ b/util/string_util.cc
@@ -6,21 +6,18 @@
 #include
 #include
 #include
+#include "util/string_util.h"
 
 namespace rocksdb {
 
-using namespace std;
-using std::string;
-using std::vector;
-using std::stringstream;
-
-vector stringSplit(string arg, char delim) {
-  vector splits;
-  stringstream ss(arg);
-  string item;
-  while(getline(ss, item, delim)) {
+std::vector stringSplit(std::string arg, char delim) {
+  std::vector splits;
+  std::stringstream ss(arg);
+  std::string item;
+  while (std::getline(ss, item, delim)) {
     splits.push_back(item);
   }
   return splits;
 }
-}
+
+}  // namespace rocksdb
diff --git a/util/string_util.h b/util/string_util.h
index 7dfd68aad..676f4aae8 100644
--- a/util/string_util.h
+++ b/util/string_util.h
@@ -3,9 +3,13 @@
 // LICENSE file in the root directory of this source tree. An additional grant
 // of patent rights can be found in the PATENTS file in the same directory.
 //
+#include
+#include
+#include
+
 #pragma once
 namespace rocksdb {
 
 extern std::vector stringSplit(std::string arg, char delim);
-}
+
+}  // namespace rocksdb
diff --git a/utilities/merge_operators/string_append/stringappend_test.cc b/utilities/merge_operators/string_append/stringappend_test.cc
index 81af64622..b0b5c5b59 100644
--- a/utilities/merge_operators/string_append/stringappend_test.cc
+++ b/utilities/merge_operators/string_append/stringappend_test.cc
@@ -25,6 +25,7 @@ namespace rocksdb {
 // Path to the database on file system
 const std::string kDbName = "/tmp/mergetestdb";
 
+namespace {
 // OpenDb opens a (possibly new) rocksdb database with a StringAppendOperator
 std::shared_ptr OpenNormalDb(char delim_char) {
   DB* db;
@@ -44,6 +45,7 @@ std::shared_ptr OpenTtlDb(char delim_char) {
   ASSERT_OK(UtilityDB::OpenTtlDB(options, kDbName, &db, 123456));
   return std::shared_ptr(db);
 }
+}  // namespace
 
 /// StringLists represents a set of string-lists, each with a key-index.
 /// Supports Append(list, string) and Get(list)
diff --git a/utilities/redis/redis_lists_test.cc b/utilities/redis/redis_lists_test.cc
index 0600e0e54..b05c6c798 100644
--- a/utilities/redis/redis_lists_test.cc
+++ b/utilities/redis/redis_lists_test.cc
@@ -1,3 +1,7 @@
+// Copyright (c) 2013, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
 /**
  * A test harness for the Redis API built on rocksdb.
  *
@@ -9,7 +13,6 @@
  * TODO: Add LARGE random test cases to verify efficiency and scalability
  *
  * @author Deon Nicholas (dnicholas@fb.com)
- * Copyright 2013 Facebook
  */
 
@@ -41,6 +44,7 @@ Options RedisListsTest::options = Options();
 // operator== and operator<< are defined below for vectors (lists)
 // Needed for ASSERT_EQ
 
+namespace {
 void AssertListEq(const std::vector& result,
                   const std::vector& expected_result) {
   ASSERT_EQ(result.size(), expected_result.size());
@@ -48,6 +52,7 @@ void AssertListEq(const std::vector& result,
     ASSERT_EQ(result[i], expected_result[i]);
   }
 }
+}  // namespace
 
 // PushRight, Length, Index, Range
 TEST(RedisListsTest, SimpleTest) {
@@ -738,6 +743,7 @@ TEST(RedisListsTest, PersistenceMultiKeyTest) {
 
 /// THE manual REDIS TEST begins here
 /// THIS WILL ONLY OCCUR IF YOU RUN: ./redis_test -m
+namespace {
 void MakeUpper(std::string* const s) {
   int len = s->length();
   for(int i=0; i
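
The recurring change in this patch is moving file-local free functions into unnamed namespaces so they get internal linkage and cannot collide at link time with identically named helpers in other translation units (for example, the two `MakeLongKey()` definitions in db_test.cc and plain_table_db_test.cc). A minimal sketch of that pattern, reusing one helper name from the diff purely as an illustration rather than an excerpt from the patch:

```cpp
// internal_linkage_sketch.cc -- illustrative only, not part of the patch.
#include <string>

namespace rocksdb {
namespace {  // everything in here has internal linkage, like `static`

// Helper visible only inside this .cc file; another test file may define
// its own MakeLongKey() without violating the one-definition rule.
std::string MakeLongKey(size_t length, char c) {
  return std::string(length, c);
}

}  // namespace

// Later code in the same file calls MakeLongKey() exactly as before.
}  // namespace rocksdb
```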
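The other API change folds `WriteStringToFileSync()` into `WriteStringToFile()` behind a defaulted `should_sync` parameter (declared in include/rocksdb/env.h as shown above), so callers that need a durable write simply pass `true`. A hedged sketch of a call site after the consolidation; `WriteManifestPointer` is a hypothetical wrapper name, not a function from the patch:

```cpp
// write_sync_sketch.cc -- assumes the post-patch declaration
// rocksdb::WriteStringToFile(Env*, const Slice&, const std::string&,
//                            bool should_sync = false).
#include <string>
#include "rocksdb/env.h"
#include "rocksdb/slice.h"
#include "rocksdb/status.h"

rocksdb::Status WriteManifestPointer(rocksdb::Env* env,
                                     const rocksdb::Slice& contents,
                                     const std::string& tmp_name,
                                     const std::string& final_name) {
  // Durable write: should_sync = true replaces the old WriteStringToFileSync().
  rocksdb::Status s = rocksdb::WriteStringToFile(env, contents, tmp_name, true);
  if (s.ok()) {
    // Publish the file atomically once its contents are synced.
    s = env->RenameFile(tmp_name, final_name);
  }
  return s;
}
```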