From 9f7fc3ac45fad83fa49a93a67ef9651ac4401d02 Mon Sep 17 00:00:00 2001
From: Igor Canadi
Date: Fri, 31 Oct 2014 11:59:54 -0700
Subject: [PATCH] Turn on -Wshadow

Summary: ...and fix all the errors :) Jim suggested turning on -Wshadow
because it helped him fix a number of critical bugs in fbcode. I think
it's a good idea to be -Wshadow clean.

Test Plan: compiles

Reviewers: yhchiang, rven, sdong, ljin

Reviewed By: ljin

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D27711
---
 Makefile                           |  2 +-
 db/builder.cc                      | 12 +++---
 db/c_test.c                        |  4 --
 db/column_family.cc                |  3 +-
 db/compaction_picker.cc            | 36 +++++++++--------
 db/db_impl.cc                      | 14 +++----
 db/db_iter_test.cc                 | 23 +++++------
 db/db_test.cc                      | 33 ++++++++--------
 db/file_indexer.cc                 |  2 +-
 db/internal_stats.h                |  4 +-
 db/merge_test.cc                   | 25 ++++++------
 db/prefix_test.cc                  |  3 +-
 db/skiplist_test.cc                |  2 +-
 db/version_builder.h               |  2 +-
 db/version_edit.cc                 |  8 ++--
 db/version_edit.h                  | 12 ++----
 db/version_set.cc                  |  4 +-
 db/wal_manager.cc                  | 10 ++---
 port/port_posix.h                  |  8 ++--
 table/block_based_table_builder.cc | 25 ++++++------
 table/block_based_table_reader.cc  | 23 ++++++-----
 table/block_hash_index.h           |  4 +-
 table/block_prefix_index.cc        |  4 +-
 table/cuckoo_table_builder.cc      | 11 +++---
 table/cuckoo_table_reader_test.cc  |  4 +-
 table/plain_table_reader.cc        |  6 +--
 table/table_test.cc                | 39 ++++++++++---------
 table/two_level_iterator.h         |  4 +-
 util/blob_store.h                  |  4 +-
 util/cache.cc                      |  2 +-
 util/rate_limiter.cc               |  4 +-
 util/rate_limiter_test.cc          | 18 ++++-----
 util/thread_local_test.cc          | 34 ++++++++--------
 utilities/geodb/geodb_impl.cc      |  4 +-
 .../write_batch_with_index.cc      |  4 +-
 35 files changed, 195 insertions(+), 202 deletions(-)
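For reviewers who have not used the flag before: -Wshadow fires whenever an
inner declaration hides a name from an enclosing scope. A minimal sketch of
the two patterns this patch cleans up (hypothetical stand-ins, not RocksDB
code -- Status and DeleteFile here are dummies):

    // shadow_demo.cc -- builds with: g++ -c -Wall -Wshadow shadow_demo.cc
    #include <string>

    struct Status {
      bool ok_ = true;
      bool ok() const { return ok_; }
    };
    Status DeleteFile(const std::string& name);  // dummy declaration

    struct Stats {
      int count;
      explicit Stats(int count)  // -Wshadow: parameter shadows the member.
          : count(count) {}      // Legal C++ and initializes correctly, but
    };                           // any later use of 'count' in a method body
                                 // added here would silently mean the param.

    Status PurgeBoth(const std::string& a, const std::string& b) {
      Status s = DeleteFile(a);
      if (s.ok()) {
        Status s = DeleteFile(b);  // -Wshadow: a second 's'; its value never
        if (!s.ok()) {             // reaches the outer one
          // log and continue
        }
      }
      return s;  // reports only DeleteFile(a); b's failure is lost
    }

The constructor case is the common one below, fixed by renaming parameters
(e.g. count -> _count); the second case is the kind of silent logic bug that
motivated turning the flag on.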
diff --git a/Makefile b/Makefile
index 5ed8a5a67..0beadda85 100644
--- a/Makefile
+++ b/Makefile
@@ -69,7 +69,7 @@ install:
 	@[ ! -e $(SHARED) ] || install -C -m 644 $(SHARED) $(INSTALL_PATH)/lib
 
 #-------------------------------------------------
-WARNING_FLAGS = -Wall -Werror -Wsign-compare
+WARNING_FLAGS = -Wall -Werror -Wsign-compare -Wshadow
 CFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
 CXXFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) -Woverloaded-virtual
diff --git a/db/builder.cc b/db/builder.cc
index 2c5094370..5d3273e78 100644
--- a/db/builder.cc
+++ b/db/builder.cc
@@ -72,11 +72,13 @@ Status BuildTable(const std::string& dbname, Env* env,
                                    ioptions, internal_comparator, file.get(),
                                    compression, compression_opts);
 
-    // the first key is the smallest key
-    Slice key = iter->key();
-    meta->smallest.DecodeFrom(key);
-    meta->smallest_seqno = GetInternalKeySeqno(key);
-    meta->largest_seqno = meta->smallest_seqno;
+    {
+      // the first key is the smallest key
+      Slice key = iter->key();
+      meta->smallest.DecodeFrom(key);
+      meta->smallest_seqno = GetInternalKeySeqno(key);
+      meta->largest_seqno = meta->smallest_seqno;
+    }
 
     MergeHelper merge(internal_comparator.user_comparator(),
                       ioptions.merge_operator, ioptions.info_log,
diff --git a/db/c_test.c b/db/c_test.c
index 171fd6d5c..d693f52ca 100644
--- a/db/c_test.c
+++ b/db/c_test.c
@@ -576,7 +576,6 @@ int main(int argc, char** argv) {
 
   StartPhase("compaction_filter");
   {
-    rocksdb_options_t* options = rocksdb_options_create();
     rocksdb_options_set_create_if_missing(options, 1);
 
     rocksdb_compactionfilter_t* cfilter;
     cfilter = rocksdb_compactionfilter_create(NULL, CFilterDestroy,
@@ -589,12 +588,10 @@ int main(int argc, char** argv) {
 
     rocksdb_options_set_compaction_filter(options, NULL);
     rocksdb_compactionfilter_destroy(cfilter);
-    rocksdb_options_destroy(options);
   }
 
   StartPhase("compaction_filter_factory");
   {
-    rocksdb_options_t* options = rocksdb_options_create();
     rocksdb_options_set_create_if_missing(options, 1);
 
     rocksdb_compactionfilterfactory_t* factory;
     factory = rocksdb_compactionfilterfactory_create(
@@ -606,7 +603,6 @@ int main(int argc, char** argv) {
     db = CheckCompaction(db, options, roptions, woptions);
 
     rocksdb_options_set_compaction_filter_factory(options, NULL);
-    rocksdb_options_destroy(options);
   }
 
   StartPhase("compaction_filter_v2");
diff --git a/db/column_family.cc b/db/column_family.cc
index b7497ecfe..c5c4e35e5 100644
--- a/db/column_family.cc
+++ b/db/column_family.cc
@@ -263,8 +263,7 @@ ColumnFamilyData::ColumnFamilyData(uint32_t id, const std::string& name,
     Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
         "Options for column family \"%s\":\n", name.c_str());
-    const ColumnFamilyOptions* cf_options = &options_;
-    cf_options->Dump(ioptions_.info_log);
+    options_.Dump(ioptions_.info_log);
   }
 
   RecalculateWriteStallConditions(mutable_cf_options_);
diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc
index 676f39b7d..096f0d77d 100644
--- a/db/compaction_picker.cc
+++ b/db/compaction_picker.cc
@@ -697,8 +697,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
       // Check if the suceeding files need compaction.
       for (unsigned int i = loop + 1;
            candidate_count < max_files_to_compact && i < files.size(); i++) {
-        FileMetaData* f = files[i];
-        if (f->being_compacted) {
+        FileMetaData* suceeding_file = files[i];
+        if (suceeding_file->being_compacted) {
           break;
         }
         // Pick files if the total/last candidate file size (increased by the
         // specified ratio) is still larger than the next candidate file.
         // kCompactionStopStyleSimilarSize, it's simply the size of the last
         // picked file.
         double sz = candidate_size * (100.0 + ratio) / 100.0;
double sz = candidate_size * (100.0 + ratio) / 100.0; - if (sz < static_cast(f->fd.GetFileSize())) { + if (sz < static_cast(suceeding_file->fd.GetFileSize())) { break; } if (ioptions_.compaction_options_universal.stop_style == kCompactionStopStyleSimilarSize) { // Similar-size stopping rule: also check the last picked file isn't // far larger than the next candidate file. - sz = (f->fd.GetFileSize() * (100.0 + ratio)) / 100.0; + sz = (suceeding_file->fd.GetFileSize() * (100.0 + ratio)) / 100.0; if (sz < static_cast(candidate_size)) { // If the small file we've encountered begins a run of similar-size // files, we'll pick them up on a future iteration of the outer @@ -723,9 +723,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp( // by the last-resort read amp strategy which disregards size ratios. break; } - candidate_size = f->compensated_file_size; - } else { // default kCompactionStopStyleTotalSize - candidate_size += f->compensated_file_size; + candidate_size = suceeding_file->compensated_file_size; + } else { // default kCompactionStopStyleTotalSize + candidate_size += suceeding_file->compensated_file_size; } candidate_count++; } @@ -738,12 +738,14 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp( } else { for (unsigned int i = loop; i < loop + candidate_count && i < files.size(); i++) { - FileMetaData* f = files[i]; + FileMetaData* skipping_file = files[i]; LogToBuffer(log_buffer, "[%s] Universal: Skipping file %" PRIu64 "[%d] with size %" PRIu64 " (compensated size %" PRIu64 ") %d\n", - cf_name.c_str(), f->fd.GetNumber(), i, f->fd.GetFileSize(), - f->compensated_file_size, f->being_compacted); + cf_name.c_str(), f->fd.GetNumber(), i, + skipping_file->fd.GetFileSize(), + skipping_file->compensated_file_size, + skipping_file->being_compacted); } } } @@ -782,16 +784,17 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp( c->score_ = score; for (unsigned int i = start_index; i < first_index_after; i++) { - FileMetaData* f = files[i]; - c->inputs_[0].files.push_back(f); + FileMetaData* picking_file = files[i]; + c->inputs_[0].files.push_back(picking_file); char file_num_buf[kFormatFileNumberBufSize]; - FormatFileNumber(f->fd.GetNumber(), f->fd.GetPathId(), file_num_buf, - sizeof(file_num_buf)); + FormatFileNumber(picking_file->fd.GetNumber(), picking_file->fd.GetPathId(), + file_num_buf, sizeof(file_num_buf)); LogToBuffer(log_buffer, "[%s] Universal: Picking file %s[%d] " "with size %" PRIu64 " (compensated size %" PRIu64 ")\n", - cf_name.c_str(), file_num_buf, i, f->fd.GetFileSize(), - f->compensated_file_size); + cf_name.c_str(), file_num_buf, i, + picking_file->fd.GetFileSize(), + picking_file->compensated_file_size); } return c; } @@ -850,7 +853,6 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp( for (unsigned int loop = start_index; loop < files.size() - 1; loop++) { f = files[loop]; if (f->being_compacted) { - char file_num_buf[kFormatFileNumberBufSize]; FormatFileNumber(f->fd.GetNumber(), f->fd.GetPathId(), file_num_buf, sizeof(file_num_buf)); LogToBuffer( diff --git a/db/db_impl.cc b/db/db_impl.cc index 231325cc3..5b2635d1a 100644 --- a/db/db_impl.cc +++ b/db/db_impl.cc @@ -2953,8 +2953,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact, namespace { struct IterState { - IterState(DBImpl* db, port::Mutex* mu, SuperVersion* super_version) - : db(db), mu(mu), super_version(super_version) {} + IterState(DBImpl* _db, port::Mutex* _mu, SuperVersion* _super_version) + : db(_db), 
+      : db(_db), mu(_mu), super_version(_super_version) {}
 
   DBImpl* db;
   port::Mutex* mu;
@@ -3812,14 +3812,14 @@ Status DBImpl::SetNewMemtableAndNewLogFile(ColumnFamilyData* cfd,
       log_.reset(new_log);
       log_empty_ = true;
       alive_log_files_.push_back(LogFileNumberSize(logfile_number_));
-      for (auto cfd : *versions_->GetColumnFamilySet()) {
+      for (auto loop_cfd : *versions_->GetColumnFamilySet()) {
         // all this is just optimization to delete logs that
         // are no longer needed -- if CF is empty, that means it
         // doesn't need that particular log to stay alive, so we just
         // advance the log number. no need to persist this in the manifest
-        if (cfd->mem()->GetFirstSequenceNumber() == 0 &&
-            cfd->imm()->size() == 0) {
-          cfd->SetLogNumber(logfile_number_);
+        if (loop_cfd->mem()->GetFirstSequenceNumber() == 0 &&
+            loop_cfd->imm()->size() == 0) {
+          loop_cfd->SetLogNumber(logfile_number_);
         }
       }
     }
@@ -4398,8 +4398,6 @@ Status DestroyDB(const std::string& dbname, const Options& options) {
 
   for (auto& db_path : options.db_paths) {
     env->GetChildren(db_path.path, &filenames);
-    uint64_t number;
-    FileType type;
     for (size_t i = 0; i < filenames.size(); i++) {
       if (ParseFileName(filenames[i], &number, &type) &&
           type == kTableFile) {  // Lock file will be deleted at end
diff --git a/db/db_iter_test.cc b/db/db_iter_test.cc
index 2aa30e327..e6b96c410 100644
--- a/db/db_iter_test.cc
+++ b/db/db_iter_test.cc
@@ -217,7 +217,6 @@ TEST(DBIteratorTest, DBIteratorPrevNext) {
   }
 
   {
-    Options options;
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddPut("a", "val_a");
     internal_iter->AddPut("b", "val_b");
@@ -254,7 +253,6 @@ TEST(DBIteratorTest, DBIteratorPrevNext) {
   }
 
   {
-    Options options;
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddPut("a", "val_a");
     internal_iter->AddPut("a", "val_a");
@@ -364,8 +362,8 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddMerge("b", "merge_1");
     internal_iter->AddMerge("a", "merge_2");
-    for (size_t i = 0; i < 200; ++i) {
-      internal_iter->AddPut("c", std::to_string(i));
+    for (size_t k = 0; k < 200; ++k) {
+      internal_iter->AddPut("c", std::to_string(k));
     }
     internal_iter->Finish();
 
@@ -400,7 +398,7 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddMerge("b", "merge_1");
     internal_iter->AddMerge("a", "merge_2");
-    for (size_t i = 0; i < 200; ++i) {
+    for (size_t k = 0; k < 200; ++k) {
       internal_iter->AddDeletion("c");
     }
     internal_iter->AddPut("c", "200");
@@ -463,7 +461,7 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
   {
     for (size_t i = 0; i < 200; ++i) {
       TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-      for (size_t i = 0; i < 200; ++i) {
+      for (size_t k = 0; k < 200; ++k) {
         internal_iter->AddDeletion("c");
       }
       internal_iter->AddPut("c", "200");
@@ -511,12 +509,12 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddMerge("b", "merge_1");
     internal_iter->AddMerge("a", "merge_2");
-    for (size_t i = 0; i < 200; ++i) {
-      internal_iter->AddPut("d", std::to_string(i));
+    for (size_t k = 0; k < 200; ++k) {
+      internal_iter->AddPut("d", std::to_string(k));
     }
 
-    for (size_t i = 0; i < 200; ++i) {
-      internal_iter->AddPut("c", std::to_string(i));
+    for (size_t k = 0; k < 200; ++k) {
+      internal_iter->AddPut("c", std::to_string(k));
     }
     internal_iter->Finish();
 
@@ -550,8 +548,8 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddMerge("b", "b");
     internal_iter->AddMerge("a", "a");
-    for (size_t i = 0; i < 200; ++i) {
-      internal_iter->AddMerge("c", std::to_string(i));
+    for (size_t k = 0; k < 200; ++k) {
+      internal_iter->AddMerge("c", std::to_string(k));
     }
     internal_iter->Finish();
 
@@ -1390,7 +1388,6 @@ TEST(DBIteratorTest, DBIterator) {
   }
 
   {
-    Options options;
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddDeletion("a");
     internal_iter->AddPut("a", "0");
diff --git a/db/db_test.cc b/db/db_test.cc
index 59b611c65..62c5e483b 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -4544,16 +4544,18 @@ TEST(DBTest, CompactionFilter) {
   ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
   ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
 
-  // Scan the entire database to ensure that nothing is left
-  Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
-  iter->SeekToFirst();
-  count = 0;
-  while (iter->Valid()) {
-    count++;
-    iter->Next();
+  {
+    // Scan the entire database to ensure that nothing is left
+    std::unique_ptr<Iterator> iter(
+        db_->NewIterator(ReadOptions(), handles_[1]));
+    iter->SeekToFirst();
+    count = 0;
+    while (iter->Valid()) {
+      count++;
+      iter->Next();
+    }
+    ASSERT_EQ(count, 0);
   }
-  ASSERT_EQ(count, 0);
-  delete iter;
 
   // The sequence number of the remaining record
   // is not zeroed out even though it is at the
@@ -5014,7 +5016,7 @@ TEST(DBTest, CompactionFilterV2NULLPrefix) {
   for (int i = 1; i < 100000; i++) {
     char key[100];
     snprintf(key, sizeof(key), "%08d%010d", i, i);
-    std::string newvalue = Get(key);
+    newvalue = Get(key);
     ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
   }
 }
@@ -5623,7 +5625,7 @@ TEST(DBTest, ManualCompaction) {
     ASSERT_EQ("0,0,1", FilesPerLevel(1));
 
     if (iter == 0) {
-      Options options = CurrentOptions();
+      options = CurrentOptions();
       options.max_background_flushes = 0;
       options.num_levels = 3;
       options.create_if_missing = true;
@@ -7591,7 +7593,6 @@ void PrefixScanInit(DBTest *dbtest) {
   // GROUP 2
   for (int i = 1; i <= big_range_sstfiles; i++) {
-    std::string keystr;
     snprintf(buf, sizeof(buf), "%02d______:start", 0);
     keystr = std::string(buf);
     ASSERT_OK(dbtest->Put(keystr, keystr));
@@ -8877,7 +8878,7 @@ TEST(DBTest, PartialCompactionFailure) {
 
   DestroyAndReopen(options);
 
-  const int kNumKeys =
+  const int kNumInsertedKeys =
       options.level0_file_num_compaction_trigger *
       (options.max_write_buffer_number - 1) *
       kKeysPerBuffer;
@@ -8885,7 +8886,7 @@ TEST(DBTest, PartialCompactionFailure) {
   Random rnd(301);
   std::vector<std::string> keys;
   std::vector<std::string> values;
-  for (int k = 0; k < kNumKeys; ++k) {
+  for (int k = 0; k < kNumInsertedKeys; ++k) {
     keys.emplace_back(RandomString(&rnd, kKeySize));
     values.emplace_back(RandomString(&rnd, kKvSize - kKeySize));
     ASSERT_OK(Put(Slice(keys[k]), Slice(values[k])));
@@ -8914,7 +8915,7 @@ TEST(DBTest, PartialCompactionFailure) {
   ASSERT_EQ(NumTableFilesAtLevel(0), previous_num_level0_files);
 
   // All key-values must exist after compaction fails.
-  for (int k = 0; k < kNumKeys; ++k) {
+  for (int k = 0; k < kNumInsertedKeys; ++k) {
     ASSERT_EQ(values[k], Get(keys[k]));
   }
 
@@ -8924,7 +8925,7 @@ TEST(DBTest, PartialCompactionFailure) {
   Reopen(options);
 
   // Verify again after reopen.
-  for (int k = 0; k < kNumKeys; ++k) {
+  for (int k = 0; k < kNumInsertedKeys; ++k) {
     ASSERT_EQ(values[k], Get(keys[k]));
   }
 }
diff --git a/db/file_indexer.cc b/db/file_indexer.cc
index ca2ef9bc8..8c0ca043e 100644
--- a/db/file_indexer.cc
+++ b/db/file_indexer.cc
@@ -100,7 +100,7 @@ void FileIndexer::UpdateIndex(Arena* arena, const uint32_t num_levels,
     }
     IndexLevel& index_level = next_level_index_[level];
     index_level.num_index = upper_size;
-    char* mem = arena->AllocateAligned(upper_size * sizeof(IndexUnit));
+    mem = arena->AllocateAligned(upper_size * sizeof(IndexUnit));
     index_level.index_units = new (mem) IndexUnit[upper_size];
 
     CalculateLB(
diff --git a/db/internal_stats.h b/db/internal_stats.h
index 5caa33415..0c3ee6db7 100644
--- a/db/internal_stats.h
+++ b/db/internal_stats.h
@@ -136,7 +136,7 @@ class InternalStats {
     // Number of compactions done
     int count;
 
-    explicit CompactionStats(int count = 0)
+    explicit CompactionStats(int _count = 0)
         : micros(0),
           bytes_readn(0),
           bytes_readnp1(0),
@@ -146,7 +146,7 @@ class InternalStats {
           files_out_levelnp1(0),
           num_input_records(0),
           num_dropped_records(0),
-          count(count) {}
+          count(_count) {}
 
     explicit CompactionStats(const CompactionStats& c)
         : micros(c.micros),
diff --git a/db/merge_test.cc b/db/merge_test.cc
index 7e71ccf86..249e96ad7 100644
--- a/db/merge_test.cc
+++ b/db/merge_test.cc
@@ -407,14 +407,6 @@ void testSingleBatchSuccessiveMerge(
 }
 
 void runTest(int argc, const string& dbname, const bool use_ttl = false) {
-  auto db = OpenDb(dbname, use_ttl);
-
-  {
-    cout << "Test read-modify-write counters... \n";
-    Counters counters(db, 0);
-    testCounters(counters, db.get(), true);
-  }
-
   bool compact = false;
   if (argc > 1) {
     compact = true;
@@ -422,13 +414,22 @@ void runTest(int argc, const string& dbname, const bool use_ttl = false) {
   }
 
   {
-    cout << "Test merge-based counters... \n";
-    MergeBasedCounters counters(db, 0);
-    testCounters(counters, db.get(), compact);
+    auto db = OpenDb(dbname, use_ttl);
+
+    {
+      cout << "Test read-modify-write counters... \n";
+      Counters counters(db, 0);
+      testCounters(counters, db.get(), true);
+    }
+
+    {
+      cout << "Test merge-based counters... \n";
+      MergeBasedCounters counters(db, 0);
+      testCounters(counters, db.get(), compact);
+    }
   }
 
   DestroyDB(dbname, Options());
-  db.reset();
 
   {
\n"; diff --git a/db/prefix_test.cc b/db/prefix_test.cc index a69dda2b4..c896ab8d8 100644 --- a/db/prefix_test.cc +++ b/db/prefix_test.cc @@ -52,7 +52,8 @@ struct TestKey { uint64_t prefix; uint64_t sorted; - TestKey(uint64_t prefix, uint64_t sorted) : prefix(prefix), sorted(sorted) {} + TestKey(uint64_t _prefix, uint64_t _sorted) + : prefix(_prefix), sorted(_sorted) {} }; // return a slice backed by test_key diff --git a/db/skiplist_test.cc b/db/skiplist_test.cc index 48323b244..010616cc0 100644 --- a/db/skiplist_test.cc +++ b/db/skiplist_test.cc @@ -357,7 +357,7 @@ static void RunConcurrent(int run) { TestState state(seed + 1); Env::Default()->Schedule(ConcurrentReader, &state); state.Wait(TestState::RUNNING); - for (int i = 0; i < kSize; i++) { + for (int k = 0; k < kSize; k++) { state.t_.WriteStep(&rnd); } state.quit_flag_.store(true, std::memory_order_release); diff --git a/db/version_builder.h b/db/version_builder.h index f8c91a88c..caeb34970 100644 --- a/db/version_builder.h +++ b/db/version_builder.h @@ -15,7 +15,7 @@ namespace rocksdb { class TableCache; class VersionStorageInfo; class VersionEdit; -class FileMetaData; +struct FileMetaData; // A helper class so we can efficiently apply a whole sequence // of edits to a particular state without creating intermediate diff --git a/db/version_edit.cc b/db/version_edit.cc index 4a6506c7d..f7b288870 100644 --- a/db/version_edit.cc +++ b/db/version_edit.cc @@ -168,7 +168,6 @@ Status VersionEdit::DecodeFrom(const Slice& src) { // Temporary storage for parsing int level; - uint64_t number; FileMetaData f; Slice str; InternalKey key; @@ -237,9 +236,9 @@ Status VersionEdit::DecodeFrom(const Slice& src) { } break; - case kDeletedFile: - if (GetLevel(&input, &level, &msg) && - GetVarint64(&input, &number)) { + case kDeletedFile: { + uint64_t number; + if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number)) { deleted_files_.insert(std::make_pair(level, number)); } else { if (!msg) { @@ -247,6 +246,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) { } } break; + } case kNewFile: { uint64_t number; diff --git a/db/version_edit.h b/db/version_edit.h index f8e71d2e9..0a8bbf257 100644 --- a/db/version_edit.h +++ b/db/version_edit.h @@ -39,10 +39,10 @@ struct FileDescriptor { FileDescriptor() : FileDescriptor(0, 0, 0) {} - FileDescriptor(uint64_t number, uint32_t path_id, uint64_t file_size) + FileDescriptor(uint64_t number, uint32_t path_id, uint64_t _file_size) : table_reader(nullptr), packed_number_and_path_id(PackFileNumberAndPathId(number, path_id)), - file_size(file_size) {} + file_size(_file_size) {} FileDescriptor& operator=(const FileDescriptor& fd) { table_reader = fd.table_reader; @@ -110,12 +110,8 @@ struct FdWithKeyRange { largest_key() { } - FdWithKeyRange(FileDescriptor fd, - Slice smallest_key, Slice largest_key) - : fd(fd), - smallest_key(smallest_key), - largest_key(largest_key) { - } + FdWithKeyRange(FileDescriptor _fd, Slice _smallest_key, Slice _largest_key) + : fd(_fd), smallest_key(_smallest_key), largest_key(_largest_key) {} }; // Data structure to store an array of FdWithKeyRange in one level diff --git a/db/version_set.cc b/db/version_set.cc index a195a7168..fc37460b8 100644 --- a/db/version_set.cc +++ b/db/version_set.cc @@ -1455,9 +1455,9 @@ struct VersionSet::ManifestWriter { ColumnFamilyData* cfd; VersionEdit* edit; - explicit ManifestWriter(port::Mutex* mu, ColumnFamilyData* cfd, + explicit ManifestWriter(port::Mutex* mu, ColumnFamilyData* _cfd, VersionEdit* e) - : done(false), cv(mu), cfd(cfd), edit(e) 
-      : done(false), cv(mu), cfd(cfd), edit(e) {}
+      : done(false), cv(mu), cfd(_cfd), edit(e) {}
 };
 
 class VersionBuilder::Rep {
diff --git a/db/wal_manager.cc b/db/wal_manager.cc
index c08b3b220..9b86a0f97 100644
--- a/db/wal_manager.cc
+++ b/db/wal_manager.cc
@@ -166,14 +166,14 @@ void WalManager::PurgeObsoleteWALFiles() {
     std::string const file_path = archival_dir + "/" + f;
     if (ttl_enabled) {
       uint64_t file_m_time;
-      Status const s = env_->GetFileModificationTime(file_path, &file_m_time);
+      s = env_->GetFileModificationTime(file_path, &file_m_time);
       if (!s.ok()) {
         Log(db_options_.info_log, "Can't get file mod time: %s: %s",
             file_path.c_str(), s.ToString().c_str());
         continue;
       }
       if (now_seconds - file_m_time > db_options_.WAL_ttl_seconds) {
-        Status const s = env_->DeleteFile(file_path);
+        s = env_->DeleteFile(file_path);
         if (!s.ok()) {
           Log(db_options_.info_log, "Can't delete file: %s: %s",
               file_path.c_str(), s.ToString().c_str());
@@ -188,7 +188,7 @@ void WalManager::PurgeObsoleteWALFiles() {
 
     if (size_limit_enabled) {
       uint64_t file_size;
-      Status const s = env_->GetFileSize(file_path, &file_size);
+      s = env_->GetFileSize(file_path, &file_size);
       if (!s.ok()) {
         Log(db_options_.info_log, "Can't get file size: %s: %s",
             file_path.c_str(), s.ToString().c_str());
@@ -198,7 +198,7 @@ void WalManager::PurgeObsoleteWALFiles() {
           log_file_size = std::max(log_file_size, file_size);
           ++log_files_num;
         } else {
-          Status s = env_->DeleteFile(file_path);
+          s = env_->DeleteFile(file_path);
           if (!s.ok()) {
             Log(db_options_.info_log, "Can't delete file: %s: %s",
                 file_path.c_str(), s.ToString().c_str());
@@ -236,7 +236,7 @@ void WalManager::PurgeObsoleteWALFiles() {
 
   for (size_t i = 0; i < files_del_num; ++i) {
     std::string const file_path = archived_logs[i]->PathName();
-    Status const s = env_->DeleteFile(db_options_.wal_dir + "/" + file_path);
+    s = env_->DeleteFile(db_options_.wal_dir + "/" + file_path);
     if (!s.ok()) {
       Log(db_options_.info_log, "Can't delete file: %s: %s",
           file_path.c_str(), s.ToString().c_str());
diff --git a/port/port_posix.h b/port/port_posix.h
index dae8f7219..ceb6d0aa1 100644
--- a/port/port_posix.h
+++ b/port/port_posix.h
@@ -212,7 +212,7 @@ inline bool Zlib_Compress(const CompressionOptions& opts, const char* input,
   int old_sz =0, new_sz =0, new_sz_delta =0;
   bool done = false;
   while (!done) {
-    int st = deflate(&_stream, Z_FINISH);
+    st = deflate(&_stream, Z_FINISH);
     switch (st) {
       case Z_STREAM_END:
         done = true;
@@ -274,7 +274,7 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
 
   //while(_stream.next_in != nullptr && _stream.avail_in != 0) {
   while (!done) {
-    int st = inflate(&_stream, Z_SYNC_FLUSH);
+    st = inflate(&_stream, Z_SYNC_FLUSH);
     switch (st) {
       case Z_STREAM_END:
         done = true;
@@ -337,7 +337,7 @@ inline bool BZip2_Compress(const CompressionOptions& opts, const char* input,
   int old_sz =0, new_sz =0;
   while(_stream.next_in != nullptr && _stream.avail_in != 0) {
-    int st = BZ2_bzCompress(&_stream, BZ_FINISH);
+    st = BZ2_bzCompress(&_stream, BZ_FINISH);
     switch (st) {
       case BZ_STREAM_END:
         break;
@@ -390,7 +390,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
   char* tmp = nullptr;
 
   while(_stream.next_in != nullptr && _stream.avail_in != 0) {
-    int st = BZ2_bzDecompress(&_stream);
+    st = BZ2_bzDecompress(&_stream);
     switch (st) {
       case BZ_STREAM_END:
         break;
diff --git a/table/block_based_table_builder.cc b/table/block_based_table_builder.cc
index c053e7e4f..f158ca8c4 100644
--- a/table/block_based_table_builder.cc
+++ b/table/block_based_table_builder.cc
@@ -430,25 +430,26 @@ struct BlockBasedTableBuilder::Rep {
   std::vector<std::unique_ptr<TablePropertiesCollector>>
       table_properties_collectors;
 
-  Rep(const ImmutableCFOptions& ioptions,
+  Rep(const ImmutableCFOptions& _ioptions,
       const BlockBasedTableOptions& table_opt,
-      const InternalKeyComparator& icomparator,
-      WritableFile* f, const CompressionType compression_type,
-      const CompressionOptions& compression_opts)
-      : ioptions(ioptions),
+      const InternalKeyComparator& icomparator, WritableFile* f,
+      const CompressionType _compression_type,
+      const CompressionOptions& _compression_opts)
+      : ioptions(_ioptions),
         table_options(table_opt),
         internal_comparator(icomparator),
         file(f),
         data_block(table_options.block_restart_interval),
-        internal_prefix_transform(ioptions.prefix_extractor),
-        index_builder(CreateIndexBuilder(
-            table_options.index_type, &internal_comparator,
-            &this->internal_prefix_transform)),
-        compression_type(compression_type),
-        filter_block(CreateFilterBlockBuilder(ioptions, table_options)),
+        internal_prefix_transform(_ioptions.prefix_extractor),
+        index_builder(CreateIndexBuilder(table_options.index_type,
+                                         &internal_comparator,
+                                         &this->internal_prefix_transform)),
+        compression_type(_compression_type),
+        compression_opts(_compression_opts),
+        filter_block(CreateFilterBlockBuilder(_ioptions, table_options)),
         flush_block_policy(
             table_options.flush_block_policy_factory->NewFlushBlockPolicy(
-                table_options, data_block)) {
+                table_options, data_block)) {
     for (auto& collector_factories :
          ioptions.table_properties_collector_factories) {
       table_properties_collectors.emplace_back(
diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc
index 1fe1a7d02..5cb35834a 100644
--- a/table/block_based_table_reader.cc
+++ b/table/block_based_table_reader.cc
@@ -316,13 +316,14 @@ class HashIndexReader : public IndexReader {
 
 struct BlockBasedTable::Rep {
-  Rep(const ImmutableCFOptions& ioptions,
-      const EnvOptions& env_options,
-      const BlockBasedTableOptions& table_opt,
-      const InternalKeyComparator& internal_comparator)
-      : ioptions(ioptions), env_options(env_options), table_options(table_opt),
-        filter_policy(table_opt.filter_policy.get()),
-        internal_comparator(internal_comparator) {}
+  Rep(const ImmutableCFOptions& _ioptions, const EnvOptions& _env_options,
+      const BlockBasedTableOptions& _table_opt,
+      const InternalKeyComparator& _internal_comparator)
+      : ioptions(_ioptions),
+        env_options(_env_options),
+        table_options(_table_opt),
+        filter_policy(_table_opt.filter_policy.get()),
+        internal_comparator(_internal_comparator) {}
 
   const ImmutableCFOptions& ioptions;
   const EnvOptions& env_options;
@@ -364,11 +365,9 @@ BlockBasedTable::~BlockBasedTable() {
 // was not read from cache, `cache_handle` will be nullptr.
 template <class TValue>
 struct BlockBasedTable::CachableEntry {
-  CachableEntry(TValue* value, Cache::Handle* cache_handle)
-      : value(value)
-      , cache_handle(cache_handle) {
-  }
-  CachableEntry(): CachableEntry(nullptr, nullptr) { }
+  CachableEntry(TValue* _value, Cache::Handle* _cache_handle)
+      : value(_value), cache_handle(_cache_handle) {}
+  CachableEntry() : CachableEntry(nullptr, nullptr) {}
   void Release(Cache* cache) {
     if (cache_handle) {
       cache->Release(cache_handle);
diff --git a/table/block_hash_index.h b/table/block_hash_index.h
index d5603d366..582910796 100644
--- a/table/block_hash_index.h
+++ b/table/block_hash_index.h
@@ -25,8 +25,8 @@ class BlockHashIndex {
  public:
   // Represents a restart index in the index block's restart array.
   struct RestartIndex {
-    explicit RestartIndex(uint32_t first_index, uint32_t num_blocks = 1)
-        : first_index(first_index), num_blocks(num_blocks) {}
+    explicit RestartIndex(uint32_t _first_index, uint32_t _num_blocks = 1)
+        : first_index(_first_index), num_blocks(_num_blocks) {}
 
     // For a given prefix, what is the restart index for the first data block
     // that contains it.
diff --git a/table/block_prefix_index.cc b/table/block_prefix_index.cc
index d64b73b98..c1c9d520e 100644
--- a/table/block_prefix_index.cc
+++ b/table/block_prefix_index.cc
@@ -143,8 +143,8 @@ class BlockPrefixIndex::Builder {
       auto current = prefixes_per_bucket[i];
       // populate block ids from largest to smallest
       while (current != nullptr) {
-        for (uint32_t i = 0; i < current->num_blocks; i++) {
-          *last_block = current->end_block - i;
+        for (uint32_t iter = 0; iter < current->num_blocks; iter++) {
+          *last_block = current->end_block - iter;
           last_block--;
         }
         current = current->next;
diff --git a/table/cuckoo_table_builder.cc b/table/cuckoo_table_builder.cc
index 6ff1fa0cf..a11945cf7 100644
--- a/table/cuckoo_table_builder.cc
+++ b/table/cuckoo_table_builder.cc
@@ -436,8 +436,8 @@ bool CuckooTableBuilder::MakeSpaceForKey(
     uint64_t bucket_id;
     uint32_t depth;
     uint32_t parent_pos;
-    CuckooNode(uint64_t bucket_id, uint32_t depth, int parent_pos)
-        : bucket_id(bucket_id), depth(depth), parent_pos(parent_pos) {}
+    CuckooNode(uint64_t _bucket_id, uint32_t _depth, int _parent_pos)
+        : bucket_id(_bucket_id), depth(_depth), parent_pos(_parent_pos) {}
   };
   // This is BFS search tree that is stored simply as a vector.
   // Each node stores the index of parent node in the vector.
@@ -451,10 +451,9 @@ bool CuckooTableBuilder::MakeSpaceForKey(
   // It is unlikely for the increment operation to overflow because the maximum
   // no. of times this will be called is <= max_num_hash_func_ + num_entries_.
   for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
-    uint64_t bucket_id = hash_vals[hash_cnt];
-    (*buckets)[bucket_id].make_space_for_key_call_id =
-        make_space_for_key_call_id;
-    tree.push_back(CuckooNode(bucket_id, 0, 0));
+    uint64_t bid = hash_vals[hash_cnt];
+    (*buckets)[bid].make_space_for_key_call_id = make_space_for_key_call_id;
+    tree.push_back(CuckooNode(bid, 0, 0));
   }
   bool null_found = false;
   uint32_t curr_pos = 0;
diff --git a/table/cuckoo_table_reader_test.cc b/table/cuckoo_table_reader_test.cc
index 66d88fc71..7bd18f536 100644
--- a/table/cuckoo_table_reader_test.cc
+++ b/table/cuckoo_table_reader_test.cc
@@ -72,8 +72,8 @@ class CuckooReaderTest {
     env_options = EnvOptions(options);
   }
 
-  void SetUp(int num_items) {
-    this->num_items = num_items;
+  void SetUp(int num) {
+    num_items = num;
     hash_map.clear();
     keys.clear();
     keys.resize(num_items);
diff --git a/table/plain_table_reader.cc b/table/plain_table_reader.cc
index db37241a9..16120d32b 100644
--- a/table/plain_table_reader.cc
+++ b/table/plain_table_reader.cc
@@ -358,12 +358,12 @@ Status PlainTableReader::PopulateIndex(TableProperties* props,
   std::vector<uint32_t> prefix_hashes;
   if (!index_in_file) {
-    Status s = PopulateIndexRecordList(&index_builder, &prefix_hashes);
+    s = PopulateIndexRecordList(&index_builder, &prefix_hashes);
     if (!s.ok()) {
       return s;
     }
   } else {
-    Status s = index_.InitFromRawData(*index_block);
+    s = index_.InitFromRawData(*index_block);
     if (!s.ok()) {
       return s;
     }
   }
@@ -566,7 +566,7 @@ Status PlainTableReader::Get(const ReadOptions& ro, const Slice& target,
   PlainTableKeyDecoder decoder(encoding_type_, user_key_len_,
                                ioptions_.prefix_extractor);
   while (offset < data_end_offset_) {
-    Status s = Next(&decoder, &offset, &found_key, nullptr, &found_value);
+    s = Next(&decoder, &offset, &found_key, nullptr, &found_value);
     if (!s.ok()) {
       return s;
     }
diff --git a/table/table_test.cc b/table/table_test.cc
index 362905eea..5f34e92eb 100644
--- a/table/table_test.cc
+++ b/table/table_test.cc
@@ -1416,31 +1416,32 @@ class BlockCachePropertiesSnapshot {
     filter_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_FILTER_HIT);
   }
 
-  void AssertIndexBlockStat(int64_t index_block_cache_miss,
-                            int64_t index_block_cache_hit) {
-    ASSERT_EQ(index_block_cache_miss, this->index_block_cache_miss);
-    ASSERT_EQ(index_block_cache_hit, this->index_block_cache_hit);
+  void AssertIndexBlockStat(int64_t expected_index_block_cache_miss,
+                            int64_t expected_index_block_cache_hit) {
+    ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
+    ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
   }
 
-  void AssertFilterBlockStat(int64_t filter_block_cache_miss,
-                             int64_t filter_block_cache_hit) {
-    ASSERT_EQ(filter_block_cache_miss, this->filter_block_cache_miss);
-    ASSERT_EQ(filter_block_cache_hit, this->filter_block_cache_hit);
+  void AssertFilterBlockStat(int64_t expected_filter_block_cache_miss,
+                             int64_t expected_filter_block_cache_hit) {
+    ASSERT_EQ(expected_filter_block_cache_miss, filter_block_cache_miss);
+    ASSERT_EQ(expected_filter_block_cache_hit, filter_block_cache_hit);
   }
 
   // Check if the fetched props matches the expected ones.
   // TODO(kailiu) Use this only when you disabled filter policy!
-  void AssertEqual(int64_t index_block_cache_miss,
-                   int64_t index_block_cache_hit, int64_t data_block_cache_miss,
-                   int64_t data_block_cache_hit) const {
-    ASSERT_EQ(index_block_cache_miss, this->index_block_cache_miss);
-    ASSERT_EQ(index_block_cache_hit, this->index_block_cache_hit);
-    ASSERT_EQ(data_block_cache_miss, this->data_block_cache_miss);
-    ASSERT_EQ(data_block_cache_hit, this->data_block_cache_hit);
-    ASSERT_EQ(index_block_cache_miss + data_block_cache_miss,
-              this->block_cache_miss);
-    ASSERT_EQ(index_block_cache_hit + data_block_cache_hit,
-              this->block_cache_hit);
+  void AssertEqual(int64_t expected_index_block_cache_miss,
+                   int64_t expected_index_block_cache_hit,
+                   int64_t expected_data_block_cache_miss,
+                   int64_t expected_data_block_cache_hit) const {
+    ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
+    ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
+    ASSERT_EQ(expected_data_block_cache_miss, data_block_cache_miss);
+    ASSERT_EQ(expected_data_block_cache_hit, data_block_cache_hit);
+    ASSERT_EQ(expected_index_block_cache_miss + expected_data_block_cache_miss,
+              block_cache_miss);
+    ASSERT_EQ(expected_index_block_cache_hit + expected_data_block_cache_hit,
+              block_cache_hit);
   }
 
  private:
diff --git a/table/two_level_iterator.h b/table/two_level_iterator.h
index d955dd763..030193597 100644
--- a/table/two_level_iterator.h
+++ b/table/two_level_iterator.h
@@ -19,8 +19,8 @@ class InternalKeyComparator;
 class Arena;
 
 struct TwoLevelIteratorState {
-  explicit TwoLevelIteratorState(bool check_prefix_may_match)
-      : check_prefix_may_match(check_prefix_may_match) {}
+  explicit TwoLevelIteratorState(bool _check_prefix_may_match)
+      : check_prefix_may_match(_check_prefix_may_match) {}
 
   virtual ~TwoLevelIteratorState() {}
   virtual Iterator* NewSecondaryIterator(const Slice& handle) = 0;
diff --git a/util/blob_store.h b/util/blob_store.h
index ce8633740..917fb947e 100644
--- a/util/blob_store.h
+++ b/util/blob_store.h
@@ -25,8 +25,8 @@ struct BlobChunk {
   uint32_t offset; // in blocks
   uint32_t size; // in blocks
   BlobChunk() {}
-  BlobChunk(uint32_t bucket_id, uint32_t offset, uint32_t size) :
-    bucket_id(bucket_id), offset(offset), size(size) {}
+  BlobChunk(uint32_t _bucket_id, uint32_t _offset, uint32_t _size)
+      : bucket_id(_bucket_id), offset(_offset), size(_size) {}
 
   // returns true if it's immediately before chunk
   bool ImmediatelyBefore(const BlobChunk& chunk) const;
diff --git a/util/cache.cc b/util/cache.cc
index f1c48a829..850fdb537 100644
--- a/util/cache.cc
+++ b/util/cache.cc
@@ -325,7 +325,7 @@ Cache::Handle* LRUCache::Insert(
     // Free the space following strict LRU policy until enough space
     // is freed.
     while (usage_ > capacity_ && lru_.next != &lru_) {
-      LRUHandle* old = lru_.next;
+      old = lru_.next;
       LRU_Remove(old);
       table_.Remove(old->key(), old->hash);
       if (Unref(old)) {
diff --git a/util/rate_limiter.cc b/util/rate_limiter.cc
index 47f96de84..3da2627e7 100644
--- a/util/rate_limiter.cc
+++ b/util/rate_limiter.cc
@@ -15,8 +15,8 @@ namespace rocksdb {
 
 // Pending request
 struct GenericRateLimiter::Req {
-  explicit Req(int64_t bytes, port::Mutex* mu) :
-    bytes(bytes), cv(mu), granted(false) {}
+  explicit Req(int64_t _bytes, port::Mutex* _mu)
+      : bytes(_bytes), cv(_mu), granted(false) {}
   int64_t bytes;
   port::CondVar cv;
   bool granted;
diff --git a/util/rate_limiter_test.cc b/util/rate_limiter_test.cc
index 9d6cfb7e6..cdeca578d 100644
--- a/util/rate_limiter_test.cc
+++ b/util/rate_limiter_test.cc
@@ -30,23 +30,23 @@ TEST(RateLimiterTest, StartStop) {
 TEST(RateLimiterTest, Rate) {
   auto* env = Env::Default();
   struct Arg {
-    Arg(int64_t target_rate, int burst)
-        : limiter(new GenericRateLimiter(target_rate, 100 * 1000, 10)),
-          request_size(target_rate / 10),
-          burst(burst) {}
+    Arg(int64_t _target_rate, int _burst)
+        : limiter(new GenericRateLimiter(_target_rate, 100 * 1000, 10)),
+          request_size(_target_rate / 10),
+          burst(_burst) {}
     std::unique_ptr<RateLimiter> limiter;
     int64_t request_size;
     int burst;
   };
 
   auto writer = [](void* p) {
-    auto* env = Env::Default();
+    auto* thread_env = Env::Default();
     auto* arg = static_cast<Arg*>(p);
     // Test for 2 seconds
-    auto until = env->NowMicros() + 2 * 1000000;
-    Random r((uint32_t)(env->NowNanos() %
-                        std::numeric_limits<uint32_t>::max()));
-    while (env->NowMicros() < until) {
+    auto until = thread_env->NowMicros() + 2 * 1000000;
+    Random r((uint32_t)(thread_env->NowNanos() %
+                        std::numeric_limits<uint32_t>::max()));
+    while (thread_env->NowMicros() < until) {
       for (int i = 0; i < static_cast<int>(r.Skewed(arg->burst) + 1); ++i) {
         arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1,
                               Env::IO_HIGH);
diff --git a/util/thread_local_test.cc b/util/thread_local_test.cc
index 70dfa956e..155ef243c 100644
--- a/util/thread_local_test.cc
+++ b/util/thread_local_test.cc
@@ -24,11 +24,11 @@ class ThreadLocalTest {
 namespace {
 
 struct Params {
-  Params(port::Mutex* m, port::CondVar* c, int* unref, int n,
+  Params(port::Mutex* m, port::CondVar* c, int* u, int n,
          UnrefHandler handler = nullptr)
       : mu(m),
         cv(c),
-        unref(unref),
+        unref(u),
         total(n),
         started(0),
         completed(0),
@@ -112,24 +112,24 @@ TEST(ThreadLocalTest, SequentialReadWriteTest) {
   p.tls2 = &tls2;
 
   auto func = [](void* ptr) {
-    auto& p = *static_cast<Params*>(ptr);
+    auto& params = *static_cast<Params*>(ptr);
 
-    ASSERT_TRUE(p.tls1.Get() == nullptr);
-    p.tls1.Reset(reinterpret_cast<void*>(1));
-    ASSERT_TRUE(p.tls1.Get() == reinterpret_cast<void*>(1));
-    p.tls1.Reset(reinterpret_cast<void*>(2));
-    ASSERT_TRUE(p.tls1.Get() == reinterpret_cast<void*>(2));
+    ASSERT_TRUE(params.tls1.Get() == nullptr);
+    params.tls1.Reset(reinterpret_cast<void*>(1));
+    ASSERT_TRUE(params.tls1.Get() == reinterpret_cast<void*>(1));
+    params.tls1.Reset(reinterpret_cast<void*>(2));
+    ASSERT_TRUE(params.tls1.Get() == reinterpret_cast<void*>(2));
 
-    ASSERT_TRUE(p.tls2->Get() == nullptr);
-    p.tls2->Reset(reinterpret_cast<void*>(1));
-    ASSERT_TRUE(p.tls2->Get() == reinterpret_cast<void*>(1));
-    p.tls2->Reset(reinterpret_cast<void*>(2));
-    ASSERT_TRUE(p.tls2->Get() == reinterpret_cast<void*>(2));
+    ASSERT_TRUE(params.tls2->Get() == nullptr);
+    params.tls2->Reset(reinterpret_cast<void*>(1));
+    ASSERT_TRUE(params.tls2->Get() == reinterpret_cast<void*>(1));
+    params.tls2->Reset(reinterpret_cast<void*>(2));
+    ASSERT_TRUE(params.tls2->Get() == reinterpret_cast<void*>(2));
 
-    p.mu->Lock();
-    ++(p.completed);
-    p.cv->SignalAll();
-    p.mu->Unlock();
+    params.mu->Lock();
+    ++(params.completed);
+    params.cv->SignalAll();
+    params.mu->Unlock();
   };
 
   for (int iter = 0; iter < 1024; ++iter) {
diff --git a/utilities/geodb/geodb_impl.cc b/utilities/geodb/geodb_impl.cc
index 6c13fd691..fc387b4ca 100644
--- a/utilities/geodb/geodb_impl.cc
+++ b/utilities/geodb/geodb_impl.cc
@@ -192,8 +192,8 @@ Status GeoDBImpl::SearchRadial(const GeoPosition& pos,
       // we are looking for.
       auto res = std::mismatch(qid.begin(), qid.end(), quadkey->begin());
       if (res.first == qid.end()) {
-        GeoPosition pos(atof(parts[3].c_str()), atof(parts[4].c_str()));
-        GeoObject obj(pos, parts[4], iter->value().ToString());
+        GeoPosition obj_pos(atof(parts[3].c_str()), atof(parts[4].c_str()));
+        GeoObject obj(obj_pos, parts[4], iter->value().ToString());
         values->push_back(obj);
         number_of_values--;
       } else {
diff --git a/utilities/write_batch_with_index/write_batch_with_index.cc b/utilities/write_batch_with_index/write_batch_with_index.cc
index 4ba063a06..ff9d89f2f 100644
--- a/utilities/write_batch_with_index/write_batch_with_index.cc
+++ b/utilities/write_batch_with_index/write_batch_with_index.cc
@@ -437,11 +437,11 @@ class WBWIIteratorImpl : public WBWIIterator {
 
 struct WriteBatchWithIndex::Rep {
   Rep(const Comparator* index_comparator, size_t reserved_bytes = 0,
-      bool overwrite_key = false)
+      bool _overwrite_key = false)
       : write_batch(reserved_bytes),
        comparator(index_comparator, &write_batch),
        skip_list(comparator, &arena),
-       overwrite_key(overwrite_key),
+       overwrite_key(_overwrite_key),
        last_entry_offset(0) {}
   ReadableWriteBatch write_batch;
   WriteBatchEntryComparator comparator;