Fix minor bugs in delete operator, snprintf, and size_t usage

Summary:
List of changes:

1) Fix the snprintf() usage in cases where the wrong variable was used to determine the output buffer size (see the sketch after this list).

2) Remove unnecessary null checks before calling the delete operator (see the example after this list).

3) Improve code correctness by using the size_t type when getting a vector's size (see the sketch after this list).

4) Unify the coding style by removing the "using namespace std" directive at the top of the file to conform to the majority usage.

5) Fix various lint errors pointed out by 'arc lint' (for example, by replacing ctime() with the reentrant ctime_r(); see the sketch after this list).
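
To illustrate item 1, here is a minimal sketch of the pattern being fixed
(the buffer names mirror one of the tailing-iterator test hunks below):

    #include <cstdio>

    int main() {
      char buf1[32];
      char buf3[32];
      int i = 1;
      // Before: the size of the wrong buffer was passed. This happens to
      // work while both arrays have the same size, but it truncates or
      // overflows silently if one of them is ever resized.
      std::snprintf(buf3, sizeof(buf1), "00b0%016d", i * 5);
      // After: each call is bounded by the buffer it actually writes into.
      std::snprintf(buf3, sizeof(buf3), "00b0%016d", i * 5);
      std::printf("%s\n", buf3);
      return 0;
    }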
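
Item 2 relies on the standard guarantee that applying delete to a null
pointer is a no-op, so the guard adds nothing. A minimal sketch (SavePoints
here is a stand-in type for illustration):

    struct SavePoints {};  // stand-in for the real member type

    int main() {
      SavePoints* save_points = new SavePoints();
      // Before: a guarded delete.
      if (save_points != nullptr) {
        delete save_points;
        save_points = nullptr;
      }
      // After: deleting a null pointer is well defined and does nothing,
      // so the check is redundant.
      delete save_points;  // save_points is null here; this is a no-op
      return 0;
    }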
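
Item 3 matters because std::vector<T>::size() returns an unsigned
size_type (size_t in practice), which may be 64-bit while unsigned int is
typically 32-bit, so a narrower index can truncate and trips
signed/unsigned lint warnings. A minimal sketch of the before/after loop:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> files(3, 7);
      // Before: a 32-bit index compared against a possibly 64-bit size().
      for (unsigned int i = 0; i < files.size(); i++) {
        std::printf("%d\n", files[i]);
      }
      // After: the index type matches the container's size type.
      for (size_t i = 0; i < files.size(); i++) {
        std::printf("%d\n", files[i]);
      }
      return 0;
    }

The flip side of unsigned indexes is underflow: an expression like
sorted_runs.size() - 1 wraps around to a huge value when the vector is
empty, which is why the read-amp picker below now documents that invariant
with an assert before looping.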
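
As an example of item 5, lint flagged ctime(), which was replaced with the
reentrant POSIX ctime_r(); it requires a caller-supplied buffer of at
least 26 bytes (the 52-byte buffer mirrors the Benchmark class hunk below):

    #include <cstdio>
    #include <ctime>

    int main() {
      std::time_t now = std::time(nullptr);
      char buf[52];  // ctime_r() needs at least 26 bytes; 52 leaves headroom
      // Unlike ctime(), ctime_r() writes into the caller's buffer instead
      // of a shared static one. The formatted string ends with '\n'.
      std::fprintf(stderr, "Date: %s", ctime_r(&now, buf));
      return 0;
    }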

Test Plan:
Code review and build:

git diff
make clean
make -j 32 commit-prereq
arc lint

Reviewers: kradhakrishnan, sdong, rven, anthony, yhchiang, igor

Reviewed By: igor

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D51849
Gunnar Kudrjavets 2015-12-15 15:26:20 -08:00
parent b68dc0f83e
commit 97265f5f14
18 changed files with 99 additions and 100 deletions

View File

@@ -46,7 +46,7 @@ void Compaction::GetBoundaryKeys(
Slice* largest_user_key) {
bool initialized = false;
const Comparator* ucmp = vstorage->InternalComparator()->user_comparator();
- for (uint32_t i = 0; i < inputs.size(); ++i) {
+ for (size_t i = 0; i < inputs.size(); ++i) {
if (inputs[i].files.empty()) {
continue;
}
@@ -311,7 +311,7 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
// Mark (or clear) each file that is being compacted
void Compaction::MarkFilesBeingCompacted(bool mark_as_compacted) {
for (size_t i = 0; i < num_input_levels(); i++) {
- for (unsigned int j = 0; j < inputs_[i].size(); j++) {
+ for (size_t j = 0; j < inputs_[i].size(); j++) {
assert(mark_as_compacted ? !inputs_[i][j]->being_compacted :
inputs_[i][j]->being_compacted);
inputs_[i][j]->being_compacted = mark_as_compacted;
@@ -371,7 +371,7 @@ int InputSummary(const std::vector<FileMetaData*>& files, char* output,
int len) {
*output = '\0';
int write = 0;
- for (unsigned int i = 0; i < files.size(); i++) {
+ for (size_t i = 0; i < files.size(); i++) {
int sz = len - write;
int ret;
char sztxt[16];

View File

@@ -243,7 +243,7 @@ bool CompactionPicker::ExpandWhileOverlapping(const std::string& cf_name,
// Returns true if any one of specified files are being compacted
bool CompactionPicker::FilesInCompaction(
const std::vector<FileMetaData*>& files) {
- for (unsigned int i = 0; i < files.size(); i++) {
+ for (size_t i = 0; i < files.size(); i++) {
if (files[i]->being_compacted) {
return true;
}
@@ -1142,18 +1142,19 @@ void UniversalCompactionPicker::SortedRun::Dump(char* out_buf,
}
void UniversalCompactionPicker::SortedRun::DumpSizeInfo(
- char* out_buf, size_t out_buf_size, int sorted_run_count) const {
+ char* out_buf, size_t out_buf_size, size_t sorted_run_count) const {
if (level == 0) {
assert(file != nullptr);
snprintf(out_buf, out_buf_size,
"file %" PRIu64
"[%d] "
"file %" PRIu64 "[%" ROCKSDB_PRIszt
"] "
"with size %" PRIu64 " (compensated size %" PRIu64 ")",
file->fd.GetNumber(), sorted_run_count, file->fd.GetFileSize(),
file->compensated_file_size);
} else {
snprintf(out_buf, out_buf_size,
"level %d[%d] "
"level %d[%" ROCKSDB_PRIszt
"] "
"with size %" PRIu64 " (compensated size %" PRIu64 ")",
level, sorted_run_count, size, compensated_file_size);
}
@@ -1435,16 +1436,21 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
const SortedRun* sr = nullptr;
bool done = false;
- int start_index = 0;
+ size_t start_index = 0;
unsigned int candidate_count = 0;
unsigned int max_files_to_compact = std::min(max_merge_width,
max_number_of_files_to_compact);
min_merge_width = std::max(min_merge_width, 2U);
+ // Caller checks the size before executing this function. This invariant is
+ // important because otherwise we may have a possible integer underflow when
+ // dealing with unsigned types.
+ assert(sorted_runs.size() > 0);
// Considers a candidate file only if it is smaller than the
// total size accumulated so far.
- for (unsigned int loop = 0; loop < sorted_runs.size(); loop++) {
+ for (size_t loop = 0; loop < sorted_runs.size(); loop++) {
candidate_count = 0;
// Skip files that are already being compacted
@@ -1476,7 +1482,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
}
// Check if the succeeding files need compaction.
- for (unsigned int i = loop + 1;
+ for (size_t i = loop + 1;
candidate_count < max_files_to_compact && i < sorted_runs.size();
i++) {
const SortedRun* succeeding_sr = &sorted_runs[i];
@@ -1518,7 +1524,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
done = true;
break;
} else {
- for (unsigned int i = loop;
+ for (size_t i = loop;
i < loop + candidate_count && i < sorted_runs.size(); i++) {
const SortedRun* skipping_sr = &sorted_runs[i];
char file_num_buf[256];
@@ -1531,7 +1537,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
if (!done || candidate_count <= 1) {
return nullptr;
}
- unsigned int first_index_after = start_index + candidate_count;
+ size_t first_index_after = start_index + candidate_count;
// Compression is enabled if files compacted earlier already reached
// size ratio of compression.
bool enable_compression = true;
@@ -1572,7 +1578,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
for (size_t i = 0; i < inputs.size(); ++i) {
inputs[i].level = start_level + static_cast<int>(i);
}
- for (unsigned int i = start_index; i < first_index_after; i++) {
+ for (size_t i = start_index; i < first_index_after; i++) {
auto& picking_sr = sorted_runs[i];
if (picking_sr.level == 0) {
FileMetaData* picking_file = picking_sr.file;
@@ -1612,11 +1618,11 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
unsigned int candidate_count = 0;
uint64_t candidate_size = 0;
- unsigned int start_index = 0;
+ size_t start_index = 0;
const SortedRun* sr = nullptr;
// Skip files that are already being compacted
- for (unsigned int loop = 0; loop < sorted_runs.size() - 1; loop++) {
+ for (size_t loop = 0; loop < sorted_runs.size() - 1; loop++) {
sr = &sorted_runs[loop];
if (!sr->being_compacted) {
start_index = loop; // Consider this as the first candidate.
@@ -1636,13 +1642,14 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
{
char file_num_buf[kFormatFileNumberBufSize];
sr->Dump(file_num_buf, sizeof(file_num_buf), true);
LogToBuffer(log_buffer, "[%s] Universal: First candidate %s[%d] %s",
LogToBuffer(log_buffer,
"[%s] Universal: First candidate %s[%" ROCKSDB_PRIszt "] %s",
cf_name.c_str(), file_num_buf, start_index,
" to reduce size amp.\n");
}
// keep adding up all the remaining files
- for (unsigned int loop = start_index; loop < sorted_runs.size() - 1; loop++) {
+ for (size_t loop = start_index; loop < sorted_runs.size() - 1; loop++) {
sr = &sorted_runs[loop];
if (sr->being_compacted) {
char file_num_buf[kFormatFileNumberBufSize];
@@ -1682,7 +1689,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
// Estimate total file size
uint64_t estimated_total_size = 0;
- for (unsigned int loop = start_index; loop < sorted_runs.size(); loop++) {
+ for (size_t loop = start_index; loop < sorted_runs.size(); loop++) {
estimated_total_size += sorted_runs[loop].size;
}
uint32_t path_id = GetPathId(ioptions_, estimated_total_size);
@@ -1693,7 +1700,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
inputs[i].level = start_level + static_cast<int>(i);
}
// We always compact all the files, so always compress.
- for (unsigned int loop = start_index; loop < sorted_runs.size(); loop++) {
+ for (size_t loop = start_index; loop < sorted_runs.size(); loop++) {
auto& picking_sr = sorted_runs[loop];
if (picking_sr.level == 0) {
FileMetaData* f = picking_sr.file;

View File

@@ -251,7 +251,7 @@ class UniversalCompactionPicker : public CompactionPicker {
// sorted_run_count is added into the string to print
void DumpSizeInfo(char* out_buf, size_t out_buf_size,
- int sorted_run_count) const;
+ size_t sorted_run_count) const;
int level;
// `file` Will be null for level > 0. For level = 0, the sorted run is

View File

@@ -183,7 +183,7 @@ class CorruptionTest : public testing::Test {
FileType type;
std::string fname;
int picked_number = -1;
- for (unsigned int i = 0; i < filenames.size(); i++) {
+ for (size_t i = 0; i < filenames.size(); i++) {
if (ParseFileName(filenames[i], &number, &type) &&
type == filetype &&
static_cast<int>(number) > picked_number) { // Pick latest file

View File

@@ -1708,7 +1708,11 @@ class Benchmark {
#if defined(__linux)
time_t now = time(nullptr);
fprintf(stderr, "Date: %s", ctime(&now)); // ctime() adds newline
char buf[52];
// Lint complains about ctime() usage, so replace it with ctime_r(). The
// requirement is to provide a buffer which is at least 26 bytes.
fprintf(stderr, "Date: %s",
ctime_r(&now, buf)); // ctime_r() adds newline
FILE* cpuinfo = fopen("/proc/cpuinfo", "r");
if (cpuinfo != nullptr) {
@@ -1789,7 +1793,7 @@ class Benchmark {
std::vector<std::string> files;
FLAGS_env->GetChildren(FLAGS_db, &files);
- for (unsigned int i = 0; i < files.size(); i++) {
+ for (size_t i = 0; i < files.size(); i++) {
if (Slice(files[i]).starts_with("heap-")) {
FLAGS_env->DeleteFile(FLAGS_db + "/" + files[i]);
}
@@ -3880,12 +3884,8 @@ class Benchmark {
}
}
- if (txn) {
- delete txn;
- }
- if (batch) {
- delete batch;
- }
+ delete txn;
+ delete batch;
if (!failed) {
thread->stats.FinishedOps(nullptr, db, 1, kOthers);
@@ -4067,7 +4067,7 @@ int main(int argc, char** argv) {
std::vector<std::string> fanout = rocksdb::StringSplit(
FLAGS_max_bytes_for_level_multiplier_additional, ',');
- for (unsigned int j= 0; j < fanout.size(); j++) {
+ for (size_t j = 0; j < fanout.size(); j++) {
FLAGS_max_bytes_for_level_multiplier_additional_v.push_back(
#ifndef CYGWIN
std::stoi(fanout[j]));

View File

@@ -676,7 +676,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
Random rnd(301);
std::map<int32_t, std::string> values;
- for (uint32_t i = 0; i < ranges.size(); i++) {
+ for (size_t i = 0; i < ranges.size(); i++) {
for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
values[j] = RandomString(&rnd, value_size);
ASSERT_OK(Put(Key(j), values[j]));
@@ -698,7 +698,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0);
ASSERT_EQ(NumTableFilesAtLevel(1, 0) /* level1_files */, level0_files);
- for (uint32_t i = 0; i < ranges.size(); i++) {
+ for (size_t i = 0; i < ranges.size(); i++) {
for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
ASSERT_EQ(Get(Key(j)), values[j]);
}
@@ -722,7 +722,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
{500, 560}, // this range overlap with the next one
{551, 599},
};
- for (uint32_t i = 0; i < ranges.size(); i++) {
+ for (size_t i = 0; i < ranges.size(); i++) {
for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
values[j] = RandomString(&rnd, value_size);
ASSERT_OK(Put(Key(j), values[j]));
@@ -732,7 +732,7 @@ TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) {
db_->CompactRange(cro, nullptr, nullptr);
- for (uint32_t i = 0; i < ranges.size(); i++) {
+ for (size_t i = 0; i < ranges.size(); i++) {
for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) {
ASSERT_EQ(Get(Key(j)), values[j]);
}

View File

@@ -568,8 +568,7 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
versions_->AddLiveFiles(&job_context->sst_live);
if (doing_the_full_scan) {
- for (uint32_t path_id = 0; path_id < db_options_.db_paths.size();
- path_id++) {
+ for (size_t path_id = 0; path_id < db_options_.db_paths.size(); path_id++) {
// set of all files in the directory. We'll exclude files that are still
// alive in the subsequent processings.
std::vector<std::string> files;

View File

@@ -168,7 +168,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
char buf3[32];
char buf4[32];
snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5);
- snprintf(buf3, sizeof(buf1), "00b0%016d", i * 5);
+ snprintf(buf3, sizeof(buf3), "00b0%016d", i * 5);
Slice key(buf1, 20);
ASSERT_OK(Put(1, key, value));
@@ -181,7 +181,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {
if (i == 299) {
file_iters_deleted = true;
}
- snprintf(buf4, sizeof(buf1), "00a0%016d", i * 5 / 2);
+ snprintf(buf4, sizeof(buf4), "00a0%016d", i * 5 / 2);
Slice target(buf4, 20);
iterh->Seek(target);
ASSERT_TRUE(iter->Valid());

View File

@@ -4577,7 +4577,7 @@ TEST_F(DBTest, SnapshotFiles) {
std::string snapdir = dbname_ + ".snapdir/";
ASSERT_OK(env_->CreateDirIfMissing(snapdir));
- for (unsigned int i = 0; i < files.size(); i++) {
+ for (size_t i = 0; i < files.size(); i++) {
// our clients require that GetLiveFiles returns
// files with "/" as first character!
ASSERT_EQ(files[i][0], '/');
@@ -4646,7 +4646,7 @@ TEST_F(DBTest, SnapshotFiles) {
// the same one as in the previous snapshot. But its size should be
// larger because we added an extra key after taking the
// previous shapshot.
- for (unsigned int i = 0; i < newfiles.size(); i++) {
+ for (size_t i = 0; i < newfiles.size(); i++) {
std::string src = dbname_ + "/" + newfiles[i];
// record the lognumber and the size of the
// latest manifest file
@@ -10124,7 +10124,7 @@ TEST_F(DBTest, SSTsWithLdbSuffixHandling) {
std::vector<std::string> filenames;
GetSstFiles(dbname_, &filenames);
int num_ldb_files = 0;
- for (unsigned int i = 0; i < filenames.size(); ++i) {
+ for (size_t i = 0; i < filenames.size(); ++i) {
if (i & 1) {
continue;
}

View File

@@ -74,6 +74,7 @@ class DeleteFileTest : public testing::Test {
void CloseDB() {
delete db_;
+ db_ = nullptr;
}
void AddKeys(int numkeys, int startkey = 0) {

View File

@@ -645,18 +645,17 @@ class FaultInjectionTest : public testing::Test,
return test::RandomString(&r, kValueSize, storage);
}
- Status OpenDB() {
- delete db_;
- db_ = NULL;
- env_->ResetState();
- return DB::Open(options_, dbname_, &db_);
- }
void CloseDB() {
delete db_;
db_ = NULL;
}
+ Status OpenDB() {
+ CloseDB();
+ env_->ResetState();
+ return DB::Open(options_, dbname_, &db_);
+ }
void DeleteAllData() {
Iterator* iter = db_->NewIterator(ReadOptions());
WriteOptions options;

View File

@@ -262,7 +262,7 @@ void ForwardIterator::SeekInternal(const Slice& internal_key,
}
const VersionStorageInfo* vstorage = sv_->current->storage_info();
const std::vector<FileMetaData*>& l0 = vstorage->LevelFiles(0);
- for (uint32_t i = 0; i < l0.size(); ++i) {
+ for (size_t i = 0; i < l0.size(); ++i) {
if (!l0_iters_[i]) {
continue;
}
@@ -521,7 +521,7 @@ void ForwardIterator::RenewIterators() {
const auto& l0_files = vstorage->LevelFiles(0);
const auto* vstorage_new = svnew->current->storage_info();
const auto& l0_files_new = vstorage_new->LevelFiles(0);
- uint32_t iold, inew;
+ size_t iold, inew;
bool found;
std::vector<InternalIterator*> l0_iters_new;
l0_iters_new.reserve(l0_files_new.size());
@@ -589,7 +589,7 @@ void ForwardIterator::BuildLevelIterators(const VersionStorageInfo* vstorage) {
void ForwardIterator::ResetIncompleteIterators() {
const auto& l0_files = sv_->current->storage_info()->LevelFiles(0);
- for (uint32_t i = 0; i < l0_iters_.size(); ++i) {
+ for (size_t i = 0; i < l0_iters_.size(); ++i) {
assert(i < l0_files.size());
if (!l0_iters_[i] || !l0_iters_[i]->status().IsIncomplete()) {
continue;
@@ -680,7 +680,7 @@ bool ForwardIterator::NeedToSeekImmutable(const Slice& target) {
void ForwardIterator::DeleteCurrentIter() {
const VersionStorageInfo* vstorage = sv_->current->storage_info();
const std::vector<FileMetaData*>& l0 = vstorage->LevelFiles(0);
- for (uint32_t i = 0; i < l0.size(); ++i) {
+ for (size_t i = 0; i < l0.size(); ++i) {
if (!l0_iters_[i]) {
continue;
}
@@ -712,7 +712,7 @@ bool ForwardIterator::TEST_CheckDeletedIters(int* pdeleted_iters,
const VersionStorageInfo* vstorage = sv_->current->storage_info();
const std::vector<FileMetaData*>& l0 = vstorage->LevelFiles(0);
- for (uint32_t i = 0; i < l0.size(); ++i) {
+ for (size_t i = 0; i < l0.size(); ++i) {
if (!l0_iters_[i]) {
retval = true;
deleted_iters++;

View File

@@ -20,7 +20,6 @@
#include "utilities/merge_operators.h"
#include "util/testharness.h"
- using namespace std;
using namespace rocksdb;
namespace {
@@ -76,7 +75,7 @@ class CountMergeOperator : public AssociativeMergeOperator {
};
namespace {
std::shared_ptr<DB> OpenDb(const string& dbname, const bool ttl = false,
std::shared_ptr<DB> OpenDb(const std::string& dbname, const bool ttl = false,
const size_t max_successive_merges = 0,
const uint32_t min_partial_merge_operands = 2) {
DB* db;
@@ -90,7 +89,7 @@ std::shared_ptr<DB> OpenDb(const string& dbname, const bool ttl = false,
// DBWithTTL is not supported in ROCKSDB_LITE
#ifndef ROCKSDB_LITE
if (ttl) {
cout << "Opening database with TTL\n";
std::cout << "Opening database with TTL\n";
DBWithTTL* db_with_ttl;
s = DBWithTTL::Open(options, dbname, &db_with_ttl);
db = db_with_ttl;
@@ -102,7 +101,7 @@ std::shared_ptr<DB> OpenDb(const string& dbname, const bool ttl = false,
s = DB::Open(options, dbname, &db);
#endif // !ROCKSDB_LITE
if (!s.ok()) {
cerr << s.ToString() << endl;
std::cerr << s.ToString() << std::endl;
assert(false);
}
return std::shared_ptr<DB>(db);
@@ -142,7 +141,7 @@ class Counters {
// if the underlying level db operation failed.
// mapped to a levedb Put
bool set(const string& key, uint64_t value) {
bool set(const std::string& key, uint64_t value) {
// just treat the internal rep of int64 as the string
Slice slice((char *)&value, sizeof(value));
auto s = db_->Put(put_option_, key, slice);
@@ -150,26 +149,26 @@ class Counters {
if (s.ok()) {
return true;
} else {
cerr << s.ToString() << endl;
std::cerr << s.ToString() << std::endl;
return false;
}
}
// mapped to a rocksdb Delete
bool remove(const string& key) {
bool remove(const std::string& key) {
auto s = db_->Delete(delete_option_, key);
if (s.ok()) {
return true;
} else {
cerr << s.ToString() << std::endl;
std::cerr << s.ToString() << std::endl;
return false;
}
}
// mapped to a rocksdb Get
bool get(const string& key, uint64_t *value) {
string str;
bool get(const std::string& key, uint64_t* value) {
std::string str;
auto s = db_->Get(get_option_, key, &str);
if (s.IsNotFound()) {
@@ -179,35 +178,33 @@ class Counters {
} else if (s.ok()) {
// deserialization
if (str.size() != sizeof(uint64_t)) {
cerr << "value corruption\n";
std::cerr << "value corruption\n";
return false;
}
*value = DecodeFixed64(&str[0]);
return true;
} else {
cerr << s.ToString() << std::endl;
std::cerr << s.ToString() << std::endl;
return false;
}
}
// 'add' is implemented as get -> modify -> set
// An alternative is a single merge operation, see MergeBasedCounters
virtual bool add(const string& key, uint64_t value) {
virtual bool add(const std::string& key, uint64_t value) {
uint64_t base = default_;
return get(key, &base) && set(key, base + value);
}
// convenience functions for testing
void assert_set(const string& key, uint64_t value) {
void assert_set(const std::string& key, uint64_t value) {
assert(set(key, value));
}
void assert_remove(const string& key) {
assert(remove(key));
}
void assert_remove(const std::string& key) { assert(remove(key)); }
uint64_t assert_get(const string& key) {
uint64_t assert_get(const std::string& key) {
uint64_t value = default_;
int result = get(key, &value);
assert(result);
@@ -215,7 +212,7 @@ class Counters {
return value;
}
void assert_add(const string& key, uint64_t value) {
void assert_add(const std::string& key, uint64_t value) {
int result = add(key, value);
assert(result);
if (result == 0) exit(1); // Disable unused variable warning.
@@ -234,7 +231,7 @@ class MergeBasedCounters : public Counters {
}
// mapped to a rocksdb Merge operation
virtual bool add(const string& key, uint64_t value) override {
virtual bool add(const std::string& key, uint64_t value) override {
char encoded[sizeof(uint64_t)];
EncodeFixed64(encoded, value);
Slice slice(encoded, sizeof(uint64_t));
@@ -243,7 +240,7 @@ class MergeBasedCounters : public Counters {
if (s.ok()) {
return true;
} else {
cerr << s.ToString() << endl;
std::cerr << s.ToString() << std::endl;
return false;
}
}
@@ -254,7 +251,7 @@ void dumpDb(DB* db) {
auto it = unique_ptr<Iterator>(db->NewIterator(ReadOptions()));
for (it->SeekToFirst(); it->Valid(); it->Next()) {
uint64_t value = DecodeFixed64(it->value().data());
cout << it->key().ToString() << ": " << value << endl;
std::cout << it->key().ToString() << ": " << value << std::endl;
}
assert(it->status().ok()); // Check for any errors found during the scan
}
@@ -302,9 +299,9 @@ void testCounters(Counters& counters, DB* db, bool test_compaction) {
if (test_compaction) {
db->Flush(o);
cout << "Compaction started ...\n";
std::cout << "Compaction started ...\n";
db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
cout << "Compaction ended\n";
std::cout << "Compaction ended\n";
dumpDb(db);
@@ -400,7 +397,7 @@ void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges,
// Get the value
resetNumMergeOperatorCalls();
- string get_value_str;
+ std::string get_value_str;
{
Status s = db->Get(ReadOptions(), key, &get_value_str);
assert(s.ok());
@@ -412,24 +409,24 @@ void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges,
static_cast<size_t>((num_merges % (max_num_merges + 1))));
}
void runTest(int argc, const string& dbname, const bool use_ttl = false) {
void runTest(int argc, const std::string& dbname, const bool use_ttl = false) {
bool compact = false;
if (argc > 1) {
compact = true;
cout << "Turn on Compaction\n";
std::cout << "Turn on Compaction\n";
}
{
auto db = OpenDb(dbname, use_ttl);
{
cout << "Test read-modify-write counters... \n";
std::cout << "Test read-modify-write counters... \n";
Counters counters(db, 0);
testCounters(counters, db.get(), true);
}
{
cout << "Test merge-based counters... \n";
std::cout << "Test merge-based counters... \n";
MergeBasedCounters counters(db, 0);
testCounters(counters, db.get(), compact);
}
@@ -438,7 +435,7 @@ void runTest(int argc, const string& dbname, const bool use_ttl = false) {
DestroyDB(dbname, Options());
{
cout << "Test merge in memtable... \n";
std::cout << "Test merge in memtable... \n";
size_t max_merge = 5;
auto db = OpenDb(dbname, use_ttl, max_merge);
MergeBasedCounters counters(db, 0);
@@ -449,7 +446,7 @@ void runTest(int argc, const string& dbname, const bool use_ttl = false) {
}
{
cout << "Test Partial-Merge\n";
std::cout << "Test Partial-Merge\n";
size_t max_merge = 100;
for (uint32_t min_merge = 5; min_merge < 25; min_merge += 5) {
for (uint32_t count = min_merge - 1; count <= min_merge + 1; count++) {
@@ -469,7 +466,7 @@ void runTest(int argc, const string& dbname, const bool use_ttl = false) {
}
{
cout << "Test merge-operator not set after reopen\n";
std::cout << "Test merge-operator not set after reopen\n";
{
auto db = OpenDb(dbname);
MergeBasedCounters counters(db, 0);
@@ -489,7 +486,7 @@ void runTest(int argc, const string& dbname, const bool use_ttl = false) {
/* Temporary remove this test
{
cout << "Test merge-operator not set after reopen (recovery case)\n";
std::cout << "Test merge-operator not set after reopen (recovery case)\n";
{
auto db = OpenDb(dbname);
MergeBasedCounters counters(db, 0);

View File

@@ -165,7 +165,7 @@ class Repairer {
Status FindFiles() {
std::vector<std::string> filenames;
bool found_file = false;
- for (uint32_t path_id = 0; path_id < options_.db_paths.size(); path_id++) {
+ for (size_t path_id = 0; path_id < options_.db_paths.size(); path_id++) {
Status status =
env_->GetChildren(options_.db_paths[path_id].path, &filenames);
if (!status.ok()) {

View File

@@ -165,7 +165,7 @@ class VersionBuilder::Rep {
for (int l = 0; !found && l < base_vstorage_->num_levels(); l++) {
const std::vector<FileMetaData*>& base_files =
base_vstorage_->LevelFiles(l);
- for (unsigned int i = 0; i < base_files.size(); i++) {
+ for (size_t i = 0; i < base_files.size(); i++) {
FileMetaData* f = base_files[i];
if (f->fd.GetNumber() == number) {
found = true;

View File

@@ -1262,7 +1262,7 @@ namespace {
// used to sort files by size
struct Fsize {
- int index;
+ size_t index;
FileMetaData* file;
};
@@ -1370,7 +1370,7 @@ void VersionStorageInfo::UpdateFilesByCompactionPri(
// populate a temp vector for sorting based on size
std::vector<Fsize> temp(files.size());
- for (unsigned int i = 0; i < files.size(); i++) {
+ for (size_t i = 0; i < files.size(); i++) {
temp[i].index = i;
temp[i].file = files[i];
}
@@ -1403,8 +1403,8 @@ void VersionStorageInfo::UpdateFilesByCompactionPri(
assert(temp.size() == files.size());
// initialize files_by_compaction_pri_
- for (unsigned int i = 0; i < temp.size(); i++) {
- files_by_compaction_pri.push_back(temp[i].index);
+ for (size_t i = 0; i < temp.size(); i++) {
+ files_by_compaction_pri.push_back(static_cast<int>(temp[i].index));
}
next_file_to_compact_by_size_[level] = 0;
assert(files_[level].size() == files_by_compaction_pri_[level].size());
@@ -3316,7 +3316,7 @@ bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
for (size_t i = 0; i < c->num_input_files(input); ++i) {
uint64_t number = c->input(input, i)->fd.GetNumber();
bool found = false;
- for (unsigned int j = 0; j < vstorage->files_[level].size(); j++) {
+ for (size_t j = 0; j < vstorage->files_[level].size(); j++) {
FileMetaData* f = vstorage->files_[level][j];
if (f->fd.GetNumber() == number) {
found = true;

View File

@@ -23,7 +23,7 @@ class GenerateLevelFilesBriefTest : public testing::Test {
GenerateLevelFilesBriefTest() { }
~GenerateLevelFilesBriefTest() {
- for (unsigned int i = 0; i < files_.size(); i++) {
+ for (size_t i = 0; i < files_.size(); i++) {
delete files_[i];
}
}

View File

@@ -129,11 +129,7 @@ WriteBatch& WriteBatch::operator=(WriteBatch&& src) {
return *this;
}
- WriteBatch::~WriteBatch() {
- if (save_points_ != nullptr) {
- delete save_points_;
- }
- }
+ WriteBatch::~WriteBatch() { delete save_points_; }
WriteBatch::Handler::~Handler() { }