Merge pull request #324 from dalgaaf/wip-da-SCA-20140930
Various SCA (static code analysis) fixes
commit 986dad0251
@@ -920,7 +920,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
         "earliest-file-size %" PRIu64,
         version->cfd_->GetName().c_str(), candidate_size, earliest_file_size);
   }
-  assert(start_index >= 0 && start_index < files.size() - 1);
+  assert(start_index < files.size() - 1);

   // Estimate total file size
   uint64_t estimated_total_size = 0;
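The dropped `start_index >= 0` clause is the classic unsigned-tautology finding: `start_index` has an unsigned type, so the comparison is always true and the analyzer flags it as dead. A minimal standalone sketch of the pattern (mine, not RocksDB code):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

int main() {
  std::vector<int> files(4);
  size_t start_index = 2;
  // `start_index >= 0` is always true for an unsigned type, so only the
  // upper-bound check carries any information.
  assert(start_index < files.size() - 1);
  return 0;
}
```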
@@ -131,7 +131,7 @@ class CorruptionTest {
     ASSERT_GE(max_expected, correct);
   }

-  void CorruptFile(const std::string fname, int offset, int bytes_to_corrupt) {
+  void CorruptFile(const std::string& fname, int offset, int bytes_to_corrupt) {
     struct stat sbuf;
     if (stat(fname.c_str(), &sbuf) != 0) {
       const char* msg = strerror(errno);
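This hunk and several below are the same pass-by-const-reference fix: taking a `std::string` (or `Slice`, or `std::vector`) parameter by value copies it on every call, while `const&` binds to the caller's object directly. A minimal sketch, using hypothetical `ByValue`/`ByRef` helpers:

```cpp
#include <iostream>
#include <string>

void ByValue(const std::string s) { std::cout << s.size() << '\n'; }  // copies
void ByRef(const std::string& s) { std::cout << s.size() << '\n'; }   // no copy

int main() {
  std::string fname(1 << 20, 'x');  // a 1 MiB path-like string
  ByValue(fname);                   // allocates and copies the whole buffer
  ByRef(fname);                     // passes a reference; no allocation
  return 0;
}
```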
@@ -218,6 +218,7 @@ TEST(CuckooTableDBTest, Uint64Comparator) {

   // Add more keys.
   ASSERT_OK(Delete(Uint64Key(2)));  // Delete.
+  dbfull()->TEST_FlushMemTable();
   ASSERT_OK(Put(Uint64Key(3), "v0"));  // Update.
   ASSERT_OK(Put(Uint64Key(4), "v4"));
   dbfull()->TEST_FlushMemTable();
@@ -3064,7 +3064,6 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
   assert(compact);
   compact->CleanupBatchBuffer();
   compact->CleanupMergedBuffer();
-  bool prefix_initialized = false;

   // Generate file_levels_ for compaction berfore making Iterator
   compact->compaction->GenerateFileLevels();
@@ -3149,6 +3148,7 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
   // 2) send value_buffer to compaction filter and alternate the values;
   // 3) merge value_buffer with ineligible_value_buffer;
   // 4) run the modified "compaction" using the old for loop.
+  bool prefix_initialized = false;
   shared_ptr<Iterator> backup_input(
       versions_->MakeInputIterator(compact->compaction));
   backup_input->SeekToFirst();
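These two hunks are one fix: `prefix_initialized` was declared near the top of `DoCompactionWork` but first used much later, so the declaration moves to the point of use (the variable-scope class of cppcheck finding). A minimal sketch of the pattern, with hypothetical names:

```cpp
#include <cstdio>

void Work(bool use_prefix) {
  std::puts("long stretch of unrelated setup runs here");

  // Declared where it is first needed, not at function top: the reader
  // sees its whole lifetime at a glance and nothing above can touch it.
  bool prefix_initialized = false;
  for (int i = 0; i < 3; ++i) {
    if (use_prefix && !prefix_initialized) {
      prefix_initialized = true;
      std::puts("prefix initialized once");
    }
  }
}

int main() { Work(true); return 0; }
```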
@@ -4037,11 +4037,10 @@ Status DBImpl::Write(const WriteOptions& options, WriteBatch* my_batch) {
       RecordTick(stats_, WAL_FILE_BYTES, log_size);
       if (status.ok() && options.sync) {
         RecordTick(stats_, WAL_FILE_SYNCED);
+        StopWatch sw(env_, stats_, WAL_FILE_SYNC_MICROS);
         if (db_options_.use_fsync) {
-          StopWatch(env_, stats_, WAL_FILE_SYNC_MICROS);
           status = log_->file()->Fsync();
         } else {
-          StopWatch(env_, stats_, WAL_FILE_SYNC_MICROS);
           status = log_->file()->Sync();
         }
       }
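This is the one behavioral bug in the batch: `StopWatch(env_, stats_, WAL_FILE_SYNC_MICROS);` constructs an unnamed temporary that is destroyed at the end of that statement, so it timed nothing. The named `sw` lives to the end of the enclosing block and actually measures the fsync/sync. A standalone sketch with a hypothetical `Timer` standing in for RocksDB's `StopWatch`:

```cpp
#include <chrono>
#include <cstdio>
#include <thread>

struct Timer {
  std::chrono::steady_clock::time_point start =
      std::chrono::steady_clock::now();
  ~Timer() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start)
                  .count();
    std::printf("elapsed: %lld us\n", static_cast<long long>(us));
  }
};

int main() {
  Timer();  // BUG: unnamed temporary, destroyed immediately; prints ~0 us
  Timer t;  // named object: destroyed at end of main, so it times the sleep
  std::this_thread::sleep_for(std::chrono::milliseconds(10));
  return 0;
}
```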
@@ -6138,7 +6138,7 @@ class WrappedBloom : public FilterPolicy {
   const FilterPolicy* filter_;
   mutable uint32_t counter_;

-  rocksdb::Slice convertKey(const rocksdb::Slice key) const {
+  rocksdb::Slice convertKey(const rocksdb::Slice& key) const {
     return key;
   }
 };
@@ -413,7 +413,6 @@ static bool SaveValue(void* arg, const char* entry) {
         *(s->found_final_value) = true;
         return false;
       }
-      std::string merge_result;  // temporary area for merge results later
       Slice v = GetLengthPrefixedSlice(key_ptr + key_length);
       *(s->merge_in_progress) = true;
       merge_context->PushOperand(v);
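A plain unused-variable removal: `merge_result` was declared but never read on this path, so it only paid for a construction and destruction and tripped the analyzer's unused-variable check. Deleting it is safe because nothing below references it.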
@@ -2169,7 +2169,7 @@ Status VersionSet::Recover(

   // there were some column families in the MANIFEST that weren't specified
   // in the argument. This is OK in read_only mode
-  if (read_only == false && column_families_not_found.size() > 0) {
+  if (read_only == false && !column_families_not_found.empty()) {
     std::string list_of_not_found;
     for (const auto& cf : column_families_not_found) {
       list_of_not_found += ", " + cf.second;
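`!container.empty()` states the intent directly and is guaranteed constant-time, whereas `size() > 0` reads as arithmetic and, for some pre-C++11 containers (notably `std::list`), `size()` could even be O(n); this is the usual analyzer suggestion behind the change. A small sketch:

```cpp
#include <cassert>
#include <map>
#include <string>

int main() {
  std::map<int, std::string> column_families_not_found;
  assert(column_families_not_found.empty());   // preferred emptiness test
  column_families_not_found[7] = "cf_name";
  assert(!column_families_not_found.empty());  // instead of size() > 0
  return 0;
}
```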
@@ -11,7 +11,7 @@

 namespace rocksdb {

-void BloomBlockBuilder::AddKeysHashes(const std::vector<uint32_t> keys_hashes) {
+void BloomBlockBuilder::AddKeysHashes(const std::vector<uint32_t>& keys_hashes) {
   for (auto hash : keys_hashes) {
     bloom_.AddHash(hash);
   }
@@ -26,7 +26,7 @@ class BloomBlockBuilder {

   uint32_t GetNumBlocks() const { return bloom_.GetNumBlocks(); }

-  void AddKeysHashes(const std::vector<uint32_t> keys_hashes);
+  void AddKeysHashes(const std::vector<uint32_t>& keys_hashes);

   Slice Finish();

@@ -191,7 +191,7 @@ class CuckooTableIterator : public Iterator {

  private:
   struct BucketComparator {
-    BucketComparator(const Slice file_data, const Comparator* ucomp,
+    BucketComparator(const Slice& file_data, const Comparator* ucomp,
                      uint32_t bucket_len, uint32_t user_key_len,
                      const Slice target = Slice())
         : file_data_(file_data),
@@ -52,10 +52,10 @@ std::string PlainTableFactory::GetPrintableTableOptions() const {
   snprintf(buffer, kBufferSize, " hash_table_ratio: %lf\n",
            hash_table_ratio_);
   ret.append(buffer);
-  snprintf(buffer, kBufferSize, " index_sparseness: %zd\n",
+  snprintf(buffer, kBufferSize, " index_sparseness: %zu\n",
            index_sparseness_);
   ret.append(buffer);
-  snprintf(buffer, kBufferSize, " huge_page_tlb_size: %zd\n",
+  snprintf(buffer, kBufferSize, " huge_page_tlb_size: %zu\n",
            huge_page_tlb_size_);
   ret.append(buffer);
   snprintf(buffer, kBufferSize, " encoding_type: %d\n",
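`%zd` is the specifier for the signed `ssize_t`; `size_t` members like `index_sparseness_` are unsigned, so the matching pair is `%zu`, and a specifier/argument mismatch is formally undefined behavior. Sketch:

```cpp
#include <cstddef>
#include <cstdio>

int main() {
  size_t index_sparseness = 16;  // unsigned, like the option being printed
  // %zu matches size_t; %zd would promise a signed argument instead.
  std::printf(" index_sparseness: %zu\n", index_sparseness);
  return 0;
}
```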
@@ -1216,7 +1216,7 @@ static std::string RandomString(Random* rnd, int len) {
   return r;
 }

-void AddInternalKey(TableConstructor* c, const std::string prefix,
+void AddInternalKey(TableConstructor* c, const std::string& prefix,
                     int suffix_len = 800) {
   static Random rnd(1023);
   InternalKey k(prefix + RandomString(&rnd, 800), 0, kTypeValue);
@@ -325,7 +325,7 @@ bool LDBCommand::ParseKeyValue(const string& line, string* key, string* value,
 bool LDBCommand::ValidateCmdLineOptions() {

   for (map<string, string>::const_iterator itr = option_map_.begin();
-       itr != option_map_.end(); itr++) {
+       itr != option_map_.end(); ++itr) {
     if (find(valid_cmd_line_options_.begin(),
              valid_cmd_line_options_.end(), itr->first) ==
         valid_cmd_line_options_.end()) {
@@ -335,7 +335,7 @@ bool LDBCommand::ValidateCmdLineOptions() {
   }

   for (vector<string>::const_iterator itr = flags_.begin();
-       itr != flags_.end(); itr++) {
+       itr != flags_.end(); ++itr) {
     if (find(valid_cmd_line_options_.begin(),
              valid_cmd_line_options_.end(), *itr) ==
         valid_cmd_line_options_.end()) {
@@ -1538,7 +1538,7 @@ void BatchPutCommand::DoCommand() {
   WriteBatch batch;

   for (vector<pair<string, string>>::const_iterator itr
-       = key_values_.begin(); itr != key_values_.end(); itr++) {
+       = key_values_.begin(); itr != key_values_.end(); ++itr) {
     batch.Put(itr->first, itr->second);
   }
   Status st = db_->Write(WriteOptions(), &batch);
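The `itr++` to `++itr` changes here and in the ttl tests below are the postfix-operator finding: postfix increment must materialize a copy of the iterator's old value, which these loops immediately discard, so the prefix form is never worse and is the idiomatic choice for non-trivial iterators. Sketch:

```cpp
#include <iostream>
#include <map>
#include <string>

int main() {
  std::map<std::string, std::string> option_map{{"db", "./d"}, {"ttl", "5"}};
  // ++itr avoids constructing the throwaway copy that itr++ returns.
  for (auto itr = option_map.begin(); itr != option_map.end(); ++itr) {
    std::cout << itr->first << '=' << itr->second << '\n';
  }
  return 0;
}
```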
@@ -13,15 +13,10 @@ public:
     EXEC_NOT_STARTED = 0, EXEC_SUCCEED = 1, EXEC_FAILED = 2,
   };

-  LDBCommandExecuteResult() {
-    state_ = EXEC_NOT_STARTED;
-    message_ = "";
-  }
+  LDBCommandExecuteResult() : state_(EXEC_NOT_STARTED), message_("") {}

-  LDBCommandExecuteResult(State state, std::string& msg) {
-    state_ = state;
-    message_ = msg;
-  }
+  LDBCommandExecuteResult(State state, std::string& msg) :
+    state_(state), message_(msg) {}

   std::string ToString() {
     std::string ret;
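Constructor bodies that assign members first default-construct them and then assign; an initializer list constructs each member directly with its final value, which is what the analyzer's initialization-list suggestion asks for. A sketch with a hypothetical `ExecuteResult` mirroring the class above:

```cpp
#include <string>

class ExecuteResult {
 public:
  enum State { NOT_STARTED = 0, SUCCEEDED = 1, FAILED = 2 };

  // Members are constructed once, directly with their final values.
  ExecuteResult() : state_(NOT_STARTED), message_("") {}
  ExecuteResult(State state, const std::string& msg)
      : state_(state), message_(msg) {}

 private:
  State state_;
  std::string message_;
};

int main() {
  ExecuteResult r(ExecuteResult::FAILED, "no such key");
  (void)r;
  return 0;
}
```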
@@ -9,6 +9,7 @@
 namespace {
 void f0() {
   char *p = nullptr;
+  // cppcheck-suppress nullPointer
   *p = 10; /* SIGSEGV here!! */
 }

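Rather than changing the test, this fix documents the intentional null dereference for the analyzer: with cppcheck's inline suppressions enabled, a `// cppcheck-suppress <id>` comment on the line above silences exactly that one finding. A sketch (assuming an invocation like `cppcheck --inline-suppr file.cc`):

```cpp
namespace {
void f0() {
  char* p = nullptr;
  // cppcheck-suppress nullPointer
  *p = 10; /* the SIGSEGV is the whole point of the signal test */
}
}  // namespace

int main() {
  (void)&f0;  // referenced but deliberately never called here
  return 0;
}
```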
@@ -33,7 +33,7 @@ namespace {
 // > 0 <=> lhs == rhs
 // TODO(icanadi) move this to JSONDocument?
 int DocumentCompare(const JSONDocument& lhs, const JSONDocument& rhs) {
-  assert(rhs.IsObject() == false && rhs.IsObject() == false &&
+  assert(lhs.IsObject() == false && rhs.IsObject() == false &&
          lhs.type() == rhs.type());

   switch (lhs.type()) {
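The old assert tested `rhs.IsObject()` twice, a copy-paste slip that let an object-typed `lhs` slip through the precondition; symmetric checks should name both operands. Sketch of the corrected invariant:

```cpp
#include <cassert>

struct Doc {
  bool is_object;
  bool IsObject() const { return is_object; }
};

int main() {
  Doc lhs{false}, rhs{false};
  // Both sides checked, as in the fixed assert above.
  assert(lhs.IsObject() == false && rhs.IsObject() == false);
  return 0;
}
```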
@@ -376,7 +376,7 @@ class IndexKey {

 class SimpleSortedIndex : public Index {
  public:
-  SimpleSortedIndex(const std::string field, const std::string& name)
+  SimpleSortedIndex(const std::string& field, const std::string& name)
       : field_(field), name_(name) {}

   virtual const char* Name() const override { return name_.c_str(); }
@@ -369,7 +369,7 @@ class SpatialIndexCursor : public Cursor {
     }
     delete spatial_iterator;

-    valid_ = valid_ && primary_key_ids_.size() > 0;
+    valid_ = valid_ && !primary_key_ids_.empty();

     if (valid_) {
       primary_keys_iterator_ = primary_key_ids_.begin();
@@ -206,7 +206,7 @@ class TtlCompactionFilterFactory : public CompactionFilterFactory {
 class TtlMergeOperator : public MergeOperator {

  public:
-  explicit TtlMergeOperator(const std::shared_ptr<MergeOperator> merge_op,
+  explicit TtlMergeOperator(const std::shared_ptr<MergeOperator>& merge_op,
                             Env* env)
       : user_merge_op_(merge_op), env_(env) {
     assert(merge_op);
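Same pass-by-reference theme, but for `shared_ptr`: copying one bumps an atomic reference count, so even a constructor that stores its own copy anyway should take the argument by `const&` to avoid a second, throwaway refcount round-trip at each call. Sketch with a hypothetical `MergeOp`:

```cpp
#include <cstdio>
#include <memory>

struct MergeOp { int id = 7; };

// Taking const& means the call itself does no atomic refcount traffic;
// the one copy happens where the pointer is actually stored.
void Store(const std::shared_ptr<MergeOp>& op) {
  std::shared_ptr<MergeOp> kept = op;  // the single intended copy
  std::printf("use_count: %ld\n", kept.use_count());
}

int main() {
  auto op = std::make_shared<MergeOp>();
  Store(op);
  return 0;
}
```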
@@ -120,7 +120,7 @@ class TtlTest {
     static FlushOptions flush_opts;
     WriteBatch batch;
     kv_it_ = kvmap_.begin();
-    for (int i = 0; i < num_ops && kv_it_ != kvmap_.end(); i++, kv_it_++) {
+    for (int i = 0; i < num_ops && kv_it_ != kvmap_.end(); i++, ++kv_it_) {
       switch (batch_ops[i]) {
         case PUT:
           batch.Put(kv_it_->first, kv_it_->second);
@@ -145,7 +145,7 @@ class TtlTest {
     static FlushOptions flush_opts;
     kv_it_ = kvmap_.begin();
     advance(kv_it_, start_pos_map);
-    for (int i = 0; kv_it_ != kvmap_.end() && i < num_entries; i++, kv_it_++) {
+    for (int i = 0; kv_it_ != kvmap_.end() && i < num_entries; i++, ++kv_it_) {
       ASSERT_OK(cf == nullptr
                     ? db_ttl_->Put(wopts, kv_it_->first, kv_it_->second)
                     : db_ttl_->Put(wopts, cf, kv_it_->first, kv_it_->second));
@@ -207,7 +207,7 @@ class TtlTest {
     kv_it_ = kvmap_.begin();
     advance(kv_it_, st_pos);
     std::string v;
-    for (int i = 0; kv_it_ != kvmap_.end() && i < span; i++, kv_it_++) {
+    for (int i = 0; kv_it_ != kvmap_.end() && i < span; i++, ++kv_it_) {
       Status s = (cf == nullptr) ? db_ttl_->Get(ropts, kv_it_->first, &v)
                                  : db_ttl_->Get(ropts, cf, kv_it_->first, &v);
       if (s.ok() != check) {
@@ -252,7 +252,7 @@ class TtlTest {
     } else {  // dbiter should have found out kvmap_[st_pos]
       for (int i = st_pos;
            kv_it_ != kvmap_.end() && i < st_pos + span;
-           i++, kv_it_++) {
+           i++, ++kv_it_) {
         ASSERT_TRUE(dbiter->Valid());
         ASSERT_EQ(dbiter->value().compare(kv_it_->second), 0);
         dbiter->Next();