Merge pull request #327 from dalgaaf/wip-da-SCA-20141001
Fix some issues from SCA
commit d6987216c9
@@ -287,7 +287,6 @@ void DBIter::MergeValuesNewToOld() {
   std::deque<std::string> operands;
   operands.push_front(iter_->value().ToString());
 
-  std::string merge_result;  // Temporary string to hold merge result later
   ParsedInternalKey ikey;
   for (iter_->Next(); iter_->Valid(); iter_->Next()) {
     if (!ParseKey(&ikey)) {
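Note: the line removed above is a classic dead local, a variable that is declared (and even commented) but never read. As a self-contained sketch of the warning class this PR addresses (hypothetical code, not from RocksDB), tools such as cppcheck report this as an unused variable:

    #include <string>

    int Sum(int a, int b) {
      std::string scratch;  // flagged by SCA: declared but never read
      return a + b;
    }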
@@ -4684,9 +4684,9 @@ TEST(DBTest, CompactionFilterContextManual) {
   ASSERT_EQ(NumTableFilesAtLevel(0), 1);
 
   // Verify total number of keys is correct after manual compaction.
-  int count = 0;
-  int total = 0;
   {
+    int count = 0;
+    int total = 0;
     Arena arena;
     ScopedArenaIterator iter(dbfull()->TEST_NewInternalIterator(&arena));
     iter->SeekToFirst();
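Note: moving `count` and `total` inside the braces is the standard fix for a "scope of variable can be reduced" finding. A minimal sketch of the pattern (hypothetical code):

    #include <vector>

    void CountPositives(const std::vector<int>& v) {
      {
        int count = 0;  // declared in the smallest scope that uses it
        for (int x : v) {
          if (x > 0) ++count;
        }
        // checks on count happen here, inside the block
      }
      // count is out of scope here, so stale reads no longer compile
    }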
@@ -8205,7 +8205,6 @@ static void RandomTimeoutWriter(void* arg) {
     if (write_opt.timeout_hint_us == 0 ||
         put_duration + kTimerBias < write_opt.timeout_hint_us) {
       ASSERT_OK(s);
-      std::string result;
     }
     if (s.IsTimedOut()) {
       timeout_count++;
@@ -148,7 +148,6 @@ class DeleteFileTest {
 TEST(DeleteFileTest, AddKeysAndQueryLevels) {
   CreateTwoLevels();
   std::vector<LiveFileMetaData> metadata;
-  std::vector<int> keysinlevel;
   db_->GetLiveFilesMetaData(&metadata);
 
   std::string level1file = "";
@@ -1219,7 +1219,7 @@ bool Version::HasOverlappingUserKey(
     // Check the last file in inputs against the file after it
     size_t last_file = FindFile(cfd_->internal_comparator(), file_level,
                                 inputs->back()->largest.Encode());
-    assert(0 <= last_file && last_file < kNumFiles);  // File should exist!
+    assert(last_file < kNumFiles);  // File should exist!
     if (last_file < kNumFiles-1) {  // If not the last file
       const Slice last_key_in_input = ExtractUserKey(
           files[last_file].largest_key);
@@ -1234,7 +1234,7 @@ bool Version::HasOverlappingUserKey(
     // Check the first file in inputs against the file just before it
     size_t first_file = FindFile(cfd_->internal_comparator(), file_level,
                                  inputs->front()->smallest.Encode());
-    assert(0 <= first_file && first_file <= last_file);  // File should exist!
+    assert(first_file <= last_file);  // File should exist!
     if (first_file > 0) {  // If not first file
       const Slice& first_key_in_input = ExtractUserKey(
           files[first_file].smallest_key);
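Note: both asserts above drop a `0 <= x` clause. `FindFile` returns `size_t`, which is unsigned, so the dropped comparison was always true and some compilers warn about the tautology. A small illustration (hypothetical code):

    #include <cassert>
    #include <cstddef>

    void CheckIndex(size_t index, size_t size) {
      // assert(0 <= index && index < size);  // "0 <= index" is always true for unsigned
      assert(index < size);  // equivalent check, without the tautology
    }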
@@ -721,7 +721,6 @@ Status BlockBasedTableBuilder::Finish() {
     // Write properties block.
     {
       PropertyBlockBuilder property_block_builder;
-      std::vector<std::string> failed_user_prop_collectors;
       r->props.filter_policy_name = r->table_options.filter_policy != nullptr ?
           r->table_options.filter_policy->Name() : "";
       r->props.index_size =
@@ -193,7 +193,7 @@ class CuckooTableIterator : public Iterator {
   struct BucketComparator {
     BucketComparator(const Slice& file_data, const Comparator* ucomp,
                      uint32_t bucket_len, uint32_t user_key_len,
-                     const Slice target = Slice())
+                     const Slice& target = Slice())
         : file_data_(file_data),
           ucomp_(ucomp),
           bucket_len_(bucket_len),
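Note: with `const Slice& target = Slice()`, the reference still binds to the default-constructed temporary (which lives for the duration of the call) while sparing a copy when a caller passes its own slice. A standalone sketch with a hypothetical `Slice`-like struct:

    #include <cstddef>

    struct SliceLike {
      const char* data = "";
      size_t size = 0;
    };

    // A const reference binds to lvalue arguments and to the default
    // temporary alike; no copy of the argument is made either way.
    bool IsEmpty(const SliceLike& target = SliceLike()) {
      return target.size == 0;
    }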
@@ -334,9 +334,9 @@ Status UncompressBlockContents(const char* data, size_t n,
     case kZlibCompression:
       ubuf = std::unique_ptr<char[]>(
           port::Zlib_Uncompress(data, n, &decompress_size));
-      static char zlib_corrupt_msg[] =
-          "Zlib not supported or corrupted Zlib compressed block contents";
       if (!ubuf) {
+        static char zlib_corrupt_msg[] =
+            "Zlib not supported or corrupted Zlib compressed block contents";
         return Status::Corruption(zlib_corrupt_msg);
       }
       *contents =
@@ -345,9 +345,9 @@ Status UncompressBlockContents(const char* data, size_t n,
     case kBZip2Compression:
       ubuf = std::unique_ptr<char[]>(
           port::BZip2_Uncompress(data, n, &decompress_size));
-      static char bzip2_corrupt_msg[] =
-          "Bzip2 not supported or corrupted Bzip2 compressed block contents";
       if (!ubuf) {
+        static char bzip2_corrupt_msg[] =
+            "Bzip2 not supported or corrupted Bzip2 compressed block contents";
         return Status::Corruption(bzip2_corrupt_msg);
       }
       *contents =
@@ -356,9 +356,9 @@ Status UncompressBlockContents(const char* data, size_t n,
     case kLZ4Compression:
       ubuf = std::unique_ptr<char[]>(
           port::LZ4_Uncompress(data, n, &decompress_size));
-      static char lz4_corrupt_msg[] =
-          "LZ4 not supported or corrupted LZ4 compressed block contents";
       if (!ubuf) {
+        static char lz4_corrupt_msg[] =
+            "LZ4 not supported or corrupted LZ4 compressed block contents";
         return Status::Corruption(lz4_corrupt_msg);
       }
       *contents =
@@ -367,9 +367,9 @@ Status UncompressBlockContents(const char* data, size_t n,
     case kLZ4HCCompression:
       ubuf = std::unique_ptr<char[]>(
           port::LZ4_Uncompress(data, n, &decompress_size));
-      static char lz4hc_corrupt_msg[] =
-          "LZ4HC not supported or corrupted LZ4HC compressed block contents";
       if (!ubuf) {
+        static char lz4hc_corrupt_msg[] =
+            "LZ4HC not supported or corrupted LZ4HC compressed block contents";
         return Status::Corruption(lz4hc_corrupt_msg);
       }
       *contents =
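Note: the four hunks above are the same scope fix repeated per codec: each `static` message array is needed only on the error path, so it moves inside `if (!ubuf)`. Because the arrays are `static`, their storage outlives the block and the pointer handed to `Status::Corruption` stays valid. A reduced sketch (hypothetical function, not the RocksDB API):

    #include <memory>

    const char* CheckUncompressed(const std::unique_ptr<char[]>& ubuf) {
      if (!ubuf) {
        // static: storage persists after return, so returning it is safe;
        // declared here because only the failure path uses it
        static char corrupt_msg[] = "not supported or corrupted block contents";
        return corrupt_msg;
      }
      return nullptr;  // success
    }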
@@ -386,7 +386,7 @@ class Value {
 
 namespace {
 void deleter(const Slice& key, void* value) {
-  delete (Value *)value;
+  delete static_cast<Value *>(value);
 }
 }  // namespace
 
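Note: `static_cast` states exactly which conversion is intended, cannot silently cast away constness the way a C-style cast can, and is easy to grep for. A minimal version of the deleter pattern (hypothetical code):

    struct Value {
      int v;
    };

    // Cache deleter callbacks receive void*; static_cast documents the
    // intended pointer conversion, unlike the old (Value *)value form.
    void Deleter(void* value) {
      delete static_cast<Value*>(value);
    }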
@@ -228,7 +228,7 @@ class FileManager : public EnvWrapper {
  public:
   explicit FileManager(Env* t) : EnvWrapper(t), rnd_(5) {}
 
-  Status DeleteRandomFileInDir(const std::string dir) {
+  Status DeleteRandomFileInDir(const std::string& dir) {
     std::vector<std::string> children;
     GetChildren(dir, &children);
     if (children.size() <= 2) {  // . and ..
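Note: `const std::string dir` copies the whole string on every call; `const std::string& dir` keeps the callee read-only without the copy. The same change recurs in the `Parse`, `TestFilter`, and `TestFilterFactory` hunks below. A tiny sketch (hypothetical function):

    #include <string>

    // By-value (const std::string dir) would copy the buffer; the
    // const reference avoids that and still forbids modification.
    bool HasTrailingSlash(const std::string& dir) {
      return !dir.empty() && dir.back() == '/';
    }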
@@ -407,7 +407,6 @@ class SimpleSortedIndex : public Index {
     assert(interval != nullptr);  // because index is useful
     Direction direction;
 
-    std::string op;
     const JSONDocument* limit;
     if (interval->lower_bound != nullptr) {
       limit = interval->lower_bound;
@@ -56,7 +56,7 @@ class DocumentDBTest {
     }
   }
 
-  JSONDocument* Parse(const std::string doc) {
+  JSONDocument* Parse(const std::string& doc) {
     return JSONDocument::ParseJSON(ConvertQuotes(doc).c_str());
   }
 
@@ -263,7 +263,7 @@ class TtlTest {
 
   class TestFilter : public CompactionFilter {
    public:
-    TestFilter(const int64_t kSampleSize, const std::string kNewValue)
+    TestFilter(const int64_t kSampleSize, const std::string& kNewValue)
         : kSampleSize_(kSampleSize),
           kNewValue_(kNewValue) {
     }
@@ -311,7 +311,7 @@ class TtlTest {
 
   class TestFilterFactory : public CompactionFilterFactory {
    public:
-    TestFilterFactory(const int64_t kSampleSize, const std::string kNewValue)
+    TestFilterFactory(const int64_t kSampleSize, const std::string& kNewValue)
         : kSampleSize_(kSampleSize),
          kNewValue_(kNewValue) {
     }