Turn -Wshadow back on
Summary: It turns out that -Wshadow has different rules for gcc than clang. Previous commit fixed clang. This commit fixes the rest of the warnings for gcc.

Test Plan: compiles

Reviewers: ljin, yhchiang, rven, sdong

Reviewed By: sdong

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D28131
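Note (editor's illustration, not part of the change): gcc's -Wshadow also fires when a function parameter or local variable shadows a class member, including a member function, which clang's -Wshadow does not flag; that is why this diff renames parameters such as offset to _offset. A minimal sketch, with hypothetical names:

  // Hypothetical class; the names are illustrative only.
  class BlockHandleLike {
   public:
    uint64_t offset() const { return offset_; }
    // gcc -Wshadow: parameter 'offset' shadows the member function offset();
    // clang's -Wshadow does not warn here.
    void set_offset(uint64_t offset) { offset_ = offset; }
    // Renaming the parameter (the pattern used throughout this diff) avoids it.
    void set_offset_quiet(uint64_t _offset) { offset_ = _offset; }
   private:
    uint64_t offset_ = 0;
  };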
parent c02338a698
commit 9f20395cd6

Makefile
@@ -69,7 +69,7 @@ install:
	@[ ! -e $(SHARED) ] || install -C -m 644 $(SHARED) $(INSTALL_PATH)/lib

#-------------------------------------------------

WARNING_FLAGS = -Wall -Werror -Wsign-compare
WARNING_FLAGS = -Wall -Werror -Wsign-compare -Wshadow
CFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
CXXFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) -Woverloaded-virtual
@@ -62,9 +62,9 @@ uint64_t SlowdownAmount(int n, double bottom, double top) {
}
} // namespace

ColumnFamilyHandleImpl::ColumnFamilyHandleImpl(ColumnFamilyData* cfd,
DBImpl* db, port::Mutex* mutex)
: cfd_(cfd), db_(db), mutex_(mutex) {
ColumnFamilyHandleImpl::ColumnFamilyHandleImpl(
ColumnFamilyData* column_family_data, DBImpl* db, port::Mutex* mutex)
: cfd_(column_family_data), db_(db), mutex_(mutex) {
if (cfd_ != nullptr) {
cfd_->Ref();
}

@@ -217,14 +217,15 @@ void SuperVersionUnrefHandle(void* ptr) {
} // anonymous namespace

ColumnFamilyData::ColumnFamilyData(uint32_t id, const std::string& name,
Version* dummy_versions, Cache* table_cache,
Version* _dummy_versions,
Cache* _table_cache,
const ColumnFamilyOptions& cf_options,
const DBOptions* db_options,
const EnvOptions& env_options,
ColumnFamilySet* column_family_set)
: id_(id),
name_(name),
dummy_versions_(dummy_versions),
dummy_versions_(_dummy_versions),
current_(nullptr),
refs_(0),
dropped_(false),

@@ -243,11 +244,11 @@ ColumnFamilyData::ColumnFamilyData(uint32_t id, const std::string& name,
column_family_set_(column_family_set) {
Ref();

// if dummy_versions is nullptr, then this is a dummy column family.
if (dummy_versions != nullptr) {
// if _dummy_versions is nullptr, then this is a dummy column family.
if (_dummy_versions != nullptr) {
internal_stats_.reset(
new InternalStats(ioptions_.num_levels, db_options->env, this));
table_cache_.reset(new TableCache(ioptions_, env_options, table_cache));
table_cache_.reset(new TableCache(ioptions_, env_options, _table_cache));
if (ioptions_.compaction_style == kCompactionStyleUniversal) {
compaction_picker_.reset(
new UniversalCompactionPicker(ioptions_, &internal_comparator_));

@@ -389,7 +390,9 @@ const EnvOptions* ColumnFamilyData::soptions() const {
return &(column_family_set_->env_options_);
}

void ColumnFamilyData::SetCurrent(Version* current) { current_ = current; }
void ColumnFamilyData::SetCurrent(Version* current_version) {
current_ = current_version;
}

void ColumnFamilyData::CreateNewMemtable(
const MutableCFOptions& mutable_cf_options) {
@@ -70,7 +70,7 @@ class ColumnFamilyHandleInternal : public ColumnFamilyHandleImpl {
ColumnFamilyHandleInternal()
: ColumnFamilyHandleImpl(nullptr, nullptr, nullptr) {}

void SetCFD(ColumnFamilyData* cfd) { internal_cfd_ = cfd; }
void SetCFD(ColumnFamilyData* _cfd) { internal_cfd_ = _cfd; }
virtual ColumnFamilyData* cfd() const override { return internal_cfd_; }

private:

@@ -178,7 +178,7 @@ class ColumnFamilyData {
// REQUIRES: DB mutex held
// This returns the MutableCFOptions used by current SuperVersion
// You shoul use this API to reference MutableCFOptions most of the time.
const MutableCFOptions* mutable_cf_options() const {
const MutableCFOptions* GetCurrentMutableCFOptions() const {
return &(super_version_->mutable_cf_options);
}
// REQUIRES: DB mutex held
@@ -29,8 +29,8 @@ uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
return sum;
}

void Compaction::SetInputVersion(Version* input_version) {
input_version_ = input_version;
void Compaction::SetInputVersion(Version* _input_version) {
input_version_ = _input_version;
cfd_ = input_version_->cfd();

cfd_->Ref();

@@ -111,10 +111,10 @@ bool Compaction::IsTrivialMove() const {
TotalFileSize(grandparents_) <= max_grandparent_overlap_bytes_);
}

void Compaction::AddInputDeletions(VersionEdit* edit) {
void Compaction::AddInputDeletions(VersionEdit* out_edit) {
for (int which = 0; which < num_input_levels(); which++) {
for (size_t i = 0; i < inputs_[which].size(); i++) {
edit->DeleteFile(level(which), inputs_[which][i]->fd.GetNumber());
out_edit->DeleteFile(level(which), inputs_[which][i]->fd.GetNumber());
}
}
}

@@ -261,14 +261,15 @@ void Compaction::Summary(char* output, int len) {
return;
}

for (int level = 0; level < num_input_levels(); ++level) {
if (level > 0) {
for (int level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
if (level_iter > 0) {
write += snprintf(output + write, len - write, "], [");
if (write < 0 || write >= len) {
return;
}
}
write += InputSummary(inputs_[level].files, output + write, len - write);
write +=
InputSummary(inputs_[level_iter].files, output + write, len - write);
if (write < 0 || write >= len) {
return;
}

@@ -284,8 +285,8 @@ uint64_t Compaction::OutputFilePreallocationSize(
if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel) {
preallocation_size = mutable_options.MaxFileSizeForLevel(output_level());
} else {
for (int level = 0; level < num_input_levels(); ++level) {
for (const auto& f : inputs_[level].files) {
for (int level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
for (const auto& f : inputs_[level_iter].files) {
preallocation_size += f->fd.GetFileSize();
}
}
@@ -312,8 +312,8 @@ void DBIter::MergeValuesNewToOld() {
// hit a put, merge the put value with operands and store the
// final result in saved_value_. We are done!
// ignore corruption if there is any.
const Slice value = iter_->value();
user_merge_operator_->FullMerge(ikey.user_key, &value, operands,
const Slice val = iter_->value();
user_merge_operator_->FullMerge(ikey.user_key, &val, operands,
&saved_value_, logger_);
// iter_ is positioned after put
iter_->Next();

@@ -323,8 +323,8 @@ void DBIter::MergeValuesNewToOld() {
if (kTypeMerge == ikey.type) {
// hit a merge, add the value as an operand and run associative merge.
// when complete, add result to operands and continue.
const Slice& value = iter_->value();
operands.push_front(value.ToString());
const Slice& val = iter_->value();
operands.push_front(val.ToString());
}
}

@@ -505,8 +505,8 @@ bool DBIter::FindValueForCurrentKeyUsingSeek() {
return true;
}

const Slice& value = iter_->value();
user_merge_operator_->FullMerge(saved_key_.GetKey(), &value, operands,
const Slice& val = iter_->value();
user_merge_operator_->FullMerge(saved_key_.GetKey(), &val, operands,
&saved_value_, logger_);
valid_ = true;
return true;
@@ -33,20 +33,23 @@ class TestIterator : public Iterator {
iter_(0),
cmp(comparator) {}

void AddMerge(std::string key, std::string value) {
Add(key, kTypeMerge, value);
void AddMerge(std::string argkey, std::string argvalue) {
Add(argkey, kTypeMerge, argvalue);
}

void AddDeletion(std::string key) { Add(key, kTypeDeletion, std::string()); }

void AddPut(std::string key, std::string value) {
Add(key, kTypeValue, value);
void AddDeletion(std::string argkey) {
Add(argkey, kTypeDeletion, std::string());
}

void Add(std::string key, ValueType type, std::string value) {
void AddPut(std::string argkey, std::string argvalue) {
Add(argkey, kTypeValue, argvalue);
}

void Add(std::string argkey, ValueType type, std::string argvalue) {
valid_ = true;
ParsedInternalKey internal_key(key, sequence_number_++, type);
data_.push_back(std::pair<std::string, std::string>(std::string(), value));
ParsedInternalKey internal_key(argkey, sequence_number_++, type);
data_.push_back(
std::pair<std::string, std::string>(std::string(), argvalue));
AppendInternalKey(&data_.back().first, internal_key);
}
@@ -247,13 +247,13 @@ class SpecialEnv : public EnvWrapper {
return base_->GetFileSize();
}
};
class LogFile : public WritableFile {
class WalFile : public WritableFile {
private:
SpecialEnv* env_;
unique_ptr<WritableFile> base_;
public:
LogFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
: env_(env), base_(std::move(b)) { }
WalFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
: env_(env), base_(std::move(b)) {}
Status Append(const Slice& data) {
if (env_->log_write_error_.load(std::memory_order_acquire)) {
return Status::IOError("simulated writer error");

@@ -296,7 +296,7 @@ class SpecialEnv : public EnvWrapper {
} else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
r->reset(new ManifestFile(this, std::move(*r)));
} else if (strstr(f.c_str(), "log") != nullptr) {
r->reset(new LogFile(this, std::move(*r)));
r->reset(new WalFile(this, std::move(*r)));
}
}
return s;
@@ -127,8 +127,8 @@ void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
}
}

LookupKey::LookupKey(const Slice& user_key, SequenceNumber s) {
size_t usize = user_key.size();
LookupKey::LookupKey(const Slice& _user_key, SequenceNumber s) {
size_t usize = _user_key.size();
size_t needed = usize + 13; // A conservative estimate
char* dst;
if (needed <= sizeof(space_)) {

@@ -139,7 +139,7 @@ LookupKey::LookupKey(const Slice& user_key, SequenceNumber s) {
start_ = dst;
dst = EncodeVarint32(dst, usize + 8);
kstart_ = dst;
memcpy(dst, user_key.data(), usize);
memcpy(dst, _user_key.data(), usize);
dst += usize;
EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
dst += 8;
@@ -132,8 +132,8 @@ class InternalKey {
std::string rep_;
public:
InternalKey() { } // Leave rep_ as empty to indicate it is invalid
InternalKey(const Slice& user_key, SequenceNumber s, ValueType t) {
AppendInternalKey(&rep_, ParsedInternalKey(user_key, s, t));
InternalKey(const Slice& _user_key, SequenceNumber s, ValueType t) {
AppendInternalKey(&rep_, ParsedInternalKey(_user_key, s, t));
}

bool Valid() const {

@@ -201,7 +201,7 @@ class LookupKey {
public:
// Initialize *this for looking up user_key at a snapshot with
// the specified sequence number.
LookupKey(const Slice& user_key, SequenceNumber sequence);
LookupKey(const Slice& _user_key, SequenceNumber sequence);

~LookupKey();
@@ -94,7 +94,6 @@ TEST(FileIndexerTest, Empty) {
// Case 1: no overlap, files are on the left of next level files
TEST(FileIndexerTest, no_overlap_left) {
Arena arena;
uint32_t kNumLevels = 4;
indexer = new FileIndexer(&ucmp);
// level 1
AddFile(1, 100, 200);

@@ -135,7 +134,6 @@ TEST(FileIndexerTest, no_overlap_left) {
// Case 2: no overlap, files are on the right of next level files
TEST(FileIndexerTest, no_overlap_right) {
Arena arena;
uint32_t kNumLevels = 4;
indexer = new FileIndexer(&ucmp);
// level 1
AddFile(1, 2100, 2200);

@@ -178,7 +176,6 @@ TEST(FileIndexerTest, no_overlap_right) {
// Case 3: empty L2
TEST(FileIndexerTest, empty_L2) {
Arena arena;
uint32_t kNumLevels = 4;
indexer = new FileIndexer(&ucmp);
for (uint32_t i = 1; i < kNumLevels; ++i) {
ASSERT_EQ(0U, indexer->LevelIndexSize(i));
@@ -20,9 +20,9 @@ namespace log {
Reader::Reporter::~Reporter() {
}

Reader::Reader(unique_ptr<SequentialFile>&& file, Reporter* reporter,
Reader::Reader(unique_ptr<SequentialFile>&& _file, Reporter* reporter,
bool checksum, uint64_t initial_offset)
: file_(std::move(file)),
: file_(std::move(_file)),
reporter_(reporter),
checksum_(checksum),
backing_store_(new char[kBlockSize]),

@@ -32,8 +32,7 @@ Reader::Reader(unique_ptr<SequentialFile>&& file, Reporter* reporter,
eof_offset_(0),
last_record_offset_(0),
end_of_buffer_offset_(0),
initial_offset_(initial_offset) {
}
initial_offset_(initial_offset) {}

Reader::~Reader() {
delete[] backing_store_;
@@ -268,11 +268,11 @@ void MemTableList::Add(MemTable* m) {

// Returns an estimate of the number of bytes of data in use.
size_t MemTableList::ApproximateMemoryUsage() {
size_t size = 0;
size_t total_size = 0;
for (auto& memtable : current_->memlist_) {
size += memtable->ApproximateMemoryUsage();
total_size += memtable->ApproximateMemoryUsage();
}
return size;
return total_size;
}

void MemTableList::InstallNewVersion() {
@@ -85,9 +85,10 @@ void MergeHelper::MergeUntil(Iterator* iter, SequenceNumber stop_before,
// We store the result in keys_.back() and operands_.back()
// if nothing went wrong (i.e.: no operand corruption on disk)
if (success_) {
std::string& key = keys_.back(); // The original key encountered
std::string& original_key =
keys_.back(); // The original key encountered
orig_ikey.type = kTypeValue;
UpdateInternalKey(&key[0], key.size(),
UpdateInternalKey(&original_key[0], original_key.size(),
orig_ikey.sequence, orig_ikey.type);
swap(operands_.back(), merge_result);
} else {

@@ -108,17 +109,17 @@ void MergeHelper::MergeUntil(Iterator* iter, SequenceNumber stop_before,
// => store result in operands_.back() (and update keys_.back())
// => change the entry type to kTypeValue for keys_.back()
// We are done! Success!
const Slice value = iter->value();
success_ = user_merge_operator_->FullMerge(ikey.user_key, &value,
operands_, &merge_result,
logger_);
const Slice val = iter->value();
success_ = user_merge_operator_->FullMerge(ikey.user_key, &val, operands_,
&merge_result, logger_);

// We store the result in keys_.back() and operands_.back()
// if nothing went wrong (i.e.: no operand corruption on disk)
if (success_) {
std::string& key = keys_.back(); // The original key encountered
std::string& original_key =
keys_.back(); // The original key encountered
orig_ikey.type = kTypeValue;
UpdateInternalKey(&key[0], key.size(),
UpdateInternalKey(&original_key[0], original_key.size(),
orig_ikey.sequence, orig_ikey.type);
swap(operands_.back(), merge_result);
} else {

@@ -177,9 +178,9 @@ void MergeHelper::MergeUntil(Iterator* iter, SequenceNumber stop_before,
logger_);

if (success_) {
std::string& key = keys_.back(); // The original key encountered
std::string& original_key = keys_.back(); // The original key encountered
orig_ikey.type = kTypeValue;
UpdateInternalKey(&key[0], key.size(),
UpdateInternalKey(&original_key[0], original_key.size(),
orig_ikey.sequence, orig_ikey.type);

// The final value() is always stored in operands_.back()
@@ -220,8 +220,8 @@ class ConcurrentTest {
void WriteStep(Random* rnd) {
const uint32_t k = rnd->Next() % K;
const int g = current_.Get(k) + 1;
const Key key = MakeKey(k, g);
list_.Insert(key);
const Key new_key = MakeKey(k, g);
list_.Insert(new_key);
current_.Set(k, g);
}
@@ -48,14 +48,14 @@ Status TransactionLogIteratorImpl::OpenLogFile(
return env->NewSequentialFile(fname, file, soptions_);
} else {
std::string fname = LogFileName(dir_, logFile->LogNumber());
Status status = env->NewSequentialFile(fname, file, soptions_);
if (!status.ok()) {
Status s = env->NewSequentialFile(fname, file, soptions_);
if (!s.ok()) {
// If cannot open file in DB directory.
// Try the archive dir, as it could have moved in the meanwhile.
fname = ArchivedLogFileName(dir_, logFile->LogNumber());
status = env->NewSequentialFile(fname, file, soptions_);
s = env->NewSequentialFile(fname, file, soptions_);
}
return status;
return s;
}
}

@@ -182,10 +182,10 @@ void TransactionLogIteratorImpl::NextImpl(bool internal) {
// Open the next file
if (currentFileIndex_ < files_->size() - 1) {
++currentFileIndex_;
Status status =OpenLogReader(files_->at(currentFileIndex_).get());
if (!status.ok()) {
Status s = OpenLogReader(files_->at(currentFileIndex_).get());
if (!s.ok()) {
isValid_ = false;
currentStatus_ = status;
currentStatus_ = s;
return;
}
} else {

@@ -252,9 +252,9 @@ void TransactionLogIteratorImpl::UpdateCurrentWriteBatch(const Slice& record) {

Status TransactionLogIteratorImpl::OpenLogReader(const LogFile* logFile) {
unique_ptr<SequentialFile> file;
Status status = OpenLogFile(logFile, &file);
if (!status.ok()) {
return status;
Status s = OpenLogFile(logFile, &file);
if (!s.ok()) {
return s;
}
assert(file);
currentLogReader_.reset(new log::Reader(std::move(file), &reporter_,
@@ -17,19 +17,6 @@

namespace rocksdb {

struct LogReporter : public log::Reader::Reporter {
Env* env;
Logger* info_log;
virtual void Corruption(size_t bytes, const Status& s) {
Log(InfoLogLevel::ERROR_LEVEL, info_log,
"dropping %zu bytes; %s", bytes, s.ToString().c_str());
}
virtual void Info(const char* s) {
Log(InfoLogLevel::INFO_LEVEL,
info_log, "%s", s);
}
};

class LogFileImpl : public LogFile {
public:
LogFileImpl(uint64_t logNum, WalFileType logType, SequenceNumber startSeq,

@@ -97,7 +84,19 @@ class TransactionLogIteratorImpl : public TransactionLogIterator {
std::unique_ptr<WriteBatch> currentBatch_;
unique_ptr<log::Reader> currentLogReader_;
Status OpenLogFile(const LogFile* logFile, unique_ptr<SequentialFile>* file);
LogReporter reporter_;

struct LogReporter : public log::Reader::Reporter {
Env* env;
Logger* info_log;
virtual void Corruption(size_t bytes, const Status& s) {
Log(InfoLogLevel::ERROR_LEVEL, info_log, "dropping %zu bytes; %s", bytes,
s.ToString().c_str());
}
virtual void Info(const char* s) {
Log(InfoLogLevel::INFO_LEVEL, info_log, "%s", s);
}
} reporter_;

SequenceNumber currentBatchSeq_; // sequence number at start of current batch
SequenceNumber currentLastSeq_; // last sequence in the current batch
// Used only to get latest seq. num
@@ -651,12 +651,12 @@ void Version::AddIterators(const ReadOptions& read_options,

VersionStorageInfo::VersionStorageInfo(
const InternalKeyComparator* internal_comparator,
const Comparator* user_comparator, int num_levels,
const Comparator* user_comparator, int levels,
CompactionStyle compaction_style, VersionStorageInfo* ref_vstorage)
: internal_comparator_(internal_comparator),
user_comparator_(user_comparator),
// cfd is nullptr if Version is dummy
num_levels_(num_levels),
num_levels_(levels),
num_non_empty_levels_(num_levels_),
file_indexer_(user_comparator),
compaction_style_(compaction_style),

@@ -683,22 +683,23 @@ VersionStorageInfo::VersionStorageInfo(
}
}

Version::Version(ColumnFamilyData* cfd, VersionSet* vset,
Version::Version(ColumnFamilyData* column_family_data, VersionSet* vset,
uint64_t version_number)
: cfd_(cfd),
info_log_((cfd == nullptr) ? nullptr : cfd->ioptions()->info_log),
db_statistics_((cfd == nullptr) ? nullptr : cfd->ioptions()->statistics),
table_cache_((cfd == nullptr) ? nullptr : cfd->table_cache()),
merge_operator_((cfd == nullptr) ? nullptr
: cfd->ioptions()->merge_operator),
storage_info_((cfd == nullptr) ? nullptr : &cfd->internal_comparator(),
(cfd == nullptr) ? nullptr : cfd->user_comparator(),
cfd == nullptr ? 0 : cfd->NumberLevels(),
cfd == nullptr ? kCompactionStyleLevel
: cfd->ioptions()->compaction_style,
(cfd == nullptr || cfd->current() == nullptr)
: cfd_(column_family_data),
info_log_((cfd_ == nullptr) ? nullptr : cfd_->ioptions()->info_log),
db_statistics_((cfd_ == nullptr) ? nullptr
: cfd_->ioptions()->statistics),
table_cache_((cfd_ == nullptr) ? nullptr : cfd_->table_cache()),
merge_operator_((cfd_ == nullptr) ? nullptr
: cfd_->ioptions()->merge_operator),
storage_info_((cfd_ == nullptr) ? nullptr : &cfd_->internal_comparator(),
(cfd_ == nullptr) ? nullptr : cfd_->user_comparator(),
cfd_ == nullptr ? 0 : cfd_->NumberLevels(),
cfd_ == nullptr ? kCompactionStyleLevel
: cfd_->ioptions()->compaction_style,
(cfd_ == nullptr || cfd_->current() == nullptr)
? nullptr
: cfd->current()->storage_info()),
: cfd_->current()->storage_info()),
vset_(vset),
next_(this),
prev_(this),
@@ -1445,10 +1446,10 @@ struct VersionSet::ManifestWriter {
};

VersionSet::VersionSet(const std::string& dbname, const DBOptions* db_options,
const EnvOptions& env_options, Cache* table_cache,
const EnvOptions& storage_options, Cache* table_cache,
WriteController* write_controller)
: column_family_set_(new ColumnFamilySet(dbname, db_options, env_options,
table_cache, write_controller)),
: column_family_set_(new ColumnFamilySet(
dbname, db_options, storage_options, table_cache, write_controller)),
env_(db_options->env),
dbname_(dbname),
db_options_(db_options),

@@ -1459,7 +1460,7 @@ VersionSet::VersionSet(const std::string& dbname, const DBOptions* db_options,
prev_log_number_(0),
current_version_number_(0),
manifest_file_size_(0),
env_options_(env_options),
env_options_(storage_options),
env_options_compactions_(env_options_) {}

VersionSet::~VersionSet() {

@@ -1842,8 +1843,8 @@ Status VersionSet::Recover(
if (!s.ok()) {
return s;
}
uint64_t manifest_file_size;
s = env_->GetFileSize(manifest_filename, &manifest_file_size);
uint64_t current_manifest_file_size;
s = env_->GetFileSize(manifest_filename, &current_manifest_file_size);
if (!s.ok()) {
return s;
}

@@ -1855,7 +1856,7 @@ Status VersionSet::Recover(
uint64_t next_file = 0;
uint64_t last_sequence = 0;
uint64_t log_number = 0;
uint64_t prev_log_number = 0;
uint64_t previous_log_number = 0;
uint32_t max_column_family = 0;
std::unordered_map<uint32_t, BaseReferencedVersionBuilder*> builders;
@@ -1984,7 +1985,7 @@ Status VersionSet::Recover(
}

if (edit.has_prev_log_number_) {
prev_log_number = edit.prev_log_number_;
previous_log_number = edit.prev_log_number_;
have_prev_log_number = true;
}

@@ -2014,12 +2015,12 @@ Status VersionSet::Recover(
}

if (!have_prev_log_number) {
prev_log_number = 0;
previous_log_number = 0;
}

column_family_set_->UpdateMaxColumnFamily(max_column_family);

MarkFileNumberUsed(prev_log_number);
MarkFileNumberUsed(previous_log_number);
MarkFileNumberUsed(log_number);
}

@@ -2059,10 +2060,10 @@ Status VersionSet::Recover(
AppendVersion(cfd, v);
}

manifest_file_size_ = manifest_file_size;
manifest_file_size_ = current_manifest_file_size;
next_file_number_ = next_file + 1;
last_sequence_ = last_sequence;
prev_log_number_ = prev_log_number;
prev_log_number_ = previous_log_number;

Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
"Recovered from manifest file:%s succeeded,"
|
||||
bool have_last_sequence = false;
|
||||
uint64_t next_file = 0;
|
||||
uint64_t last_sequence = 0;
|
||||
uint64_t prev_log_number = 0;
|
||||
uint64_t previous_log_number = 0;
|
||||
int count = 0;
|
||||
std::unordered_map<uint32_t, std::string> comparators;
|
||||
std::unordered_map<uint32_t, BaseReferencedVersionBuilder*> builders;
|
||||
@ -2345,7 +2346,7 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
|
||||
}
|
||||
|
||||
if (edit.has_prev_log_number_) {
|
||||
prev_log_number = edit.prev_log_number_;
|
||||
previous_log_number = edit.prev_log_number_;
|
||||
have_prev_log_number = true;
|
||||
}
|
||||
|
||||
@ -2376,7 +2377,7 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
|
||||
}
|
||||
|
||||
if (!have_prev_log_number) {
|
||||
prev_log_number = 0;
|
||||
previous_log_number = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2409,13 +2410,13 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
|
||||
|
||||
next_file_number_ = next_file + 1;
|
||||
last_sequence_ = last_sequence;
|
||||
prev_log_number_ = prev_log_number;
|
||||
prev_log_number_ = previous_log_number;
|
||||
|
||||
printf(
|
||||
"next_file_number %lu last_sequence "
|
||||
"%lu prev_log_number %lu max_column_family %u\n",
|
||||
(unsigned long)next_file_number_, (unsigned long)last_sequence,
|
||||
(unsigned long)prev_log_number,
|
||||
(unsigned long)previous_log_number,
|
||||
column_family_set_->GetMaxColumnFamily());
|
||||
}
|
||||
|
||||
@@ -2491,10 +2492,9 @@ Status VersionSet::WriteSnapshot(log::Writer* log) {

// Opens the mainfest file and reads all records
// till it finds the record we are looking for.
bool VersionSet::ManifestContains(uint64_t manifest_file_number,
bool VersionSet::ManifestContains(uint64_t manifest_file_num,
const std::string& record) const {
std::string fname =
DescriptorFileName(dbname_, manifest_file_number);
std::string fname = DescriptorFileName(dbname_, manifest_file_num);
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
"ManifestContains: checking %s\n", fname.c_str());
unique_ptr<SequentialFile> file;
@@ -351,15 +351,14 @@ class InMemoryEnv : public EnvWrapper {
return Status::NotSupported("getFileMTime", "Not supported in MemEnv");
}

virtual Status RenameFile(const std::string& src,
const std::string& target) {
virtual Status RenameFile(const std::string& src, const std::string& dest) {
MutexLock lock(&mutex_);
if (file_map_.find(src) == file_map_.end()) {
return Status::IOError(src, "File not found");
}

DeleteFileInternal(target);
file_map_[target] = file_map_[src];
DeleteFileInternal(dest);
file_map_[dest] = file_map_[src];
file_map_.erase(src);
return Status::OK();
}
@@ -130,8 +130,8 @@ class Status {
Code code_;
const char* state_;

explicit Status(Code code) : code_(code), state_(nullptr) { }
Status(Code code, const Slice& msg, const Slice& msg2);
explicit Status(Code _code) : code_(_code), state_(nullptr) {}
Status(Code _code, const Slice& msg, const Slice& msg2);
static const char* CopyState(const char* s);
};
@@ -517,11 +517,11 @@ TEST(CuckooReaderTest, TestReadPerformance) {
fprintf(stdout,
"WARNING: Not compiled with DNDEBUG. Performance tests may be slow.\n");
#endif
std::vector<std::string> keys;
for (uint64_t num : nums) {
if (FLAGS_write || !Env::Default()->FileExists(GetFileName(num))) {
GetKeys(num, &keys);
WriteFile(keys, num, hash_ratio);
std::vector<std::string> all_keys;
GetKeys(num, &all_keys);
WriteFile(all_keys, num, hash_ratio);
}
ReadKeys(num, 0);
ReadKeys(num, 10);
@@ -107,11 +107,11 @@ inline uint64_t UpconvertLegacyFooterFormat(uint64_t magic_number) {
}
} // namespace

Footer::Footer(uint64_t table_magic_number)
: version_(IsLegacyFooterFormat(table_magic_number) ? kLegacyFooter
: kFooterVersion),
Footer::Footer(uint64_t _table_magic_number)
: version_(IsLegacyFooterFormat(_table_magic_number) ? kLegacyFooter
: kFooterVersion),
checksum_(kCRC32c),
table_magic_number_(table_magic_number) {}
table_magic_number_(_table_magic_number) {}

Status Footer::DecodeFrom(Slice* input) {
assert(input != nullptr);

@@ -160,11 +160,11 @@ Status Footer::DecodeFrom(Slice* input) {
} else {
input->remove_prefix(input->size() - kVersion1EncodedLength);
}
uint32_t checksum;
if (!GetVarint32(input, &checksum)) {
uint32_t chksum;
if (!GetVarint32(input, &chksum)) {
return Status::Corruption("bad checksum type");
}
checksum_ = static_cast<ChecksumType>(checksum);
checksum_ = static_cast<ChecksumType>(chksum);
}

Status result = metaindex_handle_.DecodeFrom(input);
@@ -33,11 +33,11 @@ class BlockHandle {

// The offset of the block in the file.
uint64_t offset() const { return offset_; }
void set_offset(uint64_t offset) { offset_ = offset; }
void set_offset(uint64_t _offset) { offset_ = _offset; }

// The size of the stored block
uint64_t size() const { return size_; }
void set_size(uint64_t size) { size_ = size; }
void set_size(uint64_t _size) { size_ = _size; }

void EncodeTo(std::string* dst) const;
Status DecodeFrom(Slice* input);

@@ -200,9 +200,7 @@ inline BlockHandle::BlockHandle()
~static_cast<uint64_t>(0)) {
}

inline BlockHandle::BlockHandle(uint64_t offset, uint64_t size)
: offset_(offset),
size_(size) {
}
inline BlockHandle::BlockHandle(uint64_t _offset, uint64_t _size)
: offset_(_offset), size_(_size) {}

} // namespace rocksdb
@@ -20,17 +20,15 @@ namespace rocksdb {
class IteratorWrapper {
public:
IteratorWrapper(): iter_(nullptr), valid_(false) { }
explicit IteratorWrapper(Iterator* iter): iter_(nullptr) {
Set(iter);
}
explicit IteratorWrapper(Iterator* _iter) : iter_(nullptr) { Set(_iter); }
~IteratorWrapper() {}
Iterator* iter() const { return iter_; }

// Takes ownership of "iter" and will delete it when destroyed, or
// when Set() is invoked again.
void Set(Iterator* iter) {
void Set(Iterator* _iter) {
delete iter_;
iter_ = iter;
iter_ = _iter;
if (iter_ == nullptr) {
valid_ = false;
} else {
@@ -240,14 +240,14 @@ class MergingIterator : public Iterator {
}

virtual Status status() const {
Status status;
Status s;
for (auto& child : children_) {
status = child.status();
if (!status.ok()) {
s = child.status();
if (!s.ok()) {
break;
}
}
return status;
return s;
}

private:
@@ -251,15 +251,13 @@ class BlockConstructor: public Constructor {
const ImmutableCFOptions& ioptions,
const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator,
const KVMap& data) {
const KVMap& kv_map) {
delete block_;
block_ = nullptr;
BlockBuilder builder(table_options.block_restart_interval);

for (KVMap::const_iterator it = data.begin();
it != data.end();
++it) {
builder.Add(it->first, it->second);
for (const auto kv : kv_map) {
builder.Add(kv.first, kv.second);
}
// Open the block
data_ = builder.Finish().ToString();
@@ -307,12 +305,12 @@ class KeyConvertingIterator: public Iterator {

virtual Slice key() const {
assert(Valid());
ParsedInternalKey key;
if (!ParseInternalKey(iter_->key(), &key)) {
ParsedInternalKey parsed_key;
if (!ParseInternalKey(iter_->key(), &parsed_key)) {
status_ = Status::Corruption("malformed internal key");
return Slice("corrupted key");
}
return key.user_key;
return parsed_key.user_key;
}

virtual Slice value() const { return iter_->value(); }
@@ -342,7 +340,7 @@ class TableConstructor: public Constructor {
const ImmutableCFOptions& ioptions,
const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator,
const KVMap& data) {
const KVMap& kv_map) {
Reset();
sink_.reset(new StringSink());
unique_ptr<TableBuilder> builder;

@@ -350,16 +348,14 @@ class TableConstructor: public Constructor {
ioptions, internal_comparator, sink_.get(), options.compression,
CompressionOptions()));

for (KVMap::const_iterator it = data.begin();
it != data.end();
++it) {
for (const auto kv : kv_map) {
if (convert_to_internal_key_) {
ParsedInternalKey ikey(it->first, kMaxSequenceNumber, kTypeValue);
ParsedInternalKey ikey(kv.first, kMaxSequenceNumber, kTypeValue);
std::string encoded;
AppendInternalKey(&encoded, ikey);
builder->Add(encoded, it->second);
builder->Add(encoded, kv.second);
} else {
builder->Add(it->first, it->second);
builder->Add(kv.first, kv.second);
}
ASSERT_TRUE(builder->status().ok());
}
@@ -445,11 +441,10 @@ class MemTableConstructor: public Constructor {
~MemTableConstructor() {
delete memtable_->Unref();
}
virtual Status FinishImpl(const Options&,
const ImmutableCFOptions& ioptions,
virtual Status FinishImpl(const Options&, const ImmutableCFOptions& ioptions,
const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator,
const KVMap& data) {
const KVMap& kv_map) {
delete memtable_->Unref();
Options options;
options.memtable_factory = table_factory_;

@@ -458,10 +453,8 @@ class MemTableConstructor: public Constructor {
MutableCFOptions(options, mem_ioptions));
memtable_->Ref();
int seq = 1;
for (KVMap::const_iterator it = data.begin();
it != data.end();
++it) {
memtable_->Add(seq, kTypeValue, it->first, it->second);
for (const auto kv : kv_map) {
memtable_->Add(seq, kTypeValue, kv.first, kv.second);
seq++;
}
return Status::OK();
@@ -497,15 +490,13 @@ class DBConstructor: public Constructor {
const ImmutableCFOptions& ioptions,
const BlockBasedTableOptions& table_options,
const InternalKeyComparator& internal_comparator,
const KVMap& data) {
const KVMap& kv_map) {
delete db_;
db_ = nullptr;
NewDB();
for (KVMap::const_iterator it = data.begin();
it != data.end();
++it) {
for (const auto kv : kv_map) {
WriteBatch batch;
batch.Put(it->first, it->second);
batch.Put(kv.first, kv.second);
ASSERT_TRUE(db_->Write(WriteOptions(), &batch).ok());
}
return Status::OK();
@@ -5,6 +5,7 @@

#include <atomic>
#include <iostream>
#include <utility>

#include "rocksdb/env.h"
#include "util/autovector.h"

@@ -48,8 +49,8 @@ TEST(AutoVectorTest, PushBackAndPopBack) {
}

TEST(AutoVectorTest, EmplaceBack) {
typedef std::pair<size_t, std::string> ValueType;
autovector<ValueType, kSize> vec;
typedef std::pair<size_t, std::string> ValType;
autovector<ValType, kSize> vec;

for (size_t i = 0; i < 1000 * kSize; ++i) {
vec.emplace_back(i, std::to_string(i + 123));
@@ -539,10 +539,9 @@ Status MockEnv::GetFileModificationTime(const std::string& fname,
return Status::OK();
}

Status MockEnv::RenameFile(const std::string& src,
const std::string& target) {
Status MockEnv::RenameFile(const std::string& src, const std::string& dest) {
auto s = NormalizePath(src);
auto t = NormalizePath(target);
auto t = NormalizePath(dest);
MutexLock lock(&mutex_);
if (file_map_.find(s) == file_map_.end()) {
return Status::IOError(s, "File not found");
@@ -21,9 +21,8 @@ const char* Status::CopyState(const char* state) {
return result;
}

Status::Status(Code code, const Slice& msg, const Slice& msg2) :
code_(code) {
assert(code != kOk);
Status::Status(Code _code, const Slice& msg, const Slice& msg2) : code_(_code) {
assert(code_ != kOk);
const uint32_t len1 = msg.size();
const uint32_t len2 = msg2.size();
const uint32_t size = len1 + (len2 ? (2 + len2) : 0);
@@ -31,9 +31,9 @@ JSONDocument::JSONDocument(const std::string& s) : type_(kString) {
JSONDocument::JSONDocument(const char* s) : type_(kString) {
new (&data_.s) std::string(s);
}
JSONDocument::JSONDocument(Type type) : type_(type) {
JSONDocument::JSONDocument(Type _type) : type_(_type) {
// TODO(icanadi) make all of this better by using templates
switch (type) {
switch (type_) {
case kNull:
break;
case kObject:

@@ -545,11 +545,11 @@ bool JSONDocument::DeserializeInternal(Slice* input) {
}
data_.a.resize(size);
for (size_t i = 0; i < size; ++i) {
Type type;
if (!GetNextType(input, &type)) {
Type t;
if (!GetNextType(input, &t)) {
return false;
}
data_.a[i] = new JSONDocument(type);
data_.a[i] = new JSONDocument(t);
if (!data_.a[i]->DeserializeInternal(input)) {
return false;
}
@@ -582,10 +582,10 @@ bool JSONDocument::DeserializeInternal(Slice* input) {
for (uint32_t i = 0; ok && i < num_elements; ++i) {
Slice key;
ok = GetLengthPrefixedSlice(input, &key);
Type type;
ok = ok && GetNextType(input, &type);
Type t;
ok = ok && GetNextType(input, &t);
if (ok) {
std::unique_ptr<JSONDocument> value(new JSONDocument(type));
std::unique_ptr<JSONDocument> value(new JSONDocument(t));
ok = value->DeserializeInternal(input);
if (ok) {
data_.o.insert({key.ToString(), value.get()});
@@ -53,10 +53,10 @@ class BaseDeltaIterator : public Iterator {
UpdateCurrent();
}

void Seek(const Slice& key) override {
void Seek(const Slice& k) override {
forward_ = true;
base_iterator_->Seek(key);
delta_iterator_->Seek(key);
base_iterator_->Seek(k);
delta_iterator_->Seek(k);
UpdateCurrent();
}