Turn on -Wshadow
Summary:
...and fix all the errors :) Jim suggested turning on -Wshadow because it
helped him fix a number of critical bugs in fbcode. I think it's a good idea
to be -Wshadow clean.

Test Plan: compiles

Reviewers: yhchiang, rven, sdong, ljin

Reviewed By: ljin

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D27711
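For context: -Wshadow fires whenever an inner declaration reuses a name from an enclosing scope, or a constructor parameter reuses a member name. A minimal hypothetical sketch (the names below are illustrative, not from the RocksDB tree) of the dominant pattern fixed in this commit, together with the underscore-prefix rename used to silence it:

    // Hypothetical illustration only; compile with:
    //   g++ -c -Wall -Werror -Wshadow shadow_demo.cc
    struct IterStateDemo {
      int db;
      // -Wshadow: declaration of 'db' shadows a member of 'IterStateDemo'
      explicit IterStateDemo(int db) : db(db) {}
      // The fix pattern used throughout this commit:
      //   explicit IterStateDemo(int _db) : db(_db) {}
    };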
This commit is contained in:
parent 98849a35fa
commit 9f7fc3ac45
Makefile
@@ -69,7 +69,7 @@ install:
 	@[ ! -e $(SHARED) ] || install -C -m 644 $(SHARED) $(INSTALL_PATH)/lib
 #-------------------------------------------------
 
-WARNING_FLAGS = -Wall -Werror -Wsign-compare
+WARNING_FLAGS = -Wall -Werror -Wsign-compare -Wshadow
 CFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CCFLAGS) $(OPT)
 CXXFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) -Woverloaded-virtual
 
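Because WARNING_FLAGS already carries -Werror, adding -Wshadow turns every shadowed declaration below into a hard build failure rather than a warning. A hypothetical sketch of the other recurring pattern fixed in this commit, the nested loop counter (renamed from i to k in the DBIteratorTest changes further down):

    // Hypothetical illustration; fails to compile under -Wshadow -Werror.
    void ShadowedLoops() {
      for (int i = 0; i < 200; ++i) {
        for (int i = 0; i < 200; ++i) {  // -Wshadow: 'i' shadows the outer 'i'
        }
      }
    }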
@@ -72,11 +72,13 @@ Status BuildTable(const std::string& dbname, Env* env,
         ioptions, internal_comparator, file.get(),
         compression, compression_opts);
 
+    {
       // the first key is the smallest key
       Slice key = iter->key();
       meta->smallest.DecodeFrom(key);
       meta->smallest_seqno = GetInternalKeySeqno(key);
       meta->largest_seqno = meta->smallest_seqno;
+    }
 
     MergeHelper merge(internal_comparator.user_comparator(),
                       ioptions.merge_operator, ioptions.info_log,
@@ -576,7 +576,6 @@ int main(int argc, char** argv) {
 
   StartPhase("compaction_filter");
   {
-    rocksdb_options_t* options = rocksdb_options_create();
     rocksdb_options_set_create_if_missing(options, 1);
     rocksdb_compactionfilter_t* cfilter;
     cfilter = rocksdb_compactionfilter_create(NULL, CFilterDestroy,
@@ -589,12 +588,10 @@ int main(int argc, char** argv) {
 
     rocksdb_options_set_compaction_filter(options, NULL);
     rocksdb_compactionfilter_destroy(cfilter);
-    rocksdb_options_destroy(options);
   }
 
   StartPhase("compaction_filter_factory");
   {
-    rocksdb_options_t* options = rocksdb_options_create();
     rocksdb_options_set_create_if_missing(options, 1);
     rocksdb_compactionfilterfactory_t* factory;
     factory = rocksdb_compactionfilterfactory_create(
@@ -606,7 +603,6 @@ int main(int argc, char** argv) {
     db = CheckCompaction(db, options, roptions, woptions);
 
     rocksdb_options_set_compaction_filter_factory(options, NULL);
-    rocksdb_options_destroy(options);
   }
 
   StartPhase("compaction_filter_v2");
@@ -263,8 +263,7 @@ ColumnFamilyData::ColumnFamilyData(uint32_t id, const std::string& name,
     Log(InfoLogLevel::INFO_LEVEL,
         ioptions_.info_log, "Options for column family \"%s\":\n",
         name.c_str());
-    const ColumnFamilyOptions* cf_options = &options_;
-    cf_options->Dump(ioptions_.info_log);
+    options_.Dump(ioptions_.info_log);
   }
 
   RecalculateWriteStallConditions(mutable_cf_options_);
@@ -697,8 +697,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
     // Check if the suceeding files need compaction.
     for (unsigned int i = loop + 1;
          candidate_count < max_files_to_compact && i < files.size(); i++) {
-      FileMetaData* f = files[i];
-      if (f->being_compacted) {
+      FileMetaData* suceeding_file = files[i];
+      if (suceeding_file->being_compacted) {
         break;
       }
       // Pick files if the total/last candidate file size (increased by the
@@ -708,14 +708,14 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
       // kCompactionStopStyleSimilarSize, it's simply the size of the last
       // picked file.
       double sz = candidate_size * (100.0 + ratio) / 100.0;
-      if (sz < static_cast<double>(f->fd.GetFileSize())) {
+      if (sz < static_cast<double>(suceeding_file->fd.GetFileSize())) {
         break;
       }
       if (ioptions_.compaction_options_universal.stop_style ==
           kCompactionStopStyleSimilarSize) {
         // Similar-size stopping rule: also check the last picked file isn't
         // far larger than the next candidate file.
-        sz = (f->fd.GetFileSize() * (100.0 + ratio)) / 100.0;
+        sz = (suceeding_file->fd.GetFileSize() * (100.0 + ratio)) / 100.0;
         if (sz < static_cast<double>(candidate_size)) {
           // If the small file we've encountered begins a run of similar-size
           // files, we'll pick them up on a future iteration of the outer
@@ -723,9 +723,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
           // by the last-resort read amp strategy which disregards size ratios.
           break;
         }
-        candidate_size = f->compensated_file_size;
+        candidate_size = suceeding_file->compensated_file_size;
       } else {  // default kCompactionStopStyleTotalSize
-        candidate_size += f->compensated_file_size;
+        candidate_size += suceeding_file->compensated_file_size;
       }
       candidate_count++;
     }
@@ -738,12 +738,14 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
     } else {
      for (unsigned int i = loop;
           i < loop + candidate_count && i < files.size(); i++) {
-        FileMetaData* f = files[i];
+        FileMetaData* skipping_file = files[i];
        LogToBuffer(log_buffer, "[%s] Universal: Skipping file %" PRIu64
                                "[%d] with size %" PRIu64
                                " (compensated size %" PRIu64 ") %d\n",
-                    cf_name.c_str(), f->fd.GetNumber(), i, f->fd.GetFileSize(),
-                    f->compensated_file_size, f->being_compacted);
+                    cf_name.c_str(), f->fd.GetNumber(), i,
+                    skipping_file->fd.GetFileSize(),
+                    skipping_file->compensated_file_size,
+                    skipping_file->being_compacted);
       }
     }
   }
@@ -782,16 +784,17 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
   c->score_ = score;
 
   for (unsigned int i = start_index; i < first_index_after; i++) {
-    FileMetaData* f = files[i];
-    c->inputs_[0].files.push_back(f);
+    FileMetaData* picking_file = files[i];
+    c->inputs_[0].files.push_back(picking_file);
     char file_num_buf[kFormatFileNumberBufSize];
-    FormatFileNumber(f->fd.GetNumber(), f->fd.GetPathId(), file_num_buf,
-                     sizeof(file_num_buf));
+    FormatFileNumber(picking_file->fd.GetNumber(), picking_file->fd.GetPathId(),
+                     file_num_buf, sizeof(file_num_buf));
     LogToBuffer(log_buffer,
                 "[%s] Universal: Picking file %s[%d] "
                 "with size %" PRIu64 " (compensated size %" PRIu64 ")\n",
-                cf_name.c_str(), file_num_buf, i, f->fd.GetFileSize(),
-                f->compensated_file_size);
+                cf_name.c_str(), file_num_buf, i,
+                picking_file->fd.GetFileSize(),
+                picking_file->compensated_file_size);
   }
   return c;
 }
@@ -850,7 +853,6 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
   for (unsigned int loop = start_index; loop < files.size() - 1; loop++) {
     f = files[loop];
     if (f->being_compacted) {
-      char file_num_buf[kFormatFileNumberBufSize];
       FormatFileNumber(f->fd.GetNumber(), f->fd.GetPathId(), file_num_buf,
                        sizeof(file_num_buf));
       LogToBuffer(
@@ -2953,8 +2953,8 @@ Status DBImpl::DoCompactionWork(CompactionState* compact,
 
 namespace {
 struct IterState {
-  IterState(DBImpl* db, port::Mutex* mu, SuperVersion* super_version)
-      : db(db), mu(mu), super_version(super_version) {}
+  IterState(DBImpl* _db, port::Mutex* _mu, SuperVersion* _super_version)
+      : db(_db), mu(_mu), super_version(_super_version) {}
 
   DBImpl* db;
   port::Mutex* mu;
@@ -3812,14 +3812,14 @@ Status DBImpl::SetNewMemtableAndNewLogFile(ColumnFamilyData* cfd,
       log_.reset(new_log);
       log_empty_ = true;
       alive_log_files_.push_back(LogFileNumberSize(logfile_number_));
-      for (auto cfd : *versions_->GetColumnFamilySet()) {
+      for (auto loop_cfd : *versions_->GetColumnFamilySet()) {
         // all this is just optimization to delete logs that
         // are no longer needed -- if CF is empty, that means it
         // doesn't need that particular log to stay alive, so we just
         // advance the log number. no need to persist this in the manifest
-        if (cfd->mem()->GetFirstSequenceNumber() == 0 &&
-            cfd->imm()->size() == 0) {
-          cfd->SetLogNumber(logfile_number_);
+        if (loop_cfd->mem()->GetFirstSequenceNumber() == 0 &&
+            loop_cfd->imm()->size() == 0) {
+          loop_cfd->SetLogNumber(logfile_number_);
         }
       }
     }
@@ -4398,8 +4398,6 @@ Status DestroyDB(const std::string& dbname, const Options& options) {
 
     for (auto& db_path : options.db_paths) {
       env->GetChildren(db_path.path, &filenames);
-      uint64_t number;
-      FileType type;
       for (size_t i = 0; i < filenames.size(); i++) {
         if (ParseFileName(filenames[i], &number, &type) &&
             type == kTableFile) {  // Lock file will be deleted at end
@@ -217,7 +217,6 @@ TEST(DBIteratorTest, DBIteratorPrevNext) {
   }
 
   {
-    Options options;
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddPut("a", "val_a");
     internal_iter->AddPut("b", "val_b");
@@ -254,7 +253,6 @@ TEST(DBIteratorTest, DBIteratorPrevNext) {
   }
 
   {
-    Options options;
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddPut("a", "val_a");
     internal_iter->AddPut("a", "val_a");
@@ -364,8 +362,8 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddMerge("b", "merge_1");
     internal_iter->AddMerge("a", "merge_2");
-    for (size_t i = 0; i < 200; ++i) {
-      internal_iter->AddPut("c", std::to_string(i));
+    for (size_t k = 0; k < 200; ++k) {
+      internal_iter->AddPut("c", std::to_string(k));
     }
     internal_iter->Finish();
 
@@ -400,7 +398,7 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddMerge("b", "merge_1");
     internal_iter->AddMerge("a", "merge_2");
-    for (size_t i = 0; i < 200; ++i) {
+    for (size_t k = 0; k < 200; ++k) {
       internal_iter->AddDeletion("c");
     }
     internal_iter->AddPut("c", "200");
@@ -463,7 +461,7 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
   {
     for (size_t i = 0; i < 200; ++i) {
       TestIterator* internal_iter = new TestIterator(BytewiseComparator());
-      for (size_t i = 0; i < 200; ++i) {
+      for (size_t k = 0; k < 200; ++k) {
         internal_iter->AddDeletion("c");
       }
       internal_iter->AddPut("c", "200");
@@ -511,12 +509,12 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddMerge("b", "merge_1");
     internal_iter->AddMerge("a", "merge_2");
-    for (size_t i = 0; i < 200; ++i) {
-      internal_iter->AddPut("d", std::to_string(i));
+    for (size_t k = 0; k < 200; ++k) {
+      internal_iter->AddPut("d", std::to_string(k));
     }
 
-    for (size_t i = 0; i < 200; ++i) {
-      internal_iter->AddPut("c", std::to_string(i));
+    for (size_t k = 0; k < 200; ++k) {
+      internal_iter->AddPut("c", std::to_string(k));
     }
     internal_iter->Finish();
 
@@ -550,8 +548,8 @@ TEST(DBIteratorTest, DBIteratorUseSkip) {
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddMerge("b", "b");
     internal_iter->AddMerge("a", "a");
-    for (size_t i = 0; i < 200; ++i) {
-      internal_iter->AddMerge("c", std::to_string(i));
+    for (size_t k = 0; k < 200; ++k) {
+      internal_iter->AddMerge("c", std::to_string(k));
     }
     internal_iter->Finish();
 
@@ -1390,7 +1388,6 @@ TEST(DBIteratorTest, DBIterator) {
   }
 
   {
-    Options options;
     TestIterator* internal_iter = new TestIterator(BytewiseComparator());
     internal_iter->AddDeletion("a");
     internal_iter->AddPut("a", "0");
@@ -4544,8 +4544,10 @@ TEST(DBTest, CompactionFilter) {
   ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
   ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
 
+  {
     // Scan the entire database to ensure that nothing is left
-    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
+    std::unique_ptr<Iterator> iter(
+        db_->NewIterator(ReadOptions(), handles_[1]));
     iter->SeekToFirst();
     count = 0;
     while (iter->Valid()) {
@@ -4553,7 +4555,7 @@ TEST(DBTest, CompactionFilter) {
       iter->Next();
     }
     ASSERT_EQ(count, 0);
-    delete iter;
+  }
 
   // The sequence number of the remaining record
   // is not zeroed out even though it is at the
@@ -5014,7 +5016,7 @@ TEST(DBTest, CompactionFilterV2NULLPrefix) {
   for (int i = 1; i < 100000; i++) {
     char key[100];
     snprintf(key, sizeof(key), "%08d%010d", i, i);
-    std::string newvalue = Get(key);
+    newvalue = Get(key);
     ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
   }
 }
@@ -5623,7 +5625,7 @@ TEST(DBTest, ManualCompaction) {
     ASSERT_EQ("0,0,1", FilesPerLevel(1));
 
     if (iter == 0) {
-      Options options = CurrentOptions();
+      options = CurrentOptions();
       options.max_background_flushes = 0;
       options.num_levels = 3;
       options.create_if_missing = true;
@@ -7591,7 +7593,6 @@ void PrefixScanInit(DBTest *dbtest) {
 
   // GROUP 2
   for (int i = 1; i <= big_range_sstfiles; i++) {
-    std::string keystr;
     snprintf(buf, sizeof(buf), "%02d______:start", 0);
     keystr = std::string(buf);
     ASSERT_OK(dbtest->Put(keystr, keystr));
@@ -8877,7 +8878,7 @@ TEST(DBTest, PartialCompactionFailure) {
 
   DestroyAndReopen(options);
 
-  const int kNumKeys =
+  const int kNumInsertedKeys =
       options.level0_file_num_compaction_trigger *
       (options.max_write_buffer_number - 1) *
       kKeysPerBuffer;
@@ -8885,7 +8886,7 @@ TEST(DBTest, PartialCompactionFailure) {
   Random rnd(301);
   std::vector<std::string> keys;
   std::vector<std::string> values;
-  for (int k = 0; k < kNumKeys; ++k) {
+  for (int k = 0; k < kNumInsertedKeys; ++k) {
     keys.emplace_back(RandomString(&rnd, kKeySize));
     values.emplace_back(RandomString(&rnd, kKvSize - kKeySize));
     ASSERT_OK(Put(Slice(keys[k]), Slice(values[k])));
@@ -8914,7 +8915,7 @@ TEST(DBTest, PartialCompactionFailure) {
   ASSERT_EQ(NumTableFilesAtLevel(0), previous_num_level0_files);
 
   // All key-values must exist after compaction fails.
-  for (int k = 0; k < kNumKeys; ++k) {
+  for (int k = 0; k < kNumInsertedKeys; ++k) {
     ASSERT_EQ(values[k], Get(keys[k]));
   }
 
@@ -8924,7 +8925,7 @@ TEST(DBTest, PartialCompactionFailure) {
   Reopen(options);
 
   // Verify again after reopen.
-  for (int k = 0; k < kNumKeys; ++k) {
+  for (int k = 0; k < kNumInsertedKeys; ++k) {
     ASSERT_EQ(values[k], Get(keys[k]));
   }
 }
@@ -100,7 +100,7 @@ void FileIndexer::UpdateIndex(Arena* arena, const uint32_t num_levels,
     }
     IndexLevel& index_level = next_level_index_[level];
     index_level.num_index = upper_size;
-    char* mem = arena->AllocateAligned(upper_size * sizeof(IndexUnit));
+    mem = arena->AllocateAligned(upper_size * sizeof(IndexUnit));
    index_level.index_units = new (mem) IndexUnit[upper_size];
 
     CalculateLB(
@@ -136,7 +136,7 @@ class InternalStats {
     // Number of compactions done
     int count;
 
-    explicit CompactionStats(int count = 0)
+    explicit CompactionStats(int _count = 0)
         : micros(0),
           bytes_readn(0),
           bytes_readnp1(0),
@@ -146,7 +146,7 @@ class InternalStats {
           files_out_levelnp1(0),
           num_input_records(0),
           num_dropped_records(0),
-          count(count) {}
+          count(_count) {}
 
     explicit CompactionStats(const CompactionStats& c)
         : micros(c.micros),
@@ -407,6 +407,13 @@ void testSingleBatchSuccessiveMerge(
 }
 
 void runTest(int argc, const string& dbname, const bool use_ttl = false) {
+  bool compact = false;
+  if (argc > 1) {
+    compact = true;
+    cout << "Turn on Compaction\n";
+  }
+
+  {
   auto db = OpenDb(dbname, use_ttl);
 
   {
@@ -415,20 +422,14 @@ void runTest(int argc, const string& dbname, const bool use_ttl = false) {
     testCounters(counters, db.get(), true);
   }
 
-  bool compact = false;
-  if (argc > 1) {
-    compact = true;
-    cout << "Turn on Compaction\n";
-  }
-
   {
     cout << "Test merge-based counters... \n";
     MergeBasedCounters counters(db, 0);
     testCounters(counters, db.get(), compact);
   }
+  }
 
   DestroyDB(dbname, Options());
-  db.reset();
 
   {
     cout << "Test merge in memtable... \n";
@@ -52,7 +52,8 @@ struct TestKey {
   uint64_t prefix;
   uint64_t sorted;
 
-  TestKey(uint64_t prefix, uint64_t sorted) : prefix(prefix), sorted(sorted) {}
+  TestKey(uint64_t _prefix, uint64_t _sorted)
+      : prefix(_prefix), sorted(_sorted) {}
 };
 
 // return a slice backed by test_key
@@ -357,7 +357,7 @@ static void RunConcurrent(int run) {
     TestState state(seed + 1);
     Env::Default()->Schedule(ConcurrentReader, &state);
     state.Wait(TestState::RUNNING);
-    for (int i = 0; i < kSize; i++) {
+    for (int k = 0; k < kSize; k++) {
       state.t_.WriteStep(&rnd);
     }
     state.quit_flag_.store(true, std::memory_order_release);
@@ -15,7 +15,7 @@ namespace rocksdb {
 class TableCache;
 class VersionStorageInfo;
 class VersionEdit;
-class FileMetaData;
+struct FileMetaData;
 
 // A helper class so we can efficiently apply a whole sequence
 // of edits to a particular state without creating intermediate
@@ -168,7 +168,6 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
 
   // Temporary storage for parsing
   int level;
-  uint64_t number;
   FileMetaData f;
   Slice str;
   InternalKey key;
@@ -237,9 +236,9 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
         }
         break;
 
-      case kDeletedFile:
-        if (GetLevel(&input, &level, &msg) &&
-            GetVarint64(&input, &number)) {
+      case kDeletedFile: {
+        uint64_t number;
+        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number)) {
           deleted_files_.insert(std::make_pair(level, number));
         } else {
           if (!msg) {
@@ -247,6 +246,7 @@ Status VersionEdit::DecodeFrom(const Slice& src) {
           }
         }
         break;
+      }
 
       case kNewFile: {
         uint64_t number;
@@ -39,10 +39,10 @@ struct FileDescriptor {
 
   FileDescriptor() : FileDescriptor(0, 0, 0) {}
 
-  FileDescriptor(uint64_t number, uint32_t path_id, uint64_t file_size)
+  FileDescriptor(uint64_t number, uint32_t path_id, uint64_t _file_size)
       : table_reader(nullptr),
         packed_number_and_path_id(PackFileNumberAndPathId(number, path_id)),
-        file_size(file_size) {}
+        file_size(_file_size) {}
 
   FileDescriptor& operator=(const FileDescriptor& fd) {
     table_reader = fd.table_reader;
@@ -110,12 +110,8 @@ struct FdWithKeyRange {
       largest_key() {
   }
 
-  FdWithKeyRange(FileDescriptor fd,
-                 Slice smallest_key, Slice largest_key)
-      : fd(fd),
-        smallest_key(smallest_key),
-        largest_key(largest_key) {
-  }
+  FdWithKeyRange(FileDescriptor _fd, Slice _smallest_key, Slice _largest_key)
+      : fd(_fd), smallest_key(_smallest_key), largest_key(_largest_key) {}
 };
 
 // Data structure to store an array of FdWithKeyRange in one level
@@ -1455,9 +1455,9 @@ struct VersionSet::ManifestWriter {
   ColumnFamilyData* cfd;
   VersionEdit* edit;
 
-  explicit ManifestWriter(port::Mutex* mu, ColumnFamilyData* cfd,
+  explicit ManifestWriter(port::Mutex* mu, ColumnFamilyData* _cfd,
                           VersionEdit* e)
-      : done(false), cv(mu), cfd(cfd), edit(e) {}
+      : done(false), cv(mu), cfd(_cfd), edit(e) {}
 };
 
 class VersionBuilder::Rep {
@@ -166,14 +166,14 @@ void WalManager::PurgeObsoleteWALFiles() {
     std::string const file_path = archival_dir + "/" + f;
     if (ttl_enabled) {
       uint64_t file_m_time;
-      Status const s = env_->GetFileModificationTime(file_path, &file_m_time);
+      s = env_->GetFileModificationTime(file_path, &file_m_time);
       if (!s.ok()) {
         Log(db_options_.info_log, "Can't get file mod time: %s: %s",
             file_path.c_str(), s.ToString().c_str());
         continue;
       }
       if (now_seconds - file_m_time > db_options_.WAL_ttl_seconds) {
-        Status const s = env_->DeleteFile(file_path);
+        s = env_->DeleteFile(file_path);
         if (!s.ok()) {
           Log(db_options_.info_log, "Can't delete file: %s: %s",
               file_path.c_str(), s.ToString().c_str());
@@ -188,7 +188,7 @@ void WalManager::PurgeObsoleteWALFiles() {
 
     if (size_limit_enabled) {
       uint64_t file_size;
-      Status const s = env_->GetFileSize(file_path, &file_size);
+      s = env_->GetFileSize(file_path, &file_size);
       if (!s.ok()) {
         Log(db_options_.info_log, "Can't get file size: %s: %s",
             file_path.c_str(), s.ToString().c_str());
@@ -198,7 +198,7 @@ void WalManager::PurgeObsoleteWALFiles() {
         log_file_size = std::max(log_file_size, file_size);
         ++log_files_num;
       } else {
-        Status s = env_->DeleteFile(file_path);
+        s = env_->DeleteFile(file_path);
         if (!s.ok()) {
           Log(db_options_.info_log, "Can't delete file: %s: %s",
               file_path.c_str(), s.ToString().c_str());
@@ -236,7 +236,7 @@ void WalManager::PurgeObsoleteWALFiles() {
 
   for (size_t i = 0; i < files_del_num; ++i) {
     std::string const file_path = archived_logs[i]->PathName();
-    Status const s = env_->DeleteFile(db_options_.wal_dir + "/" + file_path);
+    s = env_->DeleteFile(db_options_.wal_dir + "/" + file_path);
     if (!s.ok()) {
       Log(db_options_.info_log, "Can't delete file: %s: %s", file_path.c_str(),
           s.ToString().c_str());
@@ -212,7 +212,7 @@ inline bool Zlib_Compress(const CompressionOptions& opts, const char* input,
   int old_sz =0, new_sz =0, new_sz_delta =0;
   bool done = false;
   while (!done) {
-    int st = deflate(&_stream, Z_FINISH);
+    st = deflate(&_stream, Z_FINISH);
     switch (st) {
       case Z_STREAM_END:
         done = true;
@@ -274,7 +274,7 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
 
   //while(_stream.next_in != nullptr && _stream.avail_in != 0) {
   while (!done) {
-    int st = inflate(&_stream, Z_SYNC_FLUSH);
+    st = inflate(&_stream, Z_SYNC_FLUSH);
     switch (st) {
       case Z_STREAM_END:
         done = true;
@@ -337,7 +337,7 @@ inline bool BZip2_Compress(const CompressionOptions& opts, const char* input,
 
   int old_sz =0, new_sz =0;
   while(_stream.next_in != nullptr && _stream.avail_in != 0) {
-    int st = BZ2_bzCompress(&_stream, BZ_FINISH);
+    st = BZ2_bzCompress(&_stream, BZ_FINISH);
     switch (st) {
       case BZ_STREAM_END:
         break;
@@ -390,7 +390,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
   char* tmp = nullptr;
 
   while(_stream.next_in != nullptr && _stream.avail_in != 0) {
-    int st = BZ2_bzDecompress(&_stream);
+    st = BZ2_bzDecompress(&_stream);
     switch (st) {
       case BZ_STREAM_END:
         break;
@@ -430,22 +430,23 @@ struct BlockBasedTableBuilder::Rep {
   std::vector<std::unique_ptr<TablePropertiesCollector>>
       table_properties_collectors;
 
-  Rep(const ImmutableCFOptions& ioptions,
+  Rep(const ImmutableCFOptions& _ioptions,
       const BlockBasedTableOptions& table_opt,
-      const InternalKeyComparator& icomparator,
-      WritableFile* f, const CompressionType compression_type,
-      const CompressionOptions& compression_opts)
-      : ioptions(ioptions),
+      const InternalKeyComparator& icomparator, WritableFile* f,
+      const CompressionType _compression_type,
+      const CompressionOptions& _compression_opts)
+      : ioptions(_ioptions),
         table_options(table_opt),
         internal_comparator(icomparator),
         file(f),
         data_block(table_options.block_restart_interval),
-        internal_prefix_transform(ioptions.prefix_extractor),
-        index_builder(CreateIndexBuilder(
-            table_options.index_type, &internal_comparator,
+        internal_prefix_transform(_ioptions.prefix_extractor),
+        index_builder(CreateIndexBuilder(table_options.index_type,
+                                         &internal_comparator,
            &this->internal_prefix_transform)),
-        compression_type(compression_type),
-        filter_block(CreateFilterBlockBuilder(ioptions, table_options)),
+        compression_type(_compression_type),
+        compression_opts(_compression_opts),
+        filter_block(CreateFilterBlockBuilder(_ioptions, table_options)),
         flush_block_policy(
             table_options.flush_block_policy_factory->NewFlushBlockPolicy(
                 table_options, data_block)) {
@@ -316,13 +316,14 @@ class HashIndexReader : public IndexReader {
 
 
 struct BlockBasedTable::Rep {
-  Rep(const ImmutableCFOptions& ioptions,
-      const EnvOptions& env_options,
-      const BlockBasedTableOptions& table_opt,
-      const InternalKeyComparator& internal_comparator)
-      : ioptions(ioptions), env_options(env_options), table_options(table_opt),
-        filter_policy(table_opt.filter_policy.get()),
-        internal_comparator(internal_comparator) {}
+  Rep(const ImmutableCFOptions& _ioptions, const EnvOptions& _env_options,
+      const BlockBasedTableOptions& _table_opt,
+      const InternalKeyComparator& _internal_comparator)
+      : ioptions(_ioptions),
+        env_options(_env_options),
+        table_options(_table_opt),
+        filter_policy(_table_opt.filter_policy.get()),
+        internal_comparator(_internal_comparator) {}
 
   const ImmutableCFOptions& ioptions;
   const EnvOptions& env_options;
@@ -364,11 +365,9 @@ BlockBasedTable::~BlockBasedTable() {
 // was not read from cache, `cache_handle` will be nullptr.
 template <class TValue>
 struct BlockBasedTable::CachableEntry {
-  CachableEntry(TValue* value, Cache::Handle* cache_handle)
-      : value(value)
-      , cache_handle(cache_handle) {
-  }
-  CachableEntry(): CachableEntry(nullptr, nullptr) { }
+  CachableEntry(TValue* _value, Cache::Handle* _cache_handle)
+      : value(_value), cache_handle(_cache_handle) {}
+  CachableEntry() : CachableEntry(nullptr, nullptr) {}
   void Release(Cache* cache) {
     if (cache_handle) {
       cache->Release(cache_handle);
@@ -25,8 +25,8 @@ class BlockHashIndex {
  public:
   // Represents a restart index in the index block's restart array.
   struct RestartIndex {
-    explicit RestartIndex(uint32_t first_index, uint32_t num_blocks = 1)
-        : first_index(first_index), num_blocks(num_blocks) {}
+    explicit RestartIndex(uint32_t _first_index, uint32_t _num_blocks = 1)
+        : first_index(_first_index), num_blocks(_num_blocks) {}
 
     // For a given prefix, what is the restart index for the first data block
     // that contains it.
@@ -143,8 +143,8 @@ class BlockPrefixIndex::Builder {
       auto current = prefixes_per_bucket[i];
       // populate block ids from largest to smallest
       while (current != nullptr) {
-        for (uint32_t i = 0; i < current->num_blocks; i++) {
-          *last_block = current->end_block - i;
+        for (uint32_t iter = 0; iter < current->num_blocks; iter++) {
+          *last_block = current->end_block - iter;
           last_block--;
         }
         current = current->next;
@@ -436,8 +436,8 @@ bool CuckooTableBuilder::MakeSpaceForKey(
     uint64_t bucket_id;
     uint32_t depth;
     uint32_t parent_pos;
-    CuckooNode(uint64_t bucket_id, uint32_t depth, int parent_pos)
-        : bucket_id(bucket_id), depth(depth), parent_pos(parent_pos) {}
+    CuckooNode(uint64_t _bucket_id, uint32_t _depth, int _parent_pos)
+        : bucket_id(_bucket_id), depth(_depth), parent_pos(_parent_pos) {}
   };
   // This is BFS search tree that is stored simply as a vector.
   // Each node stores the index of parent node in the vector.
@@ -451,10 +451,9 @@ bool CuckooTableBuilder::MakeSpaceForKey(
   // It is unlikely for the increment operation to overflow because the maximum
   // no. of times this will be called is <= max_num_hash_func_ + num_entries_.
   for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
-    uint64_t bucket_id = hash_vals[hash_cnt];
-    (*buckets)[bucket_id].make_space_for_key_call_id =
-        make_space_for_key_call_id;
-    tree.push_back(CuckooNode(bucket_id, 0, 0));
+    uint64_t bid = hash_vals[hash_cnt];
+    (*buckets)[bid].make_space_for_key_call_id = make_space_for_key_call_id;
+    tree.push_back(CuckooNode(bid, 0, 0));
   }
   bool null_found = false;
   uint32_t curr_pos = 0;
@@ -72,8 +72,8 @@ class CuckooReaderTest {
     env_options = EnvOptions(options);
   }
 
-  void SetUp(int num_items) {
-    this->num_items = num_items;
+  void SetUp(int num) {
+    num_items = num;
     hash_map.clear();
     keys.clear();
     keys.resize(num_items);
@@ -358,12 +358,12 @@ Status PlainTableReader::PopulateIndex(TableProperties* props,
 
   std::vector<uint32_t> prefix_hashes;
   if (!index_in_file) {
-    Status s = PopulateIndexRecordList(&index_builder, &prefix_hashes);
+    s = PopulateIndexRecordList(&index_builder, &prefix_hashes);
     if (!s.ok()) {
       return s;
     }
   } else {
-    Status s = index_.InitFromRawData(*index_block);
+    s = index_.InitFromRawData(*index_block);
     if (!s.ok()) {
       return s;
     }
@@ -566,7 +566,7 @@ Status PlainTableReader::Get(const ReadOptions& ro, const Slice& target,
   PlainTableKeyDecoder decoder(encoding_type_, user_key_len_,
                                ioptions_.prefix_extractor);
   while (offset < data_end_offset_) {
-    Status s = Next(&decoder, &offset, &found_key, nullptr, &found_value);
+    s = Next(&decoder, &offset, &found_key, nullptr, &found_value);
     if (!s.ok()) {
       return s;
     }
@@ -1416,31 +1416,32 @@ class BlockCachePropertiesSnapshot {
     filter_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_FILTER_HIT);
   }
 
-  void AssertIndexBlockStat(int64_t index_block_cache_miss,
-                            int64_t index_block_cache_hit) {
-    ASSERT_EQ(index_block_cache_miss, this->index_block_cache_miss);
-    ASSERT_EQ(index_block_cache_hit, this->index_block_cache_hit);
+  void AssertIndexBlockStat(int64_t expected_index_block_cache_miss,
+                            int64_t expected_index_block_cache_hit) {
+    ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
+    ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
   }
 
-  void AssertFilterBlockStat(int64_t filter_block_cache_miss,
-                             int64_t filter_block_cache_hit) {
-    ASSERT_EQ(filter_block_cache_miss, this->filter_block_cache_miss);
-    ASSERT_EQ(filter_block_cache_hit, this->filter_block_cache_hit);
+  void AssertFilterBlockStat(int64_t expected_filter_block_cache_miss,
+                             int64_t expected_filter_block_cache_hit) {
+    ASSERT_EQ(expected_filter_block_cache_miss, filter_block_cache_miss);
+    ASSERT_EQ(expected_filter_block_cache_hit, filter_block_cache_hit);
   }
 
   // Check if the fetched props matches the expected ones.
   // TODO(kailiu) Use this only when you disabled filter policy!
-  void AssertEqual(int64_t index_block_cache_miss,
-                   int64_t index_block_cache_hit, int64_t data_block_cache_miss,
-                   int64_t data_block_cache_hit) const {
-    ASSERT_EQ(index_block_cache_miss, this->index_block_cache_miss);
-    ASSERT_EQ(index_block_cache_hit, this->index_block_cache_hit);
-    ASSERT_EQ(data_block_cache_miss, this->data_block_cache_miss);
-    ASSERT_EQ(data_block_cache_hit, this->data_block_cache_hit);
-    ASSERT_EQ(index_block_cache_miss + data_block_cache_miss,
-              this->block_cache_miss);
-    ASSERT_EQ(index_block_cache_hit + data_block_cache_hit,
-              this->block_cache_hit);
+  void AssertEqual(int64_t expected_index_block_cache_miss,
+                   int64_t expected_index_block_cache_hit,
+                   int64_t expected_data_block_cache_miss,
+                   int64_t expected_data_block_cache_hit) const {
+    ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss);
+    ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit);
+    ASSERT_EQ(expected_data_block_cache_miss, data_block_cache_miss);
+    ASSERT_EQ(expected_data_block_cache_hit, data_block_cache_hit);
+    ASSERT_EQ(expected_index_block_cache_miss + expected_data_block_cache_miss,
+              block_cache_miss);
+    ASSERT_EQ(expected_index_block_cache_hit + expected_data_block_cache_hit,
+              block_cache_hit);
   }
 
  private:
@@ -19,8 +19,8 @@ class InternalKeyComparator;
 class Arena;
 
 struct TwoLevelIteratorState {
-  explicit TwoLevelIteratorState(bool check_prefix_may_match)
-      : check_prefix_may_match(check_prefix_may_match) {}
+  explicit TwoLevelIteratorState(bool _check_prefix_may_match)
+      : check_prefix_may_match(_check_prefix_may_match) {}
 
   virtual ~TwoLevelIteratorState() {}
   virtual Iterator* NewSecondaryIterator(const Slice& handle) = 0;
@@ -25,8 +25,8 @@ struct BlobChunk {
   uint32_t offset; // in blocks
   uint32_t size; // in blocks
   BlobChunk() {}
-  BlobChunk(uint32_t bucket_id, uint32_t offset, uint32_t size) :
-    bucket_id(bucket_id), offset(offset), size(size) {}
+  BlobChunk(uint32_t _bucket_id, uint32_t _offset, uint32_t _size)
+      : bucket_id(_bucket_id), offset(_offset), size(_size) {}
 
   // returns true if it's immediately before chunk
   bool ImmediatelyBefore(const BlobChunk& chunk) const;
@@ -325,7 +325,7 @@ Cache::Handle* LRUCache::Insert(
     // Free the space following strict LRU policy until enough space
     // is freed.
     while (usage_ > capacity_ && lru_.next != &lru_) {
-      LRUHandle* old = lru_.next;
+      old = lru_.next;
       LRU_Remove(old);
       table_.Remove(old->key(), old->hash);
       if (Unref(old)) {
@@ -15,8 +15,8 @@ namespace rocksdb {
 
 // Pending request
 struct GenericRateLimiter::Req {
-  explicit Req(int64_t bytes, port::Mutex* mu) :
-      bytes(bytes), cv(mu), granted(false) {}
+  explicit Req(int64_t _bytes, port::Mutex* _mu)
+      : bytes(_bytes), cv(_mu), granted(false) {}
   int64_t bytes;
   port::CondVar cv;
   bool granted;
@@ -30,23 +30,23 @@ TEST(RateLimiterTest, StartStop) {
 TEST(RateLimiterTest, Rate) {
   auto* env = Env::Default();
   struct Arg {
-    Arg(int64_t target_rate, int burst)
-        : limiter(new GenericRateLimiter(target_rate, 100 * 1000, 10)),
-          request_size(target_rate / 10),
-          burst(burst) {}
+    Arg(int64_t _target_rate, int _burst)
+        : limiter(new GenericRateLimiter(_target_rate, 100 * 1000, 10)),
+          request_size(_target_rate / 10),
+          burst(_burst) {}
     std::unique_ptr<RateLimiter> limiter;
     int64_t request_size;
     int burst;
   };
 
   auto writer = [](void* p) {
-    auto* env = Env::Default();
+    auto* thread_env = Env::Default();
     auto* arg = static_cast<Arg*>(p);
     // Test for 2 seconds
-    auto until = env->NowMicros() + 2 * 1000000;
-    Random r((uint32_t)(env->NowNanos() %
+    auto until = thread_env->NowMicros() + 2 * 1000000;
+    Random r((uint32_t)(thread_env->NowNanos() %
                         std::numeric_limits<uint32_t>::max()));
-    while (env->NowMicros() < until) {
+    while (thread_env->NowMicros() < until) {
      for (int i = 0; i < static_cast<int>(r.Skewed(arg->burst) + 1); ++i) {
        arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1,
                              Env::IO_HIGH);
@@ -24,11 +24,11 @@ class ThreadLocalTest {
 namespace {
 
 struct Params {
-  Params(port::Mutex* m, port::CondVar* c, int* unref, int n,
+  Params(port::Mutex* m, port::CondVar* c, int* u, int n,
          UnrefHandler handler = nullptr)
       : mu(m),
         cv(c),
-        unref(unref),
+        unref(u),
        total(n),
        started(0),
        completed(0),
@@ -112,24 +112,24 @@ TEST(ThreadLocalTest, SequentialReadWriteTest) {
   p.tls2 = &tls2;
 
   auto func = [](void* ptr) {
-    auto& p = *static_cast<Params*>(ptr);
+    auto& params = *static_cast<Params*>(ptr);
 
-    ASSERT_TRUE(p.tls1.Get() == nullptr);
-    p.tls1.Reset(reinterpret_cast<int*>(1));
-    ASSERT_TRUE(p.tls1.Get() == reinterpret_cast<int*>(1));
-    p.tls1.Reset(reinterpret_cast<int*>(2));
-    ASSERT_TRUE(p.tls1.Get() == reinterpret_cast<int*>(2));
+    ASSERT_TRUE(params.tls1.Get() == nullptr);
+    params.tls1.Reset(reinterpret_cast<int*>(1));
+    ASSERT_TRUE(params.tls1.Get() == reinterpret_cast<int*>(1));
+    params.tls1.Reset(reinterpret_cast<int*>(2));
+    ASSERT_TRUE(params.tls1.Get() == reinterpret_cast<int*>(2));
 
-    ASSERT_TRUE(p.tls2->Get() == nullptr);
-    p.tls2->Reset(reinterpret_cast<int*>(1));
-    ASSERT_TRUE(p.tls2->Get() == reinterpret_cast<int*>(1));
-    p.tls2->Reset(reinterpret_cast<int*>(2));
-    ASSERT_TRUE(p.tls2->Get() == reinterpret_cast<int*>(2));
+    ASSERT_TRUE(params.tls2->Get() == nullptr);
+    params.tls2->Reset(reinterpret_cast<int*>(1));
+    ASSERT_TRUE(params.tls2->Get() == reinterpret_cast<int*>(1));
+    params.tls2->Reset(reinterpret_cast<int*>(2));
+    ASSERT_TRUE(params.tls2->Get() == reinterpret_cast<int*>(2));
 
-    p.mu->Lock();
-    ++(p.completed);
-    p.cv->SignalAll();
-    p.mu->Unlock();
+    params.mu->Lock();
+    ++(params.completed);
+    params.cv->SignalAll();
+    params.mu->Unlock();
   };
 
   for (int iter = 0; iter < 1024; ++iter) {
@@ -192,8 +192,8 @@ Status GeoDBImpl::SearchRadial(const GeoPosition& pos,
       // we are looking for.
       auto res = std::mismatch(qid.begin(), qid.end(), quadkey->begin());
       if (res.first == qid.end()) {
-        GeoPosition pos(atof(parts[3].c_str()), atof(parts[4].c_str()));
-        GeoObject obj(pos, parts[4], iter->value().ToString());
+        GeoPosition obj_pos(atof(parts[3].c_str()), atof(parts[4].c_str()));
+        GeoObject obj(obj_pos, parts[4], iter->value().ToString());
         values->push_back(obj);
         number_of_values--;
       } else {
@@ -437,11 +437,11 @@ class WBWIIteratorImpl : public WBWIIterator {
 
 struct WriteBatchWithIndex::Rep {
   Rep(const Comparator* index_comparator, size_t reserved_bytes = 0,
-      bool overwrite_key = false)
+      bool _overwrite_key = false)
       : write_batch(reserved_bytes),
         comparator(index_comparator, &write_batch),
         skip_list(comparator, &arena),
-        overwrite_key(overwrite_key),
+        overwrite_key(_overwrite_key),
         last_entry_offset(0) {}
   ReadableWriteBatch write_batch;
   WriteBatchEntryComparator comparator;