Fixing the warning messages captured under Mac OS

Summary: The work to make sure RocksDB compiles on Mac OS is not complete yet, but at least we can start cleaning up the warnings that are captured only by g++ on Mac OS.

Test Plan: ran make on Mac OS

Reviewers: dhruba

Reviewed By: dhruba

CC: leveldb

Differential Revision: https://reviews.facebook.net/D14049

parent c3dda7276c
commit 21587760b9

Makefile (10 changed lines)
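Most of the hunks below swap %lu or %ld for %llu in log and printf format strings. A minimal sketch (not part of the diff) of why this class of warning shows up only on Mac OS, assuming a 64-bit build: uint64_t is unsigned long long on Mac OS but unsigned long on 64-bit Linux, so a %lu that is silent on Linux trips -Wformat under Mac's g++ (which invokes clang on recent Xcode). The PRIu64 form shown last is the usual fully portable alternative, though this commit uses %llu.

    #include <cinttypes>
    #include <cstdio>

    int main() {
      uint64_t number = 42;
      // printf("Delete type=%d #%lu\n", 0, number);  // -Wformat on Mac OS
      printf("Delete type=%d #%llu\n", 0, (unsigned long long)number);
      printf("Delete type=%d #%" PRIu64 "\n", 0, number);  // portable form
      return 0;
    }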
@@ -74,8 +74,8 @@ TOOLS = \
 	sst_dump \
 	db_stress \
 	ldb \
-	db_repl_stress \
-	blob_store_bench
+	db_repl_stress \
+	blob_store_bench

 PROGRAMS = db_bench signal_test $(TESTS) $(TOOLS)
 BENCHMARKS = db_bench_sqlite3 db_bench_tree_db table_reader_bench
@@ -131,7 +131,7 @@ coverage:
 	COVERAGEFLAGS="-fprofile-arcs -ftest-coverage" LDFLAGS+="-lgcov" $(MAKE) all check
 	(cd coverage; ./coverage_test.sh)
 	# Delete intermediate files
-	find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" | xargs --no-run-if-empty rm
+	find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;

 check: all $(PROGRAMS) $(TESTS) $(TOOLS) ldb_tests
 	for t in $(TESTS); do echo "***** Running $$t"; ./$$t || exit 1; done
@@ -164,8 +164,8 @@ valgrind_check: all $(PROGRAMS) $(TESTS)
 clean:
 	-rm -f $(PROGRAMS) $(BENCHMARKS) $(LIBRARY) $(SHARED) $(MEMENVLIBRARY) build_config.mk
 	-rm -rf ios-x86/* ios-arm/*
-	-find . -name "*.[od]" | xargs --no-run-if-empty rm
-	-find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" | xargs --no-run-if-empty rm
+	-find . -name "*.[od]" -exec rm {} \;
+	-find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;

 tags:
 	ctags * -R
 	cscope -b `find . -name '*.cc'` `find . -name '*.h'`
@@ -71,7 +71,7 @@ GENERIC_PORT_FILES=`find $ROCKSDB_ROOT/port -name '*.cc' | tr "\n" " "`
 case "$TARGET_OS" in
     Darwin)
         PLATFORM=OS_MACOSX
-        COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -DOS_MACOSX"
+        COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX"
        PLATFORM_SHARED_EXT=dylib
        PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
        # PORT_FILES=port/darwin/darwin_specific.cc
@@ -17,7 +17,7 @@ struct Options;
 struct FileMetaData;

 class Env;
-class EnvOptions;
+struct EnvOptions;
 class Iterator;
 class TableCache;
 class VersionEdit;
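The class/struct swaps in forward declarations (here, and in include/rocksdb/env.h and write_batch.h below) silence clang's -Wmismatched-tags, which fires when a forward declaration uses a different class-key than the definition. A small illustration, not from the diff; EnvOptions's body here is a stand-in:

    struct EnvOptions { int dummy; };  // suppose the type is defined as a struct

    struct EnvOptions;   // redeclaration with a matching class-key: no warning
    // class EnvOptions; // also legal C++, but clang (Mac's g++) warns under
    //                   // -Wmismatched-tags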
@@ -557,7 +557,7 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
       // evict from cache
       table_cache_->Evict(number);
     }
-    Log(options_.info_log, "Delete type=%d #%lu", int(type), number);
+    Log(options_.info_log, "Delete type=%d #%llu", int(type), number);

     Status st;
     if (type == kLogFile && (options_.WAL_ttl_seconds > 0 ||
@@ -566,12 +566,12 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
                               ArchivedLogFileName(options_.wal_dir,
                                                   number));
       if (!st.ok()) {
-        Log(options_.info_log, "RenameFile logfile #%lu FAILED", number);
+        Log(options_.info_log, "RenameFile logfile #%llu FAILED", number);
       }
     } else {
       st = env_->DeleteFile(dbname_ + "/" + state.all_files[i]);
       if (!st.ok()) {
-        Log(options_.info_log, "Delete type=%d #%lu FAILED\n",
+        Log(options_.info_log, "Delete type=%d #%llu FAILED\n",
            int(type), number);
       }
     }
@@ -1000,7 +1000,7 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
   std::vector<Iterator*> list;
   for (MemTable* m : mems) {
     Log(options_.info_log,
-        "Flushing memtable with log file: %lu\n",
+        "Flushing memtable with log file: %llu\n",
        m->GetLogNumber());
     list.push_back(m->NewIterator());
   }
@@ -1009,8 +1009,7 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
   const SequenceNumber newest_snapshot = snapshots_.GetNewest();
   const SequenceNumber earliest_seqno_in_memtable =
     mems[0]->GetFirstSequenceNumber();
-  Log(options_.info_log, "Level-0 flush table #%llu: started",
-      (unsigned long long) meta.number);
+  Log(options_.info_log, "Level-0 flush table #%llu: started", meta.number);

   Version* base = versions_->current();
   base->Ref();          // it is likely that we do not need this reference
@@ -1346,7 +1345,7 @@ Status DBImpl::ReadFirstRecord(const WalFileType type, const uint64_t number,
     Status status = ReadFirstLine(fname, result);
     return status;
   }
-  return Status::NotSupported("File Type Not Known: " + type);
+  return Status::NotSupported("File Type Not Known: " + std::to_string(type));
 }

 Status DBImpl::ReadFirstLine(const std::string& fname,
@@ -2030,7 +2029,7 @@ inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
     assert(prev);
   }
   Log(options_.info_log,
-      "Looking for seqid %ld but maxseqid is %ld", in,
+      "Looking for seqid %llu but maxseqid is %llu", in,
      snapshots[snapshots.size()-1]);
   assert(0);
   return 0;
@@ -3061,7 +3060,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
           internal_comparator_, mem_rep_factory_, NumberLevels(), options_);
       mem_->Ref();
       Log(options_.info_log,
-          "New memtable created with log file: #%lu\n",
+          "New memtable created with log file: #%llu\n",
          logfile_number_);
       mem_->SetLogNumber(logfile_number_);
       force = false;   // Do not force another compaction if have room
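One fix above is more than cosmetic: "File Type Not Known: " + type never appended the number. Adding an integer to a string literal is pointer arithmetic, which both truncates the message and draws a warning. A standalone sketch of the pitfall, not taken from the diff:

    #include <string>

    const char* bad = "File Type Not Known: " + 3;  // pointer arithmetic:
                                                    // points at "e Type Not Known: "
    std::string good = "File Type Not Known: " + std::to_string(3);  // "...: 3"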
@@ -3644,7 +3644,7 @@ TEST(DBTest, SnapshotFiles) {
       char buffer[4096];
       Slice slice;
       while (size > 0) {
-        uint64_t one = std::min(sizeof(buffer), size);
+        uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
         ASSERT_OK(srcfile->Read(one, &slice, buffer));
         ASSERT_OK(destfile->Append(slice));
         size -= slice.size();
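The cast in this hunk is needed because std::min<T> deduces a single type from both arguments. A minimal sketch of the failure mode, assuming a platform like Mac OS where size_t (unsigned long) and uint64_t (unsigned long long) are distinct types; the helper name is illustrative only:

    #include <algorithm>
    #include <cstdint>

    uint64_t ChunkSize(uint64_t size) {
      char buffer[4096];
      (void)buffer;
      // std::min(sizeof(buffer), size);  // fails to compile where size_t and
      //                                  // uint64_t differ: T is ambiguous
      return std::min(uint64_t(sizeof(buffer)), size);  // cast unifies T
    }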
@@ -122,7 +122,7 @@ Status MemTableList::InstallMemtableFlushResults(
       break;
     }

-    Log(info_log, "Level-0 commit table #%lu started", m->file_number_);
+    Log(info_log, "Level-0 commit table #%llu started", m->file_number_);

     // this can release and reacquire the mutex.
     s = vset->LogAndApply(&m->edit_, mu);
@@ -133,7 +133,7 @@ Status MemTableList::InstallMemtableFlushResults(
     do {
       if (s.ok()) { // commit new state
         Log(info_log,
-            "Level-0 commit table #%lu: memtable #%lu done",
+            "Level-0 commit table #%llu: memtable #%llu done",
            m->file_number_,
            mem_id);
         memlist_.remove(m);
@@ -149,7 +149,7 @@ Status MemTableList::InstallMemtableFlushResults(
       } else {
         //commit failed. setup state so that we can flush again.
         Log(info_log,
-            "Level-0 commit table #%lu: memtable #%lu failed",
+            "Level-0 commit table #%llu: memtable #%llu failed",
            m->file_number_,
            mem_id);
         m->flush_completed_ = false;
@@ -88,7 +88,6 @@ class MergeHelper {
   const Comparator* user_comparator_;
   const MergeOperator* user_merge_operator_;
   Logger* logger_;
-  Iterator* iter_;    // in: the internal iterator, positioned at the first merge entry
   bool assert_valid_internal_key_; // enforce no internal key corruption?

   // the scratch area that holds the result of MergeUntil
@@ -102,7 +102,6 @@ class Repairer {
   InternalKeyComparator const icmp_;
   InternalFilterPolicy const ipolicy_;
   Options const options_;
-  bool owns_cache_;
   TableCache* table_cache_;
   VersionEdit* edit_;

@@ -319,7 +319,7 @@ SimpleTableIterator::~SimpleTableIterator() {
 }

 bool SimpleTableIterator::Valid() const {
-  return offset_ < table_->rep_->index_start_offset && offset_ >= 0;
+  return offset_ < table_->rep_->index_start_offset;
 }

 void SimpleTableIterator::SeekToFirst() {
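The dropped clause is the tautological-comparison warning: assuming offset_ is an unsigned type, "offset_ >= 0" can never be false, so the test conveys nothing. Illustration, not from the diff:

    #include <cstdint>

    bool Valid(uint64_t offset, uint64_t limit) {
      // The second clause is always true for an unsigned type; clang
      // (Mac's g++) flags it with -Wtautological-compare.
      return offset < limit && offset >= 0;
    }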
@@ -205,8 +205,9 @@ bool TransactionLogIteratorImpl::IsBatchExpected(
   if (batchSeq != expectedSeq) {
     char buf[200];
     snprintf(buf, sizeof(buf),
-             "Discontinuity in log records. Got seq=%lu, Expected seq=%lu, "
-             "Last flushed seq=%lu.Log iterator will reseek the correct batch.",
+             "Discontinuity in log records. Got seq=%llu, Expected seq=%llu, "
+             "Last flushed seq=%llu.Log iterator will reseek the correct "
+             "batch.",
             batchSeq, expectedSeq, dbimpl_->GetLatestSequenceNumber());
     reporter_.Info(buf);
     return false;
@@ -224,8 +225,10 @@ void TransactionLogIteratorImpl::UpdateCurrentWriteBatch(const Slice& record) {
     // Seek to the batch having expected sequence number
     if (expectedSeq < files_->at(currentFileIndex_)->StartSequence()) {
       // Expected batch must lie in the previous log file
-      currentFileIndex_--;
-      currentFileIndex_ = (currentFileIndex_ >= 0) ? currentFileIndex_ : 0;
+      // Avoid underflow.
+      if (currentFileIndex_ != 0) {
+        currentFileIndex_--;
+      }
     }
     startingSequenceNumber_ = expectedSeq;
     // currentStatus_ will be set to Ok if reseek succeeds
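A sketch of the wrap-around hazard fixed in that hunk, assuming the index is an unsigned type: decrementing past zero wraps to a huge value, and a ">= 0" clamp on an unsigned value is always true, so it cannot repair the damage. Not part of the diff:

    #include <cstddef>

    void StepBack(size_t& idx) {
      // idx--; idx = (idx >= 0) ? idx : 0;  // broken: 0 wraps to SIZE_MAX
      //                                     // and the clamp is always true
      if (idx != 0) {  // guard first; decrementing unsigned 0 would wrap
        idx--;
      }
    }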
@@ -1322,7 +1322,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
   if (s.ok() && old_manifest_file_number < manifest_file_number_) {
     // delete old manifest file
     Log(options_->info_log,
-        "Deleting manifest %lu current manifest %lu\n",
+        "Deleting manifest %llu current manifest %llu\n",
        old_manifest_file_number, manifest_file_number_);
     // we don't care about an error here, PurgeObsoleteFiles will take care
     // of it later
@@ -1348,7 +1348,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
     prev_log_number_ = edit->prev_log_number_;

   } else {
-    Log(options_->info_log, "Error in committing version %ld",
+    Log(options_->info_log, "Error in committing version %llu",
        v->GetVersionNumber());
     delete v;
     if (!new_manifest_file.empty()) {
@@ -1521,9 +1521,9 @@ Status VersionSet::Recover() {
     prev_log_number_ = prev_log_number;

     Log(options_->info_log, "Recovered from manifest file:%s succeeded,"
-        "manifest_file_number is %ld, next_file_number is %ld, "
-        "last_sequence is %ld, log_number is %ld,"
-        "prev_log_number is %ld\n",
+        "manifest_file_number is %llu, next_file_number is %llu, "
+        "last_sequence is %llu, log_number is %llu,"
+        "prev_log_number is %llu\n",
        current.c_str(), manifest_file_number_, next_file_number_,
        last_sequence_, log_number_, prev_log_number_);
   }
@@ -1647,7 +1647,8 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
     log_number_ = log_number;
     prev_log_number_ = prev_log_number;

-    printf("manifest_file_number %ld next_file_number %ld last_sequence %ld log_number %ld prev_log_number %ld\n",
+    printf("manifest_file_number %llu next_file_number %llu last_sequence "
+           "%llu log_number %llu prev_log_number %llu\n",
           manifest_file_number_, next_file_number_,
           last_sequence, log_number, prev_log_number);
     printf("%s \n", v->DebugString(hex).c_str());
@@ -1863,7 +1864,7 @@ const char* VersionSet::LevelDataSizeSummary(
   int len = snprintf(scratch->buffer, sizeof(scratch->buffer), "files_size[");
   for (int i = 0; i < NumberLevels(); i++) {
     int sz = sizeof(scratch->buffer) - len;
-    int ret = snprintf(scratch->buffer + len, sz, "%ld ",
+    int ret = snprintf(scratch->buffer + len, sz, "%llu ",
                       NumLevelBytes(i));
     if (ret < 0 || ret >= sz)
       break;
@@ -1879,7 +1880,8 @@ const char* VersionSet::LevelFileSummary(
   for (unsigned int i = 0; i < current_->files_[level].size(); i++) {
     FileMetaData* f = current_->files_[level][i];
     int sz = sizeof(scratch->buffer) - len;
-    int ret = snprintf(scratch->buffer + len, sz, "#%ld(seq=%ld,sz=%ld,%d) ",
+    int ret = snprintf(scratch->buffer + len, sz,
+                       "#%llu(seq=%llu,sz=%llu,%d) ",
                       f->number, f->smallest_seqno,
                       f->file_size, f->being_compacted);
     if (ret < 0 || ret >= sz)
@@ -2219,7 +2221,7 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
       start_index = loop;         // Consider this as the first candidate.
       break;
     }
-    Log(options_->info_log, "Universal: skipping file %ld[%d] compacted %s",
+    Log(options_->info_log, "Universal: skipping file %llu[%d] compacted %s",
        f->number, loop, " cannot be a candidate to reduce size amp.\n");
     f = nullptr;
   }
@@ -2227,7 +2229,7 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
     return nullptr;             // no candidate files
   }

-  Log(options_->info_log, "Universal: First candidate file %ld[%d] %s",
+  Log(options_->info_log, "Universal: First candidate file %llu[%d] %s",
      f->number, start_index, " to reduce size amp.\n");

   // keep adding up all the remaining files
@@ -2237,7 +2239,7 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
     f = current_->files_[level][index];
     if (f->being_compacted) {
       Log(options_->info_log,
-          "Universal: Possible candidate file %ld[%d] %s.", f->number, loop,
+          "Universal: Possible candidate file %llu[%d] %s.", f->number, loop,
          " is already being compacted. No size amp reduction possible.\n");
       return nullptr;
     }
@@ -2255,14 +2257,14 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
   // size amplification = percentage of additional size
   if (candidate_size * 100 < ratio * earliest_file_size) {
     Log(options_->info_log,
-        "Universal: size amp not needed. newer-files-total-size %ld "
-        "earliest-file-size %ld",
+        "Universal: size amp not needed. newer-files-total-size %llu "
+        "earliest-file-size %llu",
        candidate_size, earliest_file_size);
     return nullptr;
   } else {
     Log(options_->info_log,
-        "Universal: size amp needed. newer-files-total-size %ld "
-        "earliest-file-size %ld",
+        "Universal: size amp needed. newer-files-total-size %llu "
+        "earliest-file-size %llu",
        candidate_size, earliest_file_size);
   }
   assert(start_index >= 0 && start_index < file_by_time.size() - 1);
@@ -2278,7 +2280,7 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
     f = current_->files_[level][index];
     c->inputs_[0].push_back(f);
     Log(options_->info_log,
-        "Universal: size amp picking file %ld[%d] with size %ld",
+        "Universal: size amp picking file %llu[%d] with size %llu",
        f->number, index, f->file_size);
   }
   return c;
@@ -2325,7 +2327,7 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
       break;
     }
     Log(options_->info_log,
-        "Universal: file %ld[%d] being compacted, skipping",
+        "Universal: file %llu[%d] being compacted, skipping",
        f->number, loop);
     f = nullptr;
   }
@@ -2334,7 +2336,7 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
     // first candidate to be compacted.
     uint64_t candidate_size = f != nullptr? f->file_size : 0;
     if (f != nullptr) {
-      Log(options_->info_log, "Universal: Possible candidate file %ld[%d].",
+      Log(options_->info_log, "Universal: Possible candidate file %llu[%d].",
          f->number, loop);
     }

@@ -2368,7 +2370,7 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
       int index = file_by_time[i];
       FileMetaData* f = current_->files_[level][index];
       Log(options_->info_log,
-          "Universal: Skipping file %ld[%d] with size %ld %d\n",
+          "Universal: Skipping file %llu[%d] with size %llu %d\n",
          f->number, i, f->file_size, f->being_compacted);
     }
   }
@@ -2403,7 +2405,7 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
     int index = file_by_time[i];
     FileMetaData* f = current_->files_[level][index];
     c->inputs_[0].push_back(f);
-    Log(options_->info_log, "Universal: Picking file %ld[%d] with size %ld\n",
+    Log(options_->info_log, "Universal: Picking file %llu[%d] with size %llu\n",
        f->number, i, f->file_size);
   }
   return c;
@@ -2790,14 +2792,16 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
       if (expanded1.size() == c->inputs_[1].size() &&
           !FilesInCompaction(expanded1)) {
         Log(options_->info_log,
-            "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
+            "Expanding@%d %d+%d (%llu+%llu bytes) to %d+%d (%llu+%llu bytes)\n",
            level,
            int(c->inputs_[0].size()),
            int(c->inputs_[1].size()),
-            long(inputs0_size), long(inputs1_size),
+            inputs0_size,
+            inputs1_size,
            int(expanded0.size()),
            int(expanded1.size()),
-            long(expanded0_size), long(inputs1_size));
+            expanded0_size,
+            inputs1_size);
         smallest = new_start;
         largest = new_limit;
         c->inputs_[0] = expanded0;
@@ -3087,7 +3091,7 @@ static void InputSummary(std::vector<FileMetaData*>& files,
   int write = 0;
   for (unsigned int i = 0; i < files.size(); i++) {
     int sz = len - write;
-    int ret = snprintf(output + write, sz, "%lu(%lu) ",
+    int ret = snprintf(output + write, sz, "%llu(%llu) ",
                       files.at(i)->number,
                       files.at(i)->file_size);
     if (ret < 0 || ret >= sz)
@@ -3098,7 +3102,7 @@ static void InputSummary(std::vector<FileMetaData*>& files,

 void Compaction::Summary(char* output, int len) {
   int write = snprintf(output, len,
-      "Base version %ld Base level %d, seek compaction:%d, inputs:",
+      "Base version %llu Base level %d, seek compaction:%d, inputs:",
      input_version_->GetVersionNumber(), level_, seek_compaction_);
   if (write < 0 || write > len) {
     return;
@@ -29,7 +29,7 @@ class SequentialFile;
 class Slice;
 class WritableFile;
 class RandomRWFile;
-class Options;
+struct Options;

 using std::unique_ptr;
 using std::shared_ptr;
@@ -424,7 +424,7 @@ class WritableFile {
   // This asks the OS to initiate flushing the cached data to disk,
   // without waiting for completion.
   // Default implementation does nothing.
-  virtual Status RangeSync(off64_t offset, off64_t nbytes) {
+  virtual Status RangeSync(off_t offset, off_t nbytes) {
     return Status::OK();
   }

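The RangeSync signature change is a portability fix: off64_t is a glibc extension that does not exist on Mac OS, while off_t is already 64-bit there. A minimal sketch under those assumptions; the helper name and the no-op fallback are illustrative, not RocksDB API:

    #include <sys/types.h>
    #if defined(__linux__)
    #include <fcntl.h>  // sync_file_range() is Linux-only (needs _GNU_SOURCE)
    #endif

    int RangeSyncHint(int fd, off_t offset, off_t nbytes) {
    #if defined(__linux__)
      // Ask the kernel to start write-out of the given range.
      return sync_file_range(fd, offset, nbytes, SYNC_FILE_RANGE_WRITE);
    #else
      (void)fd; (void)offset; (void)nbytes;
      return 0;  // no equivalent on Mac OS; treat the hint as a no-op
    #endif
    }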
@@ -27,7 +27,7 @@
 namespace rocksdb {

 class Slice;
-class SliceParts;
+struct SliceParts;

 class WriteBatch {
  public:
@@ -33,7 +33,7 @@ namespace rocksdb {
 namespace {

 struct BytewiseLessThan {
-  bool operator()(const std::string& key1, const std::string& key2) {
+  bool operator()(const std::string& key1, const std::string& key2) const {
     // smaller entries will be placed in front.
     return comparator->Compare(key1, key2) <= 0;
   }
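A sketch of why operator() gains the const qualifier, assuming the functor serves as the comparator of an ordered container reached through a const path (the exact container in the original file is not shown in this view): lookups on a const map invoke the stored comparator as const, so a non-const operator() fails there.

    #include <map>
    #include <string>

    struct LessThan {
      bool operator()(const std::string& a, const std::string& b) const {
        return a < b;
      }
    };

    size_t Count(const std::map<std::string, int, LessThan>& m,
                 const std::string& k) {
      return m.count(k);  // calls LessThan::operator() via a const path
    }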
@@ -70,7 +70,7 @@ void LogStatsCollectionError(
   std::string msg =
     "[Warning] encountered error when calling TableStatsCollector::" +
     method + "() with collector name: " + name;
-  Log(info_log, msg.c_str());
+  Log(info_log, "%s", msg.c_str());
 }

 }  // anonymous namespace
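This hunk and the two below are the -Wformat-security fix: passing a variable as the format string lets any '%' inside it be interpreted as a directive. A standalone illustration, not from the diff:

    #include <cstdio>
    #include <string>

    void Demo(const std::string& msg) {
      // printf(msg.c_str());     // warns; a '%s' inside msg would be a directive
      printf("%s", msg.c_str());  // safe: msg is printed as plain data
    }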
@@ -249,7 +249,7 @@ void BlockBasedTable::ReadMeta(const Footer& footer) {
       auto err_msg =
         "[Warning] Encountered error while reading data from stats block " +
         s.ToString();
-      Log(rep_->options.info_log, err_msg.c_str());
+      Log(rep_->options.info_log, "%s", err_msg.c_str());
     }
   }

@@ -341,7 +341,7 @@ Status BlockBasedTable::ReadStats(const Slice& handle_value, Rep* rep) {
       auto error_msg =
         "[Warning] detect malformed value in stats meta-block:"
         "\tkey: " + key + "\tval: " + raw_val.ToString();
-      Log(rep->options.info_log, error_msg.c_str());
+      Log(rep->options.info_log, "%s", error_msg.c_str());
       continue;
     }
     *(pos->second) = val;
@@ -135,7 +135,7 @@ Status BlobStore::Put(const Slice& value, Blob* blob) {
   if (!s.ok()) {
     return s;
   }
-  size_t size_left = value.size();
+  auto size_left = (uint64_t) value.size();

   uint64_t offset = 0; // in bytes, not blocks
   for (auto chunk : blob->chunks) {
@@ -173,7 +173,6 @@ class LRUCache {
   // mutex_ protects the following state.
   port::Mutex mutex_;
   size_t usage_;
-  uint64_t last_id_;

   // Dummy head of LRU list.
   // lru.prev is newest entry, lru.next is oldest entry.
@@ -183,8 +182,7 @@ class LRUCache {
 };

 LRUCache::LRUCache()
-    : usage_(0),
-      last_id_(0) {
+    : usage_(0) {
   // Make empty circular linked list
   lru_.next = &lru_;
   lru_.prev = &lru_;
@@ -406,7 +404,7 @@ class ShardedLRUCache : public Cache {
     MutexLock l(&id_mutex_);
     return ++(last_id_);
   }
-  virtual uint64_t GetCapacity() {
+  virtual size_t GetCapacity() {
     return capacity_;
   }
 };
@@ -165,7 +165,7 @@ std::string HistogramImpl::ToString() const {
     if (buckets_[b] <= 0.0) continue;
     sum += buckets_[b];
     snprintf(buf, sizeof(buf),
-             "[ %7ld, %7ld ) %8ld %7.3f%% %7.3f%% ",
+             "[ %7llu, %7llu ) %8llu %7.3f%% %7.3f%% ",
             ((b == 0) ? 0 : bucketMapper.BucketLimit(b-1)),  // left
             bucketMapper.BucketLimit(b),                     // right
             buckets_[b],                                     // count
@@ -570,11 +570,11 @@ void PrintBucketCounts(const vector<uint64_t>& bucket_counts, int ttl_start,
     int ttl_end, int bucket_size, int num_buckets) {
   int time_point = ttl_start;
   for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
-    fprintf(stdout, "Keys in range %s to %s : %lu\n",
+    fprintf(stdout, "Keys in range %s to %s : %llu\n",
            ReadableTime(time_point).c_str(),
            ReadableTime(time_point + bucket_size).c_str(), bucket_counts[i]);
   }
-  fprintf(stdout, "Keys in range %s to %s : %lu\n",
+  fprintf(stdout, "Keys in range %s to %s : %llu\n",
          ReadableTime(time_point).c_str(),
          ReadableTime(ttl_end).c_str(), bucket_counts[num_buckets - 1]);
 }
@@ -1424,7 +1424,7 @@ void ApproxSizeCommand::DoCommand() {
   ranges[0] = Range(start_key_, end_key_);
   uint64_t sizes[1];
   db_->GetApproximateSizes(ranges, 1, sizes);
-  fprintf(stdout, "%ld\n", sizes[0]);
+  fprintf(stdout, "%llu\n", sizes[0]);
   /* Weird that GetApproximateSizes() returns void, although documentation
    * says that it returns a Status object.
   if (!st.ok()) {
@@ -161,7 +161,7 @@ Options::Dump(Logger* log) const
     Log(log," Options.disableDataSync: %d", disableDataSync);
     Log(log," Options.use_fsync: %d", use_fsync);
     Log(log," Options.max_log_file_size: %ld", max_log_file_size);
-    Log(log,"Options.max_manifest_file_size: %ld",
+    Log(log,"Options.max_manifest_file_size: %llu",
        max_manifest_file_size);
     Log(log," Options.log_file_time_to_roll: %ld", log_file_time_to_roll);
     Log(log," Options.keep_log_file_num: %ld", keep_log_file_num);
@@ -192,7 +192,7 @@ Options::Dump(Logger* log) const
        target_file_size_base);
     Log(log," Options.target_file_size_multiplier: %d",
        target_file_size_multiplier);
-    Log(log," Options.max_bytes_for_level_base: %ld",
+    Log(log," Options.max_bytes_for_level_base: %llu",
        max_bytes_for_level_base);
     Log(log," Options.max_bytes_for_level_multiplier: %d",
        max_bytes_for_level_multiplier);
@@ -200,7 +200,7 @@ Options::Dump(Logger* log) const
       Log(log,"Options.max_bytes_for_level_multiplier_addtl[%d]: %d",
          i, max_bytes_for_level_multiplier_additional[i]);
     }
-    Log(log," Options.max_sequential_skip_in_iterations: %ld",
+    Log(log," Options.max_sequential_skip_in_iterations: %llu",
        max_sequential_skip_in_iterations);
     Log(log," Options.expanded_compaction_factor: %d",
        expanded_compaction_factor);
@@ -222,7 +222,7 @@ Options::Dump(Logger* log) const
        table_cache_remove_scan_count_limit);
     Log(log," Options.arena_block_size: %ld",
        arena_block_size);
-    Log(log," Options.delete_obsolete_files_period_micros: %ld",
+    Log(log," Options.delete_obsolete_files_period_micros: %llu",
        delete_obsolete_files_period_micros);
     Log(log," Options.max_background_compactions: %d",
        max_background_compactions);
@@ -236,9 +236,9 @@ Options::Dump(Logger* log) const
        rate_limit_delay_max_milliseconds);
     Log(log," Options.disable_auto_compactions: %d",
        disable_auto_compactions);
-    Log(log," Options.WAL_ttl_seconds: %ld",
+    Log(log," Options.WAL_ttl_seconds: %llu",
        WAL_ttl_seconds);
-    Log(log," Options.WAL_size_limit_MB: %ld",
+    Log(log," Options.WAL_size_limit_MB: %llu",
        WAL_size_limit_MB);
     Log(log," Options.manifest_preallocation_size: %ld",
        manifest_preallocation_size);
@@ -264,7 +264,7 @@ Options::Dump(Logger* log) const
        access_hints[access_hint_on_compaction_start]);
     Log(log," Options.use_adaptive_mutex: %d",
        use_adaptive_mutex);
-    Log(log," Options.bytes_per_sync: %ld",
+    Log(log," Options.bytes_per_sync: %llu",
        bytes_per_sync);
     Log(log," Options.filter_deletes: %d",
        filter_deletes);
@@ -138,7 +138,7 @@ VectorRep::Iterator::Iterator(class VectorRep* vrep,
                               const KeyComparator& compare)
 : vrep_(vrep),
   bucket_(bucket),
-  cit_(nullptr),
+  cit_(bucket_->end()),
   compare_(compare),
   sorted_(false) { }

@@ -41,39 +41,14 @@ Options RedisListsTest::options = Options();
 // operator== and operator<< are defined below for vectors (lists)
 // Needed for ASSERT_EQ

-// Compare two lists for equality.
-bool operator==(const std::vector<std::string>& a,
-                const std::vector<std::string>& b) {
-  if (a.size() != b.size()) {
-    return false;
-  }
-
-  int n = a.size();
-  for (int i=0; i<n; ++i) {
-    if (a[i] != b[i]) {
-      return false;
-    }
-  }
-
-  return true;
-}
-
-// Print out a list
-ostream& operator<<(ostream& out, const std::vector<std::string>& vec) {
-  out << "[";
-  int n = vec.size();
-  for(int i=0; i<n; ++i) {
-    if (i > 0) {
-      out << ", ";
-    }
-    out << vec[i];
-  }
-  out << "]";
-  return out;
+void AssertListEq(const std::vector<std::string>& result,
+                  const std::vector<std::string>& expected_result) {
+  ASSERT_EQ(result.size(), expected_result.size());
+  for (size_t i = 0; i < result.size(); ++i) {
+    ASSERT_EQ(result[i], expected_result[i]);
+  }
 }

 /// THE TEST CASES BEGIN HERE

 // PushRight, Length, Index, Range
 TEST(RedisListsTest, SimpleTest) {
   RedisLists redis(kDefaultDbName, options, true);   // Destructive
@@ -100,7 +75,7 @@ TEST(RedisListsTest, SimpleTest) {
   expected_result[0] = "v1";
   expected_result[1] = "v2";
   expected_result[2] = "v3";
-  ASSERT_EQ(result, expected_result);  // Uses my overloaded operator==() above
+  AssertListEq(result, expected_result);
 }

 // PushLeft, Length, Index, Range
@@ -129,7 +104,7 @@ TEST(RedisListsTest, SimpleTest2) {
   expected_result[0] = "v1";
   expected_result[1] = "v2";
   expected_result[2] = "v3";
-  ASSERT_EQ(result, expected_result);  // Uses my overloaded operator==() above
+  AssertListEq(result, expected_result);
 }

 // Exhaustive test of the Index() function
@@ -16,8 +16,7 @@ DBWithTTL::DBWithTTL(const int32_t ttl,
                      const std::string& dbname,
                      Status& st,
                      bool read_only)
-    : StackableDB(nullptr),
-      ttl_(ttl) {
+    : StackableDB(nullptr) {
   Options options_to_open = options;

   if (options.compaction_filter) {
@@ -110,7 +110,6 @@ class DBWithTTL : public StackableDB {

  private:
   DB* db_;
-  int32_t ttl_;
   unique_ptr<CompactionFilter> ttl_comp_filter_;
 };

@@ -126,7 +126,7 @@ class TtlTest {
     static FlushOptions flush_opts;
     kv_it_ = kvmap_.begin();
     advance(kv_it_, start_pos_map);
-    for (int i = 0; kv_it_ != kvmap_.end(), i < num_entries; i++, kv_it_++) {
+    for (int i = 0; kv_it_ != kvmap_.end() && i < num_entries; i++, kv_it_++) {
       ASSERT_OK(db_ttl_->Put(wopts, kv_it_->first, kv_it_->second));
     }
     // Put a mock kv at the end because CompactionFilter doesn't delete last key
@@ -175,7 +175,7 @@ class TtlTest {
     kv_it_ = kvmap_.begin();
     advance(kv_it_, st_pos);
     std::string v;
-    for (int i = 0; kv_it_ != kvmap_.end(), i < span; i++, kv_it_++) {
+    for (int i = 0; kv_it_ != kvmap_.end() && i < span; i++, kv_it_++) {
       Status s = db_ttl_->Get(ropts, kv_it_->first, &v);
       if (s.ok() != check) {
         fprintf(stderr, "key=%s ", kv_it_->first.c_str());
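The last two hunks fix a genuine bug, not just a warning: in the expression "a, b" the comma operator evaluates and discards the left operand, so only the right condition guarded the loop and the iterator bound was silently ignored. A standalone sketch, not from the diff:

    #include <map>
    #include <string>

    void Demo(const std::map<std::string, std::string>& kvmap, int n) {
      auto it = kvmap.begin();
      // for (int i = 0; it != kvmap.end(), i < n; i++, it++) {}  // tests only i < n
      for (int i = 0; it != kvmap.end() && i < n; i++, it++) {
        // both bounds are now enforced
      }
    }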