Fix the warning messages captured under Mac OS

Summary: The work to make RocksDB compile on Mac OS is not complete yet, but at least we can start cleaning up the warnings that are captured only by g++ on Mac OS.

Test Plan: ran make on Mac OS

Reviewers: dhruba

Reviewed By: dhruba

CC: leveldb

Differential Revision: https://reviews.facebook.net/D14049
kailiu 2013-11-12 20:05:28 -08:00
parent c3dda7276c
commit 21587760b9
25 changed files with 88 additions and 113 deletions


@@ -74,8 +74,8 @@ TOOLS = \
 	sst_dump \
 	db_stress \
 	ldb \
 	db_repl_stress \
 	blob_store_bench
 PROGRAMS = db_bench signal_test $(TESTS) $(TOOLS)
 BENCHMARKS = db_bench_sqlite3 db_bench_tree_db table_reader_bench
@@ -131,7 +131,7 @@ coverage:
 	COVERAGEFLAGS="-fprofile-arcs -ftest-coverage" LDFLAGS+="-lgcov" $(MAKE) all check
 	(cd coverage; ./coverage_test.sh)
 	# Delete intermediate files
-	find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" | xargs --no-run-if-empty rm
+	find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
 check: all $(PROGRAMS) $(TESTS) $(TOOLS) ldb_tests
 	for t in $(TESTS); do echo "***** Running $$t"; ./$$t || exit 1; done
@@ -164,8 +164,8 @@ valgrind_check: all $(PROGRAMS) $(TESTS)
 clean:
 	-rm -f $(PROGRAMS) $(BENCHMARKS) $(LIBRARY) $(SHARED) $(MEMENVLIBRARY) build_config.mk
 	-rm -rf ios-x86/* ios-arm/*
-	-find . -name "*.[od]" | xargs --no-run-if-empty rm
-	-find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" | xargs --no-run-if-empty rm
+	-find . -name "*.[od]" -exec rm {} \;
+	-find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \;
 tags:
 	ctags * -R
 	cscope -b `find . -name '*.cc'` `find . -name '*.h'`
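A note on the two Makefile hunks above: `--no-run-if-empty` is a GNU xargs extension, and the BSD xargs that ships with Mac OS rejects it as an illegal option. `find ... -exec rm {} \;` is specified by POSIX, so it behaves identically on both platforms (at the cost of one rm invocation per file).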


@@ -71,7 +71,7 @@ GENERIC_PORT_FILES=`find $ROCKSDB_ROOT/port -name '*.cc' | tr "\n" " "`
 case "$TARGET_OS" in
     Darwin)
         PLATFORM=OS_MACOSX
-        COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -DOS_MACOSX"
+        COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX"
         PLATFORM_SHARED_EXT=dylib
         PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name "
         # PORT_FILES=port/darwin/darwin_specific.cc


@@ -17,7 +17,7 @@ struct Options;
 struct FileMetaData;
 class Env;
-class EnvOptions;
+struct EnvOptions;
 class Iterator;
 class TableCache;
 class VersionEdit;
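This is the first of several forward-declaration fixes in this commit (see also `Options` and `SliceParts` below): the Mac OS compiler warns under -Wmismatched-tags when a type defined with `struct` is forward-declared with `class`. The two class-keys refer to the same type, so this is purely a warning fix. A minimal sketch of the pattern:

    // EnvOptions is defined as a struct elsewhere:
    struct EnvOptions { /* ... */ };

    // Forward-declaring it with the other class-key is legal C++ but draws
    // -Wmismatched-tags on clang and the Apple toolchain:
    //   class EnvOptions;  // warning: 'EnvOptions' declared as a class here,
    //                      //          but previously defined as a struct
    struct EnvOptions;      // consistent tag: no warning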


@@ -557,7 +557,7 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
         // evict from cache
         table_cache_->Evict(number);
       }
-      Log(options_.info_log, "Delete type=%d #%lu", int(type), number);
+      Log(options_.info_log, "Delete type=%d #%llu", int(type), number);
       Status st;
       if (type == kLogFile && (options_.WAL_ttl_seconds > 0 ||
@@ -566,12 +566,12 @@ void DBImpl::PurgeObsoleteFiles(DeletionState& state) {
                                ArchivedLogFileName(options_.wal_dir,
                                                    number));
         if (!st.ok()) {
-          Log(options_.info_log, "RenameFile logfile #%lu FAILED", number);
+          Log(options_.info_log, "RenameFile logfile #%llu FAILED", number);
         }
       } else {
         st = env_->DeleteFile(dbname_ + "/" + state.all_files[i]);
         if (!st.ok()) {
-          Log(options_.info_log, "Delete type=%d #%lu FAILED\n",
+          Log(options_.info_log, "Delete type=%d #%llu FAILED\n",
               int(type), number);
         }
       }
@@ -1000,7 +1000,7 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
   std::vector<Iterator*> list;
   for (MemTable* m : mems) {
     Log(options_.info_log,
-        "Flushing memtable with log file: %lu\n",
+        "Flushing memtable with log file: %llu\n",
         m->GetLogNumber());
     list.push_back(m->NewIterator());
   }
@@ -1009,8 +1009,7 @@ Status DBImpl::WriteLevel0Table(std::vector<MemTable*> &mems, VersionEdit* edit,
   const SequenceNumber newest_snapshot = snapshots_.GetNewest();
   const SequenceNumber earliest_seqno_in_memtable =
     mems[0]->GetFirstSequenceNumber();
-  Log(options_.info_log, "Level-0 flush table #%llu: started",
-      (unsigned long long) meta.number);
+  Log(options_.info_log, "Level-0 flush table #%llu: started", meta.number);
   Version* base = versions_->current();
   base->Ref(); // it is likely that we do not need this reference
@@ -1346,7 +1345,7 @@ Status DBImpl::ReadFirstRecord(const WalFileType type, const uint64_t number,
     Status status = ReadFirstLine(fname, result);
     return status;
   }
-  return Status::NotSupported("File Type Not Known: " + type);
+  return Status::NotSupported("File Type Not Known: " + std::to_string(type));
 }
 Status DBImpl::ReadFirstLine(const std::string& fname,
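The `NotSupported` change just above is a real bug fix hiding among the warning cleanups: with a `const char*` on the left, `"File Type Not Known: " + type` is pointer arithmetic, not concatenation — it offsets `type` characters into the string literal (and out of bounds for large values). A minimal sketch of the two behaviors:

    #include <iostream>
    #include <string>

    int main() {
      int type = 5;
      // Pointer arithmetic: skips the first five characters of the literal.
      std::cout << ("File Type Not Known: " + type) << "\n";
      // prints "Type Not Known: "
      // Concatenation, as intended:
      std::cout << ("File Type Not Known: " + std::to_string(type)) << "\n";
      // prints "File Type Not Known: 5"
      return 0;
    }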
@@ -2030,7 +2029,7 @@ inline SequenceNumber DBImpl::findEarliestVisibleSnapshot(
     assert(prev);
   }
   Log(options_.info_log,
-      "Looking for seqid %ld but maxseqid is %ld", in,
+      "Looking for seqid %llu but maxseqid is %llu", in,
       snapshots[snapshots.size()-1]);
   assert(0);
   return 0;
@@ -3061,7 +3060,7 @@ Status DBImpl::MakeRoomForWrite(bool force) {
         internal_comparator_, mem_rep_factory_, NumberLevels(), options_);
     mem_->Ref();
     Log(options_.info_log,
-        "New memtable created with log file: #%lu\n",
+        "New memtable created with log file: #%llu\n",
         logfile_number_);
     mem_->SetLogNumber(logfile_number_);
     force = false; // Do not force another compaction if have room
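The bulk of this commit is the `%lu` to `%llu` substitution in log statements like the ones above. The values being printed are `uint64_t` (file, log, and sequence numbers), and the Mac OS headers define `uint64_t` as `unsigned long long`, so g++ there flags `%lu` under -Wformat. The flip side is that `%llu` in turn mismatches 64-bit Linux, where `uint64_t` is `unsigned long`; a sketch of the fully portable alternatives, either the `<cinttypes>` macros or an explicit cast:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      uint64_t number = 42;
      // PRIu64 expands to the right conversion specifier for this platform.
      std::printf("Delete type=%d #%" PRIu64 "\n", 2, number);
      // Or pin the argument type so "%llu" is correct everywhere.
      std::printf("Delete type=%d #%llu\n", 2,
                  static_cast<unsigned long long>(number));
      return 0;
    }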


@@ -3644,7 +3644,7 @@ TEST(DBTest, SnapshotFiles) {
       char buffer[4096];
       Slice slice;
       while (size > 0) {
-        uint64_t one = std::min(sizeof(buffer), size);
+        uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
         ASSERT_OK(srcfile->Read(one, &slice, buffer));
         ASSERT_OK(destfile->Append(slice));
         size -= slice.size();
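The `std::min` change above is about template argument deduction: `sizeof(buffer)` yields `size_t` while `size` is `uint64_t`, and when those are different types `std::min(a, b)` cannot deduce a single `T` and fails to compile. Casting one argument, as the diff does, or spelling out the template argument both work; a minimal sketch:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    uint64_t bytes_to_read(uint64_t remaining) {
      constexpr std::size_t kBufferSize = 4096;  // size_t, like sizeof(buffer)
      // std::min(kBufferSize, remaining) fails to compile where size_t and
      // uint64_t are distinct types; name the template argument instead.
      return std::min<uint64_t>(kBufferSize, remaining);
    }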


@@ -122,7 +122,7 @@ Status MemTableList::InstallMemtableFlushResults(
       break;
     }
-    Log(info_log, "Level-0 commit table #%lu started", m->file_number_);
+    Log(info_log, "Level-0 commit table #%llu started", m->file_number_);
     // this can release and reacquire the mutex.
     s = vset->LogAndApply(&m->edit_, mu);
@@ -133,7 +133,7 @@ Status MemTableList::InstallMemtableFlushResults(
     do {
       if (s.ok()) { // commit new state
         Log(info_log,
-            "Level-0 commit table #%lu: memtable #%lu done",
+            "Level-0 commit table #%llu: memtable #%llu done",
             m->file_number_,
             mem_id);
         memlist_.remove(m);
@@ -149,7 +149,7 @@ Status MemTableList::InstallMemtableFlushResults(
       } else {
         //commit failed. setup state so that we can flush again.
         Log(info_log,
-            "Level-0 commit table #%lu: memtable #%lu failed",
+            "Level-0 commit table #%llu: memtable #%llu failed",
             m->file_number_,
             mem_id);
         m->flush_completed_ = false;


@@ -88,7 +88,6 @@ class MergeHelper {
   const Comparator* user_comparator_;
   const MergeOperator* user_merge_operator_;
   Logger* logger_;
-  Iterator* iter_; // in: the internal iterator, positioned at the first merge entry
   bool assert_valid_internal_key_; // enforce no internal key corruption?
   // the scratch area that holds the result of MergeUntil
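This deletion, like `owns_cache_`, `last_id_`, and `ttl_` in later files, removes a member that is initialized but never read. The clang-based Mac OS toolchain reports these as -Wunused-private-field, and the field only wastes space in the object. A sketch with a hypothetical class:

    class Widget {
     public:
      explicit Widget(int size) : size_(size) {}
      int size() const { return size_; }

     private:
      int size_;
      // int spare_;  // would draw: private field 'spare_' is not used
                      // [-Wunused-private-field]
    };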


@@ -102,7 +102,6 @@ class Repairer {
   InternalKeyComparator const icmp_;
   InternalFilterPolicy const ipolicy_;
   Options const options_;
-  bool owns_cache_;
   TableCache* table_cache_;
   VersionEdit* edit_;


@@ -319,7 +319,7 @@ SimpleTableIterator::~SimpleTableIterator() {
 }
 bool SimpleTableIterator::Valid() const {
-  return offset_ < table_->rep_->index_start_offset && offset_ >= 0;
+  return offset_ < table_->rep_->index_start_offset;
 }
 void SimpleTableIterator::SeekToFirst() {


@@ -205,8 +205,9 @@ bool TransactionLogIteratorImpl::IsBatchExpected(
   if (batchSeq != expectedSeq) {
     char buf[200];
     snprintf(buf, sizeof(buf),
-             "Discontinuity in log records. Got seq=%lu, Expected seq=%lu, "
-             "Last flushed seq=%lu.Log iterator will reseek the correct batch.",
+             "Discontinuity in log records. Got seq=%llu, Expected seq=%llu, "
+             "Last flushed seq=%llu.Log iterator will reseek the correct "
+             "batch.",
             batchSeq, expectedSeq, dbimpl_->GetLatestSequenceNumber());
     reporter_.Info(buf);
     return false;
@@ -224,8 +225,10 @@ void TransactionLogIteratorImpl::UpdateCurrentWriteBatch(const Slice& record) {
     // Seek to the batch having expected sequence number
     if (expectedSeq < files_->at(currentFileIndex_)->StartSequence()) {
       // Expected batch must lie in the previous log file
-      currentFileIndex_--;
-      currentFileIndex_ = (currentFileIndex_ >= 0) ? currentFileIndex_ : 0;
+      // Avoid underflow.
+      if (currentFileIndex_ != 0) {
+        currentFileIndex_--;
+      }
     }
     startingSequenceNumber_ = expectedSeq;
     // currentStatus_ will be set to Ok if reseek succeeds
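The index fix above replaces a check that can never fire: `currentFileIndex_` is an unsigned type, so decrementing it at zero wraps around to a huge value, and the follow-up `currentFileIndex_ >= 0` is a tautology that the compiler flags ("comparison of unsigned expression >= 0 is always true"). Dropping `offset_ >= 0` in the SimpleTableIterator hunk earlier fixes the same class of warning. Testing before decrementing is the usual pattern:

    #include <cstddef>

    std::size_t previous_index(std::size_t index) {
      // Wrong: decrement, then clamp. "index >= 0" is always true for an
      // unsigned type, and index 0 has already wrapped to SIZE_MAX.
      //   index--;
      //   index = (index >= 0) ? index : 0;
      // Right: only step back when there is somewhere to step to.
      if (index != 0) {
        index--;
      }
      return index;
    }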


@@ -1322,7 +1322,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
   if (s.ok() && old_manifest_file_number < manifest_file_number_) {
     // delete old manifest file
     Log(options_->info_log,
-        "Deleting manifest %lu current manifest %lu\n",
+        "Deleting manifest %llu current manifest %llu\n",
         old_manifest_file_number, manifest_file_number_);
     // we don't care about an error here, PurgeObsoleteFiles will take care
     // of it later
@@ -1348,7 +1348,7 @@ Status VersionSet::LogAndApply(VersionEdit* edit, port::Mutex* mu,
     prev_log_number_ = edit->prev_log_number_;
   } else {
-    Log(options_->info_log, "Error in committing version %ld",
+    Log(options_->info_log, "Error in committing version %llu",
         v->GetVersionNumber());
     delete v;
     if (!new_manifest_file.empty()) {
@@ -1521,9 +1521,9 @@ Status VersionSet::Recover() {
   prev_log_number_ = prev_log_number;
   Log(options_->info_log, "Recovered from manifest file:%s succeeded,"
-      "manifest_file_number is %ld, next_file_number is %ld, "
-      "last_sequence is %ld, log_number is %ld,"
-      "prev_log_number is %ld\n",
+      "manifest_file_number is %llu, next_file_number is %llu, "
+      "last_sequence is %llu, log_number is %llu,"
+      "prev_log_number is %llu\n",
       current.c_str(), manifest_file_number_, next_file_number_,
       last_sequence_, log_number_, prev_log_number_);
 }
@@ -1647,7 +1647,8 @@ Status VersionSet::DumpManifest(Options& options, std::string& dscname,
   log_number_ = log_number;
   prev_log_number_ = prev_log_number;
-  printf("manifest_file_number %ld next_file_number %ld last_sequence %ld log_number %ld prev_log_number %ld\n",
+  printf("manifest_file_number %llu next_file_number %llu last_sequence "
+         "%llu log_number %llu prev_log_number %llu\n",
         manifest_file_number_, next_file_number_,
         last_sequence, log_number, prev_log_number);
   printf("%s \n", v->DebugString(hex).c_str());
@@ -1863,7 +1864,7 @@ const char* VersionSet::LevelDataSizeSummary(
   int len = snprintf(scratch->buffer, sizeof(scratch->buffer), "files_size[");
   for (int i = 0; i < NumberLevels(); i++) {
     int sz = sizeof(scratch->buffer) - len;
-    int ret = snprintf(scratch->buffer + len, sz, "%ld ",
+    int ret = snprintf(scratch->buffer + len, sz, "%llu ",
                        NumLevelBytes(i));
     if (ret < 0 || ret >= sz)
       break;
@@ -1879,7 +1880,8 @@ const char* VersionSet::LevelFileSummary(
   for (unsigned int i = 0; i < current_->files_[level].size(); i++) {
     FileMetaData* f = current_->files_[level][i];
     int sz = sizeof(scratch->buffer) - len;
-    int ret = snprintf(scratch->buffer + len, sz, "#%ld(seq=%ld,sz=%ld,%d) ",
+    int ret = snprintf(scratch->buffer + len, sz,
+                       "#%llu(seq=%llu,sz=%llu,%d) ",
                        f->number, f->smallest_seqno,
                        f->file_size, f->being_compacted);
     if (ret < 0 || ret >= sz)
@@ -2219,7 +2221,7 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
       start_index = loop; // Consider this as the first candidate.
       break;
     }
-    Log(options_->info_log, "Universal: skipping file %ld[%d] compacted %s",
+    Log(options_->info_log, "Universal: skipping file %llu[%d] compacted %s",
         f->number, loop, " cannot be a candidate to reduce size amp.\n");
     f = nullptr;
   }
@@ -2227,7 +2229,7 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
     return nullptr; // no candidate files
   }
-  Log(options_->info_log, "Universal: First candidate file %ld[%d] %s",
+  Log(options_->info_log, "Universal: First candidate file %llu[%d] %s",
      f->number, start_index, " to reduce size amp.\n");
   // keep adding up all the remaining files
@@ -2237,7 +2239,7 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
     f = current_->files_[level][index];
     if (f->being_compacted) {
       Log(options_->info_log,
-          "Universal: Possible candidate file %ld[%d] %s.", f->number, loop,
+          "Universal: Possible candidate file %llu[%d] %s.", f->number, loop,
           " is already being compacted. No size amp reduction possible.\n");
       return nullptr;
     }
@@ -2255,14 +2257,14 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
   // size amplification = percentage of additional size
   if (candidate_size * 100 < ratio * earliest_file_size) {
     Log(options_->info_log,
-        "Universal: size amp not needed. newer-files-total-size %ld "
-        "earliest-file-size %ld",
+        "Universal: size amp not needed. newer-files-total-size %llu "
+        "earliest-file-size %llu",
         candidate_size, earliest_file_size);
     return nullptr;
   } else {
     Log(options_->info_log,
-        "Universal: size amp needed. newer-files-total-size %ld "
-        "earliest-file-size %ld",
+        "Universal: size amp needed. newer-files-total-size %llu "
+        "earliest-file-size %llu",
         candidate_size, earliest_file_size);
   }
   assert(start_index >= 0 && start_index < file_by_time.size() - 1);
@@ -2278,7 +2280,7 @@ Compaction* VersionSet::PickCompactionUniversalSizeAmp(
     f = current_->files_[level][index];
     c->inputs_[0].push_back(f);
     Log(options_->info_log,
-        "Universal: size amp picking file %ld[%d] with size %ld",
+        "Universal: size amp picking file %llu[%d] with size %llu",
         f->number, index, f->file_size);
   }
   return c;
@@ -2325,7 +2327,7 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
       break;
     }
     Log(options_->info_log,
-        "Universal: file %ld[%d] being compacted, skipping",
+        "Universal: file %llu[%d] being compacted, skipping",
         f->number, loop);
     f = nullptr;
   }
@@ -2334,7 +2336,7 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
   // first candidate to be compacted.
   uint64_t candidate_size = f != nullptr? f->file_size : 0;
   if (f != nullptr) {
-    Log(options_->info_log, "Universal: Possible candidate file %ld[%d].",
+    Log(options_->info_log, "Universal: Possible candidate file %llu[%d].",
        f->number, loop);
   }
@@ -2368,7 +2370,7 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
       int index = file_by_time[i];
       FileMetaData* f = current_->files_[level][index];
       Log(options_->info_log,
-          "Universal: Skipping file %ld[%d] with size %ld %d\n",
+          "Universal: Skipping file %llu[%d] with size %llu %d\n",
           f->number, i, f->file_size, f->being_compacted);
     }
   }
@@ -2403,7 +2405,7 @@ Compaction* VersionSet::PickCompactionUniversalReadAmp(
     int index = file_by_time[i];
     FileMetaData* f = current_->files_[level][index];
     c->inputs_[0].push_back(f);
-    Log(options_->info_log, "Universal: Picking file %ld[%d] with size %ld\n",
+    Log(options_->info_log, "Universal: Picking file %llu[%d] with size %llu\n",
         f->number, i, f->file_size);
   }
   return c;
@@ -2790,14 +2792,16 @@ void VersionSet::SetupOtherInputs(Compaction* c) {
     if (expanded1.size() == c->inputs_[1].size() &&
         !FilesInCompaction(expanded1)) {
       Log(options_->info_log,
-          "Expanding@%d %d+%d (%ld+%ld bytes) to %d+%d (%ld+%ld bytes)\n",
+          "Expanding@%d %d+%d (%llu+%llu bytes) to %d+%d (%llu+%llu bytes)\n",
           level,
          int(c->inputs_[0].size()),
          int(c->inputs_[1].size()),
-         long(inputs0_size), long(inputs1_size),
+         inputs0_size,
+         inputs1_size,
          int(expanded0.size()),
          int(expanded1.size()),
-         long(expanded0_size), long(inputs1_size));
+         expanded0_size,
+         inputs1_size);
       smallest = new_start;
       largest = new_limit;
       c->inputs_[0] = expanded0;
@@ -3087,7 +3091,7 @@ static void InputSummary(std::vector<FileMetaData*>& files,
   int write = 0;
   for (unsigned int i = 0; i < files.size(); i++) {
     int sz = len - write;
-    int ret = snprintf(output + write, sz, "%lu(%lu) ",
+    int ret = snprintf(output + write, sz, "%llu(%llu) ",
                        files.at(i)->number,
                        files.at(i)->file_size);
     if (ret < 0 || ret >= sz)
@@ -3098,7 +3102,7 @@
 void Compaction::Summary(char* output, int len) {
   int write = snprintf(output, len,
-      "Base version %ld Base level %d, seek compaction:%d, inputs:",
+      "Base version %llu Base level %d, seek compaction:%d, inputs:",
       input_version_->GetVersionNumber(), level_, seek_compaction_);
   if (write < 0 || write > len) {
     return;


@@ -29,7 +29,7 @@ class SequentialFile;
 class Slice;
 class WritableFile;
 class RandomRWFile;
-class Options;
+struct Options;
 using std::unique_ptr;
 using std::shared_ptr;
@@ -424,7 +424,7 @@ class WritableFile {
   // This asks the OS to initiate flushing the cached data to disk,
   // without waiting for completion.
   // Default implementation does nothing.
-  virtual Status RangeSync(off64_t offset, off64_t nbytes) {
+  virtual Status RangeSync(off_t offset, off_t nbytes) {
     return Status::OK();
   }


@@ -27,7 +27,7 @@
 namespace rocksdb {
 class Slice;
-class SliceParts;
+struct SliceParts;
 class WriteBatch {
  public:


@@ -33,7 +33,7 @@ namespace rocksdb {
 namespace {
 struct BytewiseLessThan {
-  bool operator()(const std::string& key1, const std::string& key2) {
+  bool operator()(const std::string& key1, const std::string& key2) const {
     // smaller entries will be placed in front.
     return comparator->Compare(key1, key2) <= 0;
   }
@@ -70,7 +70,7 @@ void LogStatsCollectionError(
   std::string msg =
       "[Warning] encountered error when calling TableStatsCollector::" +
       method + "() with collector name: " + name;
-  Log(info_log, msg.c_str());
+  Log(info_log, "%s", msg.c_str());
 }
 } // anonymous namespace
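The `Log(info_log, msg.c_str())` to `Log(info_log, "%s", msg.c_str())` changes here and in the table reader below fix -Wformat-security: when a runtime string is used as the format, any `%` that finds its way into the message (a key name, a user-supplied value) is interpreted as a conversion specifier and reads arguments that were never passed. A minimal sketch with printf:

    #include <cstdio>
    #include <string>

    void report(const std::string& msg) {
      // Dangerous: a '%' inside msg makes printf read missing arguments.
      //   std::printf(msg.c_str());  // -Wformat-security
      // Safe: the message is data; the format string is a literal.
      std::printf("%s\n", msg.c_str());
    }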


@@ -249,7 +249,7 @@ void BlockBasedTable::ReadMeta(const Footer& footer) {
     auto err_msg =
       "[Warning] Encountered error while reading data from stats block " +
       s.ToString();
-    Log(rep_->options.info_log, err_msg.c_str());
+    Log(rep_->options.info_log, "%s", err_msg.c_str());
   }
 }
@@ -341,7 +341,7 @@ Status BlockBasedTable::ReadStats(const Slice& handle_value, Rep* rep) {
       auto error_msg =
         "[Warning] detect malformed value in stats meta-block:"
         "\tkey: " + key + "\tval: " + raw_val.ToString();
-      Log(rep->options.info_log, error_msg.c_str());
+      Log(rep->options.info_log, "%s", error_msg.c_str());
       continue;
     }
     *(pos->second) = val;


@@ -135,7 +135,7 @@ Status BlobStore::Put(const Slice& value, Blob* blob) {
   if (!s.ok()) {
     return s;
   }
-  size_t size_left = value.size();
+  auto size_left = (uint64_t) value.size();
   uint64_t offset = 0; // in bytes, not blocks
   for (auto chunk : blob->chunks) {


@@ -173,7 +173,6 @@ class LRUCache {
   // mutex_ protects the following state.
   port::Mutex mutex_;
   size_t usage_;
-  uint64_t last_id_;
   // Dummy head of LRU list.
   // lru.prev is newest entry, lru.next is oldest entry.
@@ -183,8 +182,7 @@
 };
 LRUCache::LRUCache()
-    : usage_(0),
-      last_id_(0) {
+    : usage_(0) {
   // Make empty circular linked list
   lru_.next = &lru_;
   lru_.prev = &lru_;
@@ -406,7 +404,7 @@ class ShardedLRUCache : public Cache {
     MutexLock l(&id_mutex_);
     return ++(last_id_);
   }
-  virtual uint64_t GetCapacity() {
+  virtual size_t GetCapacity() {
     return capacity_;
   }
 };


@@ -165,7 +165,7 @@ std::string HistogramImpl::ToString() const {
     if (buckets_[b] <= 0.0) continue;
     sum += buckets_[b];
     snprintf(buf, sizeof(buf),
-             "[ %7ld, %7ld ) %8ld %7.3f%% %7.3f%% ",
+             "[ %7llu, %7llu ) %8llu %7.3f%% %7.3f%% ",
             ((b == 0) ? 0 : bucketMapper.BucketLimit(b-1)), // left
             bucketMapper.BucketLimit(b), // right
             buckets_[b], // count


@@ -570,11 +570,11 @@ void PrintBucketCounts(const vector<uint64_t>& bucket_counts, int ttl_start,
                        int ttl_end, int bucket_size, int num_buckets) {
   int time_point = ttl_start;
   for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) {
-    fprintf(stdout, "Keys in range %s to %s : %lu\n",
+    fprintf(stdout, "Keys in range %s to %s : %llu\n",
            ReadableTime(time_point).c_str(),
            ReadableTime(time_point + bucket_size).c_str(), bucket_counts[i]);
   }
-  fprintf(stdout, "Keys in range %s to %s : %lu\n",
+  fprintf(stdout, "Keys in range %s to %s : %llu\n",
          ReadableTime(time_point).c_str(),
          ReadableTime(ttl_end).c_str(), bucket_counts[num_buckets - 1]);
 }
@@ -1424,7 +1424,7 @@ void ApproxSizeCommand::DoCommand() {
   ranges[0] = Range(start_key_, end_key_);
   uint64_t sizes[1];
   db_->GetApproximateSizes(ranges, 1, sizes);
-  fprintf(stdout, "%ld\n", sizes[0]);
+  fprintf(stdout, "%llu\n", sizes[0]);
   /* Weird that GetApproximateSizes() returns void, although documentation
    * says that it returns a Status object.
   if (!st.ok()) {


@@ -161,7 +161,7 @@ Options::Dump(Logger* log) const
   Log(log," Options.disableDataSync: %d", disableDataSync);
   Log(log," Options.use_fsync: %d", use_fsync);
   Log(log," Options.max_log_file_size: %ld", max_log_file_size);
-  Log(log,"Options.max_manifest_file_size: %ld",
+  Log(log,"Options.max_manifest_file_size: %llu",
      max_manifest_file_size);
   Log(log," Options.log_file_time_to_roll: %ld", log_file_time_to_roll);
   Log(log," Options.keep_log_file_num: %ld", keep_log_file_num);
@@ -192,7 +192,7 @@ Options::Dump(Logger* log) const
      target_file_size_base);
   Log(log," Options.target_file_size_multiplier: %d",
      target_file_size_multiplier);
-  Log(log," Options.max_bytes_for_level_base: %ld",
+  Log(log," Options.max_bytes_for_level_base: %llu",
      max_bytes_for_level_base);
   Log(log," Options.max_bytes_for_level_multiplier: %d",
      max_bytes_for_level_multiplier);
@@ -200,7 +200,7 @@ Options::Dump(Logger* log) const
     Log(log,"Options.max_bytes_for_level_multiplier_addtl[%d]: %d",
        i, max_bytes_for_level_multiplier_additional[i]);
   }
-  Log(log," Options.max_sequential_skip_in_iterations: %ld",
+  Log(log," Options.max_sequential_skip_in_iterations: %llu",
      max_sequential_skip_in_iterations);
   Log(log," Options.expanded_compaction_factor: %d",
      expanded_compaction_factor);
@@ -222,7 +222,7 @@ Options::Dump(Logger* log) const
      table_cache_remove_scan_count_limit);
   Log(log," Options.arena_block_size: %ld",
      arena_block_size);
-  Log(log," Options.delete_obsolete_files_period_micros: %ld",
+  Log(log," Options.delete_obsolete_files_period_micros: %llu",
      delete_obsolete_files_period_micros);
   Log(log," Options.max_background_compactions: %d",
      max_background_compactions);
@@ -236,9 +236,9 @@ Options::Dump(Logger* log) const
      rate_limit_delay_max_milliseconds);
   Log(log," Options.disable_auto_compactions: %d",
      disable_auto_compactions);
-  Log(log," Options.WAL_ttl_seconds: %ld",
+  Log(log," Options.WAL_ttl_seconds: %llu",
      WAL_ttl_seconds);
-  Log(log," Options.WAL_size_limit_MB: %ld",
+  Log(log," Options.WAL_size_limit_MB: %llu",
      WAL_size_limit_MB);
   Log(log," Options.manifest_preallocation_size: %ld",
      manifest_preallocation_size);
@@ -264,7 +264,7 @@ Options::Dump(Logger* log) const
      access_hints[access_hint_on_compaction_start]);
   Log(log," Options.use_adaptive_mutex: %d",
      use_adaptive_mutex);
-  Log(log," Options.bytes_per_sync: %ld",
+  Log(log," Options.bytes_per_sync: %llu",
      bytes_per_sync);
   Log(log," Options.filter_deletes: %d",
      filter_deletes);


@@ -138,7 +138,7 @@ VectorRep::Iterator::Iterator(class VectorRep* vrep,
                               const KeyComparator& compare)
     : vrep_(vrep),
       bucket_(bucket),
-      cit_(nullptr),
+      cit_(bucket_->end()),
       compare_(compare),
       sorted_(false) { }
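`cit_` is a container iterator, and `cit_(nullptr)` only compiles on standard libraries that implement vector iterators as raw pointers; where the iterator is a class type there is no conversion from `nullptr`, and the Mac OS toolchain rejects or warns about it. Initializing from the container, as above, works everywhere. A sketch with hypothetical names:

    #include <string>
    #include <vector>

    struct BucketIterator {
      explicit BucketIterator(std::vector<std::string>* bucket)
          : bucket_(bucket),
            // cit_(nullptr) assumes the iterator is a raw pointer;
            // end() is valid for any iterator implementation.
            cit_(bucket_->end()) {}

      std::vector<std::string>* bucket_;
      std::vector<std::string>::const_iterator cit_;
    };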


@@ -41,39 +41,14 @@ Options RedisListsTest::options = Options();
-// operator== and operator<< are defined below for vectors (lists)
-// Needed for ASSERT_EQ
-// Compare two lists for equality.
-bool operator==(const std::vector<std::string>& a,
-                const std::vector<std::string>& b) {
-  if (a.size() != b.size()) {
-    return false;
-  }
-  int n = a.size();
-  for (int i=0; i<n; ++i) {
-    if (a[i] != b[i]) {
-      return false;
-    }
-  }
-  return true;
-}
-// Print out a list
-ostream& operator<<(ostream& out, const std::vector<std::string>& vec) {
-  out << "[";
-  int n = vec.size();
-  for(int i=0; i<n; ++i) {
-    if (i > 0) {
-      out << ", ";
-    }
-    out << vec[i];
-  }
-  out << "]";
-  return out;
-}
-/// THE TEST CASES BEGIN HERE
+void AssertListEq(const std::vector<std::string>& result,
+                  const std::vector<std::string>& expected_result) {
+  ASSERT_EQ(result.size(), expected_result.size());
+  for (size_t i = 0; i < result.size(); ++i) {
+    ASSERT_EQ(result[i], expected_result[i]);
+  }
+}
 // PushRight, Length, Index, Range
 TEST(RedisListsTest, SimpleTest) {
   RedisLists redis(kDefaultDbName, options, true); // Destructive
@@ -100,7 +75,7 @@ TEST(RedisListsTest, SimpleTest) {
   expected_result[0] = "v1";
   expected_result[1] = "v2";
   expected_result[2] = "v3";
-  ASSERT_EQ(result, expected_result); // Uses my overloaded operator==() above
+  AssertListEq(result, expected_result);
 }
 // PushLeft, Length, Index, Range
@@ -129,7 +104,7 @@ TEST(RedisListsTest, SimpleTest2) {
   expected_result[0] = "v1";
   expected_result[1] = "v2";
   expected_result[2] = "v3";
-  ASSERT_EQ(result, expected_result); // Uses my overloaded operator==() above
+  AssertListEq(result, expected_result);
 }
 // Exhaustive test of the Index() function


@@ -16,8 +16,7 @@ DBWithTTL::DBWithTTL(const int32_t ttl,
                      const std::string& dbname,
                      Status& st,
                      bool read_only)
-    : StackableDB(nullptr),
-      ttl_(ttl) {
+    : StackableDB(nullptr) {
   Options options_to_open = options;
   if (options.compaction_filter) {


@@ -110,7 +110,6 @@ class DBWithTTL : public StackableDB {
  private:
   DB* db_;
-  int32_t ttl_;
   unique_ptr<CompactionFilter> ttl_comp_filter_;
 };


@@ -126,7 +126,7 @@ class TtlTest {
     static FlushOptions flush_opts;
     kv_it_ = kvmap_.begin();
     advance(kv_it_, start_pos_map);
-    for (int i = 0; kv_it_ != kvmap_.end(), i < num_entries; i++, kv_it_++) {
+    for (int i = 0; kv_it_ != kvmap_.end() && i < num_entries; i++, kv_it_++) {
       ASSERT_OK(db_ttl_->Put(wopts, kv_it_->first, kv_it_->second));
     }
     // Put a mock kv at the end because CompactionFilter doesn't delete last key
@@ -175,7 +175,7 @@ class TtlTest {
     kv_it_ = kvmap_.begin();
     advance(kv_it_, st_pos);
     std::string v;
-    for (int i = 0; kv_it_ != kvmap_.end(), i < span; i++, kv_it_++) {
+    for (int i = 0; kv_it_ != kvmap_.end() && i < span; i++, kv_it_++) {
       Status s = db_ttl_->Get(ropts, kv_it_->first, &v);
       if (s.ok() != check) {
         fprintf(stderr, "key=%s ", kv_it_->first.c_str());
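The loop-condition fixes in this test are real bug fixes, not just warning cleanup: in `kv_it_ != kvmap_.end(), i < num_entries` the comma operator evaluates the first comparison and throws the result away, so the end-of-map check never guarded the loop (the compiler warns that the left operand of the comma operator has no effect). A minimal sketch:

    #include <map>
    #include <string>

    int count_up_to(const std::map<std::string, std::string>& kvmap,
                    int limit) {
      int i = 0;
      // Buggy: "it != kvmap.end(), i < limit" -- the comma operator discards
      // the first comparison, so nothing stops the iterator at end().
      // Fixed: require both conditions.
      for (auto it = kvmap.begin(); it != kvmap.end() && i < limit;
           ++it, ++i) {
      }
      return i;
    }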