Turn on -Wshorten-64-to-32 and fix all the errors

Summary:
We need to turn on -Wshorten-64-to-32 for mobile. See D1671432 (internal phabricator) for details.

This diff turns on the warning flag and fixes all the errors. There were also some interesting errors that I might call bugs, especially in PlainTable. Going forward, I think it makes sense to have this flag turned on and be very careful when converting 64-bit to 32-bit variables.

Test Plan: compiles

Reviewers: ljin, rven, yhchiang, sdong

Reviewed By: yhchiang

Subscribers: bobbaldwin, dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D28689
This commit is contained in:
Igor Canadi 2014-11-11 16:47:22 -05:00
parent 113796c493
commit 767777c2bd
106 changed files with 584 additions and 505 deletions

View File

@ -147,7 +147,6 @@ TESTS = \
cuckoo_table_builder_test \ cuckoo_table_builder_test \
cuckoo_table_reader_test \ cuckoo_table_reader_test \
cuckoo_table_db_test \ cuckoo_table_db_test \
write_batch_with_index_test \
flush_job_test \ flush_job_test \
wal_manager_test \ wal_manager_test \
listener_test \ listener_test \

View File

@ -284,6 +284,14 @@ EOF
fi fi
fi fi
# Test whether -Wshorten-64-to-32 is available
$CXX $CFLAGS -x c++ - -o /dev/null -Wshorten-64-to-32 2>/dev/null <<EOF
int main() {}
EOF
if [ "$?" = 0 ]; then
COMMON_FLAGS="$COMMON_FLAGS -Wshorten-64-to-32"
fi
# shall we use HDFS? # shall we use HDFS?
if test "$USE_HDFS"; then if test "$USE_HDFS"; then

12
db/c.cc
View File

@ -385,11 +385,9 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
unsigned char success; unsigned char success;
size_t new_value_len; size_t new_value_len;
char* tmp_new_value = (*full_merge_)( char* tmp_new_value = (*full_merge_)(
state_, state_, key.data(), key.size(), existing_value_data, existing_value_len,
key.data(), key.size(), &operand_pointers[0], &operand_sizes[0], static_cast<int>(n), &success,
existing_value_data, existing_value_len, &new_value_len);
&operand_pointers[0], &operand_sizes[0], n,
&success, &new_value_len);
new_value->assign(tmp_new_value, new_value_len); new_value->assign(tmp_new_value, new_value_len);
if (delete_value_ != nullptr) { if (delete_value_ != nullptr) {
@ -417,7 +415,7 @@ struct rocksdb_mergeoperator_t : public MergeOperator {
size_t new_value_len; size_t new_value_len;
char* tmp_new_value = (*partial_merge_)( char* tmp_new_value = (*partial_merge_)(
state_, key.data(), key.size(), &operand_pointers[0], &operand_sizes[0], state_, key.data(), key.size(), &operand_pointers[0], &operand_sizes[0],
operand_count, &success, &new_value_len); static_cast<int>(operand_count), &success, &new_value_len);
new_value->assign(tmp_new_value, new_value_len); new_value->assign(tmp_new_value, new_value_len);
if (delete_value_ != nullptr) { if (delete_value_ != nullptr) {
@ -2041,7 +2039,7 @@ void rocksdb_options_set_min_level_to_compress(rocksdb_options_t* opt, int level
int rocksdb_livefiles_count( int rocksdb_livefiles_count(
const rocksdb_livefiles_t* lf) { const rocksdb_livefiles_t* lf) {
return lf->rep.size(); return static_cast<int>(lf->rep.size());
} }
const char* rocksdb_livefiles_name( const char* rocksdb_livefiles_name(

View File

@ -132,7 +132,7 @@ static void CmpDestroy(void* arg) { }
static int CmpCompare(void* arg, const char* a, size_t alen, static int CmpCompare(void* arg, const char* a, size_t alen,
const char* b, size_t blen) { const char* b, size_t blen) {
int n = (alen < blen) ? alen : blen; size_t n = (alen < blen) ? alen : blen;
int r = memcmp(a, b, n); int r = memcmp(a, b, n);
if (r == 0) { if (r == 0) {
if (alen < blen) r = -1; if (alen < blen) r = -1;

View File

@ -133,7 +133,7 @@ class ColumnFamilyTest {
void CreateColumnFamilies( void CreateColumnFamilies(
const std::vector<std::string>& cfs, const std::vector<std::string>& cfs,
const std::vector<ColumnFamilyOptions> options = {}) { const std::vector<ColumnFamilyOptions> options = {}) {
int cfi = handles_.size(); int cfi = static_cast<int>(handles_.size());
handles_.resize(cfi + cfs.size()); handles_.resize(cfi + cfs.size());
names_.resize(cfi + cfs.size()); names_.resize(cfi + cfs.size());
for (size_t i = 0; i < cfs.size(); ++i) { for (size_t i = 0; i < cfs.size(); ++i) {
@ -231,7 +231,7 @@ class ColumnFamilyTest {
snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f); snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
result += buf; result += buf;
if (f > 0) { if (f > 0) {
last_non_zero_offset = result.size(); last_non_zero_offset = static_cast<int>(result.size());
} }
} }
result.resize(last_non_zero_offset); result.resize(last_non_zero_offset);
@ -287,8 +287,8 @@ class ColumnFamilyTest {
assert(num_per_cf.size() == handles_.size()); assert(num_per_cf.size() == handles_.size());
for (size_t i = 0; i < num_per_cf.size(); ++i) { for (size_t i = 0; i < num_per_cf.size(); ++i) {
ASSERT_EQ(num_per_cf[i], ASSERT_EQ(num_per_cf[i], GetProperty(static_cast<int>(i),
GetProperty(i, "rocksdb.num-immutable-mem-table")); "rocksdb.num-immutable-mem-table"));
} }
} }
@ -916,11 +916,11 @@ TEST(ColumnFamilyTest, DontRollEmptyLogs) {
CreateColumnFamiliesAndReopen({"one", "two", "three", "four"}); CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});
for (size_t i = 0; i < handles_.size(); ++i) { for (size_t i = 0; i < handles_.size(); ++i) {
PutRandomData(i, 10, 100); PutRandomData(static_cast<int>(i), 10, 100);
} }
int num_writable_file_start = env_->GetNumberOfNewWritableFileCalls(); int num_writable_file_start = env_->GetNumberOfNewWritableFileCalls();
// this will trigger the flushes // this will trigger the flushes
for (size_t i = 0; i <= 4; ++i) { for (int i = 0; i <= 4; ++i) {
ASSERT_OK(Flush(i)); ASSERT_OK(Flush(i));
} }

View File

@ -124,9 +124,9 @@ Compaction::~Compaction() {
void Compaction::GenerateFileLevels() { void Compaction::GenerateFileLevels() {
input_levels_.resize(num_input_levels()); input_levels_.resize(num_input_levels());
for (int which = 0; which < num_input_levels(); which++) { for (size_t which = 0; which < num_input_levels(); which++) {
DoGenerateLevelFilesBrief( DoGenerateLevelFilesBrief(&input_levels_[which], inputs_[which].files,
&input_levels_[which], inputs_[which].files, &arena_); &arena_);
} }
} }
@ -144,7 +144,7 @@ bool Compaction::IsTrivialMove() const {
} }
void Compaction::AddInputDeletions(VersionEdit* out_edit) { void Compaction::AddInputDeletions(VersionEdit* out_edit) {
for (int which = 0; which < num_input_levels(); which++) { for (size_t which = 0; which < num_input_levels(); which++) {
for (size_t i = 0; i < inputs_[which].size(); i++) { for (size_t i = 0; i < inputs_[which].size(); i++) {
out_edit->DeleteFile(level(which), inputs_[which][i]->fd.GetNumber()); out_edit->DeleteFile(level(which), inputs_[which][i]->fd.GetNumber());
} }
@ -207,7 +207,7 @@ bool Compaction::ShouldStopBefore(const Slice& internal_key) {
// Mark (or clear) each file that is being compacted // Mark (or clear) each file that is being compacted
void Compaction::MarkFilesBeingCompacted(bool mark_as_compacted) { void Compaction::MarkFilesBeingCompacted(bool mark_as_compacted) {
for (int i = 0; i < num_input_levels(); i++) { for (size_t i = 0; i < num_input_levels(); i++) {
for (unsigned int j = 0; j < inputs_[i].size(); j++) { for (unsigned int j = 0; j < inputs_[i].size(); j++) {
assert(mark_as_compacted ? !inputs_[i][j]->being_compacted : assert(mark_as_compacted ? !inputs_[i][j]->being_compacted :
inputs_[i][j]->being_compacted); inputs_[i][j]->being_compacted);
@ -293,7 +293,7 @@ void Compaction::Summary(char* output, int len) {
return; return;
} }
for (int level_iter = 0; level_iter < num_input_levels(); ++level_iter) { for (size_t level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
if (level_iter > 0) { if (level_iter > 0) {
write += snprintf(output + write, len - write, "], ["); write += snprintf(output + write, len - write, "], [");
if (write < 0 || write >= len) { if (write < 0 || write >= len) {
@ -317,7 +317,7 @@ uint64_t Compaction::OutputFilePreallocationSize(
if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel) { if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel) {
preallocation_size = mutable_options.MaxFileSizeForLevel(output_level()); preallocation_size = mutable_options.MaxFileSizeForLevel(output_level());
} else { } else {
for (int level_iter = 0; level_iter < num_input_levels(); ++level_iter) { for (size_t level_iter = 0; level_iter < num_input_levels(); ++level_iter) {
for (const auto& f : inputs_[level_iter].files) { for (const auto& f : inputs_[level_iter].files) {
preallocation_size += f->fd.GetFileSize(); preallocation_size += f->fd.GetFileSize();
} }

View File

@ -23,7 +23,7 @@ struct CompactionInputFiles {
inline bool empty() const { return files.empty(); } inline bool empty() const { return files.empty(); }
inline size_t size() const { return files.size(); } inline size_t size() const { return files.size(); }
inline void clear() { files.clear(); } inline void clear() { files.clear(); }
inline FileMetaData* operator[](int i) const { return files[i]; } inline FileMetaData* operator[](size_t i) const { return files[i]; }
}; };
class Version; class Version;
@ -48,7 +48,7 @@ class Compaction {
// Returns the level associated to the specified compaction input level. // Returns the level associated to the specified compaction input level.
// If compaction_input_level is not specified, then input_level is set to 0. // If compaction_input_level is not specified, then input_level is set to 0.
int level(int compaction_input_level = 0) const { int level(size_t compaction_input_level = 0) const {
return inputs_[compaction_input_level].level; return inputs_[compaction_input_level].level;
} }
@ -56,7 +56,7 @@ class Compaction {
int output_level() const { return output_level_; } int output_level() const { return output_level_; }
// Returns the number of input levels in this compaction. // Returns the number of input levels in this compaction.
int num_input_levels() const { return inputs_.size(); } size_t num_input_levels() const { return inputs_.size(); }
// Return the object that holds the edits to the descriptor done // Return the object that holds the edits to the descriptor done
// by this compaction. // by this compaction.
@ -66,7 +66,7 @@ class Compaction {
// compaction input level. // compaction input level.
// The function will return 0 if when "compaction_input_level" < 0 // The function will return 0 if when "compaction_input_level" < 0
// or "compaction_input_level" >= "num_input_levels()". // or "compaction_input_level" >= "num_input_levels()".
int num_input_files(size_t compaction_input_level) const { size_t num_input_files(size_t compaction_input_level) const {
if (compaction_input_level < inputs_.size()) { if (compaction_input_level < inputs_.size()) {
return inputs_[compaction_input_level].size(); return inputs_[compaction_input_level].size();
} }
@ -83,7 +83,7 @@ class Compaction {
// specified compaction input level. // specified compaction input level.
// REQUIREMENT: "compaction_input_level" must be >= 0 and // REQUIREMENT: "compaction_input_level" must be >= 0 and
// < "input_levels()" // < "input_levels()"
FileMetaData* input(size_t compaction_input_level, int i) const { FileMetaData* input(size_t compaction_input_level, size_t i) const {
assert(compaction_input_level < inputs_.size()); assert(compaction_input_level < inputs_.size());
return inputs_[compaction_input_level][i]; return inputs_[compaction_input_level][i];
} }
@ -98,7 +98,7 @@ class Compaction {
} }
// Returns the LevelFilesBrief of the specified compaction input level. // Returns the LevelFilesBrief of the specified compaction input level.
LevelFilesBrief* input_levels(int compaction_input_level) { LevelFilesBrief* input_levels(size_t compaction_input_level) {
return &input_levels_[compaction_input_level]; return &input_levels_[compaction_input_level];
} }

View File

@ -415,32 +415,33 @@ Status CompactionJob::Run() {
} }
compaction_stats_.micros = env_->NowMicros() - start_micros - imm_micros; compaction_stats_.micros = env_->NowMicros() - start_micros - imm_micros;
compaction_stats_.files_in_leveln = compact_->compaction->num_input_files(0); compaction_stats_.files_in_leveln =
static_cast<int>(compact_->compaction->num_input_files(0));
compaction_stats_.files_in_levelnp1 = compaction_stats_.files_in_levelnp1 =
compact_->compaction->num_input_files(1); static_cast<int>(compact_->compaction->num_input_files(1));
MeasureTime(stats_, COMPACTION_TIME, compaction_stats_.micros); MeasureTime(stats_, COMPACTION_TIME, compaction_stats_.micros);
int num_output_files = compact_->outputs.size(); size_t num_output_files = compact_->outputs.size();
if (compact_->builder != nullptr) { if (compact_->builder != nullptr) {
// An error occurred so ignore the last output. // An error occurred so ignore the last output.
assert(num_output_files > 0); assert(num_output_files > 0);
--num_output_files; --num_output_files;
} }
compaction_stats_.files_out_levelnp1 = num_output_files; compaction_stats_.files_out_levelnp1 = static_cast<int>(num_output_files);
for (int i = 0; i < compact_->compaction->num_input_files(0); i++) { for (size_t i = 0; i < compact_->compaction->num_input_files(0); i++) {
compaction_stats_.bytes_readn += compaction_stats_.bytes_readn +=
compact_->compaction->input(0, i)->fd.GetFileSize(); compact_->compaction->input(0, i)->fd.GetFileSize();
compaction_stats_.num_input_records += compaction_stats_.num_input_records +=
static_cast<uint64_t>(compact_->compaction->input(0, i)->num_entries); static_cast<uint64_t>(compact_->compaction->input(0, i)->num_entries);
} }
for (int i = 0; i < compact_->compaction->num_input_files(1); i++) { for (size_t i = 0; i < compact_->compaction->num_input_files(1); i++) {
compaction_stats_.bytes_readnp1 += compaction_stats_.bytes_readnp1 +=
compact_->compaction->input(1, i)->fd.GetFileSize(); compact_->compaction->input(1, i)->fd.GetFileSize();
} }
for (int i = 0; i < num_output_files; i++) { for (size_t i = 0; i < num_output_files; i++) {
compaction_stats_.bytes_written += compact_->outputs[i].file_size; compaction_stats_.bytes_written += compact_->outputs[i].file_size;
} }
if (compact_->num_input_records > compact_->num_output_records) { if (compact_->num_input_records > compact_->num_output_records) {

View File

@ -46,7 +46,7 @@ CompressionType GetCompressionType(
// If the user has specified a different compression level for each level, // If the user has specified a different compression level for each level,
// then pick the compression for that level. // then pick the compression for that level.
if (!ioptions.compression_per_level.empty()) { if (!ioptions.compression_per_level.empty()) {
const int n = ioptions.compression_per_level.size() - 1; const int n = static_cast<int>(ioptions.compression_per_level.size()) - 1;
// It is possible for level_ to be -1; in that case, we use level // It is possible for level_ to be -1; in that case, we use level
// 0's compression. This occurs mostly in backwards compatibility // 0's compression. This occurs mostly in backwards compatibility
// situations when the builder doesn't know what level the file // situations when the builder doesn't know what level the file
@ -75,7 +75,7 @@ void CompactionPicker::SizeBeingCompacted(std::vector<uint64_t>& sizes) {
uint64_t total = 0; uint64_t total = 0;
for (auto c : compactions_in_progress_[level]) { for (auto c : compactions_in_progress_[level]) {
assert(c->level() == level); assert(c->level() == level);
for (int i = 0; i < c->num_input_files(0); i++) { for (size_t i = 0; i < c->num_input_files(0); i++) {
total += c->input(0, i)->compensated_file_size; total += c->input(0, i)->compensated_file_size;
} }
} }
@ -870,7 +870,8 @@ Compaction* UniversalCompactionPicker::PickCompaction(
// If max read amplification is exceeding configured limits, then force // If max read amplification is exceeding configured limits, then force
// compaction without looking at filesize ratios and try to reduce // compaction without looking at filesize ratios and try to reduce
// the number of files to fewer than level0_file_num_compaction_trigger. // the number of files to fewer than level0_file_num_compaction_trigger.
unsigned int num_files = level_files.size() - unsigned int num_files =
static_cast<unsigned int>(level_files.size()) -
mutable_cf_options.level0_file_num_compaction_trigger; mutable_cf_options.level0_file_num_compaction_trigger;
if ((c = PickCompactionUniversalReadAmp( if ((c = PickCompactionUniversalReadAmp(
cf_name, mutable_cf_options, vstorage, score, UINT_MAX, cf_name, mutable_cf_options, vstorage, score, UINT_MAX,
@ -1074,8 +1075,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
if (ratio_to_compress >= 0) { if (ratio_to_compress >= 0) {
uint64_t total_size = vstorage->NumLevelBytes(kLevel0); uint64_t total_size = vstorage->NumLevelBytes(kLevel0);
uint64_t older_file_size = 0; uint64_t older_file_size = 0;
for (unsigned int i = files.size() - 1; for (size_t i = files.size() - 1; i >= first_index_after; i--) {
i >= first_index_after; i--) {
older_file_size += files[i]->fd.GetFileSize(); older_file_size += files[i]->fd.GetFileSize();
if (older_file_size * 100L >= total_size * (long) ratio_to_compress) { if (older_file_size * 100L >= total_size * (long) ratio_to_compress) {
enable_compression = false; enable_compression = false;

View File

@ -109,7 +109,7 @@ TEST(CompactionPickerTest, Level0Trigger) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer)); cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr); ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(2, compaction->num_input_files(0)); ASSERT_EQ(2U, compaction->num_input_files(0));
ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber()); ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber());
ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber()); ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber());
} }
@ -121,7 +121,7 @@ TEST(CompactionPickerTest, Level1Trigger) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer)); cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr); ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(1, compaction->num_input_files(0)); ASSERT_EQ(1U, compaction->num_input_files(0));
ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber()); ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber());
} }
@ -136,8 +136,8 @@ TEST(CompactionPickerTest, Level1Trigger2) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer)); cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr); ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(1, compaction->num_input_files(0)); ASSERT_EQ(1U, compaction->num_input_files(0));
ASSERT_EQ(2, compaction->num_input_files(1)); ASSERT_EQ(2U, compaction->num_input_files(1));
ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber()); ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber());
ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber()); ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber());
ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber()); ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber());
@ -164,7 +164,7 @@ TEST(CompactionPickerTest, LevelMaxScore) {
std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction(
cf_name, mutable_cf_options, &vstorage, &log_buffer)); cf_name, mutable_cf_options, &vstorage, &log_buffer));
ASSERT_TRUE(compaction.get() != nullptr); ASSERT_TRUE(compaction.get() != nullptr);
ASSERT_EQ(1, compaction->num_input_files(0)); ASSERT_EQ(1U, compaction->num_input_files(0));
ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber()); ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber());
} }

View File

@ -82,7 +82,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
} }
int type = rnd->Uniform(2); int type = rnd->Uniform(2);
int index = rnd->Uniform(source_strings.size()); int index = rnd->Uniform(static_cast<int>(source_strings.size()));
auto& key = source_strings[index]; auto& key = source_strings[index];
switch (type) { switch (type) {
case 0: case 0:
@ -124,7 +124,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
break; break;
case 2: { case 2: {
// Seek to random key // Seek to random key
auto key_idx = rnd->Uniform(source_strings.size()); auto key_idx = rnd->Uniform(static_cast<int>(source_strings.size()));
auto key = source_strings[key_idx]; auto key = source_strings[key_idx];
iter->Seek(key); iter->Seek(key);
result_iter->Seek(key); result_iter->Seek(key);
@ -150,7 +150,7 @@ void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings,
break; break;
default: { default: {
assert(type == 5); assert(type == 5);
auto key_idx = rnd->Uniform(source_strings.size()); auto key_idx = rnd->Uniform(static_cast<int>(source_strings.size()));
auto key = source_strings[key_idx]; auto key = source_strings[key_idx];
std::string result; std::string result;
auto status = db->Get(ReadOptions(), key, &result); auto status = db->Get(ReadOptions(), key, &result);
@ -325,7 +325,7 @@ TEST(ComparatorDBTest, SimpleSuffixReverseComparator) {
source_prefixes.push_back(test::RandomHumanReadableString(&rnd, 8)); source_prefixes.push_back(test::RandomHumanReadableString(&rnd, 8));
} }
for (int j = 0; j < 20; j++) { for (int j = 0; j < 20; j++) {
int prefix_index = rnd.Uniform(source_prefixes.size()); int prefix_index = rnd.Uniform(static_cast<int>(source_prefixes.size()));
std::string key = source_prefixes[prefix_index] + std::string key = source_prefixes[prefix_index] +
test::RandomHumanReadableString(&rnd, rnd.Uniform(8)); test::RandomHumanReadableString(&rnd, rnd.Uniform(8));
source_strings.push_back(key); source_strings.push_back(key);

View File

@ -115,8 +115,8 @@ class CorruptionTest {
continue; continue;
} }
missed += (key - next_expected); missed += (key - next_expected);
next_expected = key + 1; next_expected = static_cast<unsigned int>(key + 1);
if (iter->value() != Value(key, &value_space)) { if (iter->value() != Value(static_cast<int>(key), &value_space)) {
bad_values++; bad_values++;
} else { } else {
correct++; correct++;
@ -143,14 +143,14 @@ class CorruptionTest {
if (-offset > sbuf.st_size) { if (-offset > sbuf.st_size) {
offset = 0; offset = 0;
} else { } else {
offset = sbuf.st_size + offset; offset = static_cast<int>(sbuf.st_size + offset);
} }
} }
if (offset > sbuf.st_size) { if (offset > sbuf.st_size) {
offset = sbuf.st_size; offset = static_cast<int>(sbuf.st_size);
} }
if (offset + bytes_to_corrupt > sbuf.st_size) { if (offset + bytes_to_corrupt > sbuf.st_size) {
bytes_to_corrupt = sbuf.st_size - offset; bytes_to_corrupt = static_cast<int>(sbuf.st_size - offset);
} }
// Do it // Do it
@ -177,7 +177,7 @@ class CorruptionTest {
type == filetype && type == filetype &&
static_cast<int>(number) > picked_number) { // Pick latest file static_cast<int>(number) > picked_number) { // Pick latest file
fname = dbname_ + "/" + filenames[i]; fname = dbname_ + "/" + filenames[i];
picked_number = number; picked_number = static_cast<int>(number);
} }
} }
ASSERT_TRUE(!fname.empty()) << filetype; ASSERT_TRUE(!fname.empty()) << filetype;
@ -246,7 +246,8 @@ TEST(CorruptionTest, RecoverWriteError) {
TEST(CorruptionTest, NewFileErrorDuringWrite) { TEST(CorruptionTest, NewFileErrorDuringWrite) {
// Do enough writing to force minor compaction // Do enough writing to force minor compaction
env_.writable_file_error_ = true; env_.writable_file_error_ = true;
const int num = 3 + (Options().write_buffer_size / kValueSize); const int num =
static_cast<int>(3 + (Options().write_buffer_size / kValueSize));
std::string value_storage; std::string value_storage;
Status s; Status s;
bool failed = false; bool failed = false;

View File

@ -92,7 +92,7 @@ class CuckooTableDBTest {
// Return spread of files per level // Return spread of files per level
std::string FilesPerLevel() { std::string FilesPerLevel() {
std::string result; std::string result;
int last_non_zero_offset = 0; size_t last_non_zero_offset = 0;
for (int level = 0; level < db_->NumberLevels(); level++) { for (int level = 0; level < db_->NumberLevels(); level++) {
int f = NumTableFilesAtLevel(level); int f = NumTableFilesAtLevel(level);
char buf[100]; char buf[100];

View File

@ -251,7 +251,8 @@ DEFINE_int32(universal_compression_size_percent, -1,
DEFINE_int64(cache_size, -1, "Number of bytes to use as a cache of uncompressed" DEFINE_int64(cache_size, -1, "Number of bytes to use as a cache of uncompressed"
"data. Negative means use default settings."); "data. Negative means use default settings.");
DEFINE_int32(block_size, rocksdb::BlockBasedTableOptions().block_size, DEFINE_int32(block_size,
static_cast<int32_t>(rocksdb::BlockBasedTableOptions().block_size),
"Number of bytes in a block."); "Number of bytes in a block.");
DEFINE_int32(block_restart_interval, DEFINE_int32(block_restart_interval,
@ -2111,8 +2112,9 @@ class Benchmark {
for (uint64_t i = 0; i < num_; ++i) { for (uint64_t i = 0; i < num_; ++i) {
values_[i] = i; values_[i] = i;
} }
std::shuffle(values_.begin(), values_.end(), std::shuffle(
std::default_random_engine(FLAGS_seed)); values_.begin(), values_.end(),
std::default_random_engine(static_cast<unsigned int>(FLAGS_seed)));
} }
} }

View File

@ -2252,7 +2252,7 @@ SuperVersion* DBImpl::InstallSuperVersion(
MaybeScheduleFlushOrCompaction(); MaybeScheduleFlushOrCompaction();
// Update max_total_in_memory_state_ // Update max_total_in_memory_state_
auto old_memtable_size = 0; size_t old_memtable_size = 0;
if (old) { if (old) {
old_memtable_size = old->mutable_cf_options.write_buffer_size * old_memtable_size = old->mutable_cf_options.write_buffer_size *
old->mutable_cf_options.max_write_buffer_number; old->mutable_cf_options.max_write_buffer_number;
@ -2920,7 +2920,8 @@ Status DBImpl::DelayWrite(uint64_t expiration_time) {
auto delay = write_controller_.GetDelay(); auto delay = write_controller_.GetDelay();
if (write_controller_.IsStopped() == false && delay > 0) { if (write_controller_.IsStopped() == false && delay > 0) {
mutex_.Unlock(); mutex_.Unlock();
env_->SleepForMicroseconds(delay); // hopefully we don't have to sleep more than 2 billion microseconds
env_->SleepForMicroseconds(static_cast<int>(delay));
mutex_.Lock(); mutex_.Lock();
} }

View File

@ -19,7 +19,7 @@
namespace rocksdb { namespace rocksdb {
static uint32_t TestGetTickerCount(const Options& options, static uint64_t TestGetTickerCount(const Options& options,
Tickers ticker_type) { Tickers ticker_type) {
return options.statistics->getTickerCount(ticker_type); return options.statistics->getTickerCount(ticker_type);
} }

View File

@ -668,7 +668,7 @@ class DBTest {
void CreateColumnFamilies(const std::vector<std::string>& cfs, void CreateColumnFamilies(const std::vector<std::string>& cfs,
const Options& options) { const Options& options) {
ColumnFamilyOptions cf_opts(options); ColumnFamilyOptions cf_opts(options);
int cfi = handles_.size(); size_t cfi = handles_.size();
handles_.resize(cfi + cfs.size()); handles_.resize(cfi + cfs.size());
for (auto cf : cfs) { for (auto cf : cfs) {
ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++])); ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
@ -933,7 +933,7 @@ class DBTest {
int num_levels = int num_levels =
(cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[1]); (cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[1]);
std::string result; std::string result;
int last_non_zero_offset = 0; size_t last_non_zero_offset = 0;
for (int level = 0; level < num_levels; level++) { for (int level = 0; level < num_levels; level++) {
int f = NumTableFilesAtLevel(level, cf); int f = NumTableFilesAtLevel(level, cf);
char buf[100]; char buf[100];
@ -947,7 +947,7 @@ class DBTest {
return result; return result;
} }
int CountFiles() { size_t CountFiles() {
std::vector<std::string> files; std::vector<std::string> files;
env_->GetChildren(dbname_, &files); env_->GetChildren(dbname_, &files);
@ -956,10 +956,10 @@ class DBTest {
env_->GetChildren(last_options_.wal_dir, &logfiles); env_->GetChildren(last_options_.wal_dir, &logfiles);
} }
return static_cast<int>(files.size() + logfiles.size()); return files.size() + logfiles.size();
} }
int CountLiveFiles() { size_t CountLiveFiles() {
std::vector<LiveFileMetaData> metadata; std::vector<LiveFileMetaData> metadata;
db_->GetLiveFilesMetaData(&metadata); db_->GetLiveFilesMetaData(&metadata);
return metadata.size(); return metadata.size();
@ -4326,7 +4326,8 @@ TEST(DBTest, RepeatedWritesToSameKey) {
options.num_levels + options.level0_stop_writes_trigger; options.num_levels + options.level0_stop_writes_trigger;
Random rnd(301); Random rnd(301);
std::string value = RandomString(&rnd, 2 * options.write_buffer_size); std::string value =
RandomString(&rnd, static_cast<int>(2 * options.write_buffer_size));
for (int i = 0; i < 5 * kMaxFiles; i++) { for (int i = 0; i < 5 * kMaxFiles; i++) {
ASSERT_OK(Put(1, "key", value)); ASSERT_OK(Put(1, "key", value));
ASSERT_LE(TotalTableFiles(1), kMaxFiles); ASSERT_LE(TotalTableFiles(1), kMaxFiles);
@ -4657,7 +4658,7 @@ TEST(DBTest, CompactionFilterDeletesAll) {
// this will produce empty file (delete compaction filter) // this will produce empty file (delete compaction filter)
ASSERT_OK(db_->CompactRange(nullptr, nullptr)); ASSERT_OK(db_->CompactRange(nullptr, nullptr));
ASSERT_EQ(0, CountLiveFiles()); ASSERT_EQ(0U, CountLiveFiles());
Reopen(options); Reopen(options);
@ -5845,7 +5846,7 @@ TEST(DBTest, DropWrites) {
ASSERT_OK(Put("foo", "v1")); ASSERT_OK(Put("foo", "v1"));
ASSERT_EQ("v1", Get("foo")); ASSERT_EQ("v1", Get("foo"));
Compact("a", "z"); Compact("a", "z");
const int num_files = CountFiles(); const size_t num_files = CountFiles();
// Force out-of-space errors // Force out-of-space errors
env_->drop_writes_.store(true, std::memory_order_release); env_->drop_writes_.store(true, std::memory_order_release);
env_->sleep_counter_.Reset(); env_->sleep_counter_.Reset();
@ -6031,7 +6032,7 @@ TEST(DBTest, FilesDeletedAfterCompaction) {
CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
ASSERT_OK(Put(1, "foo", "v2")); ASSERT_OK(Put(1, "foo", "v2"));
Compact(1, "a", "z"); Compact(1, "a", "z");
const int num_files = CountLiveFiles(); const size_t num_files = CountLiveFiles();
for (int i = 0; i < 10; i++) { for (int i = 0; i < 10; i++) {
ASSERT_OK(Put(1, "foo", "v2")); ASSERT_OK(Put(1, "foo", "v2"));
Compact(1, "a", "z"); Compact(1, "a", "z");
@ -6504,7 +6505,7 @@ TEST(DBTest, FlushOneColumnFamily) {
ASSERT_OK(Put(6, "alyosha", "alyosha")); ASSERT_OK(Put(6, "alyosha", "alyosha"));
ASSERT_OK(Put(7, "popovich", "popovich")); ASSERT_OK(Put(7, "popovich", "popovich"));
for (size_t i = 0; i < 8; ++i) { for (int i = 0; i < 8; ++i) {
Flush(i); Flush(i);
auto tables = ListTableFiles(env_, dbname_); auto tables = ListTableFiles(env_, dbname_);
ASSERT_EQ(tables.size(), i + 1U); ASSERT_EQ(tables.size(), i + 1U);
@ -6848,8 +6849,8 @@ TEST(DBTest, TransactionLogIteratorCorruptedLog) {
// than 1025 entries // than 1025 entries
auto iter = OpenTransactionLogIter(0); auto iter = OpenTransactionLogIter(0);
int count; int count;
int last_sequence_read = ReadRecords(iter, count); SequenceNumber last_sequence_read = ReadRecords(iter, count);
ASSERT_LT(last_sequence_read, 1025); ASSERT_LT(last_sequence_read, 1025U);
// Try to read past the gap, should be able to seek to key1025 // Try to read past the gap, should be able to seek to key1025
auto iter2 = OpenTransactionLogIter(last_sequence_read + 1); auto iter2 = OpenTransactionLogIter(last_sequence_read + 1);
ExpectRecords(1, iter2); ExpectRecords(1, iter2);
@ -8358,7 +8359,7 @@ TEST(DBTest, CompactFilesOnLevelCompaction) {
ColumnFamilyMetaData cf_meta; ColumnFamilyMetaData cf_meta;
dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta); dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
int output_level = cf_meta.levels.size() - 1; int output_level = static_cast<int>(cf_meta.levels.size()) - 1;
for (int file_picked = 5; file_picked > 0; --file_picked) { for (int file_picked = 5; file_picked > 0; --file_picked) {
std::set<std::string> overlapping_file_names; std::set<std::string> overlapping_file_names;
std::vector<std::string> compaction_input_file_names; std::vector<std::string> compaction_input_file_names;

View File

@ -137,7 +137,8 @@ LookupKey::LookupKey(const Slice& _user_key, SequenceNumber s) {
dst = new char[needed]; dst = new char[needed];
} }
start_ = dst; start_ = dst;
dst = EncodeVarint32(dst, usize + 8); // NOTE: We don't support users keys of more than 2GB :)
dst = EncodeVarint32(dst, static_cast<uint32_t>(usize + 8));
kstart_ = dst; kstart_ = dst;
memcpy(dst, _user_key.data(), usize); memcpy(dst, _user_key.data(), usize);
dst += usize; dst += usize;

View File

@ -206,13 +206,19 @@ class LookupKey {
~LookupKey(); ~LookupKey();
// Return a key suitable for lookup in a MemTable. // Return a key suitable for lookup in a MemTable.
Slice memtable_key() const { return Slice(start_, end_ - start_); } Slice memtable_key() const {
return Slice(start_, static_cast<size_t>(end_ - start_));
}
// Return an internal key (suitable for passing to an internal iterator) // Return an internal key (suitable for passing to an internal iterator)
Slice internal_key() const { return Slice(kstart_, end_ - kstart_); } Slice internal_key() const {
return Slice(kstart_, static_cast<size_t>(end_ - kstart_));
}
// Return the user key // Return the user key
Slice user_key() const { return Slice(kstart_, end_ - kstart_ - 8); } Slice user_key() const {
return Slice(kstart_, static_cast<size_t>(end_ - kstart_ - 8));
}
private: private:
// We construct a char array of the form: // We construct a char array of the form:
@ -319,8 +325,8 @@ class IterKey {
void EncodeLengthPrefixedKey(const Slice& key) { void EncodeLengthPrefixedKey(const Slice& key) {
auto size = key.size(); auto size = key.size();
EnlargeBufferIfNeeded(size + VarintLength(size)); EnlargeBufferIfNeeded(size + static_cast<size_t>(VarintLength(size)));
char* ptr = EncodeVarint32(key_, size); char* ptr = EncodeVarint32(key_, static_cast<uint32_t>(size));
memcpy(ptr, key.data(), size); memcpy(ptr, key.data(), size);
} }

View File

@ -17,17 +17,16 @@ namespace rocksdb {
FileIndexer::FileIndexer(const Comparator* ucmp) FileIndexer::FileIndexer(const Comparator* ucmp)
: num_levels_(0), ucmp_(ucmp), level_rb_(nullptr) {} : num_levels_(0), ucmp_(ucmp), level_rb_(nullptr) {}
uint32_t FileIndexer::NumLevelIndex() const { size_t FileIndexer::NumLevelIndex() const { return next_level_index_.size(); }
return next_level_index_.size();
}
uint32_t FileIndexer::LevelIndexSize(uint32_t level) const { size_t FileIndexer::LevelIndexSize(size_t level) const {
return next_level_index_[level].num_index; return next_level_index_[level].num_index;
} }
void FileIndexer::GetNextLevelIndex( void FileIndexer::GetNextLevelIndex(const size_t level, const size_t file_index,
const uint32_t level, const uint32_t file_index, const int cmp_smallest, const int cmp_smallest,
const int cmp_largest, int32_t* left_bound, int32_t* right_bound) const { const int cmp_largest, int32_t* left_bound,
int32_t* right_bound) const {
assert(level > 0); assert(level > 0);
// Last level, no hint // Last level, no hint
@ -69,7 +68,7 @@ void FileIndexer::GetNextLevelIndex(
assert(*right_bound <= level_rb_[level + 1]); assert(*right_bound <= level_rb_[level + 1]);
} }
void FileIndexer::UpdateIndex(Arena* arena, const uint32_t num_levels, void FileIndexer::UpdateIndex(Arena* arena, const size_t num_levels,
std::vector<FileMetaData*>* const files) { std::vector<FileMetaData*>* const files) {
if (files == nullptr) { if (files == nullptr) {
return; return;
@ -90,11 +89,11 @@ void FileIndexer::UpdateIndex(Arena* arena, const uint32_t num_levels,
} }
// L1 - Ln-1 // L1 - Ln-1
for (uint32_t level = 1; level < num_levels_ - 1; ++level) { for (size_t level = 1; level < num_levels_ - 1; ++level) {
const auto& upper_files = files[level]; const auto& upper_files = files[level];
const int32_t upper_size = upper_files.size(); const int32_t upper_size = static_cast<int32_t>(upper_files.size());
const auto& lower_files = files[level + 1]; const auto& lower_files = files[level + 1];
level_rb_[level] = upper_files.size() - 1; level_rb_[level] = static_cast<int32_t>(upper_files.size()) - 1;
if (upper_size == 0) { if (upper_size == 0) {
continue; continue;
} }
@ -129,7 +128,8 @@ void FileIndexer::UpdateIndex(Arena* arena, const uint32_t num_levels,
[](IndexUnit* index, int32_t f_idx) { index->largest_rb = f_idx; }); [](IndexUnit* index, int32_t f_idx) { index->largest_rb = f_idx; });
} }
level_rb_[num_levels_ - 1] = files[num_levels_ - 1].size() - 1; level_rb_[num_levels_ - 1] =
static_cast<int32_t>(files[num_levels_ - 1].size()) - 1;
} }
void FileIndexer::CalculateLB( void FileIndexer::CalculateLB(
@ -137,8 +137,8 @@ void FileIndexer::CalculateLB(
const std::vector<FileMetaData*>& lower_files, IndexLevel* index_level, const std::vector<FileMetaData*>& lower_files, IndexLevel* index_level,
std::function<int(const FileMetaData*, const FileMetaData*)> cmp_op, std::function<int(const FileMetaData*, const FileMetaData*)> cmp_op,
std::function<void(IndexUnit*, int32_t)> set_index) { std::function<void(IndexUnit*, int32_t)> set_index) {
const int32_t upper_size = upper_files.size(); const int32_t upper_size = static_cast<int32_t>(upper_files.size());
const int32_t lower_size = lower_files.size(); const int32_t lower_size = static_cast<int32_t>(lower_files.size());
int32_t upper_idx = 0; int32_t upper_idx = 0;
int32_t lower_idx = 0; int32_t lower_idx = 0;
@ -175,8 +175,8 @@ void FileIndexer::CalculateRB(
const std::vector<FileMetaData*>& lower_files, IndexLevel* index_level, const std::vector<FileMetaData*>& lower_files, IndexLevel* index_level,
std::function<int(const FileMetaData*, const FileMetaData*)> cmp_op, std::function<int(const FileMetaData*, const FileMetaData*)> cmp_op,
std::function<void(IndexUnit*, int32_t)> set_index) { std::function<void(IndexUnit*, int32_t)> set_index) {
const int32_t upper_size = upper_files.size(); const int32_t upper_size = static_cast<int32_t>(upper_files.size());
const int32_t lower_size = lower_files.size(); const int32_t lower_size = static_cast<int32_t>(lower_files.size());
int32_t upper_idx = upper_size - 1; int32_t upper_idx = upper_size - 1;
int32_t lower_idx = lower_size - 1; int32_t lower_idx = lower_size - 1;

View File

@ -42,19 +42,19 @@ class FileIndexer {
public: public:
explicit FileIndexer(const Comparator* ucmp); explicit FileIndexer(const Comparator* ucmp);
uint32_t NumLevelIndex() const; size_t NumLevelIndex() const;
uint32_t LevelIndexSize(uint32_t level) const; size_t LevelIndexSize(size_t level) const;
// Return a file index range in the next level to search for a key based on // Return a file index range in the next level to search for a key based on
// smallest and largest key comparision for the current file specified by // smallest and largest key comparision for the current file specified by
// level and file_index. When *left_index < *right_index, both index should // level and file_index. When *left_index < *right_index, both index should
// be valid and fit in the vector size. // be valid and fit in the vector size.
void GetNextLevelIndex( void GetNextLevelIndex(const size_t level, const size_t file_index,
const uint32_t level, const uint32_t file_index, const int cmp_smallest, const int cmp_smallest, const int cmp_largest,
const int cmp_largest, int32_t* left_bound, int32_t* right_bound) const; int32_t* left_bound, int32_t* right_bound) const;
void UpdateIndex(Arena* arena, const uint32_t num_levels, void UpdateIndex(Arena* arena, const size_t num_levels,
std::vector<FileMetaData*>* const files); std::vector<FileMetaData*>* const files);
enum { enum {
@ -62,7 +62,7 @@ class FileIndexer {
}; };
private: private:
uint32_t num_levels_; size_t num_levels_;
const Comparator* ucmp_; const Comparator* ucmp_;
struct IndexUnit { struct IndexUnit {

View File

@ -22,8 +22,15 @@ class IntComparator : public Comparator {
int Compare(const Slice& a, const Slice& b) const { int Compare(const Slice& a, const Slice& b) const {
assert(a.size() == 8); assert(a.size() == 8);
assert(b.size() == 8); assert(b.size() == 8);
return *reinterpret_cast<const int64_t*>(a.data()) - int64_t diff = *reinterpret_cast<const int64_t*>(a.data()) -
*reinterpret_cast<const int64_t*>(b.data()); *reinterpret_cast<const int64_t*>(b.data());
if (diff < 0) {
return -1;
} else if (diff == 0) {
return 0;
} else {
return 1;
}
} }
const char* Name() const { const char* Name() const {

View File

@ -150,9 +150,9 @@ Status FlushJob::WriteLevel0Table(const autovector<MemTable*>& mems,
memtables.push_back(m->NewIterator(ro, &arena)); memtables.push_back(m->NewIterator(ro, &arena));
} }
{ {
ScopedArenaIterator iter(NewMergingIterator(&cfd_->internal_comparator(), ScopedArenaIterator iter(
&memtables[0], NewMergingIterator(&cfd_->internal_comparator(), &memtables[0],
memtables.size(), &arena)); static_cast<int>(memtables.size()), &arena));
Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
"[%s] Level-0 flush table #%" PRIu64 ": started", "[%s] Level-0 flush table #%" PRIu64 ": started",
cfd_->GetName().c_str(), meta.fd.GetNumber()); cfd_->GetName().c_str(), meta.fd.GetNumber());

View File

@ -264,10 +264,11 @@ void ForwardIterator::SeekInternal(const Slice& internal_key,
if (search_left_bound == search_right_bound) { if (search_left_bound == search_right_bound) {
f_idx = search_left_bound; f_idx = search_left_bound;
} else if (search_left_bound < search_right_bound) { } else if (search_left_bound < search_right_bound) {
f_idx = FindFileInRange( f_idx =
level_files, internal_key, search_left_bound, FindFileInRange(level_files, internal_key, search_left_bound,
search_right_bound == FileIndexer::kLevelMaxIndex ? search_right_bound == FileIndexer::kLevelMaxIndex
level_files.size() : search_right_bound); ? static_cast<uint32_t>(level_files.size())
: search_right_bound);
} else { } else {
// search_left_bound > search_right_bound // search_left_bound > search_right_bound
// There are only 2 cases this can happen: // There are only 2 cases this can happen:

View File

@ -59,7 +59,7 @@ class EventListenerTest {
const ColumnFamilyOptions* options = nullptr) { const ColumnFamilyOptions* options = nullptr) {
ColumnFamilyOptions cf_opts; ColumnFamilyOptions cf_opts;
cf_opts = ColumnFamilyOptions(Options()); cf_opts = ColumnFamilyOptions(Options());
int cfi = handles_.size(); size_t cfi = handles_.size();
handles_.resize(cfi + cfs.size()); handles_.resize(cfi + cfs.size());
for (auto cf : cfs) { for (auto cf : cfs) {
ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++])); ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
@ -188,7 +188,7 @@ TEST(EventListenerTest, OnSingleDBFlushTest) {
ASSERT_OK(Put(6, "alyosha", "alyosha")); ASSERT_OK(Put(6, "alyosha", "alyosha"));
ASSERT_OK(Put(7, "popovich", "popovich")); ASSERT_OK(Put(7, "popovich", "popovich"));
for (size_t i = 1; i < 8; ++i) { for (size_t i = 1; i < 8; ++i) {
Flush(i); Flush(static_cast<int>(i));
dbfull()->TEST_WaitForFlushMemTable(); dbfull()->TEST_WaitForFlushMemTable();
ASSERT_EQ(listener->flushed_dbs_.size(), i); ASSERT_EQ(listener->flushed_dbs_.size(), i);
ASSERT_EQ(listener->flushed_column_family_names_.size(), i); ASSERT_EQ(listener->flushed_column_family_names_.size(), i);
@ -218,7 +218,7 @@ TEST(EventListenerTest, MultiCF) {
ASSERT_OK(Put(6, "alyosha", "alyosha")); ASSERT_OK(Put(6, "alyosha", "alyosha"));
ASSERT_OK(Put(7, "popovich", "popovich")); ASSERT_OK(Put(7, "popovich", "popovich"));
for (size_t i = 1; i < 8; ++i) { for (size_t i = 1; i < 8; ++i) {
Flush(i); Flush(static_cast<int>(i));
ASSERT_EQ(listener->flushed_dbs_.size(), i); ASSERT_EQ(listener->flushed_dbs_.size(), i);
ASSERT_EQ(listener->flushed_column_family_names_.size(), i); ASSERT_EQ(listener->flushed_column_family_names_.size(), i);
} }

View File

@ -6,6 +6,11 @@
#include <vector> #include <vector>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include "util/testharness.h" #include "util/testharness.h"
#include "util/benchharness.h" #include "util/benchharness.h"
#include "db/version_set.h" #include "db/version_set.h"
@ -14,9 +19,9 @@
namespace rocksdb { namespace rocksdb {
std::string MakeKey(unsigned int num) { std::string MakeKey(uint64_t num) {
char buf[30]; char buf[30];
snprintf(buf, sizeof(buf), "%016u", num); snprintf(buf, sizeof(buf), "%016" PRIu64, num);
return std::string(buf); return std::string(buf);
} }

View File

@ -558,9 +558,9 @@ TEST(LogTest, ErrorJoinsRecords) {
ASSERT_EQ("correct", Read()); ASSERT_EQ("correct", Read());
ASSERT_EQ("EOF", Read()); ASSERT_EQ("EOF", Read());
const unsigned int dropped = DroppedBytes(); size_t dropped = DroppedBytes();
ASSERT_LE(dropped, 2*kBlockSize + 100); ASSERT_LE(dropped, 2 * kBlockSize + 100);
ASSERT_GE(dropped, 2*kBlockSize); ASSERT_GE(dropped, 2 * kBlockSize);
} }
TEST(LogTest, ReadStart) { TEST(LogTest, ReadStart) {

View File

@ -188,7 +188,7 @@ KeyHandle MemTableRep::Allocate(const size_t len, char** buf) {
// into this scratch space. // into this scratch space.
const char* EncodeKey(std::string* scratch, const Slice& target) { const char* EncodeKey(std::string* scratch, const Slice& target) {
scratch->clear(); scratch->clear();
PutVarint32(scratch, target.size()); PutVarint32(scratch, static_cast<uint32_t>(target.size()));
scratch->append(target.data(), target.size()); scratch->append(target.data(), target.size());
return scratch->data(); return scratch->data();
} }
@ -288,12 +288,12 @@ void MemTable::Add(SequenceNumber s, ValueType type,
// key bytes : char[internal_key.size()] // key bytes : char[internal_key.size()]
// value_size : varint32 of value.size() // value_size : varint32 of value.size()
// value bytes : char[value.size()] // value bytes : char[value.size()]
size_t key_size = key.size(); uint32_t key_size = static_cast<uint32_t>(key.size());
size_t val_size = value.size(); uint32_t val_size = static_cast<uint32_t>(value.size());
size_t internal_key_size = key_size + 8; uint32_t internal_key_size = key_size + 8;
const size_t encoded_len = const uint32_t encoded_len = VarintLength(internal_key_size) +
VarintLength(internal_key_size) + internal_key_size + internal_key_size + VarintLength(val_size) +
VarintLength(val_size) + val_size; val_size;
char* buf = nullptr; char* buf = nullptr;
KeyHandle handle = table_->Allocate(encoded_len, &buf); KeyHandle handle = table_->Allocate(encoded_len, &buf);
assert(buf != nullptr); assert(buf != nullptr);
@ -502,8 +502,8 @@ void MemTable::Update(SequenceNumber seq,
switch (static_cast<ValueType>(tag & 0xff)) { switch (static_cast<ValueType>(tag & 0xff)) {
case kTypeValue: { case kTypeValue: {
Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length); Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length);
uint32_t prev_size = prev_value.size(); uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
uint32_t new_size = value.size(); uint32_t new_size = static_cast<uint32_t>(value.size());
// Update value, if new value size <= previous value size // Update value, if new value size <= previous value size
if (new_size <= prev_size ) { if (new_size <= prev_size ) {
@ -560,7 +560,7 @@ bool MemTable::UpdateCallback(SequenceNumber seq,
switch (static_cast<ValueType>(tag & 0xff)) { switch (static_cast<ValueType>(tag & 0xff)) {
case kTypeValue: { case kTypeValue: {
Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length); Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length);
uint32_t prev_size = prev_value.size(); uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
char* prev_buffer = const_cast<char*>(prev_value.data()); char* prev_buffer = const_cast<char*>(prev_value.data());
uint32_t new_prev_size = prev_size; uint32_t new_prev_size = prev_size;

View File

@ -23,15 +23,11 @@ using namespace std;
using namespace rocksdb; using namespace rocksdb;
namespace { namespace {
int numMergeOperatorCalls; size_t num_merge_operator_calls;
void resetNumMergeOperatorCalls() { void resetNumMergeOperatorCalls() { num_merge_operator_calls = 0; }
numMergeOperatorCalls = 0;
}
int num_partial_merge_calls; size_t num_partial_merge_calls;
void resetNumPartialMergeCalls() { void resetNumPartialMergeCalls() { num_partial_merge_calls = 0; }
num_partial_merge_calls = 0;
}
} }
class CountMergeOperator : public AssociativeMergeOperator { class CountMergeOperator : public AssociativeMergeOperator {
@ -45,7 +41,7 @@ class CountMergeOperator : public AssociativeMergeOperator {
const Slice& value, const Slice& value,
std::string* new_value, std::string* new_value,
Logger* logger) const override { Logger* logger) const override {
++numMergeOperatorCalls; ++num_merge_operator_calls;
if (existing_value == nullptr) { if (existing_value == nullptr) {
new_value->assign(value.data(), value.size()); new_value->assign(value.data(), value.size());
return true; return true;
@ -307,31 +303,31 @@ void testCounters(Counters& counters, DB* db, bool test_compaction) {
} }
} }
void testSuccessiveMerge( void testSuccessiveMerge(Counters& counters, size_t max_num_merges,
Counters& counters, int max_num_merges, int num_merges) { size_t num_merges) {
counters.assert_remove("z"); counters.assert_remove("z");
uint64_t sum = 0; uint64_t sum = 0;
for (int i = 1; i <= num_merges; ++i) { for (size_t i = 1; i <= num_merges; ++i) {
resetNumMergeOperatorCalls(); resetNumMergeOperatorCalls();
counters.assert_add("z", i); counters.assert_add("z", i);
sum += i; sum += i;
if (i % (max_num_merges + 1) == 0) { if (i % (max_num_merges + 1) == 0) {
assert(numMergeOperatorCalls == max_num_merges + 1); assert(num_merge_operator_calls == max_num_merges + 1);
} else { } else {
assert(numMergeOperatorCalls == 0); assert(num_merge_operator_calls == 0);
} }
resetNumMergeOperatorCalls(); resetNumMergeOperatorCalls();
assert(counters.assert_get("z") == sum); assert(counters.assert_get("z") == sum);
assert(numMergeOperatorCalls == i % (max_num_merges + 1)); assert(num_merge_operator_calls == i % (max_num_merges + 1));
} }
} }
void testPartialMerge(Counters* counters, DB* db, int max_merge, int min_merge, void testPartialMerge(Counters* counters, DB* db, size_t max_merge,
int count) { size_t min_merge, size_t count) {
FlushOptions o; FlushOptions o;
o.wait = true; o.wait = true;
@ -339,7 +335,7 @@ void testPartialMerge(Counters* counters, DB* db, int max_merge, int min_merge,
// operands exceeds the threshold. // operands exceeds the threshold.
uint64_t tmp_sum = 0; uint64_t tmp_sum = 0;
resetNumPartialMergeCalls(); resetNumPartialMergeCalls();
for (int i = 1; i <= count; i++) { for (size_t i = 1; i <= count; i++) {
counters->assert_add("b", i); counters->assert_add("b", i);
tmp_sum += i; tmp_sum += i;
} }
@ -348,7 +344,7 @@ void testPartialMerge(Counters* counters, DB* db, int max_merge, int min_merge,
ASSERT_EQ(tmp_sum, counters->assert_get("b")); ASSERT_EQ(tmp_sum, counters->assert_get("b"));
if (count > max_merge) { if (count > max_merge) {
// in this case, FullMerge should be called instead. // in this case, FullMerge should be called instead.
ASSERT_EQ(num_partial_merge_calls, 0); ASSERT_EQ(num_partial_merge_calls, 0U);
} else { } else {
// if count >= min_merge, then partial merge should be called once. // if count >= min_merge, then partial merge should be called once.
ASSERT_EQ((count >= min_merge), (num_partial_merge_calls == 1)); ASSERT_EQ((count >= min_merge), (num_partial_merge_calls == 1));
@ -358,20 +354,18 @@ void testPartialMerge(Counters* counters, DB* db, int max_merge, int min_merge,
resetNumPartialMergeCalls(); resetNumPartialMergeCalls();
tmp_sum = 0; tmp_sum = 0;
db->Put(rocksdb::WriteOptions(), "c", "10"); db->Put(rocksdb::WriteOptions(), "c", "10");
for (int i = 1; i <= count; i++) { for (size_t i = 1; i <= count; i++) {
counters->assert_add("c", i); counters->assert_add("c", i);
tmp_sum += i; tmp_sum += i;
} }
db->Flush(o); db->Flush(o);
db->CompactRange(nullptr, nullptr); db->CompactRange(nullptr, nullptr);
ASSERT_EQ(tmp_sum, counters->assert_get("c")); ASSERT_EQ(tmp_sum, counters->assert_get("c"));
ASSERT_EQ(num_partial_merge_calls, 0); ASSERT_EQ(num_partial_merge_calls, 0U);
} }
void testSingleBatchSuccessiveMerge( void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges,
DB* db, size_t num_merges) {
int max_num_merges,
int num_merges) {
assert(num_merges > max_num_merges); assert(num_merges > max_num_merges);
Slice key("BatchSuccessiveMerge"); Slice key("BatchSuccessiveMerge");
@ -380,7 +374,7 @@ void testSingleBatchSuccessiveMerge(
// Create the batch // Create the batch
WriteBatch batch; WriteBatch batch;
for (int i = 0; i < num_merges; ++i) { for (size_t i = 0; i < num_merges; ++i) {
batch.Merge(key, merge_value_slice); batch.Merge(key, merge_value_slice);
} }
@ -390,8 +384,9 @@ void testSingleBatchSuccessiveMerge(
Status s = db->Write(WriteOptions(), &batch); Status s = db->Write(WriteOptions(), &batch);
assert(s.ok()); assert(s.ok());
} }
assert(numMergeOperatorCalls == ASSERT_EQ(
num_merges - (num_merges % (max_num_merges + 1))); num_merge_operator_calls,
static_cast<size_t>(num_merges - (num_merges % (max_num_merges + 1))));
// Get the value // Get the value
resetNumMergeOperatorCalls(); resetNumMergeOperatorCalls();
@ -403,7 +398,8 @@ void testSingleBatchSuccessiveMerge(
assert(get_value_str.size() == sizeof(uint64_t)); assert(get_value_str.size() == sizeof(uint64_t));
uint64_t get_value = DecodeFixed64(&get_value_str[0]); uint64_t get_value = DecodeFixed64(&get_value_str[0]);
ASSERT_EQ(get_value, num_merges * merge_value); ASSERT_EQ(get_value, num_merges * merge_value);
ASSERT_EQ(numMergeOperatorCalls, (num_merges % (max_num_merges + 1))); ASSERT_EQ(num_merge_operator_calls,
static_cast<size_t>((num_merges % (max_num_merges + 1))));
} }
void runTest(int argc, const string& dbname, const bool use_ttl = false) { void runTest(int argc, const string& dbname, const bool use_ttl = false) {

View File

@ -158,7 +158,7 @@ class PlainTableDBTest {
// Return spread of files per level // Return spread of files per level
std::string FilesPerLevel() { std::string FilesPerLevel() {
std::string result; std::string result;
int last_non_zero_offset = 0; size_t last_non_zero_offset = 0;
for (int level = 0; level < db_->NumberLevels(); level++) { for (int level = 0; level < db_->NumberLevels(); level++) {
int f = NumTableFilesAtLevel(level); int f = NumTableFilesAtLevel(level);
char buf[100]; char buf[100];

View File

@ -29,14 +29,14 @@ using GFLAGS::ParseCommandLineFlags;
DEFINE_bool(trigger_deadlock, false, DEFINE_bool(trigger_deadlock, false,
"issue delete in range scan to trigger PrefixHashMap deadlock"); "issue delete in range scan to trigger PrefixHashMap deadlock");
DEFINE_uint64(bucket_count, 100000, "number of buckets"); DEFINE_int32(bucket_count, 100000, "number of buckets");
DEFINE_uint64(num_locks, 10001, "number of locks"); DEFINE_uint64(num_locks, 10001, "number of locks");
DEFINE_bool(random_prefix, false, "randomize prefix"); DEFINE_bool(random_prefix, false, "randomize prefix");
DEFINE_uint64(total_prefixes, 100000, "total number of prefixes"); DEFINE_uint64(total_prefixes, 100000, "total number of prefixes");
DEFINE_uint64(items_per_prefix, 1, "total number of values per prefix"); DEFINE_uint64(items_per_prefix, 1, "total number of values per prefix");
DEFINE_int64(write_buffer_size, 33554432, ""); DEFINE_int64(write_buffer_size, 33554432, "");
DEFINE_int64(max_write_buffer_number, 2, ""); DEFINE_int32(max_write_buffer_number, 2, "");
DEFINE_int64(min_write_buffer_number_to_merge, 1, ""); DEFINE_int32(min_write_buffer_number_to_merge, 1, "");
DEFINE_int32(skiplist_height, 4, ""); DEFINE_int32(skiplist_height, 4, "");
DEFINE_int32(memtable_prefix_bloom_bits, 10000000, ""); DEFINE_int32(memtable_prefix_bloom_bits, 10000000, "");
DEFINE_int32(memtable_prefix_bloom_probes, 10, ""); DEFINE_int32(memtable_prefix_bloom_probes, 10, "");

View File

@ -253,11 +253,10 @@ class ConcurrentTest {
// Note that generation 0 is never inserted, so it is ok if // Note that generation 0 is never inserted, so it is ok if
// <*,0,*> is missing. // <*,0,*> is missing.
ASSERT_TRUE((gen(pos) == 0U) || ASSERT_TRUE((gen(pos) == 0U) ||
(gen(pos) > (uint64_t)initial_state.Get(key(pos))) (gen(pos) > static_cast<uint64_t>(initial_state.Get(
) << "key: " << key(pos) static_cast<int>(key(pos))))))
<< "; gen: " << gen(pos) << "key: " << key(pos) << "; gen: " << gen(pos)
<< "; initgen: " << "; initgen: " << initial_state.Get(static_cast<int>(key(pos)));
<< initial_state.Get(key(pos));
// Advance to next key in the valid key space // Advance to next key in the valid key space
if (key(pos) < key(current)) { if (key(pos) < key(current)) {

View File

@ -160,7 +160,7 @@ class VersionEdit {
// Add the specified file at the specified number. // Add the specified file at the specified number.
// REQUIRES: This version has not been saved (see VersionSet::SaveTo) // REQUIRES: This version has not been saved (see VersionSet::SaveTo)
// REQUIRES: "smallest" and "largest" are smallest and largest keys in file // REQUIRES: "smallest" and "largest" are smallest and largest keys in file
void AddFile(int level, uint64_t file, uint64_t file_path_id, void AddFile(int level, uint64_t file, uint32_t file_path_id,
uint64_t file_size, const InternalKey& smallest, uint64_t file_size, const InternalKey& smallest,
const InternalKey& largest, const SequenceNumber& smallest_seqno, const InternalKey& largest, const SequenceNumber& smallest_seqno,
const SequenceNumber& largest_seqno) { const SequenceNumber& largest_seqno) {
@ -180,9 +180,7 @@ class VersionEdit {
} }
// Number of edits // Number of edits
int NumEntries() { size_t NumEntries() { return new_files_.size() + deleted_files_.size(); }
return new_files_.size() + deleted_files_.size();
}
bool IsColumnFamilyManipulation() { bool IsColumnFamilyManipulation() {
return is_column_family_add_ || is_column_family_drop_; return is_column_family_add_ || is_column_family_drop_;

View File

@ -26,11 +26,12 @@ class VersionEditTest { };
TEST(VersionEditTest, EncodeDecode) { TEST(VersionEditTest, EncodeDecode) {
static const uint64_t kBig = 1ull << 50; static const uint64_t kBig = 1ull << 50;
static const uint32_t kBig32Bit = 1ull << 30;
VersionEdit edit; VersionEdit edit;
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
TestEncodeDecode(edit); TestEncodeDecode(edit);
edit.AddFile(3, kBig + 300 + i, kBig + 400 + i, 0, edit.AddFile(3, kBig + 300 + i, kBig32Bit + 400 + i, 0,
InternalKey("foo", kBig + 500 + i, kTypeValue), InternalKey("foo", kBig + 500 + i, kTypeValue),
InternalKey("zoo", kBig + 600 + i, kTypeDeletion), InternalKey("zoo", kBig + 600 + i, kTypeDeletion),
kBig + 500 + i, kBig + 600 + i); kBig + 500 + i, kBig + 600 + i);

View File

@ -201,8 +201,8 @@ class FilePicker {
private: private:
unsigned int num_levels_; unsigned int num_levels_;
unsigned int curr_level_; unsigned int curr_level_;
int search_left_bound_; int32_t search_left_bound_;
int search_right_bound_; int32_t search_right_bound_;
#ifndef NDEBUG #ifndef NDEBUG
std::vector<FileMetaData*>* files_; std::vector<FileMetaData*>* files_;
#endif #endif
@ -258,11 +258,13 @@ class FilePicker {
start_index = search_left_bound_; start_index = search_left_bound_;
} else if (search_left_bound_ < search_right_bound_) { } else if (search_left_bound_ < search_right_bound_) {
if (search_right_bound_ == FileIndexer::kLevelMaxIndex) { if (search_right_bound_ == FileIndexer::kLevelMaxIndex) {
search_right_bound_ = curr_file_level_->num_files - 1; search_right_bound_ =
static_cast<int32_t>(curr_file_level_->num_files) - 1;
} }
start_index = FindFileInRange(*internal_comparator_, start_index =
*curr_file_level_, ikey_, FindFileInRange(*internal_comparator_, *curr_file_level_, ikey_,
search_left_bound_, search_right_bound_); static_cast<uint32_t>(search_left_bound_),
static_cast<uint32_t>(search_right_bound_));
} else { } else {
// search_left_bound > search_right_bound, key does not exist in // search_left_bound > search_right_bound, key does not exist in
// this level. Since no comparision is done in this level, it will // this level. Since no comparision is done in this level, it will
@ -315,7 +317,8 @@ Version::~Version() {
int FindFile(const InternalKeyComparator& icmp, int FindFile(const InternalKeyComparator& icmp,
const LevelFilesBrief& file_level, const LevelFilesBrief& file_level,
const Slice& key) { const Slice& key) {
return FindFileInRange(icmp, file_level, key, 0, file_level.num_files); return FindFileInRange(icmp, file_level, key, 0,
static_cast<uint32_t>(file_level.num_files));
} }
void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level, void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
@ -412,7 +415,7 @@ class LevelFileNumIterator : public Iterator {
const LevelFilesBrief* flevel) const LevelFilesBrief* flevel)
: icmp_(icmp), : icmp_(icmp),
flevel_(flevel), flevel_(flevel),
index_(flevel->num_files), index_(static_cast<uint32_t>(flevel->num_files)),
current_value_(0, 0, 0) { // Marks as invalid current_value_(0, 0, 0) { // Marks as invalid
} }
virtual bool Valid() const { virtual bool Valid() const {
@ -423,7 +426,9 @@ class LevelFileNumIterator : public Iterator {
} }
virtual void SeekToFirst() { index_ = 0; } virtual void SeekToFirst() { index_ = 0; }
virtual void SeekToLast() { virtual void SeekToLast() {
index_ = (flevel_->num_files == 0) ? 0 : flevel_->num_files - 1; index_ = (flevel_->num_files == 0)
? 0
: static_cast<uint32_t>(flevel_->num_files) - 1;
} }
virtual void Next() { virtual void Next() {
assert(Valid()); assert(Valid());
@ -432,7 +437,7 @@ class LevelFileNumIterator : public Iterator {
virtual void Prev() { virtual void Prev() {
assert(Valid()); assert(Valid());
if (index_ == 0) { if (index_ == 0) {
index_ = flevel_->num_files; // Marks as invalid index_ = static_cast<uint32_t>(flevel_->num_files); // Marks as invalid
} else { } else {
index_--; index_--;
} }
@ -1213,7 +1218,7 @@ void VersionStorageInfo::GetOverlappingInputs(
i = 0; i = 0;
} }
} else if (file_index) { } else if (file_index) {
*file_index = i-1; *file_index = static_cast<int>(i) - 1;
} }
} }
} }
@ -1229,7 +1234,7 @@ void VersionStorageInfo::GetOverlappingInputsBinarySearch(
assert(level > 0); assert(level > 0);
int min = 0; int min = 0;
int mid = 0; int mid = 0;
int max = files_[level].size() -1; int max = static_cast<int>(files_[level].size()) - 1;
bool foundOverlap = false; bool foundOverlap = false;
const Comparator* user_cmp = user_comparator_; const Comparator* user_cmp = user_comparator_;
@ -2646,12 +2651,12 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
// Level-0 files have to be merged together. For other levels, // Level-0 files have to be merged together. For other levels,
// we will make a concatenating iterator per level. // we will make a concatenating iterator per level.
// TODO(opt): use concatenating iterator for level-0 if there is no overlap // TODO(opt): use concatenating iterator for level-0 if there is no overlap
const int space = (c->level() == 0 ? const size_t space = (c->level() == 0 ? c->input_levels(0)->num_files +
c->input_levels(0)->num_files + c->num_input_levels() - 1: c->num_input_levels() - 1
c->num_input_levels()); : c->num_input_levels());
Iterator** list = new Iterator*[space]; Iterator** list = new Iterator* [space];
int num = 0; size_t num = 0;
for (int which = 0; which < c->num_input_levels(); which++) { for (size_t which = 0; which < c->num_input_levels(); which++) {
if (c->input_levels(which)->num_files != 0) { if (c->input_levels(which)->num_files != 0) {
if (c->level(which) == 0) { if (c->level(which) == 0) {
const LevelFilesBrief* flevel = c->input_levels(which); const LevelFilesBrief* flevel = c->input_levels(which);
@ -2673,8 +2678,9 @@ Iterator* VersionSet::MakeInputIterator(Compaction* c) {
} }
} }
assert(num <= space); assert(num <= space);
Iterator* result = NewMergingIterator( Iterator* result =
&c->column_family_data()->internal_comparator(), list, num); NewMergingIterator(&c->column_family_data()->internal_comparator(), list,
static_cast<int>(num));
delete[] list; delete[] list;
return result; return result;
} }
@ -2691,9 +2697,9 @@ bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
c->column_family_data()->GetName().c_str()); c->column_family_data()->GetName().c_str());
} }
for (int input = 0; input < c->num_input_levels(); ++input) { for (size_t input = 0; input < c->num_input_levels(); ++input) {
int level = c->level(input); int level = c->level(input);
for (int i = 0; i < c->num_input_files(input); ++i) { for (size_t i = 0; i < c->num_input_files(input); ++i) {
uint64_t number = c->input(input, i)->fd.GetNumber(); uint64_t number = c->input(input, i)->fd.GetNumber();
bool found = false; bool found = false;
for (unsigned int j = 0; j < vstorage->files_[level].size(); j++) { for (unsigned int j = 0; j < vstorage->files_[level].size(); j++) {

View File

@ -194,7 +194,7 @@ class VersionStorageInfo {
// REQUIRES: This version has been saved (see VersionSet::SaveTo) // REQUIRES: This version has been saved (see VersionSet::SaveTo)
int NumLevelFiles(int level) const { int NumLevelFiles(int level) const {
assert(finalized_); assert(finalized_);
return files_[level].size(); return static_cast<int>(files_[level].size());
} }
// Return the combined file size of all files at the specified level. // Return the combined file size of all files at the specified level.

View File

@ -374,7 +374,7 @@ class MemTableInserter : public WriteBatch::Handler {
Status s = db_->Get(ropts, cf_handle, key, &prev_value); Status s = db_->Get(ropts, cf_handle, key, &prev_value);
char* prev_buffer = const_cast<char*>(prev_value.c_str()); char* prev_buffer = const_cast<char*>(prev_value.c_str());
uint32_t prev_size = prev_value.size(); uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
auto status = moptions->inplace_callback(s.ok() ? prev_buffer : nullptr, auto status = moptions->inplace_callback(s.ok() ? prev_buffer : nullptr,
s.ok() ? &prev_size : nullptr, s.ok() ? &prev_size : nullptr,
value, &merged_value); value, &merged_value);

View File

@ -20,6 +20,7 @@
#include <cstdarg> #include <cstdarg>
#include <string> #include <string>
#include <memory> #include <memory>
#include <limits>
#include <vector> #include <vector>
#include <stdint.h> #include <stdint.h>
#include "rocksdb/status.h" #include "rocksdb/status.h"
@ -476,8 +477,8 @@ class WritableFile {
if (new_last_preallocated_block > last_preallocated_block_) { if (new_last_preallocated_block > last_preallocated_block_) {
size_t num_spanned_blocks = size_t num_spanned_blocks =
new_last_preallocated_block - last_preallocated_block_; new_last_preallocated_block - last_preallocated_block_;
Allocate(block_size * last_preallocated_block_, Allocate(static_cast<off_t>(block_size * last_preallocated_block_),
block_size * num_spanned_blocks); static_cast<off_t>(block_size * num_spanned_blocks));
last_preallocated_block_ = new_last_preallocated_block; last_preallocated_block_ = new_last_preallocated_block;
} }
} }
@ -580,7 +581,8 @@ enum InfoLogLevel : unsigned char {
// An interface for writing log messages. // An interface for writing log messages.
class Logger { class Logger {
public: public:
enum { DO_NOT_SUPPORT_GET_LOG_FILE_SIZE = -1 }; size_t kDoNotSupportGetLogFileSize = std::numeric_limits<size_t>::max();
explicit Logger(const InfoLogLevel log_level = InfoLogLevel::INFO_LEVEL) explicit Logger(const InfoLogLevel log_level = InfoLogLevel::INFO_LEVEL)
: log_level_(log_level) {} : log_level_(log_level) {}
virtual ~Logger(); virtual ~Logger();
@ -613,9 +615,7 @@ class Logger {
Logv(new_format, ap); Logv(new_format, ap);
} }
} }
virtual size_t GetLogFileSize() const { virtual size_t GetLogFileSize() const { return kDoNotSupportGetLogFileSize; }
return DO_NOT_SUPPORT_GET_LOG_FILE_SIZE;
}
// Flush to the OS buffers // Flush to the OS buffers
virtual void Flush() {} virtual void Flush() {}
virtual InfoLogLevel GetInfoLogLevel() const { return log_level_; } virtual InfoLogLevel GetInfoLogLevel() const { return log_level_; }

View File

@ -74,9 +74,8 @@ jbyteArray Java_org_rocksdb_RocksIterator_key0(
auto it = reinterpret_cast<rocksdb::Iterator*>(handle); auto it = reinterpret_cast<rocksdb::Iterator*>(handle);
rocksdb::Slice key_slice = it->key(); rocksdb::Slice key_slice = it->key();
jbyteArray jkey = env->NewByteArray(key_slice.size()); jbyteArray jkey = env->NewByteArray(static_cast<jsize>(key_slice.size()));
env->SetByteArrayRegion( env->SetByteArrayRegion(jkey, 0, static_cast<jsize>(key_slice.size()),
jkey, 0, key_slice.size(),
reinterpret_cast<const jbyte*>(key_slice.data())); reinterpret_cast<const jbyte*>(key_slice.data()));
return jkey; return jkey;
} }
@ -91,9 +90,9 @@ jbyteArray Java_org_rocksdb_RocksIterator_value0(
auto it = reinterpret_cast<rocksdb::Iterator*>(handle); auto it = reinterpret_cast<rocksdb::Iterator*>(handle);
rocksdb::Slice value_slice = it->value(); rocksdb::Slice value_slice = it->value();
jbyteArray jkeyValue = env->NewByteArray(value_slice.size()); jbyteArray jkeyValue =
env->SetByteArrayRegion( env->NewByteArray(static_cast<jsize>(value_slice.size()));
jkeyValue, 0, value_slice.size(), env->SetByteArrayRegion(jkeyValue, 0, static_cast<jsize>(value_slice.size()),
reinterpret_cast<const jbyte*>(value_slice.data())); reinterpret_cast<const jbyte*>(value_slice.data()));
return jkeyValue; return jkeyValue;
} }

View File

@ -65,8 +65,8 @@ void Java_org_rocksdb_RestoreBackupableDB_restoreDBFromBackup0(JNIEnv* env,
const char* cwal_dir = env->GetStringUTFChars(jwal_dir, 0); const char* cwal_dir = env->GetStringUTFChars(jwal_dir, 0);
auto rdb = reinterpret_cast<rocksdb::RestoreBackupableDB*>(jhandle); auto rdb = reinterpret_cast<rocksdb::RestoreBackupableDB*>(jhandle);
rocksdb::Status s = rocksdb::Status s = rdb->RestoreDBFromBackup(
rdb->RestoreDBFromBackup(jbackup_id, cdb_dir, cwal_dir, *opt); static_cast<rocksdb::BackupID>(jbackup_id), cdb_dir, cwal_dir, *opt);
env->ReleaseStringUTFChars(jdb_dir, cdb_dir); env->ReleaseStringUTFChars(jdb_dir, cdb_dir);
env->ReleaseStringUTFChars(jwal_dir, cwal_dir); env->ReleaseStringUTFChars(jwal_dir, cwal_dir);

View File

@ -234,9 +234,9 @@ jobject Java_org_rocksdb_RocksDB_listColumnFamilies(
for (std::vector<std::string>::size_type i = 0; for (std::vector<std::string>::size_type i = 0;
i < column_family_names.size(); i++) { i < column_family_names.size(); i++) {
jbyteArray jcf_value = jbyteArray jcf_value =
env->NewByteArray(column_family_names[i].size()); env->NewByteArray(static_cast<jsize>(column_family_names[i].size()));
env->SetByteArrayRegion(jcf_value, 0, env->SetByteArrayRegion(
column_family_names[i].size(), jcf_value, 0, static_cast<jsize>(column_family_names[i].size()),
reinterpret_cast<const jbyte*>(column_family_names[i].c_str())); reinterpret_cast<const jbyte*>(column_family_names[i].c_str()));
env->CallBooleanMethod(jvalue_list, env->CallBooleanMethod(jvalue_list,
rocksdb::ListJni::getListAddMethodId(env), jcf_value); rocksdb::ListJni::getListAddMethodId(env), jcf_value);
@ -516,9 +516,8 @@ jbyteArray rocksdb_get_helper(
} }
if (s.ok()) { if (s.ok()) {
jbyteArray jret_value = env->NewByteArray(value.size()); jbyteArray jret_value = env->NewByteArray(static_cast<jsize>(value.size()));
env->SetByteArrayRegion( env->SetByteArrayRegion(jret_value, 0, static_cast<jsize>(value.size()),
jret_value, 0, value.size(),
reinterpret_cast<const jbyte*>(value.c_str())); reinterpret_cast<const jbyte*>(value.c_str()));
return jret_value; return jret_value;
} }
@ -712,9 +711,10 @@ jobject multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db,
// insert in java list // insert in java list
for (std::vector<rocksdb::Status>::size_type i = 0; i != s.size(); i++) { for (std::vector<rocksdb::Status>::size_type i = 0; i != s.size(); i++) {
if (s[i].ok()) { if (s[i].ok()) {
jbyteArray jentry_value = env->NewByteArray(values[i].size()); jbyteArray jentry_value =
env->NewByteArray(static_cast<jsize>(values[i].size()));
env->SetByteArrayRegion( env->SetByteArrayRegion(
jentry_value, 0, values[i].size(), jentry_value, 0, static_cast<jsize>(values[i].size()),
reinterpret_cast<const jbyte*>(values[i].c_str())); reinterpret_cast<const jbyte*>(values[i].c_str()));
env->CallBooleanMethod( env->CallBooleanMethod(
jvalue_list, rocksdb::ListJni::getListAddMethodId(env), jvalue_list, rocksdb::ListJni::getListAddMethodId(env),
@ -1135,10 +1135,11 @@ jlongArray Java_org_rocksdb_RocksDB_iterators(
rocksdb::Status s = db->NewIterators(rocksdb::ReadOptions(), rocksdb::Status s = db->NewIterators(rocksdb::ReadOptions(),
cf_handles, &iterators); cf_handles, &iterators);
if (s.ok()) { if (s.ok()) {
jlongArray jLongArray = env->NewLongArray(iterators.size()); jlongArray jLongArray =
for (std::vector<rocksdb::Iterator*>::size_type i = 0; env->NewLongArray(static_cast<jsize>(iterators.size()));
i < iterators.size(); i++) { for (std::vector<rocksdb::Iterator*>::size_type i = 0; i < iterators.size();
env->SetLongArrayRegion(jLongArray, i, 1, i++) {
env->SetLongArrayRegion(jLongArray, static_cast<jsize>(i), 1,
reinterpret_cast<const jlong*>(&iterators[i])); reinterpret_cast<const jlong*>(&iterators[i]));
} }
return jLongArray; return jLongArray;

View File

@ -39,7 +39,7 @@ void Java_org_rocksdb_AbstractSlice_createNewSliceFromString(
jint Java_org_rocksdb_AbstractSlice_size0( jint Java_org_rocksdb_AbstractSlice_size0(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
const rocksdb::Slice* slice = reinterpret_cast<rocksdb::Slice*>(handle); const rocksdb::Slice* slice = reinterpret_cast<rocksdb::Slice*>(handle);
return slice->size(); return static_cast<jint>(slice->size());
} }
/* /*
@ -154,7 +154,7 @@ void Java_org_rocksdb_Slice_createNewSlice1(
jbyteArray Java_org_rocksdb_Slice_data0( jbyteArray Java_org_rocksdb_Slice_data0(
JNIEnv* env, jobject jobj, jlong handle) { JNIEnv* env, jobject jobj, jlong handle) {
const rocksdb::Slice* slice = reinterpret_cast<rocksdb::Slice*>(handle); const rocksdb::Slice* slice = reinterpret_cast<rocksdb::Slice*>(handle);
const int len = slice->size(); const int len = static_cast<int>(slice->size());
const jbyteArray data = env->NewByteArray(len); const jbyteArray data = env->NewByteArray(len);
env->SetByteArrayRegion(data, 0, len, env->SetByteArrayRegion(data, 0, len,
reinterpret_cast<jbyte*>(const_cast<char*>(slice->data()))); reinterpret_cast<jbyte*>(const_cast<char*>(slice->data())));

View File

@ -392,9 +392,8 @@ jbyteArray Java_org_rocksdb_WriteBatchTest_getContents(
} }
delete mem->Unref(); delete mem->Unref();
jbyteArray jstate = env->NewByteArray(state.size()); jbyteArray jstate = env->NewByteArray(static_cast<jsize>(state.size()));
env->SetByteArrayRegion( env->SetByteArrayRegion(jstate, 0, static_cast<jsize>(state.size()),
jstate, 0, state.size(),
reinterpret_cast<const jbyte*>(state.c_str())); reinterpret_cast<const jbyte*>(state.c_str()));
return jstate; return jstate;

View File

@ -203,13 +203,13 @@ inline bool Zlib_Compress(const CompressionOptions& opts, const char* input,
// Compress the input, and put compressed data in output. // Compress the input, and put compressed data in output.
_stream.next_in = (Bytef *)input; _stream.next_in = (Bytef *)input;
_stream.avail_in = length; _stream.avail_in = static_cast<unsigned int>(length);
// Initialize the output size. // Initialize the output size.
_stream.avail_out = length; _stream.avail_out = static_cast<unsigned int>(length);
_stream.next_out = (Bytef *)&(*output)[0]; _stream.next_out = (Bytef*)&(*output)[0];
int old_sz =0, new_sz =0, new_sz_delta =0; size_t old_sz = 0, new_sz = 0, new_sz_delta = 0;
bool done = false; bool done = false;
while (!done) { while (!done) {
st = deflate(&_stream, Z_FINISH); st = deflate(&_stream, Z_FINISH);
@ -221,12 +221,12 @@ inline bool Zlib_Compress(const CompressionOptions& opts, const char* input,
// No output space. Increase the output space by 20%. // No output space. Increase the output space by 20%.
// (Should we fail the compression since it expands the size?) // (Should we fail the compression since it expands the size?)
old_sz = output->size(); old_sz = output->size();
new_sz_delta = (int)(output->size() * 0.2); new_sz_delta = static_cast<size_t>(output->size() * 0.2);
new_sz = output->size() + (new_sz_delta < 10 ? 10 : new_sz_delta); new_sz = output->size() + (new_sz_delta < 10 ? 10 : new_sz_delta);
output->resize(new_sz); output->resize(new_sz);
// Set more output. // Set more output.
_stream.next_out = (Bytef *)&(*output)[old_sz]; _stream.next_out = (Bytef *)&(*output)[old_sz];
_stream.avail_out = new_sz - old_sz; _stream.avail_out = static_cast<unsigned int>(new_sz - old_sz);
break; break;
case Z_BUF_ERROR: case Z_BUF_ERROR:
default: default:
@ -258,18 +258,18 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
} }
_stream.next_in = (Bytef *)input_data; _stream.next_in = (Bytef *)input_data;
_stream.avail_in = input_length; _stream.avail_in = static_cast<unsigned int>(input_length);
// Assume the decompressed data size will 5x of compressed size. // Assume the decompressed data size will 5x of compressed size.
int output_len = input_length * 5; size_t output_len = input_length * 5;
char* output = new char[output_len]; char* output = new char[output_len];
int old_sz = output_len; size_t old_sz = output_len;
_stream.next_out = (Bytef *)output; _stream.next_out = (Bytef *)output;
_stream.avail_out = output_len; _stream.avail_out = static_cast<unsigned int>(output_len);
char* tmp = nullptr; char* tmp = nullptr;
int output_len_delta; size_t output_len_delta;
bool done = false; bool done = false;
//while(_stream.next_in != nullptr && _stream.avail_in != 0) { //while(_stream.next_in != nullptr && _stream.avail_in != 0) {
@ -282,7 +282,7 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
case Z_OK: case Z_OK:
// No output space. Increase the output space by 20%. // No output space. Increase the output space by 20%.
old_sz = output_len; old_sz = output_len;
output_len_delta = (int)(output_len * 0.2); output_len_delta = static_cast<size_t>(output_len * 0.2);
output_len += output_len_delta < 10 ? 10 : output_len_delta; output_len += output_len_delta < 10 ? 10 : output_len_delta;
tmp = new char[output_len]; tmp = new char[output_len];
memcpy(tmp, output, old_sz); memcpy(tmp, output, old_sz);
@ -291,7 +291,7 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
// Set more output. // Set more output.
_stream.next_out = (Bytef *)(output + old_sz); _stream.next_out = (Bytef *)(output + old_sz);
_stream.avail_out = output_len - old_sz; _stream.avail_out = static_cast<unsigned int>(output_len - old_sz);
break; break;
case Z_BUF_ERROR: case Z_BUF_ERROR:
default: default:
@ -301,7 +301,7 @@ inline char* Zlib_Uncompress(const char* input_data, size_t input_length,
} }
} }
*decompress_size = output_len - _stream.avail_out; *decompress_size = static_cast<int>(output_len - _stream.avail_out);
inflateEnd(&_stream); inflateEnd(&_stream);
return output; return output;
#endif #endif
@ -329,14 +329,14 @@ inline bool BZip2_Compress(const CompressionOptions& opts, const char* input,
// Compress the input, and put compressed data in output. // Compress the input, and put compressed data in output.
_stream.next_in = (char *)input; _stream.next_in = (char *)input;
_stream.avail_in = length; _stream.avail_in = static_cast<unsigned int>(length);
// Initialize the output size. // Initialize the output size.
_stream.next_out = (char *)&(*output)[0]; _stream.next_out = (char *)&(*output)[0];
_stream.avail_out = length; _stream.avail_out = static_cast<unsigned int>(length);
int old_sz =0, new_sz =0; size_t old_sz = 0, new_sz = 0;
while(_stream.next_in != nullptr && _stream.avail_in != 0) { while (_stream.next_in != nullptr && _stream.avail_in != 0) {
st = BZ2_bzCompress(&_stream, BZ_FINISH); st = BZ2_bzCompress(&_stream, BZ_FINISH);
switch (st) { switch (st) {
case BZ_STREAM_END: case BZ_STREAM_END:
@ -345,11 +345,11 @@ inline bool BZip2_Compress(const CompressionOptions& opts, const char* input,
// No output space. Increase the output space by 20%. // No output space. Increase the output space by 20%.
// (Should we fail the compression since it expands the size?) // (Should we fail the compression since it expands the size?)
old_sz = output->size(); old_sz = output->size();
new_sz = (int)(output->size() * 1.2); new_sz = static_cast<size_t>(output->size() * 1.2);
output->resize(new_sz); output->resize(new_sz);
// Set more output. // Set more output.
_stream.next_out = (char *)&(*output)[old_sz]; _stream.next_out = (char *)&(*output)[old_sz];
_stream.avail_out = new_sz - old_sz; _stream.avail_out = static_cast<unsigned int>(new_sz - old_sz);
break; break;
case BZ_SEQUENCE_ERROR: case BZ_SEQUENCE_ERROR:
default: default:
@ -377,15 +377,15 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
} }
_stream.next_in = (char *)input_data; _stream.next_in = (char *)input_data;
_stream.avail_in = input_length; _stream.avail_in = static_cast<unsigned int>(input_length);
// Assume the decompressed data size will be 5x of compressed size. // Assume the decompressed data size will be 5x of compressed size.
int output_len = input_length * 5; size_t output_len = input_length * 5;
char* output = new char[output_len]; char* output = new char[output_len];
int old_sz = output_len; size_t old_sz = output_len;
_stream.next_out = (char *)output; _stream.next_out = (char *)output;
_stream.avail_out = output_len; _stream.avail_out = static_cast<unsigned int>(output_len);
char* tmp = nullptr; char* tmp = nullptr;
@ -397,7 +397,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
case BZ_OK: case BZ_OK:
// No output space. Increase the output space by 20%. // No output space. Increase the output space by 20%.
old_sz = output_len; old_sz = output_len;
output_len = (int)(output_len * 1.2); output_len = static_cast<size_t>(output_len * 1.2);
tmp = new char[output_len]; tmp = new char[output_len];
memcpy(tmp, output, old_sz); memcpy(tmp, output, old_sz);
delete[] output; delete[] output;
@ -405,7 +405,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
// Set more output. // Set more output.
_stream.next_out = (char *)(output + old_sz); _stream.next_out = (char *)(output + old_sz);
_stream.avail_out = output_len - old_sz; _stream.avail_out = static_cast<unsigned int>(output_len - old_sz);
break; break;
default: default:
delete[] output; delete[] output;
@ -414,7 +414,7 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
} }
} }
*decompress_size = output_len - _stream.avail_out; *decompress_size = static_cast<int>(output_len - _stream.avail_out);
BZ2_bzDecompressEnd(&_stream); BZ2_bzDecompressEnd(&_stream);
return output; return output;
#endif #endif
@ -424,16 +424,16 @@ inline char* BZip2_Uncompress(const char* input_data, size_t input_length,
inline bool LZ4_Compress(const CompressionOptions &opts, const char *input, inline bool LZ4_Compress(const CompressionOptions &opts, const char *input,
size_t length, ::std::string* output) { size_t length, ::std::string* output) {
#ifdef LZ4 #ifdef LZ4
int compressBound = LZ4_compressBound(length); int compressBound = LZ4_compressBound(static_cast<int>(length));
output->resize(8 + compressBound); output->resize(static_cast<size_t>(8 + compressBound));
char *p = const_cast<char *>(output->c_str()); char* p = const_cast<char*>(output->c_str());
memcpy(p, &length, sizeof(length)); memcpy(p, &length, sizeof(length));
size_t outlen; int outlen = LZ4_compress_limitedOutput(
outlen = LZ4_compress_limitedOutput(input, p + 8, length, compressBound); input, p + 8, static_cast<int>(length), compressBound);
if (outlen == 0) { if (outlen == 0) {
return false; return false;
} }
output->resize(8 + outlen); output->resize(static_cast<size_t>(8 + outlen));
return true; return true;
#endif #endif
return false; return false;
@ -449,7 +449,8 @@ inline char* LZ4_Uncompress(const char* input_data, size_t input_length,
memcpy(&output_len, input_data, sizeof(output_len)); memcpy(&output_len, input_data, sizeof(output_len));
char *output = new char[output_len]; char *output = new char[output_len];
*decompress_size = LZ4_decompress_safe_partial( *decompress_size = LZ4_decompress_safe_partial(
input_data + 8, output, input_length - 8, output_len, output_len); input_data + 8, output, static_cast<int>(input_length - 8), output_len,
output_len);
if (*decompress_size < 0) { if (*decompress_size < 0) {
delete[] output; delete[] output;
return nullptr; return nullptr;
@ -462,21 +463,22 @@ inline char* LZ4_Uncompress(const char* input_data, size_t input_length,
inline bool LZ4HC_Compress(const CompressionOptions &opts, const char* input, inline bool LZ4HC_Compress(const CompressionOptions &opts, const char* input,
size_t length, ::std::string* output) { size_t length, ::std::string* output) {
#ifdef LZ4 #ifdef LZ4
int compressBound = LZ4_compressBound(length); int compressBound = LZ4_compressBound(static_cast<int>(length));
output->resize(8 + compressBound); output->resize(static_cast<size_t>(8 + compressBound));
char *p = const_cast<char *>(output->c_str()); char* p = const_cast<char*>(output->c_str());
memcpy(p, &length, sizeof(length)); memcpy(p, &length, sizeof(length));
size_t outlen; int outlen;
#ifdef LZ4_VERSION_MAJOR // they only started defining this since r113 #ifdef LZ4_VERSION_MAJOR // they only started defining this since r113
outlen = LZ4_compressHC2_limitedOutput(input, p + 8, length, compressBound, outlen = LZ4_compressHC2_limitedOutput(input, p + 8, static_cast<int>(length),
opts.level); compressBound, opts.level);
#else #else
outlen = LZ4_compressHC_limitedOutput(input, p + 8, length, compressBound); outlen = LZ4_compressHC_limitedOutput(input, p + 8, static_cast<int>(length),
compressBound);
#endif #endif
if (outlen == 0) { if (outlen == 0) {
return false; return false;
} }
output->resize(8 + outlen); output->resize(static_cast<size_t>(8 + outlen));
return true; return true;
#endif #endif
return false; return false;

View File

@ -304,7 +304,8 @@ Block::Block(BlockContents&& contents)
if (size_ < sizeof(uint32_t)) { if (size_ < sizeof(uint32_t)) {
size_ = 0; // Error marker size_ = 0; // Error marker
} else { } else {
restart_offset_ = size_ - (1 + NumRestarts()) * sizeof(uint32_t); restart_offset_ =
static_cast<uint32_t>(size_) - (1 + NumRestarts()) * sizeof(uint32_t);
if (restart_offset_ > size_ - sizeof(uint32_t)) { if (restart_offset_ > size_ - sizeof(uint32_t)) {
// The size is too small for NumRestarts() and therefore // The size is too small for NumRestarts() and therefore
// restart_offset_ wrapped around. // restart_offset_ wrapped around.

View File

@ -159,7 +159,8 @@ class BlockIter : public Iterator {
// Return the offset in data_ just past the end of the current entry. // Return the offset in data_ just past the end of the current entry.
inline uint32_t NextEntryOffset() const { inline uint32_t NextEntryOffset() const {
return (value_.data() + value_.size()) - data_; // NOTE: We don't support files bigger than 2GB
return static_cast<uint32_t>((value_.data() + value_.size()) - data_);
} }
uint32_t GetRestartPoint(uint32_t index) { uint32_t GetRestartPoint(uint32_t index) {

View File

@ -99,7 +99,7 @@ Slice BlockBasedFilterBlockBuilder::Finish() {
} }
// Append array of per-filter offsets // Append array of per-filter offsets
const uint32_t array_offset = result_.size(); const uint32_t array_offset = static_cast<uint32_t>(result_.size());
for (size_t i = 0; i < filter_offsets_.size(); i++) { for (size_t i = 0; i < filter_offsets_.size(); i++) {
PutFixed32(&result_, filter_offsets_[i]); PutFixed32(&result_, filter_offsets_[i]);
} }
@ -113,7 +113,7 @@ void BlockBasedFilterBlockBuilder::GenerateFilter() {
const size_t num_entries = start_.size(); const size_t num_entries = start_.size();
if (num_entries == 0) { if (num_entries == 0) {
// Fast path if there are no keys for this filter // Fast path if there are no keys for this filter
filter_offsets_.push_back(result_.size()); filter_offsets_.push_back(static_cast<uint32_t>(result_.size()));
return; return;
} }
@ -127,8 +127,9 @@ void BlockBasedFilterBlockBuilder::GenerateFilter() {
} }
// Generate filter for current set of keys and append to result_. // Generate filter for current set of keys and append to result_.
filter_offsets_.push_back(result_.size()); filter_offsets_.push_back(static_cast<uint32_t>(result_.size()));
policy_->CreateFilter(&tmp_entries_[0], num_entries, &result_); policy_->CreateFilter(&tmp_entries_[0], static_cast<int>(num_entries),
&result_);
tmp_entries_.clear(); tmp_entries_.clear();
entries_.clear(); entries_.clear();

View File

@ -203,7 +203,7 @@ class HashIndexBuilder : public IndexBuilder {
// copy. // copy.
pending_entry_prefix_ = key_prefix.ToString(); pending_entry_prefix_ = key_prefix.ToString();
pending_block_num_ = 1; pending_block_num_ = 1;
pending_entry_index_ = current_restart_index_; pending_entry_index_ = static_cast<uint32_t>(current_restart_index_);
} else { } else {
// entry number increments when keys share the prefix reside in // entry number increments when keys share the prefix reside in
// differnt data blocks. // differnt data blocks.
@ -234,7 +234,8 @@ class HashIndexBuilder : public IndexBuilder {
void FlushPendingPrefix() { void FlushPendingPrefix() {
prefix_block_.append(pending_entry_prefix_.data(), prefix_block_.append(pending_entry_prefix_.data(),
pending_entry_prefix_.size()); pending_entry_prefix_.size());
PutVarint32(&prefix_meta_block_, pending_entry_prefix_.size()); PutVarint32(&prefix_meta_block_,
static_cast<uint32_t>(pending_entry_prefix_.size()));
PutVarint32(&prefix_meta_block_, pending_entry_index_); PutVarint32(&prefix_meta_block_, pending_entry_index_);
PutVarint32(&prefix_meta_block_, pending_block_num_); PutVarint32(&prefix_meta_block_, pending_block_num_);
} }
@ -596,7 +597,8 @@ void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents,
} }
case kxxHash: { case kxxHash: {
void* xxh = XXH32_init(0); void* xxh = XXH32_init(0);
XXH32_update(xxh, block_contents.data(), block_contents.size()); XXH32_update(xxh, block_contents.data(),
static_cast<uint32_t>(block_contents.size()));
XXH32_update(xxh, trailer, 1); // Extend to cover block type XXH32_update(xxh, trailer, 1); // Extend to cover block type
EncodeFixed32(trailer_without_type, XXH32_digest(xxh)); EncodeFixed32(trailer_without_type, XXH32_digest(xxh));
break; break;

View File

@ -85,7 +85,7 @@ Slice BlockBuilder::Finish() {
for (size_t i = 0; i < restarts_.size(); i++) { for (size_t i = 0; i < restarts_.size(); i++) {
PutFixed32(&buffer_, restarts_[i]); PutFixed32(&buffer_, restarts_[i]);
} }
PutFixed32(&buffer_, restarts_.size()); PutFixed32(&buffer_, static_cast<uint32_t>(restarts_.size()));
finished_ = true; finished_ = true;
return Slice(buffer_); return Slice(buffer_);
} }
@ -103,15 +103,15 @@ void BlockBuilder::Add(const Slice& key, const Slice& value) {
} }
} else { } else {
// Restart compression // Restart compression
restarts_.push_back(buffer_.size()); restarts_.push_back(static_cast<uint32_t>(buffer_.size()));
counter_ = 0; counter_ = 0;
} }
const size_t non_shared = key.size() - shared; const size_t non_shared = key.size() - shared;
// Add "<shared><non_shared><value_size>" to buffer_ // Add "<shared><non_shared><value_size>" to buffer_
PutVarint32(&buffer_, shared); PutVarint32(&buffer_, static_cast<uint32_t>(shared));
PutVarint32(&buffer_, non_shared); PutVarint32(&buffer_, static_cast<uint32_t>(non_shared));
PutVarint32(&buffer_, value.size()); PutVarint32(&buffer_, static_cast<uint32_t>(value.size()));
// Add string delta to buffer_ followed by value // Add string delta to buffer_ followed by value
buffer_.append(key.data() + shared, non_shared); buffer_.append(key.data() + shared, non_shared);

View File

@ -59,7 +59,7 @@ BlockHashIndex* CreateBlockHashIndexOnTheFly(
auto hash_index = new BlockHashIndex( auto hash_index = new BlockHashIndex(
hash_key_extractor, hash_key_extractor,
true /* hash_index will copy prefix when Add() is called */); true /* hash_index will copy prefix when Add() is called */);
uint64_t current_restart_index = 0; uint32_t current_restart_index = 0;
std::string pending_entry_prefix; std::string pending_entry_prefix;
// pending_block_num == 0 also implies there is no entry inserted at all. // pending_block_num == 0 also implies there is no entry inserted at all.

View File

@ -82,8 +82,8 @@ TEST(BlockTest, BasicTest) {
auto prefix_extractor = NewFixedPrefixTransform(prefix_size); auto prefix_extractor = NewFixedPrefixTransform(prefix_size);
std::unique_ptr<BlockHashIndex> block_hash_index(CreateBlockHashIndexOnTheFly( std::unique_ptr<BlockHashIndex> block_hash_index(CreateBlockHashIndexOnTheFly(
&index_iter, &data_iter, index_entries.size(), BytewiseComparator(), &index_iter, &data_iter, static_cast<uint32_t>(index_entries.size()),
prefix_extractor)); BytewiseComparator(), prefix_extractor));
std::map<std::string, BlockHashIndex::RestartIndex> expected = { std::map<std::string, BlockHashIndex::RestartIndex> expected = {
{"01xx", BlockHashIndex::RestartIndex(0, 1)}, {"01xx", BlockHashIndex::RestartIndex(0, 1)},

View File

@ -87,7 +87,7 @@ class BlockPrefixIndex::Builder {
BlockPrefixIndex* Finish() { BlockPrefixIndex* Finish() {
// For now, use roughly 1:1 prefix to bucket ratio. // For now, use roughly 1:1 prefix to bucket ratio.
uint32_t num_buckets = prefixes_.size() + 1; uint32_t num_buckets = static_cast<uint32_t>(prefixes_.size()) + 1;
// Collect prefix records that hash to the same bucket, into a single // Collect prefix records that hash to the same bucket, into a single
// linklist. // linklist.

View File

@ -163,7 +163,7 @@ void CheckBlockContents(BlockContents contents, const int max_key,
auto iter1 = reader1.NewIterator(nullptr); auto iter1 = reader1.NewIterator(nullptr);
auto iter2 = reader1.NewIterator(nullptr); auto iter2 = reader1.NewIterator(nullptr);
reader1.SetBlockHashIndex(CreateBlockHashIndexOnTheFly( reader1.SetBlockHashIndex(CreateBlockHashIndexOnTheFly(
iter1, iter2, keys.size(), BytewiseComparator(), iter1, iter2, static_cast<uint32_t>(keys.size()), BytewiseComparator(),
prefix_extractor.get())); prefix_extractor.get()));
delete iter1; delete iter1;

View File

@ -182,7 +182,7 @@ Slice CuckooTableBuilder::GetValue(uint64_t idx) const {
Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) { Status CuckooTableBuilder::MakeHashTable(std::vector<CuckooBucket>* buckets) {
buckets->resize(hash_table_size_ + cuckoo_block_size_ - 1); buckets->resize(hash_table_size_ + cuckoo_block_size_ - 1);
uint64_t make_space_for_key_call_id = 0; uint32_t make_space_for_key_call_id = 0;
for (uint32_t vector_idx = 0; vector_idx < num_entries_; vector_idx++) { for (uint32_t vector_idx = 0; vector_idx < num_entries_; vector_idx++) {
uint64_t bucket_id; uint64_t bucket_id;
bool bucket_found = false; bool bucket_found = false;
@ -254,7 +254,7 @@ Status CuckooTableBuilder::Finish() {
} }
// Determine unused_user_key to fill empty buckets. // Determine unused_user_key to fill empty buckets.
std::string unused_user_key = smallest_user_key_; std::string unused_user_key = smallest_user_key_;
int curr_pos = unused_user_key.size() - 1; int curr_pos = static_cast<int>(unused_user_key.size()) - 1;
while (curr_pos >= 0) { while (curr_pos >= 0) {
--unused_user_key[curr_pos]; --unused_user_key[curr_pos];
if (Slice(unused_user_key).compare(smallest_user_key_) < 0) { if (Slice(unused_user_key).compare(smallest_user_key_) < 0) {
@ -265,7 +265,7 @@ Status CuckooTableBuilder::Finish() {
if (curr_pos < 0) { if (curr_pos < 0) {
// Try using the largest key to identify an unused key. // Try using the largest key to identify an unused key.
unused_user_key = largest_user_key_; unused_user_key = largest_user_key_;
curr_pos = unused_user_key.size() - 1; curr_pos = static_cast<int>(unused_user_key.size()) - 1;
while (curr_pos >= 0) { while (curr_pos >= 0) {
++unused_user_key[curr_pos]; ++unused_user_key[curr_pos];
if (Slice(unused_user_key).compare(largest_user_key_) > 0) { if (Slice(unused_user_key).compare(largest_user_key_) > 0) {
@ -429,9 +429,8 @@ uint64_t CuckooTableBuilder::FileSize() const {
// If tree depth exceedes max depth, we return false indicating failure. // If tree depth exceedes max depth, we return false indicating failure.
bool CuckooTableBuilder::MakeSpaceForKey( bool CuckooTableBuilder::MakeSpaceForKey(
const autovector<uint64_t>& hash_vals, const autovector<uint64_t>& hash_vals,
const uint64_t make_space_for_key_call_id, const uint32_t make_space_for_key_call_id,
std::vector<CuckooBucket>* buckets, std::vector<CuckooBucket>* buckets, uint64_t* bucket_id) {
uint64_t* bucket_id) {
struct CuckooNode { struct CuckooNode {
uint64_t bucket_id; uint64_t bucket_id;
uint32_t depth; uint32_t depth;
@ -495,7 +494,7 @@ bool CuckooTableBuilder::MakeSpaceForKey(
// child with the parent. Stop when first level is reached in the tree // child with the parent. Stop when first level is reached in the tree
// (happens when 0 <= bucket_to_replace_pos < num_hash_func_) and return // (happens when 0 <= bucket_to_replace_pos < num_hash_func_) and return
// this location in first level for target key to be inserted. // this location in first level for target key to be inserted.
uint32_t bucket_to_replace_pos = tree.size()-1; uint32_t bucket_to_replace_pos = static_cast<uint32_t>(tree.size()) - 1;
while (bucket_to_replace_pos >= num_hash_func_) { while (bucket_to_replace_pos >= num_hash_func_) {
CuckooNode& curr_node = tree[bucket_to_replace_pos]; CuckooNode& curr_node = tree[bucket_to_replace_pos];
(*buckets)[curr_node.bucket_id] = (*buckets)[curr_node.bucket_id] =

View File

@ -68,11 +68,9 @@ class CuckooTableBuilder: public TableBuilder {
}; };
static const uint32_t kMaxVectorIdx = std::numeric_limits<int32_t>::max(); static const uint32_t kMaxVectorIdx = std::numeric_limits<int32_t>::max();
bool MakeSpaceForKey( bool MakeSpaceForKey(const autovector<uint64_t>& hash_vals,
const autovector<uint64_t>& hash_vals, const uint32_t call_id,
const uint64_t call_id, std::vector<CuckooBucket>* buckets, uint64_t* bucket_id);
std::vector<CuckooBucket>* buckets,
uint64_t* bucket_id);
Status MakeHashTable(std::vector<CuckooBucket>* buckets); Status MakeHashTable(std::vector<CuckooBucket>* buckets);
inline bool IsDeletedKey(uint64_t idx) const; inline bool IsDeletedKey(uint64_t idx) const;

View File

@ -87,13 +87,14 @@ class CuckooBuilderTest {
// Check contents of the bucket. // Check contents of the bucket.
std::vector<bool> keys_found(keys.size(), false); std::vector<bool> keys_found(keys.size(), false);
uint32_t bucket_size = expected_unused_bucket.size(); size_t bucket_size = expected_unused_bucket.size();
for (uint32_t i = 0; i < table_size + cuckoo_block_size - 1; ++i) { for (uint32_t i = 0; i < table_size + cuckoo_block_size - 1; ++i) {
Slice read_slice; Slice read_slice;
ASSERT_OK(read_file->Read(i*bucket_size, bucket_size, ASSERT_OK(read_file->Read(i*bucket_size, bucket_size,
&read_slice, nullptr)); &read_slice, nullptr));
uint32_t key_idx = std::find(expected_locations.begin(), size_t key_idx =
expected_locations.end(), i) - expected_locations.begin(); std::find(expected_locations.begin(), expected_locations.end(), i) -
expected_locations.begin();
if (key_idx == keys.size()) { if (key_idx == keys.size()) {
// i is not one of the expected locaitons. Empty bucket. // i is not one of the expected locaitons. Empty bucket.
ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0); ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
@ -156,7 +157,7 @@ TEST(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
for (auto& user_key : user_keys) { for (auto& user_key : user_keys) {
keys.push_back(GetInternalKey(user_key, false)); keys.push_back(GetInternalKey(user_key, false));
} }
uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio); uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file; unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/NoCollisionFullKey"; fname = test::TmpDir() + "/NoCollisionFullKey";
@ -169,7 +170,7 @@ TEST(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
ASSERT_EQ(builder.NumEntries(), i + 1); ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status()); ASSERT_OK(builder.status());
} }
uint32_t bucket_size = keys[0].size() + values[0].size(); size_t bucket_size = keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish()); ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close()); ASSERT_OK(writable_file->Close());
@ -196,7 +197,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
for (auto& user_key : user_keys) { for (auto& user_key : user_keys) {
keys.push_back(GetInternalKey(user_key, false)); keys.push_back(GetInternalKey(user_key, false));
} }
uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio); uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file; unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/WithCollisionFullKey"; fname = test::TmpDir() + "/WithCollisionFullKey";
@ -209,7 +210,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
ASSERT_EQ(builder.NumEntries(), i + 1); ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status()); ASSERT_OK(builder.status());
} }
uint32_t bucket_size = keys[0].size() + values[0].size(); size_t bucket_size = keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish()); ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close()); ASSERT_OK(writable_file->Close());
@ -236,7 +237,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
for (auto& user_key : user_keys) { for (auto& user_key : user_keys) {
keys.push_back(GetInternalKey(user_key, false)); keys.push_back(GetInternalKey(user_key, false));
} }
uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio); uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file; unique_ptr<WritableFile> writable_file;
uint32_t cuckoo_block_size = 2; uint32_t cuckoo_block_size = 2;
@ -251,7 +252,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
ASSERT_EQ(builder.NumEntries(), i + 1); ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status()); ASSERT_OK(builder.status());
} }
uint32_t bucket_size = keys[0].size() + values[0].size(); size_t bucket_size = keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish()); ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close()); ASSERT_OK(writable_file->Close());
@ -283,7 +284,7 @@ TEST(CuckooBuilderTest, WithCollisionPathFullKey) {
for (auto& user_key : user_keys) { for (auto& user_key : user_keys) {
keys.push_back(GetInternalKey(user_key, false)); keys.push_back(GetInternalKey(user_key, false));
} }
uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio); uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file; unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/WithCollisionPathFullKey"; fname = test::TmpDir() + "/WithCollisionPathFullKey";
@ -296,7 +297,7 @@ TEST(CuckooBuilderTest, WithCollisionPathFullKey) {
ASSERT_EQ(builder.NumEntries(), i + 1); ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status()); ASSERT_OK(builder.status());
} }
uint32_t bucket_size = keys[0].size() + values[0].size(); size_t bucket_size = keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish()); ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close()); ASSERT_OK(writable_file->Close());
@ -325,7 +326,7 @@ TEST(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
for (auto& user_key : user_keys) { for (auto& user_key : user_keys) {
keys.push_back(GetInternalKey(user_key, false)); keys.push_back(GetInternalKey(user_key, false));
} }
uint32_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio); uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file; unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/WithCollisionPathFullKeyAndCuckooBlock"; fname = test::TmpDir() + "/WithCollisionPathFullKeyAndCuckooBlock";
@ -338,7 +339,7 @@ TEST(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
ASSERT_EQ(builder.NumEntries(), i + 1); ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status()); ASSERT_OK(builder.status());
} }
uint32_t bucket_size = keys[0].size() + values[0].size(); size_t bucket_size = keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish()); ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close()); ASSERT_OK(writable_file->Close());
@ -361,7 +362,7 @@ TEST(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
{user_keys[3], {3, 4, 5, 6}} {user_keys[3], {3, 4, 5, 6}}
}; };
std::vector<uint64_t> expected_locations = {0, 1, 2, 3}; std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
uint32_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio); uint64_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file; unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/NoCollisionUserKey"; fname = test::TmpDir() + "/NoCollisionUserKey";
@ -374,7 +375,7 @@ TEST(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
ASSERT_EQ(builder.NumEntries(), i + 1); ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status()); ASSERT_OK(builder.status());
} }
uint32_t bucket_size = user_keys[0].size() + values[0].size(); size_t bucket_size = user_keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish()); ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close()); ASSERT_OK(writable_file->Close());
@ -397,7 +398,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
{user_keys[3], {0, 1, 2, 3}}, {user_keys[3], {0, 1, 2, 3}},
}; };
std::vector<uint64_t> expected_locations = {0, 1, 2, 3}; std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
uint32_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio); uint64_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file; unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/WithCollisionUserKey"; fname = test::TmpDir() + "/WithCollisionUserKey";
@ -410,7 +411,7 @@ TEST(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
ASSERT_EQ(builder.NumEntries(), i + 1); ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status()); ASSERT_OK(builder.status());
} }
uint32_t bucket_size = user_keys[0].size() + values[0].size(); size_t bucket_size = user_keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish()); ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close()); ASSERT_OK(writable_file->Close());
@ -435,7 +436,7 @@ TEST(CuckooBuilderTest, WithCollisionPathUserKey) {
{user_keys[4], {0, 2}}, {user_keys[4], {0, 2}},
}; };
std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2}; std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
uint32_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio); uint64_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);
unique_ptr<WritableFile> writable_file; unique_ptr<WritableFile> writable_file;
fname = test::TmpDir() + "/WithCollisionPathUserKey"; fname = test::TmpDir() + "/WithCollisionPathUserKey";
@ -448,7 +449,7 @@ TEST(CuckooBuilderTest, WithCollisionPathUserKey) {
ASSERT_EQ(builder.NumEntries(), i + 1); ASSERT_EQ(builder.NumEntries(), i + 1);
ASSERT_OK(builder.status()); ASSERT_OK(builder.status());
} }
uint32_t bucket_size = user_keys[0].size() + values[0].size(); size_t bucket_size = user_keys[0].size() + values[0].size();
ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
ASSERT_OK(builder.Finish()); ASSERT_OK(builder.Finish());
ASSERT_OK(writable_file->Close()); ASSERT_OK(writable_file->Close());

View File

@ -28,7 +28,7 @@ static inline uint64_t CuckooHash(
if (hash_cnt == 0 && identity_as_first_hash) { if (hash_cnt == 0 && identity_as_first_hash) {
value = (*reinterpret_cast<const int64_t*>(user_key.data())); value = (*reinterpret_cast<const int64_t*>(user_key.data()));
} else { } else {
value = MurmurHash(user_key.data(), user_key.size(), value = MurmurHash(user_key.data(), static_cast<int>(user_key.size()),
kCuckooMurmurSeedMultiplier * hash_cnt); kCuckooMurmurSeedMultiplier * hash_cnt);
} }
if (use_module_hash) { if (use_module_hash) {

View File

@ -64,7 +64,7 @@ CuckooTableReader::CuckooTableReader(
} }
unused_key_ = unused_key->second; unused_key_ = unused_key->second;
key_length_ = props->fixed_key_len; key_length_ = static_cast<uint32_t>(props->fixed_key_len);
auto user_key_len = user_props.find(CuckooTablePropertyNames::kUserKeyLength); auto user_key_len = user_props.find(CuckooTablePropertyNames::kUserKeyLength);
if (user_key_len == user_props.end()) { if (user_key_len == user_props.end()) {
status_ = Status::Corruption("User key length not found"); status_ = Status::Corruption("User key length not found");
@ -274,7 +274,7 @@ void CuckooTableIterator::SeekToFirst() {
void CuckooTableIterator::SeekToLast() { void CuckooTableIterator::SeekToLast() {
InitIfNeeded(); InitIfNeeded();
curr_key_idx_ = sorted_bucket_ids_.size() - 1; curr_key_idx_ = static_cast<uint32_t>(sorted_bucket_ids_.size()) - 1;
PrepareKVAtCurrIdx(); PrepareKVAtCurrIdx();
} }
@ -288,7 +288,8 @@ void CuckooTableIterator::Seek(const Slice& target) {
sorted_bucket_ids_.end(), sorted_bucket_ids_.end(),
kInvalidIndex, kInvalidIndex,
seek_comparator); seek_comparator);
curr_key_idx_ = std::distance(sorted_bucket_ids_.begin(), seek_it); curr_key_idx_ =
static_cast<uint32_t>(std::distance(sorted_bucket_ids_.begin(), seek_it));
PrepareKVAtCurrIdx(); PrepareKVAtCurrIdx();
} }
@ -327,7 +328,7 @@ void CuckooTableIterator::Next() {
void CuckooTableIterator::Prev() { void CuckooTableIterator::Prev() {
if (curr_key_idx_ == 0) { if (curr_key_idx_ == 0) {
curr_key_idx_ = sorted_bucket_ids_.size(); curr_key_idx_ = static_cast<uint32_t>(sorted_bucket_ids_.size());
} }
if (!Valid()) { if (!Valid()) {
curr_value_.clear(); curr_value_.clear();

View File

@ -161,7 +161,7 @@ class CuckooReaderTest {
ASSERT_EQ(static_cast<uint32_t>(cnt), num_items); ASSERT_EQ(static_cast<uint32_t>(cnt), num_items);
it->SeekToLast(); it->SeekToLast();
cnt = num_items - 1; cnt = static_cast<int>(num_items) - 1;
ASSERT_TRUE(it->Valid()); ASSERT_TRUE(it->Valid());
while (it->Valid()) { while (it->Valid()) {
ASSERT_OK(it->status()); ASSERT_OK(it->status());
@ -172,7 +172,7 @@ class CuckooReaderTest {
} }
ASSERT_EQ(cnt, -1); ASSERT_EQ(cnt, -1);
cnt = num_items / 2; cnt = static_cast<int>(num_items) / 2;
it->Seek(keys[cnt]); it->Seek(keys[cnt]);
while (it->Valid()) { while (it->Valid()) {
ASSERT_OK(it->status()); ASSERT_OK(it->status());

View File

@ -240,7 +240,7 @@ Status ReadBlock(RandomAccessFile* file, const Footer& footer,
actual = crc32c::Value(data, n + 1); actual = crc32c::Value(data, n + 1);
break; break;
case kxxHash: case kxxHash:
actual = XXH32(data, n + 1, 0); actual = XXH32(data, static_cast<int>(n) + 1, 0);
break; break;
default: default:
s = Status::Corruption("unknown checksum type"); s = Status::Corruption("unknown checksum type");

View File

@ -25,7 +25,7 @@ class TestFilterBitsBuilder : public FilterBitsBuilder {
// Generate the filter using the keys that are added // Generate the filter using the keys that are added
virtual Slice Finish(std::unique_ptr<const char[]>* buf) override { virtual Slice Finish(std::unique_ptr<const char[]>* buf) override {
uint32_t len = hash_entries_.size() * 4; uint32_t len = static_cast<uint32_t>(hash_entries_.size()) * 4;
char* data = new char[len]; char* data = new char[len];
for (size_t i = 0; i < hash_entries_.size(); i++) { for (size_t i = 0; i < hash_entries_.size(); i++) {
EncodeFixed32(data + i * 4, hash_entries_[i]); EncodeFixed32(data + i * 4, hash_entries_[i]);
@ -42,7 +42,7 @@ class TestFilterBitsBuilder : public FilterBitsBuilder {
class TestFilterBitsReader : public FilterBitsReader { class TestFilterBitsReader : public FilterBitsReader {
public: public:
explicit TestFilterBitsReader(const Slice& contents) explicit TestFilterBitsReader(const Slice& contents)
: data_(contents.data()), len_(contents.size()) {} : data_(contents.data()), len_(static_cast<uint32_t>(contents.size())) {}
virtual bool MayMatch(const Slice& entry) override { virtual bool MayMatch(const Slice& entry) override {
uint32_t h = Hash(entry.data(), entry.size(), 1); uint32_t h = Hash(entry.data(), entry.size(), 1);

View File

@ -49,9 +49,9 @@ class MergerTest {
MergerTest() MergerTest()
: rnd_(3), merging_iterator_(nullptr), single_iterator_(nullptr) {} : rnd_(3), merging_iterator_(nullptr), single_iterator_(nullptr) {}
~MergerTest() = default; ~MergerTest() = default;
std::vector<std::string> GenerateStrings(int len, int string_len) { std::vector<std::string> GenerateStrings(size_t len, int string_len) {
std::vector<std::string> ret; std::vector<std::string> ret;
for (int i = 0; i < len; ++i) { for (size_t i = 0; i < len; ++i) {
ret.push_back(test::RandomHumanReadableString(&rnd_, string_len)); ret.push_back(test::RandomHumanReadableString(&rnd_, string_len));
} }
return ret; return ret;
@ -119,7 +119,7 @@ class MergerTest {
} }
void Generate(size_t num_iterators, size_t strings_per_iterator, void Generate(size_t num_iterators, size_t strings_per_iterator,
size_t letters_per_string) { int letters_per_string) {
std::vector<Iterator*> small_iterators; std::vector<Iterator*> small_iterators;
for (size_t i = 0; i < num_iterators; ++i) { for (size_t i = 0; i < num_iterators; ++i) {
auto strings = GenerateStrings(strings_per_iterator, letters_per_string); auto strings = GenerateStrings(strings_per_iterator, letters_per_string);
@ -127,8 +127,9 @@ class MergerTest {
all_keys_.insert(all_keys_.end(), strings.begin(), strings.end()); all_keys_.insert(all_keys_.end(), strings.begin(), strings.end());
} }
merging_iterator_.reset(NewMergingIterator( merging_iterator_.reset(
BytewiseComparator(), &small_iterators[0], small_iterators.size())); NewMergingIterator(BytewiseComparator(), &small_iterators[0],
static_cast<int>(small_iterators.size())));
single_iterator_.reset(new VectorIterator(all_keys_)); single_iterator_.reset(new VectorIterator(all_keys_));
} }

View File

@ -6,8 +6,10 @@
#ifndef ROCKSDB_LITE #ifndef ROCKSDB_LITE
#include "table/plain_table_builder.h" #include "table/plain_table_builder.h"
#include <string>
#include <assert.h> #include <assert.h>
#include <string>
#include <limits>
#include <map> #include <map>
#include "rocksdb/comparator.h" #include "rocksdb/comparator.h"
@ -133,7 +135,8 @@ void PlainTableBuilder::Add(const Slice& key, const Slice& value) {
} }
// Write value // Write value
auto prev_offset = offset_; assert(offset_ <= std::numeric_limits<uint32_t>::max());
auto prev_offset = static_cast<uint32_t>(offset_);
// Write out the key // Write out the key
encoder_.AppendKey(key, file_, &offset_, meta_bytes_buf, encoder_.AppendKey(key, file_, &offset_, meta_bytes_buf,
&meta_bytes_buf_size); &meta_bytes_buf_size);
@ -142,7 +145,7 @@ void PlainTableBuilder::Add(const Slice& key, const Slice& value) {
} }
// Write value length // Write value length
int value_size = value.size(); uint32_t value_size = static_cast<uint32_t>(value.size());
char* end_ptr = char* end_ptr =
EncodeVarint32(meta_bytes_buf + meta_bytes_buf_size, value_size); EncodeVarint32(meta_bytes_buf + meta_bytes_buf_size, value_size);
assert(end_ptr <= meta_bytes_buf + sizeof(meta_bytes_buf)); assert(end_ptr <= meta_bytes_buf + sizeof(meta_bytes_buf));
@ -180,10 +183,11 @@ Status PlainTableBuilder::Finish() {
MetaIndexBuilder meta_index_builer; MetaIndexBuilder meta_index_builer;
if (store_index_in_file_ && (properties_.num_entries > 0)) { if (store_index_in_file_ && (properties_.num_entries > 0)) {
assert(properties_.num_entries <= std::numeric_limits<uint32_t>::max());
bloom_block_.SetTotalBits( bloom_block_.SetTotalBits(
&arena_, properties_.num_entries * bloom_bits_per_key_, &arena_,
ioptions_.bloom_locality, huge_page_tlb_size_, static_cast<uint32_t>(properties_.num_entries) * bloom_bits_per_key_,
ioptions_.info_log); ioptions_.bloom_locality, huge_page_tlb_size_, ioptions_.info_log);
PutVarint32(&properties_.user_collected_properties PutVarint32(&properties_.user_collected_properties
[PlainTablePropertyNames::kNumBloomBlocks], [PlainTablePropertyNames::kNumBloomBlocks],

View File

@ -81,7 +81,7 @@ class PlainTableBuilder: public TableBuilder {
WritableFile* file_; WritableFile* file_;
uint64_t offset_ = 0; uint64_t offset_ = 0;
uint32_t bloom_bits_per_key_; uint32_t bloom_bits_per_key_;
uint32_t huge_page_tlb_size_; size_t huge_page_tlb_size_;
Status status_; Status status_;
TableProperties properties_; TableProperties properties_;
PlainTableKeyEncoder encoder_; PlainTableKeyEncoder encoder_;

View File

@ -3,6 +3,12 @@
// LICENSE file in the root directory of this source tree. An additional grant // LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory. // of patent rights can be found in the PATENTS file in the same directory.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include "table/plain_table_index.h" #include "table/plain_table_index.h"
#include "util/coding.h" #include "util/coding.h"
#include "util/hash.h" #include "util/hash.h"
@ -24,7 +30,8 @@ Status PlainTableIndex::InitFromRawData(Slice data) {
if (!GetVarint32(&data, &num_prefixes_)) { if (!GetVarint32(&data, &num_prefixes_)) {
return Status::Corruption("Couldn't read the index size!"); return Status::Corruption("Couldn't read the index size!");
} }
sub_index_size_ = data.size() - index_size_ * kOffsetLen; sub_index_size_ =
static_cast<uint32_t>(data.size()) - index_size_ * kOffsetLen;
char* index_data_begin = const_cast<char*>(data.data()); char* index_data_begin = const_cast<char*>(data.data());
index_ = reinterpret_cast<uint32_t*>(index_data_begin); index_ = reinterpret_cast<uint32_t*>(index_data_begin);
@ -55,13 +62,15 @@ void PlainTableIndexBuilder::IndexRecordList::AddRecord(murmur_t hash,
num_records_in_current_group_ = 0; num_records_in_current_group_ = 0;
} }
auto& new_record = current_group_[num_records_in_current_group_++]; auto& new_record = current_group_[num_records_in_current_group_++];
new_record.hash = hash; // TODO(sdong) -- check if this is OK -- murmur_t is uint64_t, while we only
// use 32 bits here
new_record.hash = static_cast<uint32_t>(hash);
new_record.offset = offset; new_record.offset = offset;
new_record.next = nullptr; new_record.next = nullptr;
} }
void PlainTableIndexBuilder::AddKeyPrefix(Slice key_prefix_slice, void PlainTableIndexBuilder::AddKeyPrefix(Slice key_prefix_slice,
uint64_t key_offset) { uint32_t key_offset) {
if (is_first_record_ || prev_key_prefix_ != key_prefix_slice.ToString()) { if (is_first_record_ || prev_key_prefix_ != key_prefix_slice.ToString()) {
++num_prefixes_; ++num_prefixes_;
if (!is_first_record_) { if (!is_first_record_) {
@ -149,7 +158,7 @@ Slice PlainTableIndexBuilder::FillIndexes(
const std::vector<IndexRecord*>& hash_to_offsets, const std::vector<IndexRecord*>& hash_to_offsets,
const std::vector<uint32_t>& entries_per_bucket) { const std::vector<uint32_t>& entries_per_bucket) {
Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log, Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log,
"Reserving %zu bytes for plain table's sub_index", "Reserving %" PRIu32 " bytes for plain table's sub_index",
sub_index_size_); sub_index_size_);
auto total_allocate_size = GetTotalSize(); auto total_allocate_size = GetTotalSize();
char* allocated = arena_->AllocateAligned( char* allocated = arena_->AllocateAligned(
@ -160,7 +169,7 @@ Slice PlainTableIndexBuilder::FillIndexes(
reinterpret_cast<uint32_t*>(EncodeVarint32(temp_ptr, num_prefixes_)); reinterpret_cast<uint32_t*>(EncodeVarint32(temp_ptr, num_prefixes_));
char* sub_index = reinterpret_cast<char*>(index + index_size_); char* sub_index = reinterpret_cast<char*>(index + index_size_);
size_t sub_index_offset = 0; uint32_t sub_index_offset = 0;
for (uint32_t i = 0; i < index_size_; i++) { for (uint32_t i = 0; i < index_size_; i++) {
uint32_t num_keys_for_bucket = entries_per_bucket[i]; uint32_t num_keys_for_bucket = entries_per_bucket[i];
switch (num_keys_for_bucket) { switch (num_keys_for_bucket) {

View File

@ -92,7 +92,7 @@ class PlainTableIndex {
private: private:
uint32_t index_size_; uint32_t index_size_;
size_t sub_index_size_; uint32_t sub_index_size_;
uint32_t num_prefixes_; uint32_t num_prefixes_;
uint32_t* index_; uint32_t* index_;
@ -109,8 +109,8 @@ class PlainTableIndex {
class PlainTableIndexBuilder { class PlainTableIndexBuilder {
public: public:
PlainTableIndexBuilder(Arena* arena, const ImmutableCFOptions& ioptions, PlainTableIndexBuilder(Arena* arena, const ImmutableCFOptions& ioptions,
uint32_t index_sparseness, double hash_table_ratio, size_t index_sparseness, double hash_table_ratio,
double huge_page_tlb_size) size_t huge_page_tlb_size)
: arena_(arena), : arena_(arena),
ioptions_(ioptions), ioptions_(ioptions),
record_list_(kRecordsPerGroup), record_list_(kRecordsPerGroup),
@ -124,7 +124,7 @@ class PlainTableIndexBuilder {
hash_table_ratio_(hash_table_ratio), hash_table_ratio_(hash_table_ratio),
huge_page_tlb_size_(huge_page_tlb_size) {} huge_page_tlb_size_(huge_page_tlb_size) {}
void AddKeyPrefix(Slice key_prefix_slice, uint64_t key_offset); void AddKeyPrefix(Slice key_prefix_slice, uint32_t key_offset);
Slice Finish(); Slice Finish();
@ -205,13 +205,13 @@ class PlainTableIndexBuilder {
uint32_t num_keys_per_prefix_; uint32_t num_keys_per_prefix_;
uint32_t prev_key_prefix_hash_; uint32_t prev_key_prefix_hash_;
uint32_t index_sparseness_; size_t index_sparseness_;
uint32_t index_size_; uint32_t index_size_;
size_t sub_index_size_; uint32_t sub_index_size_;
const SliceTransform* prefix_extractor_; const SliceTransform* prefix_extractor_;
double hash_table_ratio_; double hash_table_ratio_;
double huge_page_tlb_size_; size_t huge_page_tlb_size_;
std::string prev_key_prefix_; std::string prev_key_prefix_;

View File

@ -43,7 +43,7 @@ size_t EncodeSize(EntryType type, uint32_t key_size, char* out_buffer) {
// Return position after the size byte(s). nullptr means error // Return position after the size byte(s). nullptr means error
const char* DecodeSize(const char* offset, const char* limit, const char* DecodeSize(const char* offset, const char* limit,
EntryType* entry_type, size_t* key_size) { EntryType* entry_type, uint32_t* key_size) {
assert(offset < limit); assert(offset < limit);
*entry_type = static_cast<EntryType>( *entry_type = static_cast<EntryType>(
(static_cast<unsigned char>(offset[0]) & ~kSizeInlineLimit) >> 6); (static_cast<unsigned char>(offset[0]) & ~kSizeInlineLimit) >> 6);
@ -73,10 +73,10 @@ Status PlainTableKeyEncoder::AppendKey(const Slice& key, WritableFile* file,
Slice key_to_write = key; // Portion of internal key to write out. Slice key_to_write = key; // Portion of internal key to write out.
size_t user_key_size = fixed_user_key_len_; uint32_t user_key_size = fixed_user_key_len_;
if (encoding_type_ == kPlain) { if (encoding_type_ == kPlain) {
if (fixed_user_key_len_ == kPlainTableVariableLength) { if (fixed_user_key_len_ == kPlainTableVariableLength) {
user_key_size = key.size() - 8; user_key_size = static_cast<uint32_t>(key.size() - 8);
// Write key length // Write key length
char key_size_buf[5]; // tmp buffer for key size as varint32 char key_size_buf[5]; // tmp buffer for key size as varint32
char* ptr = EncodeVarint32(key_size_buf, user_key_size); char* ptr = EncodeVarint32(key_size_buf, user_key_size);
@ -93,7 +93,7 @@ Status PlainTableKeyEncoder::AppendKey(const Slice& key, WritableFile* file,
char size_bytes[12]; char size_bytes[12];
size_t size_bytes_pos = 0; size_t size_bytes_pos = 0;
user_key_size = key.size() - 8; user_key_size = static_cast<uint32_t>(key.size() - 8);
Slice prefix = Slice prefix =
prefix_extractor_->Transform(Slice(key.data(), user_key_size)); prefix_extractor_->Transform(Slice(key.data(), user_key_size));
@ -112,10 +112,11 @@ Status PlainTableKeyEncoder::AppendKey(const Slice& key, WritableFile* file,
if (key_count_for_prefix_ == 2) { if (key_count_for_prefix_ == 2) {
// For second key within a prefix, need to encode prefix length // For second key within a prefix, need to encode prefix length
size_bytes_pos += size_bytes_pos +=
EncodeSize(kPrefixFromPreviousKey, pre_prefix_.GetKey().size(), EncodeSize(kPrefixFromPreviousKey,
static_cast<uint32_t>(pre_prefix_.GetKey().size()),
size_bytes + size_bytes_pos); size_bytes + size_bytes_pos);
} }
size_t prefix_len = pre_prefix_.GetKey().size(); uint32_t prefix_len = static_cast<uint32_t>(pre_prefix_.GetKey().size());
size_bytes_pos += EncodeSize(kKeySuffix, user_key_size - prefix_len, size_bytes_pos += EncodeSize(kKeySuffix, user_key_size - prefix_len,
size_bytes + size_bytes_pos); size_bytes + size_bytes_pos);
Status s = file->Append(Slice(size_bytes, size_bytes_pos)); Status s = file->Append(Slice(size_bytes, size_bytes_pos));
@ -184,7 +185,7 @@ Status PlainTableKeyDecoder::NextPlainEncodingKey(
const char* start, const char* limit, ParsedInternalKey* parsed_key, const char* start, const char* limit, ParsedInternalKey* parsed_key,
Slice* internal_key, size_t* bytes_read, bool* seekable) { Slice* internal_key, size_t* bytes_read, bool* seekable) {
const char* key_ptr = start; const char* key_ptr = start;
size_t user_key_size = 0; uint32_t user_key_size = 0;
if (fixed_user_key_len_ != kPlainTableVariableLength) { if (fixed_user_key_len_ != kPlainTableVariableLength) {
user_key_size = fixed_user_key_len_; user_key_size = fixed_user_key_len_;
key_ptr = start; key_ptr = start;
@ -195,7 +196,7 @@ Status PlainTableKeyDecoder::NextPlainEncodingKey(
return Status::Corruption( return Status::Corruption(
"Unexpected EOF when reading the next key's size"); "Unexpected EOF when reading the next key's size");
} }
user_key_size = static_cast<size_t>(tmp_size); user_key_size = tmp_size;
*bytes_read = key_ptr - start; *bytes_read = key_ptr - start;
} }
// dummy initial value to avoid compiler complain // dummy initial value to avoid compiler complain
@ -227,7 +228,7 @@ Status PlainTableKeyDecoder::NextPrefixEncodingKey(
bool expect_suffix = false; bool expect_suffix = false;
do { do {
size_t size = 0; uint32_t size = 0;
// dummy initial value to avoid compiler complain // dummy initial value to avoid compiler complain
bool decoded_internal_key_valid = true; bool decoded_internal_key_valid = true;
const char* pos = DecodeSize(key_ptr, limit, &entry_type, &size); const char* pos = DecodeSize(key_ptr, limit, &entry_type, &size);

View File

@ -98,8 +98,8 @@ PlainTableReader::PlainTableReader(const ImmutableCFOptions& ioptions,
: internal_comparator_(icomparator), : internal_comparator_(icomparator),
encoding_type_(encoding_type), encoding_type_(encoding_type),
full_scan_mode_(false), full_scan_mode_(false),
data_end_offset_(table_properties->data_size), data_end_offset_(static_cast<uint32_t>(table_properties->data_size)),
user_key_len_(table_properties->fixed_key_len), user_key_len_(static_cast<uint32_t>(table_properties->fixed_key_len)),
prefix_extractor_(ioptions.prefix_extractor), prefix_extractor_(ioptions.prefix_extractor),
enable_bloom_(false), enable_bloom_(false),
bloom_(6, nullptr), bloom_(6, nullptr),
@ -327,7 +327,8 @@ Status PlainTableReader::PopulateIndex(TableProperties* props,
// Allocate bloom filter here for total order mode. // Allocate bloom filter here for total order mode.
if (IsTotalOrderMode()) { if (IsTotalOrderMode()) {
uint32_t num_bloom_bits = uint32_t num_bloom_bits =
table_properties_->num_entries * bloom_bits_per_key; static_cast<uint32_t>(table_properties_->num_entries) *
bloom_bits_per_key;
if (num_bloom_bits > 0) { if (num_bloom_bits > 0) {
enable_bloom_ = true; enable_bloom_ = true;
bloom_.SetTotalBits(&arena_, num_bloom_bits, ioptions_.bloom_locality, bloom_.SetTotalBits(&arena_, num_bloom_bits, ioptions_.bloom_locality,
@ -350,7 +351,7 @@ Status PlainTableReader::PopulateIndex(TableProperties* props,
bloom_.SetRawData( bloom_.SetRawData(
const_cast<unsigned char*>( const_cast<unsigned char*>(
reinterpret_cast<const unsigned char*>(bloom_block->data())), reinterpret_cast<const unsigned char*>(bloom_block->data())),
bloom_block->size() * 8, num_blocks); static_cast<uint32_t>(bloom_block->size()) * 8, num_blocks);
} }
PlainTableIndexBuilder index_builder(&arena_, ioptions_, index_sparseness, PlainTableIndexBuilder index_builder(&arena_, ioptions_, index_sparseness,
@ -509,7 +510,7 @@ Status PlainTableReader::Next(PlainTableKeyDecoder* decoder, uint32_t* offset,
return Status::Corruption( return Status::Corruption(
"Unexpected EOF when reading the next value's size."); "Unexpected EOF when reading the next value's size.");
} }
*offset = *offset + (value_ptr - start) + value_size; *offset = *offset + static_cast<uint32_t>(value_ptr - start) + value_size;
if (*offset > data_end_offset_) { if (*offset > data_end_offset_) {
return Status::Corruption("Unexpected EOF when reading the next value. "); return Status::Corruption("Unexpected EOF when reading the next value. ");
} }

View File

@ -123,7 +123,7 @@ class PlainTableReader: public TableReader {
// sst file that stores data. // sst file that stores data.
const uint32_t data_start_offset_ = 0; const uint32_t data_start_offset_ = 0;
const uint32_t data_end_offset_; const uint32_t data_end_offset_;
const size_t user_key_len_; const uint32_t user_key_len_;
const SliceTransform* prefix_extractor_; const SliceTransform* prefix_extractor_;
static const size_t kNumInternalBytes = 8; static const size_t kNumInternalBytes = 8;
@ -135,7 +135,7 @@ class PlainTableReader: public TableReader {
const ImmutableCFOptions& ioptions_; const ImmutableCFOptions& ioptions_;
unique_ptr<RandomAccessFile> file_; unique_ptr<RandomAccessFile> file_;
uint32_t file_size_; uint64_t file_size_;
std::shared_ptr<const TableProperties> table_properties_; std::shared_ptr<const TableProperties> table_properties_;
bool IsFixedLength() const { bool IsFixedLength() const {

View File

@ -947,7 +947,7 @@ class Harness {
if (keys.empty()) { if (keys.empty()) {
return "foo"; return "foo";
} else { } else {
const int index = rnd->Uniform(keys.size()); const int index = rnd->Uniform(static_cast<int>(keys.size()));
std::string result = keys[index]; std::string result = keys[index];
switch (rnd->Uniform(support_prev_ ? 3 : 1)) { switch (rnd->Uniform(support_prev_ ? 3 : 1)) {
case 0: case 0:

View File

@ -31,7 +31,7 @@ BlobStore* bs;
namespace { namespace {
std::string RandomString(Random* rnd, uint64_t len) { std::string RandomString(Random* rnd, uint64_t len) {
std::string r; std::string r;
test::RandomString(rnd, len, &r); test::RandomString(rnd, static_cast<int>(len), &r);
return r; return r;
} }
} // namespace } // namespace

View File

@ -113,7 +113,8 @@ DEFINE_bool(verbose, false, "Verbose");
DEFINE_bool(progress_reports, true, DEFINE_bool(progress_reports, true,
"If true, db_stress will report number of finished operations"); "If true, db_stress will report number of finished operations");
DEFINE_int32(write_buffer_size, rocksdb::Options().write_buffer_size, DEFINE_int32(write_buffer_size,
static_cast<int32_t>(rocksdb::Options().write_buffer_size),
"Number of bytes to buffer in memtable before compacting"); "Number of bytes to buffer in memtable before compacting");
DEFINE_int32(max_write_buffer_number, DEFINE_int32(max_write_buffer_number,
@ -154,7 +155,8 @@ DEFINE_int32(level0_stop_writes_trigger,
rocksdb::Options().level0_stop_writes_trigger, rocksdb::Options().level0_stop_writes_trigger,
"Number of files in level-0 that will trigger put stop."); "Number of files in level-0 that will trigger put stop.");
DEFINE_int32(block_size, rocksdb::BlockBasedTableOptions().block_size, DEFINE_int32(block_size,
static_cast<int32_t>(rocksdb::BlockBasedTableOptions().block_size),
"Number of bytes in a block."); "Number of bytes in a block.");
DEFINE_int32(max_background_compactions, DEFINE_int32(max_background_compactions,
@ -573,9 +575,9 @@ class SharedState {
explicit SharedState(StressTest* stress_test) explicit SharedState(StressTest* stress_test)
: cv_(&mu_), : cv_(&mu_),
seed_(FLAGS_seed), seed_(static_cast<uint32_t>(FLAGS_seed)),
max_key_(FLAGS_max_key), max_key_(FLAGS_max_key),
log2_keys_per_lock_(FLAGS_log2_keys_per_lock), log2_keys_per_lock_(static_cast<uint32_t>(FLAGS_log2_keys_per_lock)),
num_threads_(FLAGS_threads), num_threads_(FLAGS_threads),
num_initialized_(0), num_initialized_(0),
num_populated_(0), num_populated_(0),
@ -1451,7 +1453,7 @@ class StressTest {
assert(count <= assert(count <=
(static_cast<int64_t>(1) << ((8 - FLAGS_prefix_size) * 8))); (static_cast<int64_t>(1) << ((8 - FLAGS_prefix_size) * 8)));
if (iter->status().ok()) { if (iter->status().ok()) {
thread->stats.AddPrefixes(1, count); thread->stats.AddPrefixes(1, static_cast<int>(count));
} else { } else {
thread->stats.AddErrors(1); thread->stats.AddErrors(1);
} }
@ -1489,7 +1491,8 @@ class StressTest {
} else { } else {
MultiPut(thread, write_opts, column_family, key, v, sz); MultiPut(thread, write_opts, column_family, key, v, sz);
} }
PrintKeyValue(rand_column_family, rand_key, value, sz); PrintKeyValue(rand_column_family, static_cast<uint32_t>(rand_key),
value, sz);
} else if (writeBound <= prob_op && prob_op < delBound) { } else if (writeBound <= prob_op && prob_op < delBound) {
// OPERATION delete // OPERATION delete
if (!FLAGS_test_batches_snapshots) { if (!FLAGS_test_batches_snapshots) {
@ -1553,16 +1556,19 @@ class StressTest {
from_db = iter->value().ToString(); from_db = iter->value().ToString();
iter->Next(); iter->Next();
} else if (iter->key().compare(k) < 0) { } else if (iter->key().compare(k) < 0) {
VerificationAbort(shared, "An out of range key was found", cf, i); VerificationAbort(shared, "An out of range key was found",
static_cast<int>(cf), i);
} }
} else { } else {
// The iterator found no value for the key in question, so do not // The iterator found no value for the key in question, so do not
// move to the next item in the iterator // move to the next item in the iterator
s = Status::NotFound(Slice()); s = Status::NotFound(Slice());
} }
VerifyValue(cf, i, options, shared, from_db, s, true); VerifyValue(static_cast<int>(cf), i, options, shared, from_db, s,
true);
if (from_db.length()) { if (from_db.length()) {
PrintKeyValue(cf, i, from_db.data(), from_db.length()); PrintKeyValue(static_cast<int>(cf), static_cast<uint32_t>(i),
from_db.data(), from_db.length());
} }
} }
} else { } else {
@ -1575,9 +1581,11 @@ class StressTest {
std::string keystr = Key(i); std::string keystr = Key(i);
Slice k = keystr; Slice k = keystr;
Status s = db_->Get(options, column_families_[cf], k, &from_db); Status s = db_->Get(options, column_families_[cf], k, &from_db);
VerifyValue(cf, i, options, shared, from_db, s, true); VerifyValue(static_cast<int>(cf), i, options, shared, from_db, s,
true);
if (from_db.length()) { if (from_db.length()) {
PrintKeyValue(cf, i, from_db.data(), from_db.length()); PrintKeyValue(static_cast<int>(cf), static_cast<uint32_t>(i),
from_db.data(), from_db.length());
} }
} }
} }

View File

@ -18,8 +18,7 @@ Status AutoRollLogger::ResetLogger() {
return status_; return status_;
} }
if (logger_->GetLogFileSize() == if (logger_->GetLogFileSize() == Logger::kDoNotSupportGetLogFileSize) {
(size_t)Logger::DO_NOT_SUPPORT_GET_LOG_FILE_SIZE) {
status_ = Status::NotSupported( status_ = Status::NotSupported(
"The underlying logger doesn't support GetLogFileSize()"); "The underlying logger doesn't support GetLogFileSize()");
} }

View File

@ -122,7 +122,7 @@ uint64_t AutoRollLoggerTest::RollLogFileByTimeTest(
} }
// -- Make the log file expire // -- Make the log file expire
sleep(time); sleep(static_cast<unsigned int>(time));
LogMessage(logger, log_message.c_str()); LogMessage(logger, log_message.c_str());
// At this time, the new log file should be created. // At this time, the new log file should be created.

View File

@ -206,7 +206,8 @@ static double RunBenchmarkGetNSPerIteration(const BenchmarkFun& fun,
size_t actualEpochs = 0; size_t actualEpochs = 0;
for (; actualEpochs < epochs; ++actualEpochs) { for (; actualEpochs < epochs; ++actualEpochs) {
for (unsigned int n = FLAGS_bm_min_iters; n < (1UL << 30); n *= 2) { for (unsigned int n = static_cast<unsigned int>(FLAGS_bm_min_iters);
n < (1UL << 30); n *= 2) {
auto const nsecs = fun(n); auto const nsecs = fun(n);
if (nsecs < minNanoseconds) { if (nsecs < minNanoseconds) {
continue; continue;

View File

@ -10,35 +10,35 @@
namespace rocksdb { namespace rocksdb {
BENCHMARK(insertFrontVector) { BENCHMARK(insertFrontVector) {
std::vector<int> v; std::vector<size_t> v;
for (int i = 0; i < 100; i++) { for (size_t i = 0; i < 100; i++) {
v.insert(v.begin(), i); v.insert(v.begin(), i);
} }
} }
BENCHMARK_RELATIVE(insertBackVector) { BENCHMARK_RELATIVE(insertBackVector) {
std::vector<int> v; std::vector<size_t> v;
for (size_t i = 0; i < 100; i++) { for (size_t i = 0; i < 100; i++) {
v.insert(v.end(), i); v.insert(v.end(), i);
} }
} }
BENCHMARK_N(insertFrontVector_n, n) { BENCHMARK_N(insertFrontVector_n, n) {
std::vector<int> v; std::vector<size_t> v;
for (size_t i = 0; i < n; i++) { for (size_t i = 0; i < n; i++) {
v.insert(v.begin(), i); v.insert(v.begin(), i);
} }
} }
BENCHMARK_RELATIVE_N(insertBackVector_n, n) { BENCHMARK_RELATIVE_N(insertBackVector_n, n) {
std::vector<int> v; std::vector<size_t> v;
for (size_t i = 0; i < n; i++) { for (size_t i = 0; i < n; i++) {
v.insert(v.end(), i); v.insert(v.end(), i);
} }
} }
BENCHMARK_N(insertFrontEnd_n, n) { BENCHMARK_N(insertFrontEnd_n, n) {
std::vector<int> v; std::vector<size_t> v;
for (size_t i = 0; i < n; i++) { for (size_t i = 0; i < n; i++) {
v.insert(v.begin(), i); v.insert(v.begin(), i);
} }
@ -48,7 +48,7 @@ BENCHMARK_N(insertFrontEnd_n, n) {
} }
BENCHMARK_RELATIVE_N(insertFrontEndSuspend_n, n) { BENCHMARK_RELATIVE_N(insertFrontEndSuspend_n, n) {
std::vector<int> v; std::vector<size_t> v;
for (size_t i = 0; i < n; i++) { for (size_t i = 0; i < n; i++) {
v.insert(v.begin(), i); v.insert(v.begin(), i);
} }

View File

@ -132,7 +132,9 @@ BlobStore::~BlobStore() {
Status BlobStore::Put(const Slice& value, Blob* blob) { Status BlobStore::Put(const Slice& value, Blob* blob) {
// convert size to number of blocks // convert size to number of blocks
Status s = Allocate((value.size() + block_size_ - 1) / block_size_, blob); Status s = Allocate(
static_cast<uint32_t>((value.size() + block_size_ - 1) / block_size_),
blob);
if (!s.ok()) { if (!s.ok()) {
return s; return s;
} }

View File

@ -55,7 +55,8 @@ class FullFilterBitsBuilder : public FilterBitsBuilder {
// +----------------------------------------------------------------+ // +----------------------------------------------------------------+
virtual Slice Finish(std::unique_ptr<const char[]>* buf) override { virtual Slice Finish(std::unique_ptr<const char[]>* buf) override {
uint32_t total_bits, num_lines; uint32_t total_bits, num_lines;
char* data = ReserveSpace(hash_entries_.size(), &total_bits, &num_lines); char* data = ReserveSpace(static_cast<int>(hash_entries_.size()),
&total_bits, &num_lines);
assert(data); assert(data);
if (total_bits != 0 && num_lines != 0) { if (total_bits != 0 && num_lines != 0) {
@ -111,7 +112,7 @@ char* FullFilterBitsBuilder::ReserveSpace(const int num_entry,
assert(bits_per_key_); assert(bits_per_key_);
char* data = nullptr; char* data = nullptr;
if (num_entry != 0) { if (num_entry != 0) {
uint32_t total_bits_tmp = num_entry * bits_per_key_; uint32_t total_bits_tmp = num_entry * static_cast<uint32_t>(bits_per_key_);
*total_bits = GetTotalBitsForLocality(total_bits_tmp); *total_bits = GetTotalBitsForLocality(total_bits_tmp);
*num_lines = *total_bits / (CACHE_LINE_SIZE * 8); *num_lines = *total_bits / (CACHE_LINE_SIZE * 8);
@ -152,8 +153,9 @@ class FullFilterBitsReader : public FilterBitsReader {
public: public:
explicit FullFilterBitsReader(const Slice& contents) explicit FullFilterBitsReader(const Slice& contents)
: data_(const_cast<char*>(contents.data())), : data_(const_cast<char*>(contents.data())),
data_len_(contents.size()), data_len_(static_cast<uint32_t>(contents.size())),
num_probes_(0), num_lines_(0) { num_probes_(0),
num_lines_(0) {
assert(data_); assert(data_);
GetFilterMeta(contents, &num_probes_, &num_lines_); GetFilterMeta(contents, &num_probes_, &num_lines_);
// Sanitize broken parameter // Sanitize broken parameter
@ -210,7 +212,7 @@ class FullFilterBitsReader : public FilterBitsReader {
void FullFilterBitsReader::GetFilterMeta(const Slice& filter, void FullFilterBitsReader::GetFilterMeta(const Slice& filter,
size_t* num_probes, uint32_t* num_lines) { size_t* num_probes, uint32_t* num_lines) {
uint32_t len = filter.size(); uint32_t len = static_cast<uint32_t>(filter.size());
if (len <= 5) { if (len <= 5) {
// filter is empty or broken // filter is empty or broken
*num_probes = 0; *num_probes = 0;
@ -225,7 +227,7 @@ void FullFilterBitsReader::GetFilterMeta(const Slice& filter,
bool FullFilterBitsReader::HashMayMatch(const uint32_t& hash, bool FullFilterBitsReader::HashMayMatch(const uint32_t& hash,
const Slice& filter, const size_t& num_probes, const Slice& filter, const size_t& num_probes,
const uint32_t& num_lines) { const uint32_t& num_lines) {
uint32_t len = filter.size(); uint32_t len = static_cast<uint32_t>(filter.size());
if (len <= 5) return false; // remain the same with original filter if (len <= 5) return false; // remain the same with original filter
// It is ensured the params are valid before calling it // It is ensured the params are valid before calling it

View File

@ -79,7 +79,8 @@ class BloomTest {
key_slices.push_back(Slice(keys_[i])); key_slices.push_back(Slice(keys_[i]));
} }
filter_.clear(); filter_.clear();
policy_->CreateFilter(&key_slices[0], key_slices.size(), &filter_); policy_->CreateFilter(&key_slices[0], static_cast<int>(key_slices.size()),
&filter_);
keys_.clear(); keys_.clear();
if (kVerbose >= 2) DumpFilter(); if (kVerbose >= 2) DumpFilter();
} }

View File

@ -145,7 +145,7 @@ class LRUCache {
// Separate from constructor so caller can easily make an array of LRUCache // Separate from constructor so caller can easily make an array of LRUCache
void SetCapacity(size_t capacity) { capacity_ = capacity; } void SetCapacity(size_t capacity) { capacity_ = capacity; }
void SetRemoveScanCountLimit(size_t remove_scan_count_limit) { void SetRemoveScanCountLimit(uint32_t remove_scan_count_limit) {
remove_scan_count_limit_ = remove_scan_count_limit; remove_scan_count_limit_ = remove_scan_count_limit;
} }

View File

@ -28,7 +28,9 @@ static int DecodeKey(const Slice& k) {
return DecodeFixed32(k.data()); return DecodeFixed32(k.data());
} }
static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); } static void* EncodeValue(uintptr_t v) { return reinterpret_cast<void*>(v); }
static int DecodeValue(void* v) { return reinterpret_cast<uintptr_t>(v); } static int DecodeValue(void* v) {
return static_cast<int>(reinterpret_cast<uintptr_t>(v));
}
class CacheTest { class CacheTest {
public: public:

View File

@ -157,7 +157,7 @@ inline void PutFixed64(std::string* dst, uint64_t value) {
inline void PutVarint32(std::string* dst, uint32_t v) { inline void PutVarint32(std::string* dst, uint32_t v) {
char buf[5]; char buf[5];
char* ptr = EncodeVarint32(buf, v); char* ptr = EncodeVarint32(buf, v);
dst->append(buf, ptr - buf); dst->append(buf, static_cast<size_t>(ptr - buf));
} }
inline char* EncodeVarint64(char* dst, uint64_t v) { inline char* EncodeVarint64(char* dst, uint64_t v) {
@ -174,11 +174,11 @@ inline char* EncodeVarint64(char* dst, uint64_t v) {
inline void PutVarint64(std::string* dst, uint64_t v) { inline void PutVarint64(std::string* dst, uint64_t v) {
char buf[10]; char buf[10];
char* ptr = EncodeVarint64(buf, v); char* ptr = EncodeVarint64(buf, v);
dst->append(buf, ptr - buf); dst->append(buf, static_cast<size_t>(ptr - buf));
} }
inline void PutLengthPrefixedSlice(std::string* dst, const Slice& value) { inline void PutLengthPrefixedSlice(std::string* dst, const Slice& value) {
PutVarint32(dst, value.size()); PutVarint32(dst, static_cast<uint32_t>(value.size()));
dst->append(value.data(), value.size()); dst->append(value.data(), value.size());
} }
@ -219,7 +219,7 @@ inline bool GetVarint32(Slice* input, uint32_t* value) {
if (q == nullptr) { if (q == nullptr) {
return false; return false;
} else { } else {
*input = Slice(q, limit - q); *input = Slice(q, static_cast<size_t>(limit - q));
return true; return true;
} }
} }
@ -231,7 +231,7 @@ inline bool GetVarint64(Slice* input, uint64_t* value) {
if (q == nullptr) { if (q == nullptr) {
return false; return false;
} else { } else {
*input = Slice(q, limit - q); *input = Slice(q, static_cast<size_t>(limit - q));
return true; return true;
} }
} }

View File

@ -298,14 +298,14 @@ static inline uint64_t LE_LOAD64(const uint8_t *p) {
#endif #endif
static inline void Slow_CRC32(uint64_t* l, uint8_t const **p) { static inline void Slow_CRC32(uint64_t* l, uint8_t const **p) {
uint32_t c = *l ^ LE_LOAD32(*p); uint32_t c = static_cast<uint32_t>(*l ^ LE_LOAD32(*p));
*p += 4; *p += 4;
*l = table3_[c & 0xff] ^ *l = table3_[c & 0xff] ^
table2_[(c >> 8) & 0xff] ^ table2_[(c >> 8) & 0xff] ^
table1_[(c >> 16) & 0xff] ^ table1_[(c >> 16) & 0xff] ^
table0_[c >> 24]; table0_[c >> 24];
// DO it twice. // DO it twice.
c = *l ^ LE_LOAD32(*p); c = static_cast<uint32_t>(*l ^ LE_LOAD32(*p));
*p += 4; *p += 4;
*l = table3_[c & 0xff] ^ *l = table3_[c & 0xff] ^
table2_[(c >> 8) & 0xff] ^ table2_[(c >> 8) & 0xff] ^
@ -362,7 +362,7 @@ uint32_t ExtendImpl(uint32_t crc, const char* buf, size_t size) {
} }
#undef STEP1 #undef STEP1
#undef ALIGN #undef ALIGN
return l ^ 0xffffffffu; return static_cast<uint32_t>(l ^ 0xffffffffu);
} }
// Detect if SS42 or not. // Detect if SS42 or not.

View File

@ -153,15 +153,15 @@ TEST(DynamicBloomTest, perf) {
return; return;
} }
for (uint64_t m = 1; m <= 8; ++m) { for (uint32_t m = 1; m <= 8; ++m) {
Arena arena; Arena arena;
const uint64_t num_keys = m * 8 * 1024 * 1024; const uint32_t num_keys = m * 8 * 1024 * 1024;
fprintf(stderr, "testing %" PRIu64 "M keys\n", m * 8); fprintf(stderr, "testing %" PRIu32 "M keys\n", m * 8);
DynamicBloom std_bloom(&arena, num_keys * 10, 0, num_probes); DynamicBloom std_bloom(&arena, num_keys * 10, 0, num_probes);
timer.Start(); timer.Start();
for (uint64_t i = 1; i <= num_keys; ++i) { for (uint32_t i = 1; i <= num_keys; ++i) {
std_bloom.Add(Slice(reinterpret_cast<const char*>(&i), 8)); std_bloom.Add(Slice(reinterpret_cast<const char*>(&i), 8));
} }
@ -169,9 +169,9 @@ TEST(DynamicBloomTest, perf) {
fprintf(stderr, "standard bloom, avg add latency %" PRIu64 "\n", fprintf(stderr, "standard bloom, avg add latency %" PRIu64 "\n",
elapsed / num_keys); elapsed / num_keys);
uint64_t count = 0; uint32_t count = 0;
timer.Start(); timer.Start();
for (uint64_t i = 1; i <= num_keys; ++i) { for (uint32_t i = 1; i <= num_keys; ++i) {
if (std_bloom.MayContain(Slice(reinterpret_cast<const char*>(&i), 8))) { if (std_bloom.MayContain(Slice(reinterpret_cast<const char*>(&i), 8))) {
++count; ++count;
} }
@ -185,7 +185,7 @@ TEST(DynamicBloomTest, perf) {
DynamicBloom blocked_bloom(&arena, num_keys * 10, 1, num_probes); DynamicBloom blocked_bloom(&arena, num_keys * 10, 1, num_probes);
timer.Start(); timer.Start();
for (uint64_t i = 1; i <= num_keys; ++i) { for (uint32_t i = 1; i <= num_keys; ++i) {
blocked_bloom.Add(Slice(reinterpret_cast<const char*>(&i), 8)); blocked_bloom.Add(Slice(reinterpret_cast<const char*>(&i), 8));
} }
@ -196,7 +196,7 @@ TEST(DynamicBloomTest, perf) {
count = 0; count = 0;
timer.Start(); timer.Start();
for (uint64_t i = 1; i <= num_keys; ++i) { for (uint32_t i = 1; i <= num_keys; ++i) {
if (blocked_bloom.MayContain( if (blocked_bloom.MayContain(
Slice(reinterpret_cast<const char*>(&i), 8))) { Slice(reinterpret_cast<const char*>(&i), 8))) {
++count; ++count;

View File

@ -1594,7 +1594,8 @@ class PosixEnv : public Env {
void (*function)(void*) = queue_.front().function; void (*function)(void*) = queue_.front().function;
void* arg = queue_.front().arg; void* arg = queue_.front().arg;
queue_.pop_front(); queue_.pop_front();
queue_len_.store(queue_.size(), std::memory_order_relaxed); queue_len_.store(static_cast<unsigned int>(queue_.size()),
std::memory_order_relaxed);
bool decrease_io_priority = (low_io_priority != low_io_priority_); bool decrease_io_priority = (low_io_priority != low_io_priority_);
PthreadCall("unlock", pthread_mutex_unlock(&mu_)); PthreadCall("unlock", pthread_mutex_unlock(&mu_));
@ -1709,7 +1710,8 @@ class PosixEnv : public Env {
queue_.push_back(BGItem()); queue_.push_back(BGItem());
queue_.back().function = function; queue_.back().function = function;
queue_.back().arg = arg; queue_.back().arg = arg;
queue_len_.store(queue_.size(), std::memory_order_relaxed); queue_len_.store(static_cast<unsigned int>(queue_.size()),
std::memory_order_relaxed);
if (!HasExcessiveThread()) { if (!HasExcessiveThread()) {
// Wake up at least one waiting thread. // Wake up at least one waiting thread.

View File

@ -18,7 +18,7 @@ uint32_t Hash(const char* data, size_t n, uint32_t seed) {
const uint32_t m = 0xc6a4a793; const uint32_t m = 0xc6a4a793;
const uint32_t r = 24; const uint32_t r = 24;
const char* limit = data + n; const char* limit = data + n;
uint32_t h = seed ^ (n * m); uint32_t h = static_cast<uint32_t>(seed ^ (n * m));
// Pick up four bytes at a time // Pick up four bytes at a time
while (data + 4 <= limit) { while (data + 4 <= limit) {

View File

@ -213,9 +213,10 @@ class HashCuckooRep : public MemTableRep {
static const int kMurmurHashSeeds[HashCuckooRepFactory::kMaxHashCount] = { static const int kMurmurHashSeeds[HashCuckooRepFactory::kMaxHashCount] = {
545609244, 1769731426, 763324157, 13099088, 592422103, 545609244, 1769731426, 763324157, 13099088, 592422103,
1899789565, 248369300, 1984183468, 1613664382, 1491157517}; 1899789565, 248369300, 1984183468, 1613664382, 1491157517};
return MurmurHash(slice.data(), slice.size(), return static_cast<unsigned int>(
MurmurHash(slice.data(), static_cast<int>(slice.size()),
kMurmurHashSeeds[hash_func_id]) % kMurmurHashSeeds[hash_func_id]) %
bucket_count_; bucket_count_);
} }
// A cuckoo path is a sequence of bucket ids, where each id points to a // A cuckoo path is a sequence of bucket ids, where each id points to a

View File

@ -200,7 +200,8 @@ class HashLinkListRep : public MemTableRep {
} }
size_t GetHash(const Slice& slice) const { size_t GetHash(const Slice& slice) const {
return MurmurHash(slice.data(), slice.size(), 0) % bucket_size_; return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0) %
bucket_size_;
} }
Pointer* GetBucket(size_t i) const { Pointer* GetBucket(size_t i) const {

View File

@ -65,7 +65,8 @@ class HashSkipListRep : public MemTableRep {
Arena* const arena_; Arena* const arena_;
inline size_t GetHash(const Slice& slice) const { inline size_t GetHash(const Slice& slice) const {
return MurmurHash(slice.data(), slice.size(), 0) % bucket_size_; return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0) %
bucket_size_;
} }
inline Bucket* GetBucket(size_t i) const { inline Bucket* GetBucket(size_t i) const {
return buckets_[i].load(std::memory_order_acquire); return buckets_[i].load(std::memory_order_acquire);

View File

@ -971,8 +971,9 @@ void DBDumperCommand::DoCommand() {
uint64_t s1=0,s2=0; uint64_t s1=0,s2=0;
// At this point, bucket_size=0 => time_range=0 // At this point, bucket_size=0 => time_range=0
uint64_t num_buckets = (bucket_size >= time_range) ? 1 : int num_buckets = (bucket_size >= time_range)
((time_range + bucket_size - 1) / bucket_size); ? 1
: ((time_range + bucket_size - 1) / bucket_size);
vector<uint64_t> bucket_counts(num_buckets, 0); vector<uint64_t> bucket_counts(num_buckets, 0);
if (is_db_ttl_ && !count_only_ && timestamp_ && !count_delim_) { if (is_db_ttl_ && !count_only_ && timestamp_ && !count_delim_) {
fprintf(stdout, "Dumping key-values from %s to %s\n", fprintf(stdout, "Dumping key-values from %s to %s\n",

View File

@ -19,9 +19,14 @@ namespace rocksdb {
class MemFile { class MemFile {
public: public:
explicit MemFile(const std::string& fn) : explicit MemFile(const std::string& fn)
fn_(fn), refs_(0), size_(0), modified_time_(Now()), : fn_(fn),
rnd_((uint32_t)MurmurHash(fn.data(), fn.size(), 0)), fsynced_bytes_(0) {} refs_(0),
size_(0),
modified_time_(Now()),
rnd_(static_cast<uint32_t>(
MurmurHash(fn.data(), static_cast<int>(fn.size()), 0))),
fsynced_bytes_(0) {}
void Ref() { void Ref() {
MutexLock lock(&mutex_); MutexLock lock(&mutex_);
@ -61,7 +66,8 @@ class MemFile {
return; return;
} }
uint64_t buffered_bytes = size_ - fsynced_bytes_; uint64_t buffered_bytes = size_ - fsynced_bytes_;
uint64_t start = fsynced_bytes_ + rnd_.Uniform(buffered_bytes); uint64_t start =
fsynced_bytes_ + rnd_.Uniform(static_cast<int>(buffered_bytes));
uint64_t end = std::min(start + 512, size_.load()); uint64_t end = std::min(start + 512, size_.load());
MutexLock lock(&mutex_); MutexLock lock(&mutex_);
for (uint64_t pos = start; pos < end; ++pos) { for (uint64_t pos = start; pos < end; ++pos) {

View File

@ -36,7 +36,7 @@ typedef unsigned int murmur_t;
namespace rocksdb { namespace rocksdb {
struct murmur_hash { struct murmur_hash {
size_t operator()(const Slice& slice) const { size_t operator()(const Slice& slice) const {
return MurmurHash(slice.data(), slice.size(), 0); return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0);
} }
}; };
} // rocksdb } // rocksdb

View File

@ -110,7 +110,7 @@ void MutableCFOptions::Dump(Logger* log) const {
expanded_compaction_factor); expanded_compaction_factor);
Log(log, " source_compaction_factor: %d", Log(log, " source_compaction_factor: %d",
source_compaction_factor); source_compaction_factor);
Log(log, " target_file_size_base: %d", Log(log, " target_file_size_base: %" PRIu64,
target_file_size_base); target_file_size_base);
Log(log, " target_file_size_multiplier: %d", Log(log, " target_file_size_multiplier: %d",
target_file_size_multiplier); target_file_size_multiplier);

View File

@ -108,7 +108,7 @@ struct MutableCFOptions {
int max_grandparent_overlap_factor; int max_grandparent_overlap_factor;
int expanded_compaction_factor; int expanded_compaction_factor;
int source_compaction_factor; int source_compaction_factor;
int target_file_size_base; uint64_t target_file_size_base;
int target_file_size_multiplier; int target_file_size_multiplier;
uint64_t max_bytes_for_level_base; uint64_t max_bytes_for_level_base;
int max_bytes_for_level_multiplier; int max_bytes_for_level_multiplier;

View File

@ -95,7 +95,7 @@ void PickWriteBufferSize(size_t total_write_buffer_limit, Options* options) {
options->write_buffer_size = write_buffer_size; options->write_buffer_size = write_buffer_size;
options->max_write_buffer_number = options->max_write_buffer_number =
total_write_buffer_limit / write_buffer_size; static_cast<int>(total_write_buffer_limit / write_buffer_size);
options->min_write_buffer_number_to_merge = 1; options->min_write_buffer_number_to_merge = 1;
} }
@ -147,10 +147,10 @@ void OptimizeForLevel(int read_amplification_threshold,
// This doesn't consider compaction and overheads of mem tables. But usually // This doesn't consider compaction and overheads of mem tables. But usually
// it is in the same order of magnitude. // it is in the same order of magnitude.
int expected_level0_compaction_size = size_t expected_level0_compaction_size =
options->level0_file_num_compaction_trigger * options->write_buffer_size; options->level0_file_num_compaction_trigger * options->write_buffer_size;
// Enlarge level1 target file size if level0 compaction size is larger. // Enlarge level1 target file size if level0 compaction size is larger.
int max_bytes_for_level_base = 10 * kBytesForOneMb; uint64_t max_bytes_for_level_base = 10 * kBytesForOneMb;
if (expected_level0_compaction_size > max_bytes_for_level_base) { if (expected_level0_compaction_size > max_bytes_for_level_base) {
max_bytes_for_level_base = expected_level0_compaction_size; max_bytes_for_level_base = expected_level0_compaction_size;
} }
@ -160,7 +160,7 @@ void OptimizeForLevel(int read_amplification_threshold,
const int kMinFileSize = 2 * kBytesForOneMb; const int kMinFileSize = 2 * kBytesForOneMb;
// Allow at least 3-way parallelism for compaction between level 1 and 2. // Allow at least 3-way parallelism for compaction between level 1 and 2.
int max_file_size = max_bytes_for_level_base / 3; uint64_t max_file_size = max_bytes_for_level_base / 3;
if (max_file_size < kMinFileSize) { if (max_file_size < kMinFileSize) {
options->target_file_size_base = kMinFileSize; options->target_file_size_base = kMinFileSize;
} else { } else {

View File

@ -40,12 +40,10 @@ bool ParseBoolean(const std::string& type, const std::string& value) {
throw type; throw type;
} }
} }
uint32_t ParseInt(const std::string& value) { int ParseInt(const std::string& value) { return std::stoi(value); }
return std::stoi(value);
}
uint32_t ParseUint32(const std::string& value) { uint32_t ParseUint32(const std::string& value) {
return std::stoul(value); return static_cast<uint32_t>(std::stoul(value));
} }
uint64_t ParseUint64(const std::string& value) { uint64_t ParseUint64(const std::string& value) {
@ -82,9 +80,9 @@ bool ParseMemtableOptions(const std::string& name, const std::string& value,
} else if (name == "arena_block_size") { } else if (name == "arena_block_size") {
new_options->arena_block_size = ParseInt64(value); new_options->arena_block_size = ParseInt64(value);
} else if (name == "memtable_prefix_bloom_bits") { } else if (name == "memtable_prefix_bloom_bits") {
new_options->memtable_prefix_bloom_bits = stoul(value); new_options->memtable_prefix_bloom_bits = ParseUint32(value);
} else if (name == "memtable_prefix_bloom_probes") { } else if (name == "memtable_prefix_bloom_probes") {
new_options->memtable_prefix_bloom_probes = stoul(value); new_options->memtable_prefix_bloom_probes = ParseUint32(value);
} else if (name == "memtable_prefix_bloom_huge_page_tlb_size") { } else if (name == "memtable_prefix_bloom_huge_page_tlb_size") {
new_options->memtable_prefix_bloom_huge_page_tlb_size = new_options->memtable_prefix_bloom_huge_page_tlb_size =
ParseInt64(value); ParseInt64(value);

View File

@ -47,7 +47,8 @@ GenericRateLimiter::GenericRateLimiter(
GenericRateLimiter::~GenericRateLimiter() { GenericRateLimiter::~GenericRateLimiter() {
MutexLock g(&request_mutex_); MutexLock g(&request_mutex_);
stop_ = true; stop_ = true;
requests_to_wait_ = queue_[Env::IO_LOW].size() + queue_[Env::IO_HIGH].size(); requests_to_wait_ = static_cast<int32_t>(queue_[Env::IO_LOW].size() +
queue_[Env::IO_HIGH].size());
for (auto& r : queue_[Env::IO_HIGH]) { for (auto& r : queue_[Env::IO_HIGH]) {
r->cv.Signal(); r->cv.Signal();
} }

View File

@ -30,12 +30,12 @@ TEST(RateLimiterTest, StartStop) {
TEST(RateLimiterTest, Rate) { TEST(RateLimiterTest, Rate) {
auto* env = Env::Default(); auto* env = Env::Default();
struct Arg { struct Arg {
Arg(int64_t _target_rate, int _burst) Arg(int32_t _target_rate, int _burst)
: limiter(new GenericRateLimiter(_target_rate, 100 * 1000, 10)), : limiter(new GenericRateLimiter(_target_rate, 100 * 1000, 10)),
request_size(_target_rate / 10), request_size(_target_rate / 10),
burst(_burst) {} burst(_burst) {}
std::unique_ptr<RateLimiter> limiter; std::unique_ptr<RateLimiter> limiter;
int64_t request_size; int32_t request_size;
int burst; int burst;
}; };
@ -51,13 +51,12 @@ TEST(RateLimiterTest, Rate) {
arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1, arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1,
Env::IO_HIGH); Env::IO_HIGH);
} }
arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1, arg->limiter->Request(r.Uniform(arg->request_size - 1) + 1, Env::IO_LOW);
Env::IO_LOW);
} }
}; };
for (int i = 1; i <= 16; i*=2) { for (int i = 1; i <= 16; i*=2) {
int64_t target = i * 1024 * 10; int32_t target = i * 1024 * 10;
Arg arg(target, i / 4 + 1); Arg arg(target, i / 4 + 1);
auto start = env->NowMicros(); auto start = env->NowMicros();
for (int t = 0; t < i; ++t) { for (int t = 0; t < i; ++t) {
@ -68,7 +67,7 @@ TEST(RateLimiterTest, Rate) {
auto elapsed = env->NowMicros() - start; auto elapsed = env->NowMicros() - start;
double rate = arg.limiter->GetTotalBytesThrough() double rate = arg.limiter->GetTotalBytesThrough()
* 1000000.0 / elapsed; * 1000000.0 / elapsed;
fprintf(stderr, "request size [1 - %" PRIi64 "], limit %" PRIi64 fprintf(stderr, "request size [1 - %" PRIi32 "], limit %" PRIi32
" KB/sec, actual rate: %lf KB/sec, elapsed %.2lf seconds\n", " KB/sec, actual rate: %lf KB/sec, elapsed %.2lf seconds\n",
arg.request_size - 1, target / 1024, rate / 1024, arg.request_size - 1, target / 1024, rate / 1024,
elapsed / 1000000.0); elapsed / 1000000.0);

View File

@ -23,8 +23,8 @@ const char* Status::CopyState(const char* state) {
Status::Status(Code _code, const Slice& msg, const Slice& msg2) : code_(_code) { Status::Status(Code _code, const Slice& msg, const Slice& msg2) : code_(_code) {
assert(code_ != kOk); assert(code_ != kOk);
const uint32_t len1 = msg.size(); const uint32_t len1 = static_cast<uint32_t>(msg.size());
const uint32_t len2 = msg2.size(); const uint32_t len2 = static_cast<uint32_t>(msg2.size());
const uint32_t size = len1 + (len2 ? (2 + len2) : 0); const uint32_t size = len1 + (len2 ? (2 + len2) : 0);
char* result = new char[size + 4]; char* result = new char[size + 4];
memcpy(result, &size, sizeof(size)); memcpy(result, &size, sizeof(size));

View File

@ -54,7 +54,8 @@ class BackupRateLimiter {
(bytes_since_start_ * kMicrosInSecond) / max_bytes_per_second_; (bytes_since_start_ * kMicrosInSecond) / max_bytes_per_second_;
if (should_take_micros > interval) { if (should_take_micros > interval) {
env_->SleepForMicroseconds(should_take_micros - interval); env_->SleepForMicroseconds(
static_cast<int>(should_take_micros - interval));
now = env_->NowMicros(); now = env_->NowMicros();
} }
// reset interval // reset interval
@ -165,9 +166,7 @@ class BackupEngineImpl : public BackupEngine {
uint64_t GetSize() const { uint64_t GetSize() const {
return size_; return size_;
} }
uint32_t GetNumberFiles() { uint32_t GetNumberFiles() { return static_cast<uint32_t>(files_.size()); }
return files_.size();
}
void SetSequenceNumber(uint64_t sequence_number) { void SetSequenceNumber(uint64_t sequence_number) {
sequence_number_ = sequence_number; sequence_number_ = sequence_number;
} }

Some files were not shown because too many files have changed in this diff Show More