Fix iOS build

Summary: We don't yet have a CI build for iOS, so the iOS compile breaks from time to time. Most of the errors come from the assumption that size_t is 64-bit, while it is actually 32-bit on some (possibly all) iOS platforms. This diff fixes the compile.
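
For context, a minimal sketch of the failure mode (the names here are illustrative, not taken from the diff): on a 32-bit iOS target, size_t is 32 bits wide, so storing a 64-bit byte count in it is a narrowing conversion that the compiler flags.

    #include <cstddef>
    #include <cstdint>

    void sketch(uint64_t file_size) {
      // Implicit narrowing: warns (errors under -Werror) when size_t
      // is 32 bits and file_size may exceed 4 GiB.
      size_t n = file_size;
      // The diff uses two remedies: keep the arithmetic in uint64_t...
      uint64_t total = file_size;
      // ...or cast explicitly once a bounds check proves the value fits.
      size_t m = static_cast<size_t>(file_size);
      (void)n; (void)total; (void)m;
    }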

Test Plan:
TARGET_OS=IOS make static_lib

Observe that there are no warnings.

Reviewers: sdong, anthony, IslamAbdelRahman, kradhakrishnan, yhchiang

Reviewed By: yhchiang

Subscribers: dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D49029
Igor Canadi 2015-10-19 13:40:44 -07:00
parent 32c291e3c9
commit 4e07c99a9a
6 changed files with 18 additions and 20 deletions


@@ -601,10 +601,10 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
   if (measure_io_stats_) {
     prev_perf_level = GetPerfLevel();
     SetPerfLevel(PerfLevel::kEnableTime);
-    prev_write_nanos = iostats_context.write_nanos;
-    prev_fsync_nanos = iostats_context.fsync_nanos;
-    prev_range_sync_nanos = iostats_context.range_sync_nanos;
-    prev_prepare_write_nanos = iostats_context.prepare_write_nanos;
+    prev_write_nanos = IOSTATS(write_nanos);
+    prev_fsync_nanos = IOSTATS(fsync_nanos);
+    prev_range_sync_nanos = IOSTATS(range_sync_nanos);
+    prev_prepare_write_nanos = IOSTATS(prepare_write_nanos);
   }
 
   ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
@@ -728,13 +728,13 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
   if (measure_io_stats_) {
     sub_compact->compaction_job_stats.file_write_nanos +=
-        iostats_context.write_nanos - prev_write_nanos;
+        IOSTATS(write_nanos) - prev_write_nanos;
     sub_compact->compaction_job_stats.file_fsync_nanos +=
-        iostats_context.fsync_nanos - prev_fsync_nanos;
+        IOSTATS(fsync_nanos) - prev_fsync_nanos;
     sub_compact->compaction_job_stats.file_range_sync_nanos +=
-        iostats_context.range_sync_nanos - prev_range_sync_nanos;
+        IOSTATS(range_sync_nanos) - prev_range_sync_nanos;
     sub_compact->compaction_job_stats.file_prepare_write_nanos +=
-        iostats_context.prepare_write_nanos - prev_prepare_write_nanos;
+        IOSTATS(prepare_write_nanos) - prev_prepare_write_nanos;
     if (prev_perf_level != PerfLevel::kEnableTime) {
       SetPerfLevel(prev_perf_level);
     }
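
The mechanical change in this file is from direct iostats_context field reads to the IOSTATS() macro. Routing every access through the macro gives one place to compile the stats away on platforms where the per-thread context is unavailable. A rough sketch of that kind of guard; the NIOSTATS_CONTEXT name is invented here for illustration, and the real RocksDB definition may differ:

    // Hypothetical shape of the indirection:
    #ifdef NIOSTATS_CONTEXT
      // No per-thread IO stats on this platform; reads collapse to zero.
      #define IOSTATS(metric) 0
    #else
      #define IOSTATS(metric) (iostats_context.metric)
    #endif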


@@ -1097,7 +1097,7 @@ void VersionStorageInfo::EstimateCompactionBytesNeeded(
   // We keep doing it to Level 2, 3, etc, until the last level and return the
   // accumulated bytes.
-  size_t bytes_compact_to_next_level = 0;
+  uint64_t bytes_compact_to_next_level = 0;
   // Level 0
   bool level0_compact_triggered = false;
   if (static_cast<int>(files_[0].size()) >
@@ -1113,7 +1113,7 @@ void VersionStorageInfo::EstimateCompactionBytesNeeded(
   // Level 1 and up.
   for (int level = base_level(); level <= MaxInputLevel(); level++) {
-    size_t level_size = 0;
+    uint64_t level_size = 0;
     for (auto* f : files_[level]) {
       level_size += f->fd.GetFileSize();
     }
@@ -1124,7 +1124,7 @@ void VersionStorageInfo::EstimateCompactionBytesNeeded(
     // Add size added by previous compaction
     level_size += bytes_compact_to_next_level;
     bytes_compact_to_next_level = 0;
-    size_t level_target = MaxBytesForLevel(level);
+    uint64_t level_target = MaxBytesForLevel(level);
     if (level_size > level_target) {
       bytes_compact_to_next_level = level_size - level_target;
       // Simplify to assume the actual compaction fan-out ratio is always
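
These accumulators move from size_t to uint64_t because on a 32-bit target size_t tops out just under 4 GiB, and a sum of SST file sizes across a level clears that easily. A small, self-contained illustration of the wraparound (not code from the diff):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint64_t three_gib = 3ULL << 30;
      // On a 32-bit target this sum wraps modulo 2^32 to ~2 GiB;
      // in uint64_t it is the correct 6 GiB on every platform.
      size_t wrapped = static_cast<size_t>(three_gib) +
                       static_cast<size_t>(three_gib);
      uint64_t exact = three_gib + three_gib;
      std::printf("%zu vs %llu\n", wrapped,
                  static_cast<unsigned long long>(exact));
      return 0;
    }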


@@ -317,7 +317,7 @@ class PosixMmapReadableFile: public RandomAccessFile {
       *result = Slice();
       return IOError(filename_, EINVAL);
     } else if (offset + n > length_) {
-      n = length_ - offset;
+      n = static_cast<size_t>(length_ - offset);
     }
     *result = Slice(reinterpret_cast<char*>(mmapped_region_) + offset, n);
     return s;
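
The cast here is lossless: the first branch has already rejected offset > length_, and the else-if establishes offset + n > length_, so length_ - offset is strictly less than n, which is itself a size_t. The guard-then-cast pattern in isolation (a hypothetical helper, assuming the caller has rejected offset > file_len):

    #include <cstddef>
    #include <cstdint>

    size_t clamped_read_len(uint64_t file_len, uint64_t offset, size_t n) {
      if (offset + n > file_len) {
        // file_len - offset < n <= SIZE_MAX, so narrowing cannot truncate.
        n = static_cast<size_t>(file_len - offset);
      }
      return n;
    }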


@@ -412,8 +412,7 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
     // if offset between [buffer_offset_, buffer_offset_ + buffer_len>
     if (offset >= buffer_offset_ && offset < buffer_len_ + buffer_offset_) {
       uint64_t offset_in_buffer = offset - buffer_offset_;
-      copied = std::min(static_cast<uint64_t>(buffer_len_) - offset_in_buffer,
-                        static_cast<uint64_t>(n));
+      copied = std::min(buffer_len_ - static_cast<size_t>(offset_in_buffer), n);
       memcpy(scratch, buffer_.get() + offset_in_buffer, copied);
       if (copied == n) {
         // fully cached
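
std::min deduces a single template type from both arguments, which is why the old code widened everything to uint64_t; the result was then implicitly narrowed back into copied, a size_t, which is exactly the warning this diff chases. Doing the min in size_t, after casting the already-bounds-checked offset, keeps every operand at the destination width. The same computation as a hypothetical standalone helper:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    // Assumes offset_in_buffer < buffered, as the caller's range
    // check guarantees.
    size_t bytes_from_buffer(size_t buffered, uint64_t offset_in_buffer,
                             size_t n) {
      return std::min(buffered - static_cast<size_t>(offset_in_buffer), n);
    }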


@@ -49,8 +49,7 @@ Status CopyFile(Env* env, const std::string& source,
   char buffer[4096];
   Slice slice;
   while (size > 0) {
-    uint64_t bytes_to_read =
-        std::min(static_cast<uint64_t>(sizeof(buffer)), size);
+    size_t bytes_to_read = std::min(sizeof(buffer), static_cast<size_t>(size));
     if (s.ok()) {
       s = src_reader->Read(bytes_to_read, &slice, buffer);
     }
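
CopyFile applies the same clamp: the 64-bit remaining byte count is limited to the 4 KiB stack buffer before being passed around as a size_t. An alternative ordering that makes the narrowing provably exact is to take the min in the wider type first (a sketch, not the diff's exact phrasing):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>

    size_t next_chunk(uint64_t remaining, size_t buf_cap) {
      // The min is computed in uint64_t, so the value being cast is
      // at most buf_cap and the conversion back to size_t is exact.
      return static_cast<size_t>(std::min<uint64_t>(remaining, buf_cap));
    }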


@@ -190,7 +190,7 @@ class StringSink: public WritableFile {
   const std::string& contents() const { return contents_; }
 
   virtual Status Truncate(uint64_t size) override {
-    contents_.resize(size);
+    contents_.resize(static_cast<size_t>(size));
     return Status::OK();
   }
   virtual Status Close() override { return Status::OK(); }
@@ -243,13 +243,13 @@ class StringSource: public RandomAccessFile {
       return Status::InvalidArgument("invalid Read offset");
     }
     if (offset + n > contents_.size()) {
-      n = contents_.size() - offset;
+      n = contents_.size() - static_cast<size_t>(offset);
     }
     if (!mmap_) {
-      memcpy(scratch, &contents_[offset], n);
+      memcpy(scratch, &contents_[static_cast<size_t>(offset)], n);
       *result = Slice(scratch, n);
     } else {
-      *result = Slice(&contents_[offset], n);
+      *result = Slice(&contents_[static_cast<size_t>(offset)], n);
     }
     return Status::OK();
   }
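
The testutil changes follow from std::string's interface: resize() and operator[] take std::string::size_type, which is size_t, so passing uint64_t values through them narrows implicitly on 32-bit targets. The explicit casts mark each conversion as deliberate; in isolation (an illustrative wrapper, not RocksDB API):

    #include <cstddef>
    #include <cstdint>
    #include <string>

    // Callers are expected to have bounded size already.
    void truncate_contents(std::string& contents, uint64_t size) {
      contents.resize(static_cast<size_t>(size));
    }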