From d518fe1da657ca8aa19bb3b8c9baf432aaf4c902 Mon Sep 17 00:00:00 2001
From: Fosco Marotto
Date: Tue, 6 Mar 2018 12:27:07 -0800
Subject: [PATCH] uint64_t and size_t changes to compile for iOS

Summary:
In attempting to build a static lib for use in iOS, I ran into lots of
type errors between uint64_t and size_t. This PR contains the changes I
made to get `TARGET_OS=IOS make static_lib` to succeed while also getting
Xcode to build successfully with the resulting `librocksdb.a` library
imported. This also compiles for me on macOS and tests fine, but I'm
really not sure if I made the correct decisions about where to
`static_cast` and where to change types.

Also up for discussion: is iOS worth supporting? Getting the static lib
is just part one; we aren't providing any bridging headers or wrappers
like the ObjectiveRocks project, so it won't be a great experience.

Closes https://github.com/facebook/rocksdb/pull/3503

Differential Revision: D7106457

Pulled By: gfosco

fbshipit-source-id: 82ac2073de7e1f09b91f6b4faea91d18bd311f8e
---
 build_tools/build_detect_platform |  2 ++
 db/compaction_job.cc              | 10 ++++++----
 db/compaction_picker.cc           |  2 +-
 db/db_impl_write.cc               | 10 ++++++----
 db/log_writer.cc                  |  3 ++-
 env/mock_env.cc                   | 20 +++++++++++---------
 monitoring/histogram_windowing.cc | 13 +++++++++----
 table/block_based_table_reader.cc | 10 ++++++----
 table/partitioned_filter_block.cc |  3 ++-
 util/file_reader_writer.cc        | 18 ++++++++++--------
 10 files changed, 55 insertions(+), 36 deletions(-)

diff --git a/build_tools/build_detect_platform b/build_tools/build_detect_platform
index 65550ff30..02aa623d9 100755
--- a/build_tools/build_detect_platform
+++ b/build_tools/build_detect_platform
@@ -485,6 +485,8 @@ if test -z "$PORTABLE"; then
   elif test -n "`echo $TARGET_ARCHITECTURE | grep ^arm`"; then
     # TODO: Handle this with approprite options.
COMMON_FLAGS="$COMMON_FLAGS" + elif [ "$TARGET_OS" == IOS ]; then + COMMON_FLAGS="$COMMON_FLAGS" elif [ "$TARGET_OS" != AIX ] && [ "$TARGET_OS" != SunOS ]; then COMMON_FLAGS="$COMMON_FLAGS -march=native " elif test "$USE_SSE"; then diff --git a/db/compaction_job.cc b/db/compaction_job.cc index be99ff283..23d4248d2 100644 --- a/db/compaction_job.cc +++ b/db/compaction_job.cc @@ -723,14 +723,16 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) { std::set sample_begin_offsets; if (bottommost_level_ && kSampleBytes > 0) { const size_t kMaxSamples = kSampleBytes >> kSampleLenShift; - const size_t kOutFileLen = mutable_cf_options->MaxFileSizeForLevel( - compact_->compaction->output_level()); + const size_t kOutFileLen = static_cast( + mutable_cf_options->MaxFileSizeForLevel( + compact_->compaction->output_level())); if (kOutFileLen != port::kMaxSizet) { const size_t kOutFileNumSamples = kOutFileLen >> kSampleLenShift; Random64 generator{versions_->NewFileNumber()}; for (size_t i = 0; i < kMaxSamples; ++i) { - sample_begin_offsets.insert(generator.Uniform(kOutFileNumSamples) - << kSampleLenShift); + sample_begin_offsets.insert( + static_cast(generator.Uniform(kOutFileNumSamples)) + << kSampleLenShift); } } } diff --git a/db/compaction_picker.cc b/db/compaction_picker.cc index 353dc2985..9e20b63f7 100644 --- a/db/compaction_picker.cc +++ b/db/compaction_picker.cc @@ -41,7 +41,7 @@ bool FindIntraL0Compaction(const std::vector& level_files, size_t min_files_to_compact, uint64_t max_compact_bytes_per_del_file, CompactionInputFiles* comp_inputs) { - size_t compact_bytes = level_files[0]->fd.file_size; + size_t compact_bytes = static_cast(level_files[0]->fd.file_size); size_t compact_bytes_per_del_file = port::kMaxSizet; // compaction range will be [0, span_len). size_t span_len; diff --git a/db/db_impl_write.cc b/db/db_impl_write.cc index 34ecad360..5c3eaf404 100644 --- a/db/db_impl_write.cc +++ b/db/db_impl_write.cc @@ -214,7 +214,7 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options, write_group.size > 1; size_t total_count = 0; size_t valid_batches = 0; - uint64_t total_byte_size = 0; + size_t total_byte_size = 0; for (auto* writer : write_group) { if (writer->CheckCallback(this)) { valid_batches += writer->batch_cnt; @@ -550,7 +550,7 @@ Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options, // Note: no need to update last_batch_group_size_ here since the batch writes // to WAL only - uint64_t total_byte_size = 0; + size_t total_byte_size = 0; for (auto* writer : write_group) { if (writer->CheckCallback(this)) { total_byte_size = WriteBatchInternal::AppendedByteSize( @@ -1369,11 +1369,13 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) { size_t DBImpl::GetWalPreallocateBlockSize(uint64_t write_buffer_size) const { mutex_.AssertHeld(); - size_t bsize = write_buffer_size / 10 + write_buffer_size; + size_t bsize = static_cast( + write_buffer_size / 10 + write_buffer_size); // Some users might set very high write_buffer_size and rely on // max_total_wal_size or other parameters to control the WAL size. 
   if (mutable_db_options_.max_total_wal_size > 0) {
-    bsize = std::min(bsize, mutable_db_options_.max_total_wal_size);
+    bsize = std::min(bsize, static_cast<size_t>(
+                                mutable_db_options_.max_total_wal_size));
   }
   if (immutable_db_options_.db_write_buffer_size > 0) {
     bsize = std::min(bsize, immutable_db_options_.db_write_buffer_size);
diff --git a/db/log_writer.cc b/db/log_writer.cc
index b02eec89d..a767f1916 100644
--- a/db/log_writer.cc
+++ b/db/log_writer.cc
@@ -58,7 +58,8 @@ Status Writer::AddRecord(const Slice& slice) {
         // kRecyclableHeaderSize being <= 11)
         assert(header_size <= 11);
         dest_->Append(
-            Slice("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", leftover));
+            Slice("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+                  static_cast<size_t>(leftover)));
       }
       block_offset_ = 0;
     }
diff --git a/env/mock_env.cc b/env/mock_env.cc
index 30aaeadb7..3e5fbb0fb 100644
--- a/env/mock_env.cc
+++ b/env/mock_env.cc
@@ -94,35 +94,37 @@ class MemFile {
     uint64_t end = std::min(start + 512, size_.load());
     MutexLock lock(&mutex_);
     for (uint64_t pos = start; pos < end; ++pos) {
-      data_[pos] = static_cast<char>(rnd_.Uniform(256));
+      data_[static_cast<size_t>(pos)] = static_cast<char>(rnd_.Uniform(256));
     }
   }
 
   Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
     MutexLock lock(&mutex_);
     const uint64_t available = Size() - std::min(Size(), offset);
+    size_t offset_ = static_cast<size_t>(offset);
     if (n > available) {
-      n = available;
+      n = static_cast<size_t>(available);
    }
     if (n == 0) {
       *result = Slice();
       return Status::OK();
     }
     if (scratch) {
-      memcpy(scratch, &(data_[offset]), n);
+      memcpy(scratch, &(data_[offset_]), n);
       *result = Slice(scratch, n);
     } else {
-      *result = Slice(&(data_[offset]), n);
+      *result = Slice(&(data_[offset_]), n);
     }
     return Status::OK();
   }
 
   Status Write(uint64_t offset, const Slice& data) {
     MutexLock lock(&mutex_);
+    size_t offset_ = static_cast<size_t>(offset);
     if (offset + data.size() > data_.size()) {
-      data_.resize(offset + data.size());
+      data_.resize(offset_ + data.size());
     }
-    data_.replace(offset, data.size(), data.data(), data.size());
+    data_.replace(offset_, data.size(), data.data(), data.size());
     size_ = data_.size();
     modified_time_ = Now();
     return Status::OK();
@@ -203,7 +205,7 @@ class MockSequentialFile : public SequentialFile {
     if (pos_ > file_->Size()) {
       return Status::IOError("pos_ > file_->Size()");
     }
-    const size_t available = file_->Size() - pos_;
+    const uint64_t available = file_->Size() - pos_;
     if (n > available) {
       n = available;
     }
@@ -273,7 +275,7 @@ class MockWritableFile : public WritableFile {
   }
 
   virtual Status Append(const Slice& data) override {
-    uint64_t bytes_written = 0;
+    size_t bytes_written = 0;
     while (bytes_written < data.size()) {
       auto bytes = RequestToken(data.size() - bytes_written);
       Status s = file_->Append(Slice(data.data() + bytes_written, bytes));
@@ -285,7 +287,7 @@ class MockWritableFile : public WritableFile {
     return Status::OK();
   }
   virtual Status Truncate(uint64_t size) override {
-    file_->Truncate(size);
+    file_->Truncate(static_cast<size_t>(size));
     return Status::OK();
   }
   virtual Status Close() override { return file_->Fsync(); }
diff --git a/monitoring/histogram_windowing.cc b/monitoring/histogram_windowing.cc
index 28d8265f2..5c49fcd16 100644
--- a/monitoring/histogram_windowing.cc
+++ b/monitoring/histogram_windowing.cc
@@ -60,7 +60,7 @@ void HistogramWindowingImpl::Add(uint64_t value){
   stats_.Add(value);
 
   // Current window update
-  window_stats_[current_window()].Add(value);
+  window_stats_[static_cast<size_t>(current_window())].Add(value);
 }
 
 void HistogramWindowingImpl::Merge(const Histogram& other)
 {
@@ -89,8 +89,11 @@ void HistogramWindowingImpl::Merge(const HistogramWindowingImpl& other) {
         (cur_window + num_windows_ - i) % num_windows_;
     uint64_t other_window_index =
         (other_cur_window + other.num_windows_ - i) % other.num_windows_;
+    size_t windex = static_cast<size_t>(window_index);
+    size_t other_windex = static_cast<size_t>(other_window_index);
 
-    window_stats_[window_index].Merge(other.window_stats_[other_window_index]);
+    window_stats_[windex].Merge(
+        other.window_stats_[other_windex]);
   }
 }
@@ -129,8 +132,9 @@ void HistogramWindowingImpl::Data(HistogramData * const data) const {
 
 void HistogramWindowingImpl::TimerTick() {
   uint64_t curr_time = env_->NowMicros();
+  size_t curr_window_ = static_cast<size_t>(current_window());
   if (curr_time - last_swap_time() > micros_per_window_ &&
-      window_stats_[current_window()].num() >= min_num_per_window_) {
+      window_stats_[curr_window_].num() >= min_num_per_window_) {
     SwapHistoryBucket();
   }
 }
@@ -149,7 +153,8 @@ void HistogramWindowingImpl::SwapHistoryBucket() {
                                     0 : curr_window + 1;
 
   // subtract next buckets from totals and swap to next buckets
-  HistogramStat& stats_to_drop = window_stats_[next_window];
+  HistogramStat& stats_to_drop =
+      window_stats_[static_cast<size_t>(next_window)];
 
   if (!stats_to_drop.Empty()) {
     for (size_t b = 0; b < stats_.num_buckets_; b++){
diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc
index 1095b4649..d861f1a55 100644
--- a/table/block_based_table_reader.cc
+++ b/table/block_based_table_reader.cc
@@ -260,7 +260,8 @@ class PartitionIndexReader : public IndexReader, public Cleanable {
     std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
     auto& file = table_->rep_->file;
     prefetch_buffer.reset(new FilePrefetchBuffer());
-    s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len);
+    s = prefetch_buffer->Prefetch(file.get(), prefetch_off,
+                                  static_cast<size_t>(prefetch_len));
 
     // After prefetch, read the partitions one by one
     biter.SeekToFirst();
@@ -654,9 +655,9 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
   size_t prefetch_len;
   if (file_size < kTailPrefetchSize) {
     prefetch_off = 0;
-    prefetch_len = file_size;
+    prefetch_len = static_cast<size_t>(file_size);
   } else {
-    prefetch_off = file_size - kTailPrefetchSize;
+    prefetch_off = static_cast<size_t>(file_size - kTailPrefetchSize);
     prefetch_len = kTailPrefetchSize;
   }
   Status s;
@@ -1896,7 +1897,8 @@ void BlockBasedTableIterator::InitDataBlock() {
       readahead_size_ = std::min(kMaxReadaheadSize, readahead_size_);
       table_->get_rep()->file->Prefetch(data_block_handle.offset(),
                                         readahead_size_);
-      readahead_limit_ = data_block_handle.offset() + readahead_size_;
+      readahead_limit_ = static_cast<size_t>(data_block_handle.offset() +
+                                             readahead_size_);
       // Keep exponentially increasing readahead size until kMaxReadaheadSize.
       readahead_size_ *= 2;
     }
   }
diff --git a/table/partitioned_filter_block.cc b/table/partitioned_filter_block.cc
index 7fd59b437..2f7c67a99 100644
--- a/table/partitioned_filter_block.cc
+++ b/table/partitioned_filter_block.cc
@@ -279,7 +279,8 @@ void PartitionedFilterBlockReader::CacheDependencies(bool pin) {
   std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
   auto& file = table_->rep_->file;
   prefetch_buffer.reset(new FilePrefetchBuffer());
-  s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len);
+  s = prefetch_buffer->Prefetch(file.get(), prefetch_off,
+                                static_cast<size_t>(prefetch_len));
 
   // After prefetch, read the partitions one by one
   biter.SeekToFirst();
diff --git a/util/file_reader_writer.cc b/util/file_reader_writer.cc
index 677cf2076..38228a8e8 100644
--- a/util/file_reader_writer.cc
+++ b/util/file_reader_writer.cc
@@ -516,7 +516,7 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
      *result = Slice(scratch, cached_len);
       return Status::OK();
     }
-    size_t advanced_offset = offset + cached_len;
+    size_t advanced_offset = static_cast<size_t>(offset + cached_len);
     // In the case of cache hit advanced_offset is already aligned, means that
     // chunk_offset equals to advanced_offset
     size_t chunk_offset = TruncateToPageBoundary(alignment_, advanced_offset);
@@ -549,12 +549,13 @@
       // `Read()` assumes a smaller prefetch buffer indicates EOF was reached.
       return Status::OK();
     }
-    size_t prefetch_offset = TruncateToPageBoundary(alignment_, offset);
+    size_t offset_ = static_cast<size_t>(offset);
+    size_t prefetch_offset = TruncateToPageBoundary(alignment_, offset_);
     if (prefetch_offset == buffer_offset_) {
       return Status::OK();
     }
     return ReadIntoBuffer(prefetch_offset,
-                          Roundup(offset + n, alignment_) - prefetch_offset);
+                          Roundup(offset_ + n, alignment_) - prefetch_offset);
   }
 
   virtual size_t GetUniqueId(char* id, size_t max_size) const override {
@@ -614,17 +615,18 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
 Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
                                     uint64_t offset, size_t n) {
   size_t alignment = reader->file()->GetRequiredBufferAlignment();
-  uint64_t rounddown_offset = Rounddown(offset, alignment);
-  uint64_t roundup_end = Roundup(offset + n, alignment);
+  size_t offset_ = static_cast<size_t>(offset);
+  uint64_t rounddown_offset = Rounddown(offset_, alignment);
+  uint64_t roundup_end = Roundup(offset_ + n, alignment);
   uint64_t roundup_len = roundup_end - rounddown_offset;
   assert(roundup_len >= alignment);
   assert(roundup_len % alignment == 0);
 
   buffer_.Alignment(alignment);
-  buffer_.AllocateNewBuffer(roundup_len);
+  buffer_.AllocateNewBuffer(static_cast<size_t>(roundup_len));
   Slice result;
-  Status s = reader->Read(rounddown_offset, roundup_len, &result,
-                          buffer_.BufferStart());
+  Status s = reader->Read(rounddown_offset, static_cast<size_t>(roundup_len),
+                          &result, buffer_.BufferStart());
   if (s.ok()) {
     buffer_offset_ = rounddown_offset;
     buffer_len_ = result.size();
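
For context, here is a minimal sketch, not part of the patch, of the kind of mismatch these casts address. FileSize() and the constants below are hypothetical stand-ins for RocksDB's uint64_t-returning size accessors; the point is that on a 32-bit iOS target size_t is narrower than uint64_t, so conversions that are silent on 64-bit macOS become narrowing warnings or errors (e.g. under clang's -Wshorten-64-to-32 with warnings treated as errors), and mixed-type std::min calls fail to deduce a single template argument:

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Hypothetical stand-in for a RocksDB accessor that reports sizes as uint64_t.
uint64_t FileSize() { return 10ull * 1024 * 1024; }

size_t BufferBytes() {
  // `size_t n = FileSize();` compiles silently on 64-bit macOS, but on a
  // 32-bit iOS target it is an implicit 64-to-32-bit narrowing that the
  // compiler can reject. The patch makes the narrowing explicit instead:
  size_t n = static_cast<size_t>(FileSize());
  // std::min needs both arguments to be the same type, so one operand must
  // be cast wherever size_t and uint64_t meet (cf. GetWalPreallocateBlockSize).
  return std::min(n, static_cast<size_t>(uint64_t{1} << 20));
}

The patch uses both remedies, changing some local variables from uint64_t to size_t and casting at the assignment or call site elsewhere, which is the trade-off the summary flags as open for discussion.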