uint64_t and size_t changes to compile for iOS
Summary: While attempting to build a static lib for use in iOS, I ran into many type errors between uint64_t and size_t. This PR contains the changes I made to get `TARGET_OS=IOS make static_lib` to succeed, and to let Xcode build successfully with the resulting `librocksdb.a` library imported. It also compiles and passes tests for me on macOS, but I'm really not sure I made the correct decisions about where to `static_cast` and where to change types. Also up for discussion: is iOS worth supporting? Getting the static lib is only part one; we aren't providing any bridging headers or wrappers like the ObjectiveRocks project, so it won't be a great experience.

Closes https://github.com/facebook/rocksdb/pull/3503

Differential Revision: D7106457

Pulled By: gfosco

fbshipit-source-id: 82ac2073de7e1f09b91f6b4faea91d18bd311f8e
Parent: 8bc41f4f5d
Commit: d518fe1da6
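For context: on 32-bit iOS targets (e.g. armv7), `size_t` is 32 bits while `uint64_t` is always 64 bits, so passing a `uint64_t` where a `size_t` is expected (or using one as a container index) trips Xcode's implicit-conversion / shorten-64-to-32 errors. A minimal sketch of the failure mode and the fix pattern used throughout this PR (illustrative only; `ReadAt` and its arguments are made up, not RocksDB code):

    #include <cstdint>
    #include <string>

    // Hypothetical example, not from the PR. `buf` stands in for any byte
    // container indexed with a 64-bit file offset.
    char ReadAt(const std::string& buf, uint64_t file_offset) {
      // size_t pos = file_offset;  // fails on 32-bit: implicit narrowing
      size_t pos = static_cast<size_t>(file_offset);  // explicit, builds everywhere
      return buf[pos];
    }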
@@ -485,6 +485,8 @@ if test -z "$PORTABLE"; then
   elif test -n "`echo $TARGET_ARCHITECTURE | grep ^arm`"; then
     # TODO: Handle this with approprite options.
     COMMON_FLAGS="$COMMON_FLAGS"
+  elif [ "$TARGET_OS" == IOS ]; then
+    COMMON_FLAGS="$COMMON_FLAGS"
   elif [ "$TARGET_OS" != AIX ] && [ "$TARGET_OS" != SunOS ]; then
     COMMON_FLAGS="$COMMON_FLAGS -march=native "
   elif test "$USE_SSE"; then
@@ -723,13 +723,15 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
   std::set<size_t> sample_begin_offsets;
   if (bottommost_level_ && kSampleBytes > 0) {
     const size_t kMaxSamples = kSampleBytes >> kSampleLenShift;
-    const size_t kOutFileLen = mutable_cf_options->MaxFileSizeForLevel(
-        compact_->compaction->output_level());
+    const size_t kOutFileLen = static_cast<size_t>(
+        mutable_cf_options->MaxFileSizeForLevel(
+            compact_->compaction->output_level()));
     if (kOutFileLen != port::kMaxSizet) {
       const size_t kOutFileNumSamples = kOutFileLen >> kSampleLenShift;
       Random64 generator{versions_->NewFileNumber()};
       for (size_t i = 0; i < kMaxSamples; ++i) {
-        sample_begin_offsets.insert(generator.Uniform(kOutFileNumSamples)
+        sample_begin_offsets.insert(
+            static_cast<size_t>(generator.Uniform(kOutFileNumSamples))
             << kSampleLenShift);
       }
     }
@@ -41,7 +41,7 @@ bool FindIntraL0Compaction(const std::vector<FileMetaData*>& level_files,
                            size_t min_files_to_compact,
                            uint64_t max_compact_bytes_per_del_file,
                            CompactionInputFiles* comp_inputs) {
-  size_t compact_bytes = level_files[0]->fd.file_size;
+  size_t compact_bytes = static_cast<size_t>(level_files[0]->fd.file_size);
   size_t compact_bytes_per_del_file = port::kMaxSizet;
   // compaction range will be [0, span_len).
   size_t span_len;
@@ -214,7 +214,7 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
                         write_group.size > 1;
     size_t total_count = 0;
     size_t valid_batches = 0;
-    uint64_t total_byte_size = 0;
+    size_t total_byte_size = 0;
     for (auto* writer : write_group) {
       if (writer->CheckCallback(this)) {
         valid_batches += writer->batch_cnt;
@@ -550,7 +550,7 @@ Status DBImpl::WriteImplWALOnly(const WriteOptions& write_options,
   // Note: no need to update last_batch_group_size_ here since the batch writes
   // to WAL only

-  uint64_t total_byte_size = 0;
+  size_t total_byte_size = 0;
   for (auto* writer : write_group) {
     if (writer->CheckCallback(this)) {
       total_byte_size = WriteBatchInternal::AppendedByteSize(
@@ -1369,11 +1369,13 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {

 size_t DBImpl::GetWalPreallocateBlockSize(uint64_t write_buffer_size) const {
   mutex_.AssertHeld();
-  size_t bsize = write_buffer_size / 10 + write_buffer_size;
+  size_t bsize = static_cast<size_t>(
+      write_buffer_size / 10 + write_buffer_size);
   // Some users might set very high write_buffer_size and rely on
   // max_total_wal_size or other parameters to control the WAL size.
   if (mutable_db_options_.max_total_wal_size > 0) {
-    bsize = std::min<size_t>(bsize, mutable_db_options_.max_total_wal_size);
+    bsize = std::min<size_t>(bsize, static_cast<size_t>(
+        mutable_db_options_.max_total_wal_size));
   }
   if (immutable_db_options_.db_write_buffer_size > 0) {
     bsize = std::min<size_t>(bsize, immutable_db_options_.db_write_buffer_size);
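One caveat with the cast above (not addressed by this PR, just an observation): `static_cast<size_t>(mutable_db_options_.max_total_wal_size)` truncates silently on a 32-bit target if the option exceeds `SIZE_MAX`. If that ever mattered, a saturating conversion along these lines could be used instead; `ClampToSizeT` is a hypothetical helper, not a RocksDB API:

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // Saturating narrow: values above SIZE_MAX clamp to SIZE_MAX instead of
    // wrapping around. Hypothetical helper for illustration only.
    inline size_t ClampToSizeT(uint64_t v) {
      return static_cast<size_t>(
          std::min<uint64_t>(v, std::numeric_limits<size_t>::max()));
    }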
@@ -58,7 +58,8 @@ Status Writer::AddRecord(const Slice& slice) {
       // kRecyclableHeaderSize being <= 11)
       assert(header_size <= 11);
       dest_->Append(
-          Slice("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", leftover));
+          Slice("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00",
+                static_cast<size_t>(leftover)));
     }
     block_offset_ = 0;
   }
env/mock_env.cc
@@ -94,35 +94,37 @@ class MemFile {
     uint64_t end = std::min(start + 512, size_.load());
     MutexLock lock(&mutex_);
     for (uint64_t pos = start; pos < end; ++pos) {
-      data_[pos] = static_cast<char>(rnd_.Uniform(256));
+      data_[static_cast<size_t>(pos)] = static_cast<char>(rnd_.Uniform(256));
     }
   }

   Status Read(uint64_t offset, size_t n, Slice* result, char* scratch) const {
     MutexLock lock(&mutex_);
     const uint64_t available = Size() - std::min(Size(), offset);
+    size_t offset_ = static_cast<size_t>(offset);
     if (n > available) {
-      n = available;
+      n = static_cast<size_t>(available);
     }
     if (n == 0) {
       *result = Slice();
       return Status::OK();
     }
     if (scratch) {
-      memcpy(scratch, &(data_[offset]), n);
+      memcpy(scratch, &(data_[offset_]), n);
       *result = Slice(scratch, n);
     } else {
-      *result = Slice(&(data_[offset]), n);
+      *result = Slice(&(data_[offset_]), n);
     }
     return Status::OK();
   }

   Status Write(uint64_t offset, const Slice& data) {
     MutexLock lock(&mutex_);
+    size_t offset_ = static_cast<size_t>(offset);
     if (offset + data.size() > data_.size()) {
-      data_.resize(offset + data.size());
+      data_.resize(offset_ + data.size());
     }
-    data_.replace(offset, data.size(), data.data(), data.size());
+    data_.replace(offset_, data.size(), data.data(), data.size());
     size_ = data_.size();
     modified_time_ = Now();
     return Status::OK();
@@ -203,7 +205,7 @@ class MockSequentialFile : public SequentialFile {
     if (pos_ > file_->Size()) {
       return Status::IOError("pos_ > file_->Size()");
     }
-    const size_t available = file_->Size() - pos_;
+    const uint64_t available = file_->Size() - pos_;
     if (n > available) {
       n = available;
     }
@@ -273,7 +275,7 @@ class MockWritableFile : public WritableFile {
   }

   virtual Status Append(const Slice& data) override {
-    uint64_t bytes_written = 0;
+    size_t bytes_written = 0;
     while (bytes_written < data.size()) {
       auto bytes = RequestToken(data.size() - bytes_written);
       Status s = file_->Append(Slice(data.data() + bytes_written, bytes));
@@ -285,7 +287,7 @@ class MockWritableFile : public WritableFile {
     return Status::OK();
   }
   virtual Status Truncate(uint64_t size) override {
-    file_->Truncate(size);
+    file_->Truncate(static_cast<size_t>(size));
     return Status::OK();
   }
   virtual Status Close() override { return file_->Fsync(); }
@@ -60,7 +60,7 @@ void HistogramWindowingImpl::Add(uint64_t value){
   stats_.Add(value);

   // Current window update
-  window_stats_[current_window()].Add(value);
+  window_stats_[static_cast<size_t>(current_window())].Add(value);
 }

 void HistogramWindowingImpl::Merge(const Histogram& other) {
@@ -89,8 +89,11 @@ void HistogramWindowingImpl::Merge(const HistogramWindowingImpl& other) {
         (cur_window + num_windows_ - i) % num_windows_;
     uint64_t other_window_index =
         (other_cur_window + other.num_windows_ - i) % other.num_windows_;
+    size_t windex = static_cast<size_t>(window_index);
+    size_t other_windex = static_cast<size_t>(other_window_index);

-    window_stats_[window_index].Merge(other.window_stats_[other_window_index]);
+    window_stats_[windex].Merge(
+        other.window_stats_[other_windex]);
   }
 }

@@ -129,8 +132,9 @@ void HistogramWindowingImpl::Data(HistogramData * const data) const {

 void HistogramWindowingImpl::TimerTick() {
   uint64_t curr_time = env_->NowMicros();
+  size_t curr_window_ = static_cast<size_t>(current_window());
   if (curr_time - last_swap_time() > micros_per_window_ &&
-      window_stats_[current_window()].num() >= min_num_per_window_) {
+      window_stats_[curr_window_].num() >= min_num_per_window_) {
     SwapHistoryBucket();
   }
 }
@@ -149,7 +153,8 @@ void HistogramWindowingImpl::SwapHistoryBucket() {
                           0 : curr_window + 1;

   // subtract next buckets from totals and swap to next buckets
-  HistogramStat& stats_to_drop = window_stats_[next_window];
+  HistogramStat& stats_to_drop =
+      window_stats_[static_cast<size_t>(next_window)];

   if (!stats_to_drop.Empty()) {
     for (size_t b = 0; b < stats_.num_buckets_; b++){
@@ -260,7 +260,8 @@ class PartitionIndexReader : public IndexReader, public Cleanable {
     std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
     auto& file = table_->rep_->file;
     prefetch_buffer.reset(new FilePrefetchBuffer());
-    s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len);
+    s = prefetch_buffer->Prefetch(file.get(), prefetch_off,
+                                  static_cast<size_t>(prefetch_len));

     // After prefetch, read the partitions one by one
     biter.SeekToFirst();
@@ -654,9 +655,9 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
   size_t prefetch_len;
   if (file_size < kTailPrefetchSize) {
     prefetch_off = 0;
-    prefetch_len = file_size;
+    prefetch_len = static_cast<size_t>(file_size);
   } else {
-    prefetch_off = file_size - kTailPrefetchSize;
+    prefetch_off = static_cast<size_t>(file_size - kTailPrefetchSize);
     prefetch_len = kTailPrefetchSize;
   }
   Status s;
@@ -1896,7 +1897,8 @@ void BlockBasedTableIterator::InitDataBlock() {
       readahead_size_ = std::min(kMaxReadaheadSize, readahead_size_);
       table_->get_rep()->file->Prefetch(data_block_handle.offset(),
                                         readahead_size_);
-      readahead_limit_ = data_block_handle.offset() + readahead_size_;
+      readahead_limit_ = static_cast<size_t>(data_block_handle.offset()
+          + readahead_size_);
       // Keep exponentially increasing readahead size until kMaxReadaheadSize.
       readahead_size_ *= 2;
     }
@@ -279,7 +279,8 @@ void PartitionedFilterBlockReader::CacheDependencies(bool pin) {
   std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
   auto& file = table_->rep_->file;
   prefetch_buffer.reset(new FilePrefetchBuffer());
-  s = prefetch_buffer->Prefetch(file.get(), prefetch_off, prefetch_len);
+  s = prefetch_buffer->Prefetch(file.get(), prefetch_off,
+                                static_cast<size_t>(prefetch_len));

   // After prefetch, read the partitions one by one
   biter.SeekToFirst();
@@ -516,7 +516,7 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
      *result = Slice(scratch, cached_len);
      return Status::OK();
    }
-    size_t advanced_offset = offset + cached_len;
+    size_t advanced_offset = static_cast<size_t>(offset + cached_len);
    // In the case of cache hit advanced_offset is already aligned, means that
    // chunk_offset equals to advanced_offset
    size_t chunk_offset = TruncateToPageBoundary(alignment_, advanced_offset);
@@ -549,12 +549,13 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
      // `Read()` assumes a smaller prefetch buffer indicates EOF was reached.
      return Status::OK();
    }
-    size_t prefetch_offset = TruncateToPageBoundary(alignment_, offset);
+    size_t offset_ = static_cast<size_t>(offset);
+    size_t prefetch_offset = TruncateToPageBoundary(alignment_, offset_);
    if (prefetch_offset == buffer_offset_) {
      return Status::OK();
    }
    return ReadIntoBuffer(prefetch_offset,
-                          Roundup(offset + n, alignment_) - prefetch_offset);
+                          Roundup(offset_ + n, alignment_) - prefetch_offset);
  }

  virtual size_t GetUniqueId(char* id, size_t max_size) const override {
@@ -614,17 +615,18 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
 Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
                                     uint64_t offset, size_t n) {
   size_t alignment = reader->file()->GetRequiredBufferAlignment();
-  uint64_t rounddown_offset = Rounddown(offset, alignment);
-  uint64_t roundup_end = Roundup(offset + n, alignment);
+  size_t offset_ = static_cast<size_t>(offset);
+  uint64_t rounddown_offset = Rounddown(offset_, alignment);
+  uint64_t roundup_end = Roundup(offset_ + n, alignment);
   uint64_t roundup_len = roundup_end - rounddown_offset;
   assert(roundup_len >= alignment);
   assert(roundup_len % alignment == 0);
   buffer_.Alignment(alignment);
-  buffer_.AllocateNewBuffer(roundup_len);
+  buffer_.AllocateNewBuffer(static_cast<size_t>(roundup_len));

   Slice result;
-  Status s = reader->Read(rounddown_offset, roundup_len, &result,
-                          buffer_.BufferStart());
+  Status s = reader->Read(rounddown_offset, static_cast<size_t>(roundup_len),
+                          &result, buffer_.BufferStart());
   if (s.ok()) {
     buffer_offset_ = rounddown_offset;
     buffer_len_ = result.size();
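Most of these casts (for example `static_cast<size_t>(roundup_len)` above) rely on the value already fitting in `size_t` on the target platform. If someone wanted that assumption checked in debug builds, a small helper like the following would do; `CheckedSizeT` is a sketch for discussion, not something this PR adds:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Debug-checked narrowing: asserts no bits are lost when a 64-bit length
    // is narrowed to size_t. Illustration only, not a RocksDB API.
    inline size_t CheckedSizeT(uint64_t v) {
      assert(v <= std::numeric_limits<size_t>::max());
      return static_cast<size_t>(v);
    }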