Remove external tracking of AlignedBuffer's size (#4105)

Summary:
Remove external tracking of AlignedBuffer's size in `ReadaheadRandomAccessFile` and `FilePrefetchBuffer`: both classes kept a shadow `buffer_len_` member duplicating the size the buffer already tracks, so they now use `AlignedBuffer::Size()`/`CurrentSize()` directly.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4105

Differential Revision: D8805724

Pulled By: sagar0

fbshipit-source-id: d61d8c203c7c500e3f36e912132d7852026ed023
Author: Sagar Vemuri
Date: 2018-07-11 15:42:49 -07:00
Committed by: Facebook Github Bot
Parent: 331cb63641
Commit: 1c912196de
2 changed files with 16 additions and 21 deletions
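
The whole change rests on one observation: AlignedBuffer already records how many valid bytes it holds, so the `buffer_len_` members deleted below were shadow copies that had to be kept in sync by hand. The following is a minimal sketch of that size-tracking contract, inferred only from the calls visible in this diff (the real AlignedBuffer, in RocksDB's util/aligned_buffer.h, additionally performs alignment-aware allocation):

#include <cstddef>
#include <memory>

// Hypothetical stand-in for AlignedBuffer, modeling only the size contract
// that this commit relies on.
class AlignedBufferSketch {
 public:
  void Alignment(size_t alignment) { alignment_ = alignment; }
  void AllocateNewBuffer(size_t capacity) {
    buf_.reset(new char[capacity]);
    capacity_ = capacity;
    size_ = 0;  // a freshly allocated buffer holds no valid bytes yet
  }
  char* BufferStart() { return buf_.get(); }
  void Size(size_t s) { size_ = s; }            // record the valid-byte count
  size_t CurrentSize() const { return size_; }  // query it back

 private:
  std::unique_ptr<char[]> buf_;
  size_t alignment_ = 0;
  size_t capacity_ = 0;
  size_t size_ = 0;  // single source of truth; no external buffer_len_ needed
};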

File: util/file_reader_writer.cc

@@ -509,9 +509,7 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
         alignment_(file_->GetRequiredBufferAlignment()),
         readahead_size_(Roundup(readahead_size, alignment_)),
         buffer_(),
-        buffer_offset_(0),
-        buffer_len_(0) {
+        buffer_offset_(0) {
     buffer_.Alignment(alignment_);
     buffer_.AllocateNewBuffer(readahead_size_);
   }
@@ -537,7 +535,7 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
     if (TryReadFromCache(offset, n, &cached_len, scratch) &&
         (cached_len == n ||
          // End of file
-         buffer_len_ < readahead_size_)) {
+         buffer_.CurrentSize() < readahead_size_)) {
       *result = Slice(scratch, cached_len);
       return Status::OK();
     }
@@ -551,13 +549,13 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
     if (s.ok()) {
       // In the case of cache miss, i.e. when cached_len equals 0, an offset can
       // exceed the file end position, so the following check is required
-      if (advanced_offset < chunk_offset + buffer_len_) {
+      if (advanced_offset < chunk_offset + buffer_.CurrentSize()) {
         // In the case of cache miss, the first chunk_padding bytes in buffer_
         // are
         // stored for alignment only and must be skipped
         size_t chunk_padding = advanced_offset - chunk_offset;
         auto remaining_len =
-            std::min(buffer_len_ - chunk_padding, n - cached_len);
+            std::min(buffer_.CurrentSize() - chunk_padding, n - cached_len);
         memcpy(scratch + cached_len, buffer_.BufferStart() + chunk_padding,
                remaining_len);
         *result = Slice(scratch, cached_len + remaining_len);
@@ -600,13 +598,14 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
  private:
   bool TryReadFromCache(uint64_t offset, size_t n, size_t* cached_len,
                         char* scratch) const {
-    if (offset < buffer_offset_ || offset >= buffer_offset_ + buffer_len_) {
+    if (offset < buffer_offset_ ||
+        offset >= buffer_offset_ + buffer_.CurrentSize()) {
       *cached_len = 0;
       return false;
     }
     uint64_t offset_in_buffer = offset - buffer_offset_;
-    *cached_len =
-        std::min(buffer_len_ - static_cast<size_t>(offset_in_buffer), n);
+    *cached_len = std::min(
+        buffer_.CurrentSize() - static_cast<size_t>(offset_in_buffer), n);
     memcpy(scratch, buffer_.BufferStart() + offset_in_buffer, *cached_len);
     return true;
   }
@@ -621,7 +620,7 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
     Status s = file_->Read(offset, n, &result, buffer_.BufferStart());
     if (s.ok()) {
       buffer_offset_ = offset;
-      buffer_len_ = result.size();
+      buffer_.Size(result.size());
       assert(buffer_.BufferStart() == result.data());
     }
     return s;
@@ -634,7 +633,6 @@ class ReadaheadRandomAccessFile : public RandomAccessFile {
   mutable std::mutex lock_;
   mutable AlignedBuffer buffer_;
   mutable uint64_t buffer_offset_;
-  mutable size_t buffer_len_;
 };
 }  // namespace
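
The cache-hit test rewritten in the TryReadFromCache hunk above is easiest to verify in isolation. Here is a free-function re-expression of its arithmetic, with the hypothetical parameters buf_offset/buf_size standing in for buffer_offset_ and buffer_.CurrentSize():

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Returns how many of the n bytes requested at `offset` are already held
// by a buffer covering [buf_offset, buf_offset + buf_size).
size_t CachedLength(uint64_t offset, size_t n, uint64_t buf_offset,
                    size_t buf_size) {
  // Full miss: the request starts outside the buffered range.
  if (offset < buf_offset || offset >= buf_offset + buf_size) {
    return 0;
  }
  uint64_t offset_in_buffer = offset - buf_offset;
  // Partial hits are allowed: serve whatever overlaps the buffered tail.
  return std::min(buf_size - static_cast<size_t>(offset_in_buffer), n);
}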
@@ -658,9 +656,9 @@ Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
   uint64_t chunk_offset_in_buffer = 0;
   uint64_t chunk_len = 0;
   bool copy_data_to_new_buffer = false;
-  if (buffer_len_ > 0 && offset >= buffer_offset_ &&
-      offset <= buffer_offset_ + buffer_len_) {
-    if (offset + n <= buffer_offset_ + buffer_len_) {
+  if (buffer_.CurrentSize() > 0 && offset >= buffer_offset_ &&
+      offset <= buffer_offset_ + buffer_.CurrentSize()) {
+    if (offset + n <= buffer_offset_ + buffer_.CurrentSize()) {
       // All requested bytes are already in the buffer. So no need to Read
       // again.
       return s;
@@ -669,11 +667,11 @@ Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
       // bytes to the beginning, and memcpy them back into the new buffer if a
       // new buffer is created.
       chunk_offset_in_buffer = Rounddown(offset - buffer_offset_, alignment);
-      chunk_len = buffer_len_ - chunk_offset_in_buffer;
+      chunk_len = buffer_.CurrentSize() - chunk_offset_in_buffer;
       assert(chunk_offset_in_buffer % alignment == 0);
       assert(chunk_len % alignment == 0);
       assert(chunk_offset_in_buffer + chunk_len <=
-             buffer_offset_ + buffer_len_);
+             buffer_offset_ + buffer_.CurrentSize());
       if (chunk_len > 0) {
         copy_data_to_new_buffer = true;
       } else {
@@ -702,8 +700,7 @@ Status FilePrefetchBuffer::Prefetch(RandomAccessFileReader* reader,
                    buffer_.BufferStart() + chunk_len);
   if (s.ok()) {
     buffer_offset_ = rounddown_offset;
-    buffer_len_ = chunk_len + result.size();
-    buffer_.Size(buffer_len_);
+    buffer_.Size(chunk_len + result.size());
   }
   return s;
 }
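
The chunk-reuse math in the Prefetch hunks above rewards a worked example. Rounddown below is a hypothetical free-standing copy of the helper the diff calls: with 4 KiB alignment, a buffer at offset 0 holding 8192 valid bytes, and a new request at offset 5000, the reusable suffix is the aligned tail starting at 4096:

#include <cassert>
#include <cstdint>

// Hypothetical free-standing copy of the Rounddown helper the diff calls.
inline uint64_t Rounddown(uint64_t x, uint64_t align) {
  return (x / align) * align;
}

int main() {
  const uint64_t alignment = 4096;     // required buffer alignment
  const uint64_t buffer_offset = 0;    // buffer_offset_
  const uint64_t current_size = 8192;  // buffer_.CurrentSize()
  const uint64_t offset = 5000;        // new read request

  // The reusable chunk starts at the aligned position at or below the
  // requested offset and runs to the end of the valid bytes.
  uint64_t chunk_offset_in_buffer = Rounddown(offset - buffer_offset, alignment);
  uint64_t chunk_len = current_size - chunk_offset_in_buffer;

  assert(chunk_offset_in_buffer == 4096);
  assert(chunk_len == 4096);
  assert(chunk_offset_in_buffer % alignment == 0);  // same invariants the
  assert(chunk_len % alignment == 0);               // diff asserts
  return 0;
}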
@@ -718,7 +715,7 @@ bool FilePrefetchBuffer::TryReadFromCache(uint64_t offset, size_t n,
   // If readahead is enabled: prefetch the remaining bytes + readadhead bytes
   // and satisfy the request.
   // If readahead is not enabled: return false.
-  if (offset + n > buffer_offset_ + buffer_len_) {
+  if (offset + n > buffer_offset_ + buffer_.CurrentSize()) {
     if (readahead_size_ > 0) {
       assert(file_reader_ != nullptr);
       assert(max_readahead_size_ >= readahead_size_);

File: util/file_reader_writer.h

@@ -216,7 +216,6 @@ class FilePrefetchBuffer {
   FilePrefetchBuffer(RandomAccessFileReader* file_reader = nullptr,
                      size_t readadhead_size = 0, size_t max_readahead_size = 0)
       : buffer_offset_(0),
-        buffer_len_(0),
         file_reader_(file_reader),
         readahead_size_(readadhead_size),
         max_readahead_size_(max_readahead_size) {}
@@ -226,7 +225,6 @@ class FilePrefetchBuffer {
  private:
   AlignedBuffer buffer_;
   uint64_t buffer_offset_;
-  size_t buffer_len_;
   RandomAccessFileReader* file_reader_;
   size_t readahead_size_;
   size_t max_readahead_size_;
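
The header change shows what was at stake: with an external buffer_len_, every refill had to update two counters (the pair of lines deleted from Prefetch above), and forgetting one write silently desynchronizes them. A hypothetical reduction of that hazard, with ShadowedBuffer standing in for the old two-counter pattern:

#include <cassert>
#include <cstddef>

struct ShadowedBuffer {
  size_t stored_size = 0;  // what AlignedBuffer::Size() recorded
  size_t buffer_len = 0;   // the external shadow copy

  void RefillOk(size_t n) {  // the disciplined path: update both counters
    stored_size = n;
    buffer_len = n;
  }
  void RefillBuggy(size_t n) {  // the hazard: one update forgotten
    stored_size = n;
  }
};

int main() {
  ShadowedBuffer b;
  b.RefillOk(4096);
  assert(b.stored_size == b.buffer_len);
  b.RefillBuggy(8192);
  assert(b.stored_size != b.buffer_len);  // the counters have drifted
  return 0;
}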