// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "file/readahead_raf.h"
|
|
|
|
|
|
|
|
#include <algorithm>
|
|
|
|
#include <mutex>
|
2021-01-05 00:59:52 +01:00
|
|
|
|
2019-09-17 05:40:44 +02:00
|
|
|
#include "file/read_write_util.h"
|
2021-01-05 00:59:52 +01:00
|
|
|
#include "rocksdb/file_system.h"
|
2019-09-16 19:31:27 +02:00
|
|
|
#include "util/aligned_buffer.h"
|
|
|
|
#include "util/rate_limiter.h"
|
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
namespace ROCKSDB_NAMESPACE {
namespace {
class ReadaheadRandomAccessFile : public FSRandomAccessFile {
 public:
  ReadaheadRandomAccessFile(std::unique_ptr<FSRandomAccessFile>&& file,
                            size_t readahead_size)
      : file_(std::move(file)),
        alignment_(file_->GetRequiredBufferAlignment()),
        readahead_size_(Roundup(readahead_size, alignment_)),
        buffer_(),
        buffer_offset_(0) {
    buffer_.Alignment(alignment_);
    buffer_.AllocateNewBuffer(readahead_size_);
  }

  ReadaheadRandomAccessFile(const ReadaheadRandomAccessFile&) = delete;

  ReadaheadRandomAccessFile& operator=(const ReadaheadRandomAccessFile&) =
      delete;

  IOStatus Read(uint64_t offset, size_t n, const IOOptions& options,
                Slice* result, char* scratch,
                IODebugContext* dbg) const override {
    // Read-ahead only makes sense if we have some slack left after reading
    if (n + alignment_ >= readahead_size_) {
      return file_->Read(offset, n, options, result, scratch, dbg);
    }

    std::unique_lock<std::mutex> lk(lock_);

    size_t cached_len = 0;
    // Check if there is a cache hit, meaning that [offset, offset + n) is
    // either completely or partially in the buffer. If it's completely cached,
    // including the end-of-file case when offset + n is greater than EOF, then
    // return.
    if (TryReadFromCache(offset, n, &cached_len, scratch) &&
        (cached_len == n || buffer_.CurrentSize() < readahead_size_)) {
      // We read exactly what we needed, or we hit end of file - return.
      *result = Slice(scratch, cached_len);
      return IOStatus::OK();
    }
    size_t advanced_offset = static_cast<size_t>(offset + cached_len);
    // In the case of a cache hit, advanced_offset is already aligned, which
    // means that chunk_offset equals advanced_offset.
    size_t chunk_offset = TruncateToPageBoundary(alignment_, advanced_offset);

    IOStatus s = ReadIntoBuffer(chunk_offset, readahead_size_, options, dbg);
    if (s.ok()) {
      // The data we need is now in cache, so we can safely read it
      size_t remaining_len;
      TryReadFromCache(advanced_offset, n - cached_len, &remaining_len,
                       scratch + cached_len);
      *result = Slice(scratch, cached_len + remaining_len);
    }
    return s;
  }

  IOStatus Prefetch(uint64_t offset, size_t n, const IOOptions& options,
                    IODebugContext* dbg) override {
    if (n < readahead_size_) {
      // Don't allow smaller prefetches than the configured `readahead_size_`.
      // `Read()` assumes a smaller prefetch buffer indicates EOF was reached.
      return IOStatus::OK();
    }

    std::unique_lock<std::mutex> lk(lock_);

    size_t offset_ = static_cast<size_t>(offset);
    size_t prefetch_offset = TruncateToPageBoundary(alignment_, offset_);
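    // If the buffer already begins at this aligned offset, assume an earlier
    // call prefetched the data and skip re-reading it.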
    if (prefetch_offset == buffer_offset_) {
      return IOStatus::OK();
    }
    return ReadIntoBuffer(prefetch_offset,
                          Roundup(offset_ + n, alignment_) - prefetch_offset,
                          options, dbg);
  }

  size_t GetUniqueId(char* id, size_t max_size) const override {
    return file_->GetUniqueId(id, max_size);
  }

  void Hint(AccessPattern pattern) override { file_->Hint(pattern); }

  IOStatus InvalidateCache(size_t offset, size_t length) override {
    std::unique_lock<std::mutex> lk(lock_);
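    // Drop the locally buffered data before forwarding the invalidation to
    // the underlying file.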
    buffer_.Clear();
    return file_->InvalidateCache(offset, length);
  }

  bool use_direct_io() const override { return file_->use_direct_io(); }

 private:
  // Tries to read from buffer_ n bytes starting at offset. If anything was
  // read from the cache, it sets cached_len to the number of bytes actually
  // read, copies that many bytes to scratch and returns true.
  // If nothing was read, it sets cached_len to 0 and returns false.
  bool TryReadFromCache(uint64_t offset, size_t n, size_t* cached_len,
                        char* scratch) const {
    if (offset < buffer_offset_ ||
        offset >= buffer_offset_ + buffer_.CurrentSize()) {
      *cached_len = 0;
      return false;
    }
    uint64_t offset_in_buffer = offset - buffer_offset_;
    *cached_len = std::min(
        buffer_.CurrentSize() - static_cast<size_t>(offset_in_buffer), n);
    memcpy(scratch, buffer_.BufferStart() + offset_in_buffer, *cached_len);
    return true;
  }

  // Reads into buffer_ the next n bytes from file_ starting at offset.
  // It may actually read fewer bytes if EOF is reached.
  // Returns the status of the read operation on the file.
  IOStatus ReadIntoBuffer(uint64_t offset, size_t n, const IOOptions& options,
                          IODebugContext* dbg) const {
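    // Never read more than the buffer can hold.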
    if (n > buffer_.Capacity()) {
      n = buffer_.Capacity();
    }
    assert(IsFileSectorAligned(offset, alignment_));
    assert(IsFileSectorAligned(n, alignment_));
    Slice result;
    IOStatus s =
        file_->Read(offset, n, options, &result, buffer_.BufferStart(), dbg);
    if (s.ok()) {
      buffer_offset_ = offset;
      buffer_.Size(result.size());
      assert(result.size() == 0 || buffer_.BufferStart() == result.data());
    }
    return s;
  }

  const std::unique_ptr<FSRandomAccessFile> file_;
  const size_t alignment_;
  const size_t readahead_size_;

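  // lock_ protects buffer_ and buffer_offset_, which may be accessed by
  // concurrent Read(), Prefetch() and InvalidateCache() calls.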
  mutable std::mutex lock_;
  // The buffer storing the prefetched data
  mutable AlignedBuffer buffer_;
  // The offset in file_, corresponding to data stored in buffer_
  mutable uint64_t buffer_offset_;
};
}  // namespace

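// Example usage (illustrative sketch only; `fs`, `fname` and `opts` are
// assumed to be supplied by the caller):
//
//   std::unique_ptr<FSRandomAccessFile> raw;
//   IOStatus s = fs->NewRandomAccessFile(fname, opts, &raw, nullptr);
//   if (s.ok()) {
//     std::unique_ptr<FSRandomAccessFile> readahead_file =
//         NewReadaheadRandomAccessFile(std::move(raw), 8 * 1024 * 1024);
//   }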
std::unique_ptr<FSRandomAccessFile> NewReadaheadRandomAccessFile(
    std::unique_ptr<FSRandomAccessFile>&& file, size_t readahead_size) {
  std::unique_ptr<FSRandomAccessFile> result(
      new ReadaheadRandomAccessFile(std::move(file), readahead_size));
  return result;
}
}  // namespace ROCKSDB_NAMESPACE