Return error if Get/Multi() fails in Prefetching Filter blocks (#7543)

Summary:
Right now, all I/O failures under
PartitionedFilterBlockReader::CacheDependencies() are swallowed. Return an
error if the prefetch fails.

Returning an error from PartitionedFilterBlockReader::CacheDependencies() initially caused a stress test failure because the prefetch buffer was initialized with enable_ = true; as a result, when PosixMmapReadableFile::Read() is called from Prefetch(), the scratch buffer is ignored and the prefetch buffer fills with garbage values. Initializing the prefetch buffer via CreateFilePrefetchBuffer(), which sets enable_ to !ioptions.allow_mmap_reads, fixes the problem, since Prefetch() then returns without prefetching data when allow_mmap_reads is set.
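
For context, a minimal sketch of the mechanism the fix relies on. It is paraphrased rather than copied from the RocksDB sources, and the exact FilePrefetchBuffer constructor signature shown is an assumption; the point is that CreateFilePrefetchBuffer() wires the buffer's enable flag to !ioptions.allow_mmap_reads, and a disabled buffer's Prefetch() returns immediately without issuing a read.

```cpp
// Sketch only (simplified/paraphrased; constructor parameters are assumed).
void BlockBasedTable::Rep::CreateFilePrefetchBuffer(
    size_t readahead_size, size_t max_readahead_size,
    std::unique_ptr<FilePrefetchBuffer>* fpb) const {
  // Disable prefetching for mmap reads: PosixMmapReadableFile::Read()
  // ignores the caller-supplied scratch buffer, so a prefetch into it
  // would cache garbage.
  fpb->reset(new FilePrefetchBuffer(nullptr /* file_reader */, readahead_size,
                                    max_readahead_size,
                                    !ioptions.allow_mmap_reads /* enable */));
}

Status FilePrefetchBuffer::Prefetch(const IOOptions& opts,
                                    RandomAccessFileReader* reader,
                                    uint64_t offset, size_t n,
                                    bool for_compaction) {
  if (!enable_ || reader == nullptr) {
    // No-op when prefetching is disabled; callers fall back to reading
    // blocks directly.
    return Status::OK();
  }
  // ... round the offset down to the alignment, read into buffer_, and
  // record buffer_offset_ on success ...
  return Status::OK();
}
```

With that wiring, mmap-backed reads never go through the prefetch path, so CacheDependencies() can propagate prefetch errors without tripping the stress test.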

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7543

Test Plan:
make check -j64;
python -u tools/db_crashtest.py --simple blackbox

Reviewed By: anand1976

Differential Revision: D24284596

Pulled By: akankshamahajan15

fbshipit-source-id: f3f0fd44b59dcf60645730436f28564a07884868
Author: Akanksha Mahajan (2020-10-14 10:43:37 -07:00), committed by Facebook GitHub Bot
Parent: 7b65666cf1
Commit: db87afbcb3
5 changed files with 33 additions and 21 deletions


@@ -91,17 +91,19 @@ Status FilePrefetchBuffer::Prefetch(const IOOptions& opts,
size_t read_len = static_cast<size_t>(roundup_len - chunk_len);
s = reader->Read(opts, rounddown_offset + chunk_len, read_len, &result,
buffer_.BufferStart() + chunk_len, nullptr, for_compaction);
if (!s.ok()) {
return s;
}
#ifndef NDEBUG
if (!s.ok() || result.size() < read_len) {
if (result.size() < read_len) {
// Fake an IO error to force db_stress fault injection to ignore
// truncated read errors
IGNORE_STATUS_IF_ERROR(Status::IOError());
}
#endif
if (s.ok()) {
buffer_offset_ = rounddown_offset;
buffer_.Size(static_cast<size_t>(chunk_len) + result.size());
}
buffer_offset_ = rounddown_offset;
buffer_.Size(static_cast<size_t>(chunk_len) + result.size());
return s;
}


@@ -1070,12 +1070,15 @@ Status BlockBasedTable::PrefetchIndexAndFilterBlocks(
auto filter = new_table->CreateFilterBlockReader(
ro, prefetch_buffer, use_cache, prefetch_filter, pin_filter,
lookup_context);
if (filter) {
// Refer to the comment above about paritioned indexes always being cached
if (prefetch_all || pin_partition) {
filter->CacheDependencies(ro, pin_partition);
s = filter->CacheDependencies(ro, pin_partition);
if (!s.ok()) {
return s;
}
}
rep_->filter = std::move(filter);
}
}


@@ -153,7 +153,9 @@ class FilterBlockReader {
return error_msg;
}
virtual void CacheDependencies(const ReadOptions& /*ro*/, bool /*pin*/) {}
virtual Status CacheDependencies(const ReadOptions& /*ro*/, bool /*pin*/) {
return Status::OK();
}
virtual bool RangeMayExist(const Slice* /*iterate_upper_bound*/,
const Slice& user_key,


@@ -412,8 +412,8 @@ size_t PartitionedFilterBlockReader::ApproximateMemoryUsage() const {
}
// TODO(myabandeh): merge this with the same function in IndexReader
void PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
bool pin) {
Status PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
bool pin) {
assert(table());
const BlockBasedTable::Rep* const rep = table()->get_rep();
@@ -426,12 +426,11 @@ void PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
Status s = GetOrReadFilterBlock(false /* no_io */, nullptr /* get_context */,
&lookup_context, &filter_block);
if (!s.ok()) {
ROCKS_LOG_WARN(rep->ioptions.info_log,
"Error retrieving top-level filter block while trying to "
"cache filter partitions: %s",
s.ToString().c_str());
IGNORE_STATUS_IF_ERROR(s);
return;
ROCKS_LOG_ERROR(rep->ioptions.info_log,
"Error retrieving top-level filter block while trying to "
"cache filter partitions: %s",
s.ToString().c_str());
return s;
}
// Before read partitions, prefetch them to avoid lots of IOs
@@ -457,14 +456,17 @@ void PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
uint64_t last_off = handle.offset() + handle.size() + kBlockTrailerSize;
uint64_t prefetch_len = last_off - prefetch_off;
std::unique_ptr<FilePrefetchBuffer> prefetch_buffer;
prefetch_buffer.reset(new FilePrefetchBuffer());
rep->CreateFilePrefetchBuffer(0, 0, &prefetch_buffer);
IOOptions opts;
s = PrepareIOFromReadOptions(ro, rep->file->env(), opts);
if (s.ok()) {
s = prefetch_buffer->Prefetch(opts, rep->file.get(), prefetch_off,
static_cast<size_t>(prefetch_len));
}
if (!s.ok()) {
return s;
}
// After prefetch, read the partitions one by one
for (biter.SeekToFirst(); biter.Valid(); biter.Next()) {
@@ -477,17 +479,20 @@ void PartitionedFilterBlockReader::CacheDependencies(const ReadOptions& ro,
prefetch_buffer.get(), ro, handle, UncompressionDict::GetEmptyDict(),
&block, BlockType::kFilter, nullptr /* get_context */, &lookup_context,
nullptr /* contents */);
if (!s.ok()) {
return s;
}
assert(s.ok() || block.GetValue() == nullptr);
if (s.ok() && block.GetValue() != nullptr) {
if (block.GetValue() != nullptr) {
if (block.IsCached()) {
if (pin) {
filter_map_[handle.offset()] = std::move(block);
}
}
}
IGNORE_STATUS_IF_ERROR(s);
}
return biter.status();
}
const InternalKeyComparator* PartitionedFilterBlockReader::internal_comparator()


@@ -130,7 +130,7 @@ class PartitionedFilterBlockReader : public FilterBlockReaderCommon<Block> {
uint64_t block_offset, BlockHandle filter_handle,
bool no_io, BlockCacheLookupContext* lookup_context,
FilterManyFunction filter_function) const;
void CacheDependencies(const ReadOptions& ro, bool pin) override;
Status CacheDependencies(const ReadOptions& ro, bool pin) override;
const InternalKeyComparator* internal_comparator() const;
bool index_key_includes_seq() const;