Refactor ReadBlockContents()

Summary: Divide ReadBlockContents() into multiple sub-functions, maintaining the input and intermediate data in a new class, BlockFetcher. I hope this makes the code easier to maintain in general. Another motivation is to clearly separate the logic that runs before the file read from the logic that runs after it. The refactor will help us evaluate how we can make the I/O async in the future.

Closes https://github.com/facebook/rocksdb/pull/3244
Differential Revision: D6520983
Pulled By: siying
fbshipit-source-id: 338d90bc0338472d46be7a7682028dc9114b12e9

This commit is contained in:
parent 9a27ac5d89
commit 2f1a3a4d74
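For orientation before the diff, here is a minimal sketch (not part of the commit) of how a typical call site migrates from the free ReadBlockContents() function to the new BlockFetcher class introduced in table/block_fetcher.h below; the wrapper function ReadOneBlock and its parameter list are hypothetical placeholders for whatever the caller already holds.

// Hypothetical caller inside the RocksDB source tree; ReadOneBlock is not a
// real RocksDB function, it only illustrates the before/after call pattern.
#include "table/block_fetcher.h"

namespace rocksdb {

Status ReadOneBlock(RandomAccessFileReader* file,
                    FilePrefetchBuffer* prefetch_buffer, const Footer& footer,
                    const ReadOptions& read_options, const BlockHandle& handle,
                    const ImmutableCFOptions& ioptions,
                    const PersistentCacheOptions& cache_options,
                    BlockContents* contents) {
  // Before this commit the caller invoked the free function directly:
  //   Status s = ReadBlockContents(file, prefetch_buffer, footer, read_options,
  //                                handle, contents, ioptions,
  //                                true /* decompress */,
  //                                Slice() /* compression dict */,
  //                                cache_options);
  // After this commit the same inputs are held by a BlockFetcher object and
  // the read is driven through its ReadBlockContents() member.
  Slice compression_dict;  // empty: no preset compression dictionary
  BlockFetcher block_fetcher(file, prefetch_buffer, footer, read_options,
                             handle, contents, ioptions, true /* decompress */,
                             compression_dict, cache_options);
  return block_fetcher.ReadBlockContents();
}

}  // namespace rocksdb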
@@ -485,6 +485,7 @@ set(SOURCES
         table/block_based_table_factory.cc
         table/block_based_table_reader.cc
         table/block_builder.cc
+        table/block_fetcher.cc
         table/block_prefix_index.cc
         table/bloom_block.cc
         table/cuckoo_table_builder.cc

TARGETS (1 line changed)
@@ -157,6 +157,7 @@ cpp_library(
         "table/block_based_table_factory.cc",
         "table/block_based_table_reader.cc",
         "table/block_builder.cc",
+        "table/block_fetcher.cc",
         "table/block_prefix_index.cc",
         "table/bloom_block.cc",
         "table/cuckoo_table_builder.cc",

src.mk (1 line changed)
@@ -96,6 +96,7 @@ LIB_SOURCES = \
   table/block_based_table_factory.cc \
   table/block_based_table_reader.cc \
   table/block_builder.cc \
+  table/block_fetcher.cc \
   table/block_prefix_index.cc \
   table/bloom_block.cc \
   table/cuckoo_table_builder.cc \
@@ -30,6 +30,7 @@
 #include "table/block.h"
 #include "table/block_based_filter_block.h"
 #include "table/block_based_table_factory.h"
+#include "table/block_fetcher.h"
 #include "table/block_prefix_index.h"
 #include "table/filter_block.h"
 #include "table/format.h"
@@ -78,9 +79,10 @@ Status ReadBlockFromFile(
     const PersistentCacheOptions& cache_options, SequenceNumber global_seqno,
     size_t read_amp_bytes_per_bit) {
   BlockContents contents;
-  Status s = ReadBlockContents(file, prefetch_buffer, footer, options, handle,
-                               &contents, ioptions, do_uncompress,
-                               compression_dict, cache_options);
+  BlockFetcher block_fetcher(file, prefetch_buffer, footer, options, handle,
+                             &contents, ioptions, do_uncompress,
+                             compression_dict, cache_options);
+  Status s = block_fetcher.ReadBlockContents();
   if (s.ok()) {
     result->reset(new Block(std::move(contents), global_seqno,
                             read_amp_bytes_per_bit, ioptions.statistics));
@@ -410,18 +412,20 @@ class HashIndexReader : public IndexReader {

     // Read contents for the blocks
     BlockContents prefixes_contents;
-    s = ReadBlockContents(file, prefetch_buffer, footer, ReadOptions(),
-                          prefixes_handle, &prefixes_contents, ioptions,
-                          true /* decompress */, Slice() /*compression dict*/,
-                          cache_options);
+    BlockFetcher prefixes_block_fetcher(
+        file, prefetch_buffer, footer, ReadOptions(), prefixes_handle,
+        &prefixes_contents, ioptions, true /* decompress */,
+        Slice() /*compression dict*/, cache_options);
+    s = prefixes_block_fetcher.ReadBlockContents();
     if (!s.ok()) {
       return s;
     }
     BlockContents prefixes_meta_contents;
-    s = ReadBlockContents(file, prefetch_buffer, footer, ReadOptions(),
-                          prefixes_meta_handle, &prefixes_meta_contents,
-                          ioptions, true /* decompress */,
-                          Slice() /*compression dict*/, cache_options);
+    BlockFetcher prefixes_meta_block_fetcher(
+        file, prefetch_buffer, footer, ReadOptions(), prefixes_meta_handle,
+        &prefixes_meta_contents, ioptions, true /* decompress */,
+        Slice() /*compression dict*/, cache_options);
+    prefixes_meta_block_fetcher.ReadBlockContents();
     if (!s.ok()) {
       // TODO: log error
       return Status::OK();
@@ -1138,11 +1142,14 @@ FilterBlockReader* BlockBasedTable::ReadFilter(
     return nullptr;
   }
   BlockContents block;
-  if (!ReadBlockContents(rep->file.get(), prefetch_buffer, rep->footer,
-                         ReadOptions(), filter_handle, &block, rep->ioptions,
-                         false /* decompress */, Slice() /*compression dict*/,
-                         rep->persistent_cache_options)
-           .ok()) {
+
+  BlockFetcher block_fetcher(
+      rep->file.get(), prefetch_buffer, rep->footer, ReadOptions(),
+      filter_handle, &block, rep->ioptions, false /* decompress */,
+      Slice() /*compression dict*/, rep->persistent_cache_options);
+  Status s = block_fetcher.ReadBlockContents();
+
+  if (!s.ok()) {
     // Error reading the block
     return nullptr;
   }
@@ -1906,11 +1913,12 @@ Status BlockBasedTable::VerifyChecksumInBlocks(InternalIterator* index_iter) {
       break;
     }
     BlockContents contents;
-    s = ReadBlockContents(rep_->file.get(), nullptr /* prefetch buffer */,
-                          rep_->footer, ReadOptions(), handle, &contents,
-                          rep_->ioptions, false /* decompress */,
-                          Slice() /*compression dict*/,
-                          rep_->persistent_cache_options);
+    BlockFetcher block_fetcher(rep_->file.get(), nullptr /* prefetch buffer */,
+                               rep_->footer, ReadOptions(), handle, &contents,
+                               rep_->ioptions, false /* decompress */,
+                               Slice() /*compression dict*/,
+                               rep_->persistent_cache_options);
+    s = block_fetcher.ReadBlockContents();
     if (!s.ok()) {
       break;
     }
@@ -2195,12 +2203,12 @@ Status BlockBasedTable::DumpTable(WritableFile* out_file) {
       BlockHandle handle;
       if (FindMetaBlock(meta_iter.get(), filter_block_key, &handle).ok()) {
         BlockContents block;
-        if (ReadBlockContents(rep_->file.get(), nullptr /* prefetch_buffer */,
-                              rep_->footer, ReadOptions(), handle, &block,
-                              rep_->ioptions, false /*decompress*/,
-                              Slice() /*compression dict*/,
-                              rep_->persistent_cache_options)
-                .ok()) {
+        BlockFetcher block_fetcher(
+            rep_->file.get(), nullptr /* prefetch_buffer */, rep_->footer,
+            ReadOptions(), handle, &block, rep_->ioptions, false /*decompress*/,
+            Slice() /*compression dict*/, rep_->persistent_cache_options);
+        s = block_fetcher.ReadBlockContents();
+        if (!s.ok()) {
          rep_->filter.reset(new BlockBasedFilterBlockReader(
              rep_->ioptions.prefix_extractor, table_options,
              table_options.whole_key_filtering, std::move(block),
table/block_fetcher.cc (new file, 233 lines)
@@ -0,0 +1,233 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "table/block_fetcher.h"

#include <string>
#include <inttypes.h>

#include "monitoring/perf_context_imp.h"
#include "monitoring/statistics.h"
#include "rocksdb/env.h"
#include "table/block.h"
#include "table/block_based_table_reader.h"
#include "table/persistent_cache_helper.h"
#include "table/format.h"
#include "util/coding.h"
#include "util/compression.h"
#include "util/crc32c.h"
#include "util/file_reader_writer.h"
#include "util/logging.h"
#include "util/stop_watch.h"
#include "util/string_util.h"
#include "util/xxhash.h"

namespace rocksdb {

void BlockFetcher::CheckBlockChecksum() {
  // Check the crc of the type and the block contents
  if (read_options_.verify_checksums) {
    const char* data = slice_.data();  // Pointer to where Read put the data
    PERF_TIMER_GUARD(block_checksum_time);
    uint32_t value = DecodeFixed32(data + block_size_ + 1);
    uint32_t actual = 0;
    switch (footer_.checksum()) {
      case kNoChecksum:
        break;
      case kCRC32c:
        value = crc32c::Unmask(value);
        actual = crc32c::Value(data, block_size_ + 1);
        break;
      case kxxHash:
        actual = XXH32(data, static_cast<int>(block_size_) + 1, 0);
        break;
      default:
        status_ = Status::Corruption(
            "unknown checksum type " + ToString(footer_.checksum()) + " in " +
            file_->file_name() + " offset " + ToString(handle_.offset()) +
            " size " + ToString(block_size_));
    }
    if (status_.ok() && actual != value) {
      status_ = Status::Corruption(
          "block checksum mismatch: expected " + ToString(actual) + ", got " +
          ToString(value) + " in " + file_->file_name() + " offset " +
          ToString(handle_.offset()) + " size " + ToString(block_size_));
    }
  }
}

bool BlockFetcher::TryGetUncompressBlockFromPersistentCache() {
  if (cache_options_.persistent_cache &&
      !cache_options_.persistent_cache->IsCompressed()) {
    Status status = PersistentCacheHelper::LookupUncompressedPage(
        cache_options_, handle_, contents_);
    if (status.ok()) {
      // uncompressed page is found for the block handle
      return true;
    } else {
      // uncompressed page is not found
      if (ioptions_.info_log && !status.IsNotFound()) {
        assert(!status.ok());
        ROCKS_LOG_INFO(ioptions_.info_log,
                       "Error reading from persistent cache. %s",
                       status.ToString().c_str());
      }
    }
  }
  return false;
}

bool BlockFetcher::TryGetFromPrefetchBuffer() {
  if (prefetch_buffer_ != nullptr &&
      prefetch_buffer_->TryReadFromCache(
          handle_.offset(),
          static_cast<size_t>(handle_.size()) + kBlockTrailerSize, &slice_)) {
    block_size_ = static_cast<size_t>(handle_.size());
    CheckBlockChecksum();
    if (!status_.ok()) {
      return true;
    }
    got_from_prefetch_buffer_ = true;
    used_buf_ = const_cast<char*>(slice_.data());
  }
  return got_from_prefetch_buffer_;
}

bool BlockFetcher::TryGetCompressedBlockFromPersistentCache() {
  if (cache_options_.persistent_cache &&
      cache_options_.persistent_cache->IsCompressed()) {
    // lookup uncompressed cache mode p-cache
    status_ = PersistentCacheHelper::LookupRawPage(
        cache_options_, handle_, &heap_buf_, block_size_ + kBlockTrailerSize);
    if (status_.ok()) {
      used_buf_ = heap_buf_.get();
      slice_ = Slice(heap_buf_.get(), block_size_);
      return true;
    } else if (!status_.IsNotFound() && ioptions_.info_log) {
      assert(!status_.ok());
      ROCKS_LOG_INFO(ioptions_.info_log,
                     "Error reading from persistent cache. %s",
                     status_.ToString().c_str());
    }
  }
  return false;
}

void BlockFetcher::PrepareBufferForBlockFromFile() {
  // cache miss read from device
  if (do_uncompress_ &&
      block_size_ + kBlockTrailerSize < kDefaultStackBufferSize) {
    // If we've got a small enough hunk of data, read it in to the
    // trivially allocated stack buffer instead of needing a full malloc()
    used_buf_ = &stack_buf_[0];
  } else {
    heap_buf_ =
        std::unique_ptr<char[]>(new char[block_size_ + kBlockTrailerSize]);
    used_buf_ = heap_buf_.get();
  }
}

void BlockFetcher::InsertCompressedBlockToPersistentCacheIfNeeded() {
  if (status_.ok() && read_options_.fill_cache &&
      cache_options_.persistent_cache &&
      cache_options_.persistent_cache->IsCompressed()) {
    // insert to raw cache
    PersistentCacheHelper::InsertRawPage(cache_options_, handle_, used_buf_,
                                         block_size_ + kBlockTrailerSize);
  }
}

void BlockFetcher::InsertUncompressedBlockToPersistentCacheIfNeeded() {
  if (status_.ok() && !got_from_prefetch_buffer_ && read_options_.fill_cache &&
      cache_options_.persistent_cache &&
      !cache_options_.persistent_cache->IsCompressed()) {
    // insert to uncompressed cache
    PersistentCacheHelper::InsertUncompressedPage(cache_options_, handle_,
                                                  *contents_);
  }
}

void BlockFetcher::GetBlockContents() {
  if (slice_.data() != used_buf_) {
    // the slice content is not the buffer provided
    *contents_ = BlockContents(Slice(slice_.data(), block_size_), false,
                               compression_type);
  } else {
    // page is uncompressed, the buffer either stack or heap provided
    if (got_from_prefetch_buffer_ || used_buf_ == &stack_buf_[0]) {
      heap_buf_ = std::unique_ptr<char[]>(new char[block_size_]);
      memcpy(heap_buf_.get(), used_buf_, block_size_);
    }
    *contents_ = BlockContents(std::move(heap_buf_), block_size_, true,
                               compression_type);
  }
}

Status BlockFetcher::ReadBlockContents() {
  block_size_ = static_cast<size_t>(handle_.size());

  if (TryGetUncompressBlockFromPersistentCache()) {
    return Status::OK();
  }
  if (TryGetFromPrefetchBuffer()) {
    if (!status_.ok()) {
      return status_;
    }
  } else if (!TryGetCompressedBlockFromPersistentCache()) {
    PrepareBufferForBlockFromFile();
    Status s;

    {
      PERF_TIMER_GUARD(block_read_time);
      // Actual file read
      status_ = file_->Read(handle_.offset(), block_size_ + kBlockTrailerSize,
                            &slice_, used_buf_);
    }
    PERF_COUNTER_ADD(block_read_count, 1);
    PERF_COUNTER_ADD(block_read_byte, block_size_ + kBlockTrailerSize);
    if (!status_.ok()) {
      return status_;
    }

    if (slice_.size() != block_size_ + kBlockTrailerSize) {
      return Status::Corruption("truncated block read from " +
                                file_->file_name() + " offset " +
                                ToString(handle_.offset()) + ", expected " +
                                ToString(block_size_ + kBlockTrailerSize) +
                                " bytes, got " + ToString(slice_.size()));
    }

    CheckBlockChecksum();
    if (status_.ok()) {
      InsertCompressedBlockToPersistentCacheIfNeeded();
    } else {
      return status_;
    }
  }

  PERF_TIMER_GUARD(block_decompress_time);

  compression_type =
      static_cast<rocksdb::CompressionType>(slice_.data()[block_size_]);

  if (do_uncompress_ && compression_type != kNoCompression) {
    // compressed page, uncompress, update cache
    status_ = UncompressBlockContents(slice_.data(), block_size_, contents_,
                                      footer_.version(), compression_dict_,
                                      ioptions_);
  } else {
    GetBlockContents();
  }

  InsertUncompressedBlockToPersistentCacheIfNeeded();

  return status_;
}

}  // namespace rocksdb
table/block_fetcher.h (new file, 75 lines)
@@ -0,0 +1,75 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once
#include "table/block.h"
#include "table/format.h"

namespace rocksdb {
class BlockFetcher {
 public:
  // Read the block identified by "handle" from "file".
  // The only relevant option is options.verify_checksums for now.
  // On failure return non-OK.
  // On success fill *result and return OK - caller owns *result
  // @param compression_dict Data for presetting the compression library's
  //    dictionary.
  BlockFetcher(RandomAccessFileReader* file,
               FilePrefetchBuffer* prefetch_buffer, const Footer& footer,
               const ReadOptions& read_options, const BlockHandle& handle,
               BlockContents* contents,
               const ImmutableCFOptions& ioptions,
               bool do_uncompress, const Slice& compression_dict,
               const PersistentCacheOptions& cache_options)
      : file_(file),
        prefetch_buffer_(prefetch_buffer),
        footer_(footer),
        read_options_(read_options),
        handle_(handle),
        contents_(contents),
        ioptions_(ioptions),
        do_uncompress_(do_uncompress),
        compression_dict_(compression_dict),
        cache_options_(cache_options) {}
  Status ReadBlockContents();

 private:
  static const uint32_t kDefaultStackBufferSize = 5000;

  RandomAccessFileReader* file_;
  FilePrefetchBuffer* prefetch_buffer_;
  const Footer& footer_;
  const ReadOptions& read_options_;
  const BlockHandle& handle_;
  BlockContents* contents_;
  const ImmutableCFOptions& ioptions_;
  bool do_uncompress_;
  const Slice& compression_dict_;
  const PersistentCacheOptions& cache_options_;
  Status status_;
  Slice slice_;
  char* used_buf_ = nullptr;
  size_t block_size_;
  std::unique_ptr<char[]> heap_buf_;
  char stack_buf_[kDefaultStackBufferSize];
  bool got_from_prefetch_buffer_ = false;
  rocksdb::CompressionType compression_type;

  // return true if found
  bool TryGetUncompressBlockFromPersistentCache();
  // return true if found
  bool TryGetFromPrefetchBuffer();
  bool TryGetCompressedBlockFromPersistentCache();
  void PrepareBufferForBlockFromFile();
  void GetBlockContents();
  void InsertCompressedBlockToPersistentCacheIfNeeded();
  void InsertUncompressedBlockToPersistentCacheIfNeeded();
  void CheckBlockChecksum();
};
}  // namespace rocksdb
table/format.cc (201 lines changed)
@@ -17,6 +17,7 @@
 #include "rocksdb/env.h"
 #include "table/block.h"
 #include "table/block_based_table_reader.h"
+#include "table/block_fetcher.h"
 #include "table/persistent_cache_helper.h"
 #include "util/coding.h"
 #include "util/compression.h"
@@ -40,7 +41,6 @@ extern const uint64_t kPlainTableMagicNumber;
 const uint64_t kLegacyPlainTableMagicNumber = 0;
 const uint64_t kPlainTableMagicNumber = 0;
 #endif
-const uint32_t DefaultStackBufferSize = 5000;

 bool ShouldReportDetailedTime(Env* env, Statistics* stats) {
   return env != nullptr && stats != nullptr &&
@@ -264,205 +264,6 @@ Status ReadFooterFromFile(RandomAccessFileReader* file,
   return Status::OK();
 }

-// Without anonymous namespace here, we fail the warning -Wmissing-prototypes
-namespace {
-Status CheckBlockChecksum(const ReadOptions& options, const Footer& footer,
-                          const Slice& contents, size_t block_size,
-                          RandomAccessFileReader* file,
-                          const BlockHandle& handle) {
-  Status s;
-  // Check the crc of the type and the block contents
-  if (options.verify_checksums) {
-    const char* data = contents.data();  // Pointer to where Read put the data
-    PERF_TIMER_GUARD(block_checksum_time);
-    uint32_t value = DecodeFixed32(data + block_size + 1);
-    uint32_t actual = 0;
-    switch (footer.checksum()) {
-      case kNoChecksum:
-        break;
-      case kCRC32c:
-        value = crc32c::Unmask(value);
-        actual = crc32c::Value(data, block_size + 1);
-        break;
-      case kxxHash:
-        actual = XXH32(data, static_cast<int>(block_size) + 1, 0);
-        break;
-      default:
-        s = Status::Corruption(
-            "unknown checksum type " + ToString(footer.checksum()) + " in " +
-            file->file_name() + " offset " + ToString(handle.offset()) +
-            " size " + ToString(block_size));
-    }
-    if (s.ok() && actual != value) {
-      s = Status::Corruption(
-          "block checksum mismatch: expected " + ToString(actual) + ", got " +
-          ToString(value) + " in " + file->file_name() + " offset " +
-          ToString(handle.offset()) + " size " + ToString(block_size));
-    }
-    if (!s.ok()) {
-      return s;
-    }
-  }
-  return s;
-}
-
-// Read a block and check its CRC
-// contents is the result of reading.
-// According to the implementation of file->Read, contents may not point to buf
-Status ReadBlock(RandomAccessFileReader* file, const Footer& footer,
-                 const ReadOptions& options, const BlockHandle& handle,
-                 Slice* contents, /* result of reading */ char* buf) {
-  size_t n = static_cast<size_t>(handle.size());
-  Status s;
-
-  {
-    PERF_TIMER_GUARD(block_read_time);
-    s = file->Read(handle.offset(), n + kBlockTrailerSize, contents, buf);
-  }
-
-  PERF_COUNTER_ADD(block_read_count, 1);
-  PERF_COUNTER_ADD(block_read_byte, n + kBlockTrailerSize);
-
-  if (!s.ok()) {
-    return s;
-  }
-  if (contents->size() != n + kBlockTrailerSize) {
-    return Status::Corruption("truncated block read from " + file->file_name() +
-                              " offset " + ToString(handle.offset()) +
-                              ", expected " + ToString(n + kBlockTrailerSize) +
-                              " bytes, got " + ToString(contents->size()));
-  }
-  return CheckBlockChecksum(options, footer, *contents, n, file, handle);
-}
-
-}  // namespace
-
-Status ReadBlockContents(RandomAccessFileReader* file,
-                         FilePrefetchBuffer* prefetch_buffer,
-                         const Footer& footer, const ReadOptions& read_options,
-                         const BlockHandle& handle, BlockContents* contents,
-                         const ImmutableCFOptions& ioptions,
-                         bool decompression_requested,
-                         const Slice& compression_dict,
-                         const PersistentCacheOptions& cache_options) {
-  Status status;
-  Slice slice;
-  size_t n = static_cast<size_t>(handle.size());
-  std::unique_ptr<char[]> heap_buf;
-  char stack_buf[DefaultStackBufferSize];
-  char* used_buf = nullptr;
-  rocksdb::CompressionType compression_type;
-
-  if (cache_options.persistent_cache &&
-      !cache_options.persistent_cache->IsCompressed()) {
-    status = PersistentCacheHelper::LookupUncompressedPage(cache_options,
-                                                           handle, contents);
-    if (status.ok()) {
-      // uncompressed page is found for the block handle
-      return status;
-    } else {
-      // uncompressed page is not found
-      if (ioptions.info_log && !status.IsNotFound()) {
-        assert(!status.ok());
-        ROCKS_LOG_INFO(ioptions.info_log,
-                       "Error reading from persistent cache. %s",
-                       status.ToString().c_str());
-      }
-    }
-  }
-
-  bool got_from_prefetch_buffer = false;
-  if (prefetch_buffer != nullptr &&
-      prefetch_buffer->TryReadFromCache(
-          handle.offset(),
-          static_cast<size_t>(handle.size()) + kBlockTrailerSize, &slice)) {
-    status =
-        CheckBlockChecksum(read_options, footer, slice,
-                           static_cast<size_t>(handle.size()), file, handle);
-    if (!status.ok()) {
-      return status;
-    }
-    got_from_prefetch_buffer = true;
-    used_buf = const_cast<char*>(slice.data());
-  } else if (cache_options.persistent_cache &&
-             cache_options.persistent_cache->IsCompressed()) {
-    // lookup uncompressed cache mode p-cache
-    status = PersistentCacheHelper::LookupRawPage(
-        cache_options, handle, &heap_buf, n + kBlockTrailerSize);
-  } else {
-    status = Status::NotFound();
-  }
-
-  if (!got_from_prefetch_buffer) {
-    if (status.ok()) {
-      // cache hit
-      used_buf = heap_buf.get();
-      slice = Slice(heap_buf.get(), n);
-    } else {
-      if (ioptions.info_log && !status.IsNotFound()) {
-        assert(!status.ok());
-        ROCKS_LOG_INFO(ioptions.info_log,
-                       "Error reading from persistent cache. %s",
-                       status.ToString().c_str());
-      }
-      // cache miss read from device
-      if (decompression_requested &&
-          n + kBlockTrailerSize < DefaultStackBufferSize) {
-        // If we've got a small enough hunk of data, read it in to the
-        // trivially allocated stack buffer instead of needing a full malloc()
-        used_buf = &stack_buf[0];
-      } else {
-        heap_buf = std::unique_ptr<char[]>(new char[n + kBlockTrailerSize]);
-        used_buf = heap_buf.get();
-      }
-
-      status = ReadBlock(file, footer, read_options, handle, &slice, used_buf);
-      if (status.ok() && read_options.fill_cache &&
-          cache_options.persistent_cache &&
-          cache_options.persistent_cache->IsCompressed()) {
-        // insert to raw cache
-        PersistentCacheHelper::InsertRawPage(cache_options, handle, used_buf,
-                                             n + kBlockTrailerSize);
-      }
-    }
-
-    if (!status.ok()) {
-      return status;
-    }
-  }
-
-  PERF_TIMER_GUARD(block_decompress_time);
-
-  compression_type = static_cast<rocksdb::CompressionType>(slice.data()[n]);
-
-  if (decompression_requested && compression_type != kNoCompression) {
-    // compressed page, uncompress, update cache
-    status = UncompressBlockContents(slice.data(), n, contents,
-                                     footer.version(), compression_dict,
-                                     ioptions);
-  } else if (slice.data() != used_buf) {
-    // the slice content is not the buffer provided
-    *contents = BlockContents(Slice(slice.data(), n), false, compression_type);
-  } else {
-    // page is uncompressed, the buffer either stack or heap provided
-    if (got_from_prefetch_buffer || used_buf == &stack_buf[0]) {
-      heap_buf = std::unique_ptr<char[]>(new char[n]);
-      memcpy(heap_buf.get(), used_buf, n);
-    }
-    *contents = BlockContents(std::move(heap_buf), n, true, compression_type);
-  }
-
-  if (status.ok() && !got_from_prefetch_buffer && read_options.fill_cache &&
-      cache_options.persistent_cache &&
-      !cache_options.persistent_cache->IsCompressed()) {
-    // insert to uncompressed cache
-    PersistentCacheHelper::InsertUncompressedPage(cache_options, handle,
-                                                  *contents);
-  }
-
-  return status;
-}
-
 Status UncompressBlockContentsForCompressionType(
     const char* data, size_t n, BlockContents* contents,
     uint32_t format_version, const Slice& compression_dict,
@@ -22,7 +22,6 @@

 namespace rocksdb {

 class Block;
 class RandomAccessFile;
 struct ReadOptions;

@@ -11,6 +11,7 @@
 #include "rocksdb/table.h"
 #include "rocksdb/table_properties.h"
 #include "table/block.h"
+#include "table/block_fetcher.h"
 #include "table/format.h"
 #include "table/internal_iterator.h"
 #include "table/persistent_cache_helper.h"
@@ -176,8 +177,13 @@ Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file,
   ReadOptions read_options;
   read_options.verify_checksums = false;
   Status s;
-  s = ReadBlockContents(file, prefetch_buffer, footer, read_options, handle,
-                        &block_contents, ioptions, false /* decompress */);
+  Slice compression_dict;
+  PersistentCacheOptions cache_options;
+
+  BlockFetcher block_fetcher(
+      file, prefetch_buffer, footer, read_options, handle, &block_contents,
+      ioptions, false /* decompress */, compression_dict, cache_options);
+  s = block_fetcher.ReadBlockContents();

   if (!s.ok()) {
     return s;
@@ -292,9 +298,14 @@ Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size,
   BlockContents metaindex_contents;
   ReadOptions read_options;
   read_options.verify_checksums = false;
-  s = ReadBlockContents(file, nullptr /* prefetch_buffer */, footer,
-                        read_options, metaindex_handle, &metaindex_contents,
-                        ioptions, false /* decompress */);
+  Slice compression_dict;
+  PersistentCacheOptions cache_options;
+
+  BlockFetcher block_fetcher(
+      file, nullptr /* prefetch_buffer */, footer, read_options,
+      metaindex_handle, &metaindex_contents, ioptions, false /* decompress */,
+      compression_dict, cache_options);
+  s = block_fetcher.ReadBlockContents();
   if (!s.ok()) {
     return s;
   }
@@ -350,9 +361,13 @@ Status FindMetaBlock(RandomAccessFileReader* file, uint64_t file_size,
   BlockContents metaindex_contents;
   ReadOptions read_options;
   read_options.verify_checksums = false;
-  s = ReadBlockContents(file, nullptr /* prefetch_buffer */, footer,
-                        read_options, metaindex_handle, &metaindex_contents,
-                        ioptions, false /* do decompression */);
+  Slice compression_dict;
+  PersistentCacheOptions cache_options;
+  BlockFetcher block_fetcher(
+      file, nullptr /* prefetch_buffer */, footer, read_options,
+      metaindex_handle, &metaindex_contents, ioptions,
+      false /* do decompression */, compression_dict, cache_options);
+  s = block_fetcher.ReadBlockContents();
   if (!s.ok()) {
     return s;
   }
@@ -384,9 +399,14 @@ Status ReadMetaBlock(RandomAccessFileReader* file,
   BlockContents metaindex_contents;
   ReadOptions read_options;
   read_options.verify_checksums = false;
-  status = ReadBlockContents(file, prefetch_buffer, footer, read_options,
+  Slice compression_dict;
+  PersistentCacheOptions cache_options;
+
+  BlockFetcher block_fetcher(file, prefetch_buffer, footer, read_options,
                              metaindex_handle, &metaindex_contents, ioptions,
-                             false /* decompress */);
+                             false /* decompress */, compression_dict,
+                             cache_options);
+  status = block_fetcher.ReadBlockContents();
   if (!status.ok()) {
     return status;
   }
@@ -406,9 +426,10 @@ Status ReadMetaBlock(RandomAccessFileReader* file,
   }

   // Reading metablock
-  return ReadBlockContents(file, prefetch_buffer, footer, read_options,
-                           block_handle, contents, ioptions,
-                           false /* decompress */);
+  BlockFetcher block_fetcher2(
+      file, prefetch_buffer, footer, read_options, block_handle, contents,
+      ioptions, false /* decompress */, compression_dict, cache_options);
+  return block_fetcher2.ReadBlockContents();
 }

 }  // namespace rocksdb