Fix MultiGet crash when no_block_cache is set (#5991)
Summary: This PR fixes https://github.com/facebook/rocksdb/issues/5975. In ```BlockBasedTable::RetrieveMultipleBlocks()```, we were calling ```MaybeReadBlockAndLoadToCache()```, which is a no-op if neither an uncompressed nor a compressed block cache is configured; in that case the block cache entry is never populated and a later assertion fails. The fix checks whether the entry was actually populated and, if not, falls through to set up the block explicitly.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/5991

Test Plan:
1. Add unit tests that fail with the old code and pass with the new
2. make check and asan_check

Cc spetrunia

Differential Revision: D18272744

Pulled By: anand1976

fbshipit-source-id: e62fa6090d1a6adf84fcd51dfd6859b03c6aebfe
Commit: 98e5189fb0 (parent: 3353b7141d)
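For context (not part of the commit), here is a minimal sketch of the configuration that hits the affected path: ```BlockBasedTableOptions::no_block_cache``` set and no compressed block cache, followed by a MultiGet that has to read data blocks from an SST file. It roughly mirrors the setup in the new unit tests; the DB path and keys are made up, and actually reaching the parallel-read path in ```BlockBasedTable::RetrieveMultipleBlocks()``` may require more keys and data blocks than shown.

```cpp
#include <cassert>
#include <string>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;

  rocksdb::BlockBasedTableOptions table_options;
  table_options.no_block_cache = true;  // no uncompressed block cache
  // block_cache_compressed is left at its default (nullptr), so there is no
  // compressed block cache either and MaybeReadBlockAndLoadToCache() has
  // nowhere to insert the block.
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/multiget_repro", &db);
  assert(s.ok());

  db->Put(rocksdb::WriteOptions(), "key1", "value1");
  db->Put(rocksdb::WriteOptions(), "key2", "value2");
  db->Flush(rocksdb::FlushOptions());  // force the reads to hit the SST file

  // Before the fix, a MultiGet that read blocks from the file with this
  // configuration could trip the assertion described above.
  std::vector<rocksdb::Slice> keys{"key1", "key2"};
  std::vector<std::string> values;
  std::vector<rocksdb::Status> statuses =
      db->MultiGet(rocksdb::ReadOptions(), keys, &values);
  for (const auto& st : statuses) {
    assert(st.ok());
  }
  delete db;
  return 0;
}
```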
HISTORY.md
@@ -1,4 +1,8 @@
 # Rocksdb Change Log
+## Unreleased
+### Bug Fixes
+* Fix an assertion failure in MultiGet() when BlockBasedTableOptions::no_block_cache is true and there is no compressed block cache
+
 ## 6.5.1 (10/16/2019)
 ### Bug Fixes
 * Revert the feature "Merging iterator to avoid child iterator reseek for some cases (#5286)" since it might cause strange results when reseek happens with a different iterator upper bound.
db/db_basic_test.cc
@@ -1566,8 +1566,12 @@ class DBBasicTestWithParallelIO
     Options options = CurrentOptions();
     Random rnd(301);
     BlockBasedTableOptions table_options;
-    table_options.pin_l0_filter_and_index_blocks_in_cache = true;
     table_options.block_cache = uncompressed_cache_;
+    if (table_options.block_cache == nullptr) {
+      table_options.no_block_cache = true;
+    } else {
+      table_options.pin_l0_filter_and_index_blocks_in_cache = true;
+    }
     table_options.block_cache_compressed = compressed_cache_;
     table_options.flush_block_policy_factory.reset(
         new MyFlushBlockPolicyFactory());
@@ -1609,6 +1613,9 @@ class DBBasicTestWithParallelIO
   }
 
   bool fill_cache() { return fill_cache_; }
+  bool compression_enabled() { return compression_enabled_; }
+  bool has_compressed_cache() { return compressed_cache_ != nullptr; }
+  bool has_uncompressed_cache() { return uncompressed_cache_ != nullptr; }
 
   static void SetUpTestCase() {}
   static void TearDownTestCase() {}
@@ -1793,7 +1800,16 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
   ASSERT_TRUE(CheckValue(1, values[0].ToString()));
   ASSERT_TRUE(CheckValue(51, values[1].ToString()));
 
-  int expected_reads = random_reads + (fill_cache() ? 0 : 2);
+  bool read_from_cache = false;
+  if (fill_cache()) {
+    if (has_uncompressed_cache()) {
+      read_from_cache = true;
+    } else if (has_compressed_cache() && compression_enabled()) {
+      read_from_cache = true;
+    }
+  }
+
+  int expected_reads = random_reads + (read_from_cache ? 0 : 2);
   ASSERT_EQ(env_->random_read_counter_.Read(), expected_reads);
 
   keys.resize(10);
@@ -1811,7 +1827,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
     ASSERT_OK(statuses[i]);
     ASSERT_TRUE(CheckValue(key_ints[i], values[i].ToString()));
   }
-  expected_reads += (fill_cache() ? 2 : 4);
+  expected_reads += (read_from_cache ? 2 : 4);
   ASSERT_EQ(env_->random_read_counter_.Read(), expected_reads);
 }
 
@@ -1822,12 +1838,8 @@ INSTANTIATE_TEST_CASE_P(
     // Param 1 - Uncompressed cache enabled
     // Param 2 - Data compression enabled
     // Param 3 - ReadOptions::fill_cache
-    ::testing::Values(std::make_tuple(false, true, true, true),
-                      std::make_tuple(true, true, true, true),
-                      std::make_tuple(false, true, false, true),
-                      std::make_tuple(false, true, true, false),
-                      std::make_tuple(true, true, true, false),
-                      std::make_tuple(false, true, false, false)));
+    ::testing::Combine(::testing::Bool(), ::testing::Bool(),
+                       ::testing::Bool(), ::testing::Bool()));
 
 class DBBasicTestWithTimestampWithParam
     : public DBTestBase,
table/block_based/block_based_table_reader.cc
@@ -2368,37 +2368,46 @@ void BlockBasedTable::RetrieveMultipleBlocks(
       // MaybeReadBlockAndLoadToCache will insert into the block caches if
       // necessary. Since we're passing the raw block contents, it will
       // avoid looking up the block cache
-      s = MaybeReadBlockAndLoadToCache(nullptr, options, handle,
-            uncompression_dict, block_entry, BlockType::kData,
-            mget_iter->get_context, &lookup_data_block_context,
-            &raw_block_contents);
-    } else {
-      CompressionType compression_type =
-          raw_block_contents.get_compression_type();
-      BlockContents contents;
-      if (compression_type != kNoCompression) {
-        UncompressionContext context(compression_type);
-        UncompressionInfo info(context, uncompression_dict, compression_type);
-        s = UncompressBlockContents(info, req.result.data(), handle.size(),
-                                    &contents, footer.version(),
-                                    rep_->ioptions, memory_allocator);
-      } else {
-        if (scratch != nullptr) {
-          // If we used the scratch buffer, then the contents need to be
-          // copied to heap
-          Slice raw = Slice(req.result.data(), handle.size());
-          contents = BlockContents(CopyBufferToHeap(
-              GetMemoryAllocator(rep_->table_options), raw),
-              handle.size());
-        } else {
-          contents = std::move(raw_block_contents);
-        }
-      }
-      if (s.ok()) {
-        (*results)[idx_in_batch].SetOwnedValue(new Block(std::move(contents),
-            global_seqno, read_amp_bytes_per_bit, ioptions.statistics));
-      }
-    }
+      s = MaybeReadBlockAndLoadToCache(
+          nullptr, options, handle, uncompression_dict, block_entry,
+          BlockType::kData, mget_iter->get_context,
+          &lookup_data_block_context, &raw_block_contents);
+
+      // block_entry value could be null if no block cache is present, i.e
+      // BlockBasedTableOptions::no_block_cache is true and no compressed
+      // block cache is configured. In that case, fall
+      // through and set up the block explicitly
+      if (block_entry->GetValue() != nullptr) {
+        continue;
+      }
+    }
+
+    CompressionType compression_type =
+        raw_block_contents.get_compression_type();
+    BlockContents contents;
+    if (compression_type != kNoCompression) {
+      UncompressionContext context(compression_type);
+      UncompressionInfo info(context, uncompression_dict, compression_type);
+      s = UncompressBlockContents(info, req.result.data(), handle.size(),
+                                  &contents, footer.version(), rep_->ioptions,
+                                  memory_allocator);
+    } else {
+      if (scratch != nullptr) {
+        // If we used the scratch buffer, then the contents need to be
+        // copied to heap
+        Slice raw = Slice(req.result.data(), handle.size());
+        contents = BlockContents(
+            CopyBufferToHeap(GetMemoryAllocator(rep_->table_options), raw),
+            handle.size());
+      } else {
+        contents = std::move(raw_block_contents);
+      }
+    }
+    if (s.ok()) {
+      (*results)[idx_in_batch].SetOwnedValue(
+          new Block(std::move(contents), global_seqno,
+                    read_amp_bytes_per_bit, ioptions.statistics));
+    }
 
     (*statuses)[idx_in_batch] = s;
   }