Fix MultiGet crash when no_block_cache is set (#5991)
Summary: This PR fixes https://github.com/facebook/rocksdb/issues/5975. In ```BlockBasedTable::RetrieveMultipleBlocks()```, we were calling ```MaybeReadBlockAndLoadToCache()```, which is a no-op if neither the uncompressed nor the compressed block cache is configured, and then assuming the block cache entry had been populated. The fix is to fall through and set up the block explicitly when the entry is still empty.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/5991

Test Plan:
1. Add unit tests that fail with the old code and pass with the new
2. make check and asan_check

Cc spetrunia

Differential Revision: D18272744

Pulled By: anand1976

fbshipit-source-id: e62fa6090d1a6adf84fcd51dfd6859b03c6aebfe
Commit 9836a1fa33 (parent 1da1f04231)
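For context, here is a minimal, hypothetical repro sketch based on the summary and the new unit test (it is not code from this commit): open a DB whose block-based table factory has `no_block_cache` set and no compressed block cache, flush a few keys to an SST file, and read them back through the batched `MultiGet()` API with the default `fill_cache = true`, which is the path that goes through `RetrieveMultipleBlocks()`. The DB path, keys, and error handling are illustrative only.

```cpp
#include <cassert>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

int main() {
  rocksdb::Options options;
  rocksdb::BlockBasedTableOptions table_options;
  table_options.no_block_cache = true;             // no uncompressed block cache
  table_options.block_cache_compressed = nullptr;  // and no compressed cache (default)
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  options.create_if_missing = true;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/no_block_cache_demo", &db);
  assert(s.ok());

  // Write a couple of keys and flush so reads come from an SST file.
  db->Put(rocksdb::WriteOptions(), "key1", "value1");
  db->Put(rocksdb::WriteOptions(), "key2", "value2");
  db->Flush(rocksdb::FlushOptions());

  // Batched MultiGet with default ReadOptions (fill_cache = true); with the
  // old code this configuration could hit the assertion described above.
  std::vector<rocksdb::Slice> keys{"key1", "key2"};
  std::vector<rocksdb::PinnableSlice> values(keys.size());
  std::vector<rocksdb::Status> statuses(keys.size());
  db->MultiGet(rocksdb::ReadOptions(), db->DefaultColumnFamily(), keys.size(),
               keys.data(), values.data(), statuses.data());

  delete db;
  return 0;
}
```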
@@ -12,6 +12,9 @@ file_creation_time of the oldest SST file in the DB.
 ### Performance Improvements
 * For 64-bit hashing, RocksDB is standardizing on a slightly modified preview version of XXH3. This function is now used for many non-persisted hashes, along with fastrange64() in place of the modulus operator, and some benchmarks show a slight improvement.
 
+### Bug Fixes
+* Fix an assertion failure in MultiGet() when BlockBasedTableOptions::no_block_cache is true and there is no compressed block cache
+
 ## 6.5.1 (10/16/2019)
 ### Bug Fixes
 * Revert the feature "Merging iterator to avoid child iterator reseek for some cases (#5286)" since it might cause strange results when reseek happens with a different iterator upper bound.
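The Performance Improvements note above mentions fastrange64() replacing the modulus operator for mapping a hash into a range. As a rough illustration only (not code from this commit, and not necessarily RocksDB's exact implementation), the widely known multiply-shift formulation looks like this:

```cpp
#include <cstdint>

// Sketch of the "fastrange" idea: map a 64-bit hash into [0, range) with a
// multiply and a shift instead of the slower modulus operator. This is the
// standard Lemire reduction; RocksDB's fastrange64() may differ in detail.
// Requires a compiler with unsigned __int128 support (GCC/Clang, 64-bit).
inline uint64_t FastRange64Sketch(uint64_t hash, uint64_t range) {
  return static_cast<uint64_t>(
      (static_cast<unsigned __int128>(hash) * range) >> 64);
}

// Example use: picking a hash-table bucket or cache shard.
// uint64_t bucket = FastRange64Sketch(hash_value, num_buckets);
```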
@@ -1577,8 +1577,12 @@ class DBBasicTestWithParallelIO
     Options options = CurrentOptions();
     Random rnd(301);
     BlockBasedTableOptions table_options;
-    table_options.pin_l0_filter_and_index_blocks_in_cache = true;
     table_options.block_cache = uncompressed_cache_;
+    if (table_options.block_cache == nullptr) {
+      table_options.no_block_cache = true;
+    } else {
+      table_options.pin_l0_filter_and_index_blocks_in_cache = true;
+    }
     table_options.block_cache_compressed = compressed_cache_;
     table_options.flush_block_policy_factory.reset(
         new MyFlushBlockPolicyFactory());
@@ -1614,6 +1618,9 @@ class DBBasicTestWithParallelIO
   int num_inserts_compressed() { return compressed_cache_->num_inserts(); }
 
   bool fill_cache() { return fill_cache_; }
+  bool compression_enabled() { return compression_enabled_; }
+  bool has_compressed_cache() { return compressed_cache_ != nullptr; }
+  bool has_uncompressed_cache() { return uncompressed_cache_ != nullptr; }
 
   static void SetUpTestCase() {}
   static void TearDownTestCase() {}
@@ -1788,7 +1795,16 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
   ASSERT_TRUE(CheckValue(1, values[0].ToString()));
   ASSERT_TRUE(CheckValue(51, values[1].ToString()));
 
-  int expected_reads = random_reads + (fill_cache() ? 0 : 2);
+  bool read_from_cache = false;
+  if (fill_cache()) {
+    if (has_uncompressed_cache()) {
+      read_from_cache = true;
+    } else if (has_compressed_cache() && compression_enabled()) {
+      read_from_cache = true;
+    }
+  }
+
+  int expected_reads = random_reads + (read_from_cache ? 0 : 2);
   ASSERT_EQ(env_->random_read_counter_.Read(), expected_reads);
 
   keys.resize(10);
@@ -1806,7 +1822,7 @@ TEST_P(DBBasicTestWithParallelIO, MultiGet) {
     ASSERT_OK(statuses[i]);
     ASSERT_TRUE(CheckValue(key_ints[i], values[i].ToString()));
   }
-  expected_reads += (fill_cache() ? 2 : 4);
+  expected_reads += (read_from_cache ? 2 : 4);
   ASSERT_EQ(env_->random_read_counter_.Read(), expected_reads);
 }
 
@@ -1817,12 +1833,8 @@ INSTANTIATE_TEST_CASE_P(
     // Param 1 - Uncompressed cache enabled
     // Param 2 - Data compression enabled
     // Param 3 - ReadOptions::fill_cache
-    ::testing::Values(std::make_tuple(false, true, true, true),
-                      std::make_tuple(true, true, true, true),
-                      std::make_tuple(false, true, false, true),
-                      std::make_tuple(false, true, true, false),
-                      std::make_tuple(true, true, true, false),
-                      std::make_tuple(false, true, false, false)));
+    ::testing::Combine(::testing::Bool(), ::testing::Bool(),
+                       ::testing::Bool(), ::testing::Bool()));
 
 class DBBasicTestWithTimestampWithParam
     : public DBTestBase,
@@ -2398,34 +2398,42 @@ void BlockBasedTable::RetrieveMultipleBlocks(
             nullptr, options, handle, uncompression_dict, block_entry,
             BlockType::kData, mget_iter->get_context,
             &lookup_data_block_context, &raw_block_contents);
-      } else {
-        CompressionType compression_type =
-            raw_block_contents.get_compression_type();
-        BlockContents contents;
-        if (compression_type != kNoCompression) {
-          UncompressionContext context(compression_type);
-          UncompressionInfo info(context, uncompression_dict, compression_type);
-          s = UncompressBlockContents(info, req.result.data(), handle.size(),
-                                      &contents, footer.version(),
-                                      rep_->ioptions, memory_allocator);
-        } else {
-          if (scratch != nullptr) {
-            // If we used the scratch buffer, then the contents need to be
-            // copied to heap
-            Slice raw = Slice(req.result.data(), handle.size());
-            contents = BlockContents(
-                CopyBufferToHeap(GetMemoryAllocator(rep_->table_options), raw),
-                handle.size());
-          } else {
-            contents = std::move(raw_block_contents);
-          }
-        }
-        if (s.ok()) {
-          (*results)[idx_in_batch].SetOwnedValue(
-              new Block(std::move(contents), global_seqno,
-                        read_amp_bytes_per_bit, ioptions.statistics));
-        }
-      }
+
+        // block_entry value could be null if no block cache is present, i.e
+        // BlockBasedTableOptions::no_block_cache is true and no compressed
+        // block cache is configured. In that case, fall
+        // through and set up the block explicitly
+        if (block_entry->GetValue() != nullptr) {
+          continue;
+        }
+      }
+
+      CompressionType compression_type =
+          raw_block_contents.get_compression_type();
+      BlockContents contents;
+      if (compression_type != kNoCompression) {
+        UncompressionContext context(compression_type);
+        UncompressionInfo info(context, uncompression_dict, compression_type);
+        s = UncompressBlockContents(info, req.result.data(), handle.size(),
+                                    &contents, footer.version(),
+                                    rep_->ioptions, memory_allocator);
+      } else {
+        if (scratch != nullptr) {
+          // If we used the scratch buffer, then the contents need to be
+          // copied to heap
+          Slice raw = Slice(req.result.data(), handle.size());
+          contents = BlockContents(
+              CopyBufferToHeap(GetMemoryAllocator(rep_->table_options), raw),
+              handle.size());
+        } else {
+          contents = std::move(raw_block_contents);
+        }
+      }
+      if (s.ok()) {
+        (*results)[idx_in_batch].SetOwnedValue(
+            new Block(std::move(contents), global_seqno,
+                      read_amp_bytes_per_bit, ioptions.statistics));
+      }
     }
     (*statuses)[idx_in_batch] = s;
   }
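In short, the change above stops assuming the cache path always produces a block: the early continue happens only when MaybeReadBlockAndLoadToCache() actually populated the entry, and every other case (including no_block_cache with no compressed cache) falls through to explicit decompression and block construction. The following self-contained sketch restates that control-flow pattern in generic terms; it is an analogy with made-up names, not RocksDB code.

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <unordered_map>

// Generic illustration of the bug pattern fixed above: a "maybe load via
// cache" helper does nothing when no cache exists, so the caller must not
// assume the entry was populated.
struct MaybeCache {
  std::unordered_map<std::string, std::string>* cache = nullptr;  // may be null

  // Returns the cached value if a cache is configured; otherwise it is a
  // no-op, mirroring MaybeReadBlockAndLoadToCache() without block caches.
  std::optional<std::string> MaybeLoad(const std::string& key,
                                       const std::string& raw) {
    if (cache == nullptr) {
      return std::nullopt;  // nothing inserted, nothing returned
    }
    auto it = cache->emplace(key, "decoded:" + raw).first;
    return it->second;
  }
};

std::string RetrieveBlock(MaybeCache& mc, const std::string& key,
                          const std::string& raw) {
  if (auto cached = mc.MaybeLoad(key, raw)) {
    return *cached;  // analogous to the new early `continue`
  }
  // Fall through and build the value explicitly, as the fix does when
  // block_entry->GetValue() is null.
  return "decoded:" + raw;
}

int main() {
  MaybeCache no_cache;  // like BlockBasedTableOptions::no_block_cache == true
  std::cout << RetrieveBlock(no_cache, "k", "v") << "\n";  // prints decoded:v

  std::unordered_map<std::string, std::string> storage;
  MaybeCache with_cache{&storage};
  std::cout << RetrieveBlock(with_cache, "k", "v") << "\n";  // prints decoded:v
  return 0;
}
```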