Revert "Delete filter before closing the table"
Summary:
This reverts commit 89833577a8.
Closes https://github.com/facebook/rocksdb/pull/2240
Differential Revision: D4986982
Pulled By: maysamyabandeh
fbshipit-source-id: 56c4c07b7b5b7c6fe122d5c2f2199d221c8510c0
Commit 6798d1f3be (parent 89833577a8)
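For context on what this revert changes: the code being removed released the cached filter/index entries with a force_erase flag when a table was closed, while the restored code releases them normally and then erases the corresponding block-cache entries by key. The standalone sketch below illustrates that difference against the public rocksdb::Cache API. It is only an illustration, not part of this commit; the cache size, key, and main() harness are made up, and it assumes the 2017-era Cache API (deleter-based Insert, and Release with the optional force_erase flag, later renamed erase_if_last_ref) that the reverted code relied on.

#include <cassert>
#include <memory>
#include <string>

#include "rocksdb/cache.h"

int main() {
  std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(8 * 1024 * 1024);

  // Deleter for the dummy payload standing in for a cached filter/index block.
  auto deleter = [](const rocksdb::Slice& /*key*/, void* value) {
    delete static_cast<std::string*>(value);
  };

  rocksdb::Cache::Handle* handle = nullptr;
  cache->Insert("filter-block-key", new std::string("payload"), /*charge=*/1,
                deleter, &handle);

  // Restored behavior: a plain Release() drops our reference but can leave the
  // entry cached, so BlockBasedTable::Close() additionally erases it by key.
  cache->Release(handle);
  cache->Erase("filter-block-key");
  assert(cache->Lookup("filter-block-key") == nullptr);

  // Reverted behavior, for comparison: Release(handle, force_erase = true)
  // drops the entry as soon as the last reference goes away, with no Erase().
  cache->Insert("filter-block-key", new std::string("payload"), /*charge=*/1,
                deleter, &handle);
  cache->Release(handle, /*force_erase=*/true);
  assert(cache->Lookup("filter-block-key") == nullptr);

  return 0;
}

The diff hunks follow.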
@@ -101,11 +101,6 @@ Status BlockBasedTableFactory::SanitizeOptions(
         "Unsupported BlockBasedTable format_version. Please check "
         "include/rocksdb/table.h for more info");
   }
-  if (table_options_.block_cache &&
-      table_options_.no_block_cache) {
-    return Status::InvalidArgument("Block cache is initialized"
-        ", but it is disabled");
-  }
   return Status::OK();
 }
@@ -2063,17 +2063,21 @@ Status BlockBasedTable::DumpTable(WritableFile* out_file) {
 }

 void BlockBasedTable::Close() {
-  const bool force_erase = true;
-  auto block_cache = rep_->table_options.block_cache.get();
-  if (block_cache) {
-    // The filter_entry and index_entry's lifetime ends with the table's and is
-    // forced to be released.
-    // Note that if the xxx_entry is not set then the cached entry might remain
-    // in the cache for some more time. The destructor hence must take into
-    // account that the table object might no longer be available.
-    rep_->filter_entry.Release(block_cache, force_erase);
-    rep_->index_entry.Release(block_cache, force_erase);
-    rep_->range_del_entry.Release(block_cache);
-  }
+  rep_->filter_entry.Release(rep_->table_options.block_cache.get());
+  rep_->index_entry.Release(rep_->table_options.block_cache.get());
+  rep_->range_del_entry.Release(rep_->table_options.block_cache.get());
+  // cleanup index and filter blocks to avoid accessing dangling pointer
+  if (!rep_->table_options.no_block_cache) {
+    char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length];
+    // Get the filter block key
+    auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size,
+                           rep_->footer.metaindex_handle(), cache_key);
+    rep_->table_options.block_cache.get()->Erase(key);
+    // Get the index block key
+    key = GetCacheKeyFromOffset(rep_->cache_key_prefix,
+                                rep_->cache_key_prefix_size,
+                                rep_->dummy_index_reader_offset, cache_key);
+    rep_->table_options.block_cache.get()->Erase(key);
+  }
 }
@@ -370,10 +370,9 @@ struct BlockBasedTable::CachableEntry {
   CachableEntry(TValue* _value, Cache::Handle* _cache_handle)
       : value(_value), cache_handle(_cache_handle) {}
   CachableEntry() : CachableEntry(nullptr, nullptr) {}
-  void Release(Cache* cache, bool force_erase = false) {
+  void Release(Cache* cache) {
     if (cache_handle) {
-      bool erased = cache->Release(cache_handle, force_erase);
-      assert(!force_erase || erased);
+      cache->Release(cache_handle);
       value = nullptr;
       cache_handle = nullptr;
     }
@@ -82,21 +82,16 @@ PartitionedFilterBlockReader::PartitionedFilterBlockReader(
     : FilterBlockReader(contents.data.size(), stats, _whole_key_filtering),
       prefix_extractor_(prefix_extractor),
       comparator_(comparator),
-      table_(table),
-      block_cache_(table_->rep_->table_options.block_cache.get()) {
+      table_(table) {
   idx_on_fltr_blk_.reset(new Block(std::move(contents),
                                    kDisableGlobalSequenceNumber,
                                    0 /* read_amp_bytes_per_bit */, stats));
 }

 PartitionedFilterBlockReader::~PartitionedFilterBlockReader() {
-  // The destructor might be called via cache eviction after the table is
-  // deleted. We should avoid using the table_ pointer in the destructor then.
-  if (block_cache_) {
-    ReadLock rl(&mu_);
-    for (auto it = handle_list_.begin(); it != handle_list_.end(); ++it) {
-      block_cache_->Release(*it);
-    }
+  ReadLock rl(&mu_);
+  for (auto it = handle_list_.begin(); it != handle_list_.end(); ++it) {
+    table_->rep_->table_options.block_cache.get()->Release(*it);
   }
 }
@@ -127,7 +122,7 @@ bool PartitionedFilterBlockReader::KeyMayMatch(
     return res;
   }
   if (LIKELY(filter_partition.IsSet())) {
-    filter_partition.Release(block_cache_);
+    filter_partition.Release(table_->rep_->table_options.block_cache.get());
   } else {
     delete filter_partition.value;
   }
@@ -159,7 +154,7 @@ bool PartitionedFilterBlockReader::PrefixMayMatch(
     return res;
   }
   if (LIKELY(filter_partition.IsSet())) {
-    filter_partition.Release(block_cache_);
+    filter_partition.Release(table_->rep_->table_options.block_cache.get());
   } else {
     delete filter_partition.value;
   }
@@ -187,7 +182,8 @@ PartitionedFilterBlockReader::GetFilterPartition(Slice* handle_value,
   auto s = fltr_blk_handle.DecodeFrom(handle_value);
   assert(s.ok());
   const bool is_a_filter_partition = true;
-  if (LIKELY(block_cache_ != nullptr)) {
+  auto block_cache = table_->rep_->table_options.block_cache.get();
+  if (LIKELY(block_cache != nullptr)) {
     bool pin_cached_filters =
         GetLevel() == 0 &&
         table_->rep_->table_options.pin_l0_filter_and_index_blocks_in_cache;
@@ -85,7 +85,6 @@ class PartitionedFilterBlockReader : public FilterBlockReader {
   std::unique_ptr<Block> idx_on_fltr_blk_;
   const Comparator& comparator_;
   const BlockBasedTable* table_;
-  Cache* block_cache_;
   std::unordered_map<uint64_t, FilterBlockReader*> filter_cache_;
   autovector<Cache::Handle*> handle_list_;
   port::RWMutex mu_;
@@ -9,6 +9,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file. See the AUTHORS file for names of contributors.

+#include <inttypes.h>
 #include <stdio.h>

 #include <algorithm>
@@ -1849,10 +1850,7 @@ TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) {
   // -- Table construction
   Options options;
   options.create_if_missing = true;
-  // Keep a ref to statistics to prevent it from being destructed before the
-  // block cache gets cleaned up upon the next table_factory.reset
-  auto statistics = CreateDBStatistics();
-  options.statistics = statistics;
+  options.statistics = CreateDBStatistics();

   // Enable the cache for index/filter blocks
   BlockBasedTableOptions table_options;
@@ -1932,17 +1930,15 @@ TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) {
   }
   // release the iterator so that the block cache can reset correctly.
   iter.reset();

   c.ResetTableReader();

   // -- PART 2: Open with very small block cache
   // In this test, no block will ever get hit since the block cache is
   // too small to fit even one entry.
   table_options.block_cache = NewLRUCache(1, 4);
+  options.statistics = CreateDBStatistics();
   options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  // Keep a ref to statistics to prevent it from being destructed before the
-  // block cache gets cleaned up upon the next table_factory.reset
-  statistics = CreateDBStatistics();
-  options.statistics = statistics;
   const ImmutableCFOptions ioptions2(options);
   c.Reopen(ioptions2);
   {
@@ -1997,10 +1993,7 @@ TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) {
   // Open table with filter policy
   table_options.filter_policy.reset(NewBloomFilterPolicy(1));
   options.table_factory.reset(new BlockBasedTableFactory(table_options));
-  // Keep a ref to statistics to prevent it from being destructed before the
-  // block cache gets cleaned up upon the next table_factory.reset
-  statistics = CreateDBStatistics();
-  options.statistics = statistics;
+  options.statistics = CreateDBStatistics();
   ImmutableCFOptions ioptions4(options);
   ASSERT_OK(c3.Reopen(ioptions4));
   reader = dynamic_cast<BlockBasedTable*>(c3.GetTableReader());