From e532877940fc6082ce8b524f8f810f483dcb3441 Mon Sep 17 00:00:00 2001 From: Aaron Gao Date: Fri, 3 Jun 2016 10:47:47 -0700 Subject: [PATCH] Add statistics field to show total size of index and filter blocks in block cache Summary: With `table_options.cache_index_and_filter_blocks = true`, index and filter blocks are stored in block cache. Then people are curious how much of the block cache total size is used by indexes and bloom filters. It would be nice to have a way to report that. It can help people tune performance and plan for optimized hardware settings. We add several enum values for db Statistics. BLOCK_CACHE_INDEX/FILTER_BYTES_INSERT - BLOCK_CACHE_INDEX/FILTER_BYTES_EVICT = current INDEX/FILTER total block size in bytes. Test Plan: write a test case called `DBBlockCacheTest.IndexAndFilterBlocksStats`. The result is: ``` [gzh@dev9927.prn1 ~/local/rocksdb] make db_block_cache_test -j64 && ./db_block_cache_test --gtest_filter=DBBlockCacheTest.IndexAndFilterBlocksStats Makefile:101: Warning: Compiling in debug mode. Don't use the resulting binary in production GEN util/build_version.cc make: `db_block_cache_test' is up to date. Note: Google Test filter = DBBlockCacheTest.IndexAndFilterBlocksStats [==========] Running 1 test from 1 test case. [----------] Global test environment set-up. [----------] 1 test from DBBlockCacheTest [ RUN ] DBBlockCacheTest.IndexAndFilterBlocksStats [ OK ] DBBlockCacheTest.IndexAndFilterBlocksStats (689 ms) [----------] 1 test from DBBlockCacheTest (689 ms total) [----------] Global test environment tear-down [==========] 1 test from 1 test case ran. (689 ms total) [ PASSED ] 1 test. 
``` Reviewers: IslamAbdelRahman, andrewkr, sdong Reviewed By: sdong Subscribers: andrewkr, dhruba, leveldb Differential Revision: https://reviews.facebook.net/D58677 --- db/db_block_cache_test.cc | 41 ++++++++ include/rocksdb/statistics.h | 14 +++ table/block_based_filter_block.cc | 5 +- table/block_based_filter_block.h | 2 +- table/block_based_filter_block_test.cc | 12 +-- table/block_based_table_reader.cc | 126 ++++++++++++++++--------- table/block_based_table_reader.h | 2 +- table/filter_block.h | 8 +- table/full_filter_block.cc | 11 ++- table/full_filter_block.h | 6 +- table/full_filter_block_test.cc | 8 +- 11 files changed, 172 insertions(+), 63 deletions(-) diff --git a/db/db_block_cache_test.cc b/db/db_block_cache_test.cc index afe1772ba..f68dd188d 100644 --- a/db/db_block_cache_test.cc +++ b/db/db_block_cache_test.cc @@ -281,6 +281,47 @@ TEST_F(DBBlockCacheTest, IndexAndFilterBlocksOfNewTableAddedToCache) { TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); } +TEST_F(DBBlockCacheTest, IndexAndFilterBlocksStats) { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + BlockBasedTableOptions table_options; + table_options.cache_index_and_filter_blocks = true; + // 200 bytes are enough to hold the first two blocks + std::shared_ptr cache = NewLRUCache(200, 0, false); + table_options.block_cache = cache; + table_options.filter_policy.reset(NewBloomFilterPolicy(20)); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_OK(Put(1, "key", "val")); + // Create a new table + ASSERT_OK(Flush(1)); + size_t index_bytes_insert = + TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT); + size_t filter_bytes_insert = + TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT); + ASSERT_GT(index_bytes_insert, 0); + ASSERT_GT(filter_bytes_insert, 0); + ASSERT_EQ(cache->GetUsage(), index_bytes_insert + 
filter_bytes_insert); + // set the cache capacity to the current usage + cache->SetCapacity(index_bytes_insert + filter_bytes_insert); + ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT), 0); + ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT), 0); + ASSERT_OK(Put(1, "key2", "val")); + // Create a new table + ASSERT_OK(Flush(1)); + // cache evicted old index and block entries + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT), + index_bytes_insert); + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT), + filter_bytes_insert); + ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT), + index_bytes_insert); + ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT), + filter_bytes_insert); +} + TEST_F(DBBlockCacheTest, ParanoidFileChecks) { Options options = CurrentOptions(); options.create_if_missing = true; diff --git a/include/rocksdb/statistics.h b/include/rocksdb/statistics.h index 2f14c444b..05688f5cb 100644 --- a/include/rocksdb/statistics.h +++ b/include/rocksdb/statistics.h @@ -39,10 +39,18 @@ enum Tickers : uint32_t { BLOCK_CACHE_INDEX_MISS, // # of times cache hit when accessing index block from block cache. BLOCK_CACHE_INDEX_HIT, + // # of bytes of index blocks inserted into cache + BLOCK_CACHE_INDEX_BYTES_INSERT, + // # of bytes of index block erased from cache + BLOCK_CACHE_INDEX_BYTES_EVICT, // # of times cache miss when accessing filter block from block cache. BLOCK_CACHE_FILTER_MISS, // # of times cache hit when accessing filter block from block cache. BLOCK_CACHE_FILTER_HIT, + // # of bytes of bloom filter blocks inserted into cache + BLOCK_CACHE_FILTER_BYTES_INSERT, + // # of bytes of bloom filter block erased from cache + BLOCK_CACHE_FILTER_BYTES_EVICT, // # of times cache miss when accessing data block from block cache. BLOCK_CACHE_DATA_MISS, // # of times cache hit when accessing data block from block cache. 
@@ -51,6 +59,7 @@ enum Tickers : uint32_t { BLOCK_CACHE_BYTES_READ, // # of bytes written into cache. BLOCK_CACHE_BYTES_WRITE, + // # of times bloom filter has avoided file reads. BLOOM_FILTER_USEFUL, @@ -190,8 +199,13 @@ const std::vector> TickersNameMap = { {BLOCK_CACHE_ADD_FAILURES, "rocksdb.block.cache.add.failures"}, {BLOCK_CACHE_INDEX_MISS, "rocksdb.block.cache.index.miss"}, {BLOCK_CACHE_INDEX_HIT, "rocksdb.block.cache.index.hit"}, + {BLOCK_CACHE_INDEX_BYTES_INSERT, "rocksdb.block.cache.index.bytes.insert"}, + {BLOCK_CACHE_INDEX_BYTES_EVICT, "rocksdb.block.cache.index.bytes.evict"}, {BLOCK_CACHE_FILTER_MISS, "rocksdb.block.cache.filter.miss"}, {BLOCK_CACHE_FILTER_HIT, "rocksdb.block.cache.filter.hit"}, + {BLOCK_CACHE_FILTER_BYTES_INSERT, + "rocksdb.block.cache.filter.bytes.insert"}, + {BLOCK_CACHE_FILTER_BYTES_EVICT, "rocksdb.block.cache.filter.bytes.evict"}, {BLOCK_CACHE_DATA_MISS, "rocksdb.block.cache.data.miss"}, {BLOCK_CACHE_DATA_HIT, "rocksdb.block.cache.data.hit"}, {BLOCK_CACHE_BYTES_READ, "rocksdb.block.cache.bytes.read"}, diff --git a/table/block_based_filter_block.cc b/table/block_based_filter_block.cc index e65ee280d..0685d54a5 100644 --- a/table/block_based_filter_block.cc +++ b/table/block_based_filter_block.cc @@ -161,8 +161,9 @@ void BlockBasedFilterBlockBuilder::GenerateFilter() { BlockBasedFilterBlockReader::BlockBasedFilterBlockReader( const SliceTransform* prefix_extractor, const BlockBasedTableOptions& table_opt, bool whole_key_filtering, - BlockContents&& contents) - : policy_(table_opt.filter_policy.get()), + BlockContents&& contents, Statistics* stats) + : FilterBlockReader(contents.data.size(), stats), + policy_(table_opt.filter_policy.get()), prefix_extractor_(prefix_extractor), whole_key_filtering_(whole_key_filtering), data_(nullptr), diff --git a/table/block_based_filter_block.h b/table/block_based_filter_block.h index a97309f2e..d11786f21 100644 --- a/table/block_based_filter_block.h +++ b/table/block_based_filter_block.h @@ -78,7 
+78,7 @@ class BlockBasedFilterBlockReader : public FilterBlockReader { BlockBasedFilterBlockReader(const SliceTransform* prefix_extractor, const BlockBasedTableOptions& table_opt, bool whole_key_filtering, - BlockContents&& contents); + BlockContents&& contents, Statistics* statistics); virtual bool IsBlockBased() override { return true; } virtual bool KeyMayMatch(const Slice& key, uint64_t block_offset = kNotValid) override; diff --git a/table/block_based_filter_block_test.cc b/table/block_based_filter_block_test.cc index d77def3d9..c28b0008d 100644 --- a/table/block_based_filter_block_test.cc +++ b/table/block_based_filter_block_test.cc @@ -58,7 +58,7 @@ TEST_F(FilterBlockTest, EmptyBuilder) { BlockContents block(builder.Finish(), false, kNoCompression); ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block.data)); BlockBasedFilterBlockReader reader(nullptr, table_options_, true, - std::move(block)); + std::move(block), nullptr); ASSERT_TRUE(reader.KeyMayMatch("foo", 0)); ASSERT_TRUE(reader.KeyMayMatch("foo", 100000)); } @@ -75,7 +75,7 @@ TEST_F(FilterBlockTest, SingleChunk) { builder.Add("hello"); BlockContents block(builder.Finish(), false, kNoCompression); BlockBasedFilterBlockReader reader(nullptr, table_options_, true, - std::move(block)); + std::move(block), nullptr); ASSERT_TRUE(reader.KeyMayMatch("foo", 100)); ASSERT_TRUE(reader.KeyMayMatch("bar", 100)); ASSERT_TRUE(reader.KeyMayMatch("box", 100)); @@ -107,7 +107,7 @@ TEST_F(FilterBlockTest, MultiChunk) { BlockContents block(builder.Finish(), false, kNoCompression); BlockBasedFilterBlockReader reader(nullptr, table_options_, true, - std::move(block)); + std::move(block), nullptr); // Check first filter ASSERT_TRUE(reader.KeyMayMatch("foo", 0)); @@ -153,7 +153,7 @@ TEST_F(BlockBasedFilterBlockTest, BlockBasedEmptyBuilder) { BlockContents block(builder->Finish(), false, kNoCompression); ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block.data)); FilterBlockReader* reader = new 
BlockBasedFilterBlockReader( - nullptr, table_options_, true, std::move(block)); + nullptr, table_options_, true, std::move(block), nullptr); ASSERT_TRUE(reader->KeyMayMatch("foo", 0)); ASSERT_TRUE(reader->KeyMayMatch("foo", 100000)); @@ -174,7 +174,7 @@ TEST_F(BlockBasedFilterBlockTest, BlockBasedSingleChunk) { builder->Add("hello"); BlockContents block(builder->Finish(), false, kNoCompression); FilterBlockReader* reader = new BlockBasedFilterBlockReader( - nullptr, table_options_, true, std::move(block)); + nullptr, table_options_, true, std::move(block), nullptr); ASSERT_TRUE(reader->KeyMayMatch("foo", 100)); ASSERT_TRUE(reader->KeyMayMatch("bar", 100)); ASSERT_TRUE(reader->KeyMayMatch("box", 100)); @@ -210,7 +210,7 @@ TEST_F(BlockBasedFilterBlockTest, BlockBasedMultiChunk) { BlockContents block(builder->Finish(), false, kNoCompression); FilterBlockReader* reader = new BlockBasedFilterBlockReader( - nullptr, table_options_, true, std::move(block)); + nullptr, table_options_, true, std::move(block), nullptr); // Check first filter ASSERT_TRUE(reader->KeyMayMatch("foo", 0)); diff --git a/table/block_based_table_reader.cc b/table/block_based_table_reader.cc index 55ecc4d30..080f4a1b1 100644 --- a/table/block_based_table_reader.cc +++ b/table/block_based_table_reader.cc @@ -6,7 +6,6 @@ // Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. See the AUTHORS file for names of contributors. - #include "table/block_based_table_reader.h" #include @@ -89,6 +88,9 @@ void DeleteCachedEntry(const Slice& key, void* value) { delete entry; } +void DeleteCachedFilterEntry(const Slice& key, void* value); +void DeleteCachedIndexEntry(const Slice& key, void* value); + // Release the cached entry and decrement its ref count. 
void ReleaseCachedEntry(void* arg, void* h) { Cache* cache = reinterpret_cast(arg); @@ -137,8 +139,8 @@ Cache::Handle* GetEntryFromCache(Cache* block_cache, const Slice& key, // IndexReader is the interface that provide the functionality for index access. class BlockBasedTable::IndexReader { public: - explicit IndexReader(const Comparator* comparator) - : comparator_(comparator) {} + explicit IndexReader(const Comparator* comparator, Statistics* stats) + : comparator_(comparator), statistics_(stats) {} virtual ~IndexReader() {} @@ -152,13 +154,17 @@ class BlockBasedTable::IndexReader { virtual size_t size() const = 0; // Memory usage of the index block virtual size_t usable_size() const = 0; - + // return the statistics pointer + virtual Statistics* statistics() const { return statistics_; } // Report an approximation of how much memory has been used other than memory // that was allocated in block cache. virtual size_t ApproximateMemoryUsage() const = 0; protected: const Comparator* comparator_; + + private: + Statistics* statistics_; }; // Index that allows binary search lookup for the first key of each block. 
@@ -173,7 +179,8 @@ class BinarySearchIndexReader : public IndexReader { static Status Create(RandomAccessFileReader* file, const Footer& footer, const BlockHandle& index_handle, Env* env, const Comparator* comparator, IndexReader** index_reader, - const PersistentCacheOptions& cache_options) { + const PersistentCacheOptions& cache_options, + Statistics* statistics) { std::unique_ptr index_block; auto s = ReadBlockFromFile(file, footer, ReadOptions(), index_handle, &index_block, env, true /* decompress */, @@ -181,8 +188,8 @@ class BinarySearchIndexReader : public IndexReader { /*info_log*/ nullptr); if (s.ok()) { - *index_reader = - new BinarySearchIndexReader(comparator, std::move(index_block)); + *index_reader = new BinarySearchIndexReader( + comparator, std::move(index_block), statistics); } return s; @@ -205,8 +212,9 @@ class BinarySearchIndexReader : public IndexReader { private: BinarySearchIndexReader(const Comparator* comparator, - std::unique_ptr&& index_block) - : IndexReader(comparator), index_block_(std::move(index_block)) { + std::unique_ptr&& index_block, + Statistics* stats) + : IndexReader(comparator, stats), index_block_(std::move(index_block)) { assert(index_block_ != nullptr); } std::unique_ptr index_block_; @@ -216,14 +224,12 @@ class BinarySearchIndexReader : public IndexReader { // key. 
class HashIndexReader : public IndexReader { public: - static Status Create(const SliceTransform* hash_key_extractor, - const Footer& footer, RandomAccessFileReader* file, - Env* env, const Comparator* comparator, - const BlockHandle& index_handle, - InternalIterator* meta_index_iter, - IndexReader** index_reader, - bool hash_index_allow_collision, - const PersistentCacheOptions& cache_options) { + static Status Create( + const SliceTransform* hash_key_extractor, const Footer& footer, + RandomAccessFileReader* file, Env* env, const Comparator* comparator, + const BlockHandle& index_handle, InternalIterator* meta_index_iter, + IndexReader** index_reader, bool hash_index_allow_collision, + const PersistentCacheOptions& cache_options, Statistics* statistics) { std::unique_ptr index_block; auto s = ReadBlockFromFile(file, footer, ReadOptions(), index_handle, &index_block, env, true /* decompress */, @@ -239,7 +245,7 @@ class HashIndexReader : public IndexReader { // So, Create will succeed regardless, from this point on. 
auto new_index_reader = - new HashIndexReader(comparator, std::move(index_block)); + new HashIndexReader(comparator, std::move(index_block), statistics); *index_reader = new_index_reader; // Get prefixes block @@ -306,8 +312,8 @@ class HashIndexReader : public IndexReader { private: HashIndexReader(const Comparator* comparator, - std::unique_ptr&& index_block) - : IndexReader(comparator), index_block_(std::move(index_block)) { + std::unique_ptr&& index_block, Statistics* stats) + : IndexReader(comparator, stats), index_block_(std::move(index_block)) { assert(index_block_ != nullptr); } @@ -678,7 +684,7 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, // Set filter block if (rep->filter_policy) { - rep->filter.reset(ReadFilter(rep, nullptr)); + rep->filter.reset(ReadFilter(rep)); } } else { delete index_reader; @@ -899,7 +905,7 @@ Status BlockBasedTable::PutDataBlockToCache( return s; } -FilterBlockReader* BlockBasedTable::ReadFilter(Rep* rep, size_t* filter_size) { +FilterBlockReader* BlockBasedTable::ReadFilter(Rep* rep) { // TODO: We might want to unify with ReadBlockFromFile() if we start // requiring checksum verification in Table::Open. if (rep->filter_type == Rep::FilterType::kNoFilter) { @@ -915,23 +921,21 @@ FilterBlockReader* BlockBasedTable::ReadFilter(Rep* rep, size_t* filter_size) { return nullptr; } - if (filter_size) { - *filter_size = block.data.size(); - } - assert(rep->filter_policy); if (rep->filter_type == Rep::FilterType::kBlockFilter) { return new BlockBasedFilterBlockReader( rep->prefix_filtering ? 
rep->ioptions.prefix_extractor : nullptr, - rep->table_options, rep->whole_key_filtering, std::move(block)); + rep->table_options, rep->whole_key_filtering, std::move(block), + rep->ioptions.statistics); } else if (rep->filter_type == Rep::FilterType::kFullFilter) { auto filter_bits_reader = rep->filter_policy->GetFilterBitsReader(block.data); if (filter_bits_reader != nullptr) { return new FullFilterBlockReader( rep->prefix_filtering ? rep->ioptions.prefix_extractor : nullptr, - rep->whole_key_filtering, std::move(block), filter_bits_reader); + rep->whole_key_filtering, std::move(block), filter_bits_reader, + rep->ioptions.statistics); } } @@ -983,16 +987,15 @@ BlockBasedTable::CachableEntry BlockBasedTable::GetFilter( // Do not invoke any io. return CachableEntry(); } else { - size_t filter_size = 0; - filter = ReadFilter(rep_, &filter_size); + filter = ReadFilter(rep_); if (filter != nullptr) { - assert(filter_size > 0); - Status s = block_cache->Insert(key, filter, filter_size, - &DeleteCachedEntry, - &cache_handle); + assert(filter->size() > 0); + Status s = block_cache->Insert(key, filter, filter->size(), + &DeleteCachedFilterEntry, &cache_handle); if (s.ok()) { RecordTick(statistics, BLOCK_CACHE_ADD); - RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, filter_size); + RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, filter->size()); + RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, filter->size()); } else { RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES); delete filter; @@ -1050,13 +1053,14 @@ InternalIterator* BlockBasedTable::NewIndexIterator( s = CreateIndexReader(&index_reader); if (s.ok()) { s = block_cache->Insert(key, index_reader, index_reader->usable_size(), - &DeleteCachedEntry, &cache_handle); + &DeleteCachedIndexEntry, &cache_handle); } if (s.ok()) { + size_t usable_size = index_reader->usable_size(); RecordTick(statistics, BLOCK_CACHE_ADD); - RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, - index_reader->usable_size()); + 
RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usable_size); + RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usable_size); } else { RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES); // make sure if something goes wrong, index_reader shall remain intact. @@ -1537,6 +1541,7 @@ Status BlockBasedTable::CreateIndexReader( auto env = rep_->ioptions.env; auto comparator = &rep_->internal_comparator; const Footer& footer = rep_->footer; + Statistics* stats = rep_->ioptions.statistics; if (index_type_on_file == BlockBasedTableOptions::kHashSearch && rep_->ioptions.prefix_extractor == nullptr) { @@ -1551,7 +1556,7 @@ Status BlockBasedTable::CreateIndexReader( case BlockBasedTableOptions::kBinarySearch: { return BinarySearchIndexReader::Create( file, footer, footer.index_handle(), env, comparator, index_reader, - rep_->persistent_cache_options); + rep_->persistent_cache_options, stats); } case BlockBasedTableOptions::kHashSearch: { std::unique_ptr meta_guard; @@ -1567,7 +1572,7 @@ Status BlockBasedTable::CreateIndexReader( " Fall back to binary search index."); return BinarySearchIndexReader::Create( file, footer, footer.index_handle(), env, comparator, - index_reader, rep_->persistent_cache_options); + index_reader, rep_->persistent_cache_options, stats); } meta_index_iter = meta_iter_guard.get(); } @@ -1579,7 +1584,8 @@ Status BlockBasedTable::CreateIndexReader( return HashIndexReader::Create( rep_->internal_prefix_transform.get(), footer, file, env, comparator, footer.index_handle(), meta_index_iter, index_reader, - rep_->hash_index_allow_collision, rep_->persistent_cache_options); + rep_->hash_index_allow_collision, rep_->persistent_cache_options, + stats); } default: { std::string error_message = @@ -1704,7 +1710,8 @@ Status BlockBasedTable::DumpTable(WritableFile* out_file) { .ok()) { rep_->filter.reset(new BlockBasedFilterBlockReader( rep_->ioptions.prefix_extractor, table_options, - table_options.whole_key_filtering, std::move(block))); + 
table_options.whole_key_filtering, std::move(block), + rep_->ioptions.statistics)); } } } @@ -1732,6 +1739,19 @@ Status BlockBasedTable::DumpTable(WritableFile* out_file) { void BlockBasedTable::Close() { rep_->filter_entry.Release(rep_->table_options.block_cache.get()); rep_->index_entry.Release(rep_->table_options.block_cache.get()); + // cleanup index and filter blocks to avoid accessing dangling pointer + if (!rep_->table_options.no_block_cache) { + char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + // Get the filter block key + auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size, + rep_->footer.metaindex_handle(), cache_key); + rep_->table_options.block_cache.get()->Erase(key); + // Get the index block key + key = GetCacheKeyFromOffset(rep_->cache_key_prefix, + rep_->cache_key_prefix_size, + rep_->dummy_index_reader_offset, cache_key); + rep_->table_options.block_cache.get()->Erase(key); + } } Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) { @@ -1857,4 +1877,26 @@ Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) { return Status::OK(); } +namespace { + +void DeleteCachedFilterEntry(const Slice& key, void* value) { + FilterBlockReader* filter = reinterpret_cast(value); + if (filter->statistics() != nullptr) { + RecordTick(filter->statistics(), BLOCK_CACHE_FILTER_BYTES_EVICT, + filter->size()); + } + delete filter; +} + +void DeleteCachedIndexEntry(const Slice& key, void* value) { + IndexReader* index_reader = reinterpret_cast(value); + if (index_reader->statistics() != nullptr) { + RecordTick(index_reader->statistics(), BLOCK_CACHE_INDEX_BYTES_EVICT, + index_reader->usable_size()); + } + delete index_reader; +} + +} // anonymous namespace + } // namespace rocksdb diff --git a/table/block_based_table_reader.h b/table/block_based_table_reader.h index 37d760e01..28447687b 100644 --- a/table/block_based_table_reader.h +++ b/table/block_based_table_reader.h @@ -224,7 +224,7 @@ class BlockBasedTable : 
public TableReader { std::unique_ptr* iter); // Create the filter from the filter block. - static FilterBlockReader* ReadFilter(Rep* rep, size_t* filter_size = nullptr); + static FilterBlockReader* ReadFilter(Rep* rep); static void SetupCacheKeyPrefix(Rep* rep, uint64_t file_size); diff --git a/table/filter_block.h b/table/filter_block.h index e326018f7..0612c3cb2 100644 --- a/table/filter_block.h +++ b/table/filter_block.h @@ -65,7 +65,9 @@ class FilterBlockBuilder { // BlockBased/Full FilterBlock would be called in the same way. class FilterBlockReader { public: - explicit FilterBlockReader() {} + explicit FilterBlockReader() : size_(0), statistics_(nullptr) {} + explicit FilterBlockReader(size_t s, Statistics* stats) + : size_(s), statistics_(stats) {} virtual ~FilterBlockReader() {} virtual bool IsBlockBased() = 0; // If is blockbased filter @@ -74,6 +76,8 @@ class FilterBlockReader { virtual bool PrefixMayMatch(const Slice& prefix, uint64_t block_offset = kNotValid) = 0; virtual size_t ApproximateMemoryUsage() const = 0; + virtual size_t size() const { return size_; } + virtual Statistics* statistics() const { return statistics_; } // convert this object to a human readable form virtual std::string ToString() const { @@ -85,6 +89,8 @@ class FilterBlockReader { // No copying allowed FilterBlockReader(const FilterBlockReader&); void operator=(const FilterBlockReader&); + size_t size_; + Statistics* statistics_; }; } // namespace rocksdb diff --git a/table/full_filter_block.cc b/table/full_filter_block.cc index 11c8a016c..1774c2d79 100644 --- a/table/full_filter_block.cc +++ b/table/full_filter_block.cc @@ -54,8 +54,10 @@ Slice FullFilterBlockBuilder::Finish() { FullFilterBlockReader::FullFilterBlockReader( const SliceTransform* prefix_extractor, bool whole_key_filtering, - const Slice& contents, FilterBitsReader* filter_bits_reader) - : prefix_extractor_(prefix_extractor), + const Slice& contents, FilterBitsReader* filter_bits_reader, + Statistics* stats) + : 
FilterBlockReader(contents.size(), stats), + prefix_extractor_(prefix_extractor), whole_key_filtering_(whole_key_filtering), contents_(contents) { assert(filter_bits_reader != nullptr); @@ -64,9 +66,10 @@ FullFilterBlockReader::FullFilterBlockReader( FullFilterBlockReader::FullFilterBlockReader( const SliceTransform* prefix_extractor, bool whole_key_filtering, - BlockContents&& contents, FilterBitsReader* filter_bits_reader) + BlockContents&& contents, FilterBitsReader* filter_bits_reader, + Statistics* stats) : FullFilterBlockReader(prefix_extractor, whole_key_filtering, - contents.data, filter_bits_reader) { + contents.data, filter_bits_reader, stats) { block_contents_ = std::move(contents); } diff --git a/table/full_filter_block.h b/table/full_filter_block.h index 27e10eba1..6634f505a 100644 --- a/table/full_filter_block.h +++ b/table/full_filter_block.h @@ -75,11 +75,13 @@ class FullFilterBlockReader : public FilterBlockReader { explicit FullFilterBlockReader(const SliceTransform* prefix_extractor, bool whole_key_filtering, const Slice& contents, - FilterBitsReader* filter_bits_reader); + FilterBitsReader* filter_bits_reader, + Statistics* statistics); explicit FullFilterBlockReader(const SliceTransform* prefix_extractor, bool whole_key_filtering, BlockContents&& contents, - FilterBitsReader* filter_bits_reader); + FilterBitsReader* filter_bits_reader, + Statistics* statistics); // bits_reader is created in filter_policy, it should be passed in here // directly. 
and be deleted here diff --git a/table/full_filter_block_test.cc b/table/full_filter_block_test.cc index 5840cb035..51ce1aaa9 100644 --- a/table/full_filter_block_test.cc +++ b/table/full_filter_block_test.cc @@ -110,7 +110,7 @@ TEST_F(PluginFullFilterBlockTest, PluginEmptyBuilder) { FullFilterBlockReader reader( nullptr, true, block, - table_options_.filter_policy->GetFilterBitsReader(block)); + table_options_.filter_policy->GetFilterBitsReader(block), nullptr); // Remain same symantic with blockbased filter ASSERT_TRUE(reader.KeyMayMatch("foo")); } @@ -126,7 +126,7 @@ TEST_F(PluginFullFilterBlockTest, PluginSingleChunk) { Slice block = builder.Finish(); FullFilterBlockReader reader( nullptr, true, block, - table_options_.filter_policy->GetFilterBitsReader(block)); + table_options_.filter_policy->GetFilterBitsReader(block), nullptr); ASSERT_TRUE(reader.KeyMayMatch("foo")); ASSERT_TRUE(reader.KeyMayMatch("bar")); ASSERT_TRUE(reader.KeyMayMatch("box")); @@ -155,7 +155,7 @@ TEST_F(FullFilterBlockTest, EmptyBuilder) { FullFilterBlockReader reader( nullptr, true, block, - table_options_.filter_policy->GetFilterBitsReader(block)); + table_options_.filter_policy->GetFilterBitsReader(block), nullptr); // Remain same symantic with blockbased filter ASSERT_TRUE(reader.KeyMayMatch("foo")); } @@ -171,7 +171,7 @@ TEST_F(FullFilterBlockTest, SingleChunk) { Slice block = builder.Finish(); FullFilterBlockReader reader( nullptr, true, block, - table_options_.filter_policy->GetFilterBitsReader(block)); + table_options_.filter_policy->GetFilterBitsReader(block), nullptr); ASSERT_TRUE(reader.KeyMayMatch("foo")); ASSERT_TRUE(reader.KeyMayMatch("bar")); ASSERT_TRUE(reader.KeyMayMatch("box"));