From a5ec5e3ea01a0dfffc4863666e29784ce21a1592 Mon Sep 17 00:00:00 2001
From: hx235 <83968999+hx235@users.noreply.github.com>
Date: Mon, 1 Nov 2021 14:26:50 -0700
Subject: [PATCH] Minor improvement to #8428 (Account for dictionary-building
 buffer in global memory limit) (#9032)

Summary:
Summary/Context:
- Renamed `cache_rev_mng` to `compression_dict_buffer_cache_res_mgr`
  - This distinguishes it from other potential `cache_res_mgr` in `BlockBasedTableBuilder` and uses the correct shorthand for the words "reservation" and "manager"
- Added `table_options.block_cache == nullptr` in addition to `table_options.no_block_cache == true` as a condition under which we don't create a `CacheReservationManager`
  - Theoretically `table_options.no_block_cache == true` is equivalent to `table_options.block_cache == nullptr` by API. But since passing `nullptr` into `CacheReservationManager`'s constructor would cause a segmentation fault, it does not hurt to directly verify `table_options.block_cache != nullptr` before passing it in
- Renamed `is_cache_full` to `exceeds_global_block_cache_limit`
  - This hides the implementation detail of cache reservation and emphasizes the concept/design intent of capping memory within the global block cache limit

Pull Request resolved: https://github.com/facebook/rocksdb/pull/9032

Test Plan:
- Passing existing tests

Reviewed By: ajkr

Differential Revision: D32005807

Pulled By: hx235

fbshipit-source-id: 619fd17bb924199de3db5924d8ab7dae53b1efa2
---
 .../block_based/block_based_table_builder.cc  | 29 ++++++++++---------
 1 file changed, 16 insertions(+), 13 deletions(-)

diff --git a/table/block_based/block_based_table_builder.cc b/table/block_based/block_based_table_builder.cc
index 9dec5c910..608bcc51f 100644
--- a/table/block_based/block_based_table_builder.cc
+++ b/table/block_based/block_based_table_builder.cc
@@ -316,7 +316,8 @@ struct BlockBasedTableBuilder::Rep {
   // `kBuffered` state is allowed only as long as the buffering of uncompressed
   // data blocks (see `data_block_buffers`) does not exceed `buffer_limit`.
   uint64_t buffer_limit;
-  std::unique_ptr<CacheReservationManager> cache_rev_mng;
+  std::unique_ptr<CacheReservationManager>
+      compression_dict_buffer_cache_res_mgr;
   const bool use_delta_encoding_for_index_values;
   std::unique_ptr<FilterBlockBuilder> filter_builder;
   char cache_key_prefix[BlockBasedTable::kMaxCacheKeyPrefixSize];
@@ -450,10 +451,10 @@ struct BlockBasedTableBuilder::Rep {
       buffer_limit = std::min(tbo.target_file_size,
                               compression_opts.max_dict_buffer_bytes);
     }
-    if (table_options.no_block_cache) {
-      cache_rev_mng.reset(nullptr);
+    if (table_options.no_block_cache || table_options.block_cache == nullptr) {
+      compression_dict_buffer_cache_res_mgr.reset(nullptr);
     } else {
-      cache_rev_mng.reset(
+      compression_dict_buffer_cache_res_mgr.reset(
           new CacheReservationManager(table_options.block_cache));
     }
     for (uint32_t i = 0; i < compression_opts.parallel_threads; i++) {
@@ -912,19 +913,21 @@ void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
     if (r->state == Rep::State::kBuffered) {
       bool exceeds_buffer_limit =
           (r->buffer_limit != 0 && r->data_begin_offset > r->buffer_limit);
-      bool is_cache_full = false;
+      bool exceeds_global_block_cache_limit = false;
 
       // Increase cache reservation for the last buffered data block
       // only if the block is not going to be unbuffered immediately
       // and there exists a cache reservation manager
-      if (!exceeds_buffer_limit && r->cache_rev_mng != nullptr) {
-        Status s = r->cache_rev_mng->UpdateCacheReservation<
-            CacheEntryRole::kCompressionDictionaryBuildingBuffer>(
-            r->data_begin_offset);
-        is_cache_full = s.IsIncomplete();
+      if (!exceeds_buffer_limit &&
+          r->compression_dict_buffer_cache_res_mgr != nullptr) {
+        Status s =
+            r->compression_dict_buffer_cache_res_mgr->UpdateCacheReservation<
+                CacheEntryRole::kCompressionDictionaryBuildingBuffer>(
+                r->data_begin_offset);
+        exceeds_global_block_cache_limit = s.IsIncomplete();
       }
 
-      if (exceeds_buffer_limit || is_cache_full) {
+      if (exceeds_buffer_limit || exceeds_global_block_cache_limit) {
         EnterUnbuffered();
       }
     }
@@ -1971,8 +1974,8 @@ void BlockBasedTableBuilder::EnterUnbuffered() {
   }
   r->data_block_buffers.clear();
   r->data_begin_offset = 0;
-  if (r->cache_rev_mng != nullptr) {
-    Status s = r->cache_rev_mng->UpdateCacheReservation<
+  if (r->compression_dict_buffer_cache_res_mgr != nullptr) {
+    Status s = r->compression_dict_buffer_cache_res_mgr->UpdateCacheReservation<
         CacheEntryRole::kCompressionDictionaryBuildingBuffer>(
         r->data_begin_offset);
     s.PermitUncheckedError();
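
Editor's note: for readers following the patch without the RocksDB tree at hand, below is a minimal standalone sketch of the unbuffering decision this change renames and documents. FakeCacheReservationManager, ShouldEnterUnbuffered, and the byte values are made up for illustration; the real CacheReservationManager reports an exceeded block cache limit via Status::Incomplete() rather than a bool, and the real decision lives inside BlockBasedTableBuilder::Add() as shown in the hunks above.

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-in for a block cache plus cache reservation manager:
    // UpdateCacheReservation() returns false once the requested reservation
    // would exceed the configured capacity.
    class FakeCacheReservationManager {
     public:
      explicit FakeCacheReservationManager(uint64_t cache_capacity)
          : capacity_(cache_capacity) {}

      bool UpdateCacheReservation(uint64_t reserved_bytes) {
        reserved_ = reserved_bytes;
        return reserved_ <= capacity_;  // false ~ "exceeds global block cache limit"
      }

     private:
      uint64_t capacity_;
      uint64_t reserved_ = 0;
    };

    // Mirrors the decision sketched in the Add() hunk: leave the kBuffered state
    // when either the per-table buffer limit or the global block cache limit
    // would be exceeded by the buffered data blocks.
    bool ShouldEnterUnbuffered(uint64_t buffer_limit, uint64_t buffered_bytes,
                               FakeCacheReservationManager* mgr) {
      bool exceeds_buffer_limit =
          (buffer_limit != 0 && buffered_bytes > buffer_limit);
      bool exceeds_global_block_cache_limit = false;
      // Only charge the cache if we are not about to unbuffer anyway and a
      // reservation manager exists (i.e. a block cache is configured).
      if (!exceeds_buffer_limit && mgr != nullptr) {
        exceeds_global_block_cache_limit =
            !mgr->UpdateCacheReservation(buffered_bytes);
      }
      return exceeds_buffer_limit || exceeds_global_block_cache_limit;
    }

    int main() {
      FakeCacheReservationManager mgr(/*cache_capacity=*/1 << 20);  // 1 MiB "cache"
      // 2 MiB of buffered data exceeds the 1 MiB capacity, so this prints 1.
      std::cout << ShouldEnterUnbuffered(/*buffer_limit=*/0,
                                         /*buffered_bytes=*/2 << 20, &mgr)
                << std::endl;
      return 0;
    }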