diff --git a/include/rocksdb/write_buffer_manager.h b/include/rocksdb/write_buffer_manager.h
index 63f9add9f..185e8baf9 100644
--- a/include/rocksdb/write_buffer_manager.h
+++ b/include/rocksdb/write_buffer_manager.h
@@ -43,11 +43,19 @@ class WriteBufferManager {
 
   // Should only be called from write thread
   bool ShouldFlush() const {
-    // Flush if memory usage hits a hard limit, or total size that hasn't been
-    // scheduled to free hits a soft limit, which is 7/8 of the hard limit.
-    return enabled() &&
-           (memory_usage() >= buffer_size() ||
-            mutable_memtable_memory_usage() >= buffer_size() / 8 * 7);
+    if (enabled()) {
+      if (mutable_memtable_memory_usage() > mutable_limit_) {
+        return true;
+      }
+      if (memory_usage() >= buffer_size_ &&
+          mutable_memtable_memory_usage() >= buffer_size_ / 2) {
+        // If memory usage exceeds the buffer size, trigger a more aggressive
+        // flush. But if more than half of the memory is already being
+        // flushed, triggering more flushes may not help, so we hold off.
+        return true;
+      }
+    }
+    return false;
   }
 
   void ReserveMem(size_t mem) {
@@ -77,6 +85,7 @@
 
  private:
   const size_t buffer_size_;
+  const size_t mutable_limit_;
   std::atomic<size_t> memory_used_;
   // Memory that hasn't been scheduled to free.
   std::atomic<size_t> memory_active_;
diff --git a/memtable/write_buffer_manager.cc b/memtable/write_buffer_manager.cc
index bb61c85d2..c00d842ab 100644
--- a/memtable/write_buffer_manager.cc
+++ b/memtable/write_buffer_manager.cc
@@ -53,6 +53,7 @@ struct WriteBufferManager::CacheRep {};
 WriteBufferManager::WriteBufferManager(size_t _buffer_size,
                                        std::shared_ptr<Cache> cache)
     : buffer_size_(_buffer_size),
+      mutable_limit_(buffer_size_ * 7 / 8),
       memory_used_(0),
       memory_active_(0),
       cache_rep_(nullptr) {
diff --git a/memtable/write_buffer_manager_test.cc b/memtable/write_buffer_manager_test.cc
index 64ec840b6..e0cdff2dd 100644
--- a/memtable/write_buffer_manager_test.cc
+++ b/memtable/write_buffer_manager_test.cc
@@ -18,7 +18,7 @@ class WriteBufferManagerTest : public testing::Test {};
 #ifndef ROCKSDB_LITE
 
 TEST_F(WriteBufferManagerTest, ShouldFlush) {
-  // A write buffer manager of size 50MB
+  // A write buffer manager of size 10MB
   std::unique_ptr<WriteBufferManager> wbf(
       new WriteBufferManager(10 * 1024 * 1024));
 
@@ -27,16 +27,28 @@ TEST_F(WriteBufferManagerTest, ShouldFlush) {
   // 90% of the hard limit will hit the condition
   wbf->ReserveMem(1 * 1024 * 1024);
   ASSERT_TRUE(wbf->ShouldFlush());
-  // Scheduling for feeing will release the condition
+  // Scheduling for freeing will release the condition
   wbf->ScheduleFreeMem(1 * 1024 * 1024);
   ASSERT_FALSE(wbf->ShouldFlush());
 
   wbf->ReserveMem(2 * 1024 * 1024);
   ASSERT_TRUE(wbf->ShouldFlush());
-  wbf->ScheduleFreeMem(5 * 1024 * 1024);
-  // hard limit still hit
+
+  wbf->ScheduleFreeMem(4 * 1024 * 1024);
+  // 11MB total, 6MB mutable. hard limit still hit
   ASSERT_TRUE(wbf->ShouldFlush());
-  wbf->FreeMem(10 * 1024 * 1024);
+
+  wbf->ScheduleFreeMem(2 * 1024 * 1024);
+  // 11MB total, 4MB mutable. hard limit still hit, but won't flush because
+  // more than half of the data is already being flushed.
+  ASSERT_FALSE(wbf->ShouldFlush());
+
+  wbf->ReserveMem(4 * 1024 * 1024);
+  // 15MB total, 8MB mutable.
+  ASSERT_TRUE(wbf->ShouldFlush());
+
+  wbf->FreeMem(7 * 1024 * 1024);
+  // 8MB total, 8MB mutable.
   ASSERT_FALSE(wbf->ShouldFlush());
 }
 
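
Note (not part of the patch): the sketch below is a minimal, self-contained model of the new flush-trigger policy, meant only for reasoning about the thresholds in isolation. ToyWriteBufferManager and its main() driver are hypothetical names written for this note; they mirror the mutable limit (7/8 of the buffer size) and the "more than half already flushing" rule added above, but they are not the real rocksdb::WriteBufferManager, which additionally uses atomics and optional block-cache accounting.

// Toy model of the flush-trigger policy introduced by this patch.
// Illustration only; not the real rocksdb::WriteBufferManager.
#include <cassert>
#include <cstddef>

class ToyWriteBufferManager {
 public:
  explicit ToyWriteBufferManager(size_t buffer_size)
      : buffer_size_(buffer_size),
        mutable_limit_(buffer_size * 7 / 8),
        memory_used_(0),
        memory_active_(0) {}

  void ReserveMem(size_t mem) {       // new memtable writes
    memory_used_ += mem;
    memory_active_ += mem;
  }
  void ScheduleFreeMem(size_t mem) {  // memtable picked for flush
    memory_active_ -= mem;
  }
  void FreeMem(size_t mem) {          // flushed memtable actually freed
    memory_used_ -= mem;
  }

  bool ShouldFlush() const {
    if (memory_active_ > mutable_limit_) {
      return true;  // mutable data alone exceeds 7/8 of the budget
    }
    // Over the hard limit: flush aggressively, unless at least half of the
    // budget is already being flushed, in which case more flushes won't help.
    return memory_used_ >= buffer_size_ && memory_active_ >= buffer_size_ / 2;
  }

 private:
  const size_t buffer_size_;
  const size_t mutable_limit_;
  size_t memory_used_;    // total memtable memory
  size_t memory_active_;  // memory not yet scheduled to free
};

int main() {
  constexpr size_t MB = 1024 * 1024;
  ToyWriteBufferManager wbm(10 * MB);

  wbm.ReserveMem(9 * MB);       // 9MB mutable > 8.75MB mutable limit
  assert(wbm.ShouldFlush());

  wbm.ScheduleFreeMem(5 * MB);  // 9MB total, 4MB mutable: under both limits
  assert(!wbm.ShouldFlush());

  wbm.ReserveMem(2 * MB);       // 11MB total, 6MB mutable: over the hard limit
  assert(wbm.ShouldFlush());

  wbm.ScheduleFreeMem(2 * MB);  // 11MB total, 4MB mutable: over the hard
                                // limit, but more than half of the budget is
                                // already being flushed, so hold off
  assert(!wbm.ShouldFlush());
  return 0;
}

Compiling and running this (e.g. with g++ -std=c++11) walks through state transitions similar to those exercised by the updated unit test.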