WriteBufferManager will not trigger flush if much data is already being flushed
Summary: Even if the hard limit is hit, flushing more memtables may not help cap memory usage when more than half of the data is already scheduled for flush. In that case, do not trigger an additional flush. Closes https://github.com/facebook/rocksdb/pull/2469 Differential Revision: D5284249 Pulled By: siying fbshipit-source-id: 8ab7ba1aba56a634dbe72b318fcab2093063972e
This commit is contained in:
parent
9467eb6141
commit
af1746751f
@ -43,11 +43,19 @@ class WriteBufferManager {
|
|||||||
|
|
||||||
// Should only be called from write thread
|
// Should only be called from write thread
|
||||||
bool ShouldFlush() const {
|
bool ShouldFlush() const {
|
||||||
// Flush if memory usage hits a hard limit, or total size that hasn't been
|
if (enabled()) {
|
||||||
// scheduled to free hits a soft limit, which is 7/8 of the hard limit.
|
if (mutable_memtable_memory_usage() > mutable_limit_) {
|
||||||
return enabled() &&
|
return true;
|
||||||
(memory_usage() >= buffer_size() ||
|
}
|
||||||
mutable_memtable_memory_usage() >= buffer_size() / 8 * 7);
|
if (memory_usage() >= buffer_size_ &&
|
||||||
|
mutable_memtable_memory_usage() >= buffer_size_ / 2) {
|
||||||
|
// If the memory exceeds the buffer size, we trigger more aggressive
|
||||||
|
// flush. But if already more than half memory is being flushed,
|
||||||
|
// triggering more flush may not help. We will hold it instead.
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
void ReserveMem(size_t mem) {
|
void ReserveMem(size_t mem) {
|
||||||
@ -77,6 +85,7 @@ class WriteBufferManager {
|
|||||||
|
|
||||||
private:
|
private:
|
||||||
const size_t buffer_size_;
|
const size_t buffer_size_;
|
||||||
|
const size_t mutable_limit_;
|
||||||
std::atomic<size_t> memory_used_;
|
std::atomic<size_t> memory_used_;
|
||||||
// Memory that hasn't been scheduled to free.
|
// Memory that hasn't been scheduled to free.
|
||||||
std::atomic<size_t> memory_active_;
|
std::atomic<size_t> memory_active_;
|
||||||
|
@ -53,6 +53,7 @@ struct WriteBufferManager::CacheRep {};
|
|||||||
WriteBufferManager::WriteBufferManager(size_t _buffer_size,
|
WriteBufferManager::WriteBufferManager(size_t _buffer_size,
|
||||||
std::shared_ptr<Cache> cache)
|
std::shared_ptr<Cache> cache)
|
||||||
: buffer_size_(_buffer_size),
|
: buffer_size_(_buffer_size),
|
||||||
|
mutable_limit_(buffer_size_ * 7 / 8),
|
||||||
memory_used_(0),
|
memory_used_(0),
|
||||||
memory_active_(0),
|
memory_active_(0),
|
||||||
cache_rep_(nullptr) {
|
cache_rep_(nullptr) {
|
||||||
|
@ -18,7 +18,7 @@ class WriteBufferManagerTest : public testing::Test {};
|
|||||||
|
|
||||||
#ifndef ROCKSDB_LITE
|
#ifndef ROCKSDB_LITE
|
||||||
TEST_F(WriteBufferManagerTest, ShouldFlush) {
|
TEST_F(WriteBufferManagerTest, ShouldFlush) {
|
||||||
// A write buffer manager of size 50MB
|
// A write buffer manager of size 10MB
|
||||||
std::unique_ptr<WriteBufferManager> wbf(
|
std::unique_ptr<WriteBufferManager> wbf(
|
||||||
new WriteBufferManager(10 * 1024 * 1024));
|
new WriteBufferManager(10 * 1024 * 1024));
|
||||||
|
|
||||||
@ -27,16 +27,28 @@ TEST_F(WriteBufferManagerTest, ShouldFlush) {
|
|||||||
// 90% of the hard limit will hit the condition
|
// 90% of the hard limit will hit the condition
|
||||||
wbf->ReserveMem(1 * 1024 * 1024);
|
wbf->ReserveMem(1 * 1024 * 1024);
|
||||||
ASSERT_TRUE(wbf->ShouldFlush());
|
ASSERT_TRUE(wbf->ShouldFlush());
|
||||||
// Scheduling for feeing will release the condition
|
// Scheduling for freeing will release the condition
|
||||||
wbf->ScheduleFreeMem(1 * 1024 * 1024);
|
wbf->ScheduleFreeMem(1 * 1024 * 1024);
|
||||||
ASSERT_FALSE(wbf->ShouldFlush());
|
ASSERT_FALSE(wbf->ShouldFlush());
|
||||||
|
|
||||||
wbf->ReserveMem(2 * 1024 * 1024);
|
wbf->ReserveMem(2 * 1024 * 1024);
|
||||||
ASSERT_TRUE(wbf->ShouldFlush());
|
ASSERT_TRUE(wbf->ShouldFlush());
|
||||||
wbf->ScheduleFreeMem(5 * 1024 * 1024);
|
|
||||||
// hard limit still hit
|
wbf->ScheduleFreeMem(4 * 1024 * 1024);
|
||||||
|
// 11MB total, 6MB mutable. hard limit still hit
|
||||||
ASSERT_TRUE(wbf->ShouldFlush());
|
ASSERT_TRUE(wbf->ShouldFlush());
|
||||||
wbf->FreeMem(10 * 1024 * 1024);
|
|
||||||
|
wbf->ScheduleFreeMem(2 * 1024 * 1024);
|
||||||
|
// 11MB total, 4MB mutable. hard limit stills but won't flush because more
|
||||||
|
// than half data is already being flushed.
|
||||||
|
ASSERT_FALSE(wbf->ShouldFlush());
|
||||||
|
|
||||||
|
wbf->ReserveMem(4 * 1024 * 1024);
|
||||||
|
// 15 MB total, 8MB mutable.
|
||||||
|
ASSERT_TRUE(wbf->ShouldFlush());
|
||||||
|
|
||||||
|
wbf->FreeMem(7 * 1024 * 1024);
|
||||||
|
// 9MB total, 8MB mutable.
|
||||||
ASSERT_FALSE(wbf->ShouldFlush());
|
ASSERT_FALSE(wbf->ShouldFlush());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user