Prevent empty memtables from using a lot of memory
Summary: This fixes OOMs that we (logdevice) are currently having in production. The SkipListRep constructor does a couple of small allocations from ConcurrentArena (see the InlineSkipList constructor). ConcurrentArena would sometimes allocate an entire block for that, which is a few megabytes (we use Options::arena_block_size = 4 MB). So an empty memtable can take 4 MB of memory. We have ~40k column families (spread across 15 DB instances), so 4 MB per empty memtable easily OOMs a machine for us. This PR makes ConcurrentArena always allocate from Arena's inline block when possible. So as long as InlineSkipList's initial allocations are below 2 KB, no blocks are allocated for empty memtables.
Closes https://github.com/facebook/rocksdb/pull/2569
Differential Revision: D5404029
Pulled By: al13n321
fbshipit-source-id: 568ec22a3fd1a485c06123f6b2dfc5e9ef67cd23
Parent: ac748c57ed
Commit: e85f2c64cb
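For illustration, here is a minimal, self-contained sketch of the technique the fix relies on. This is not RocksDB's actual Arena/ConcurrentArena; all names and sizes below are illustrative, and the slow path is simplified to one block per request. The idea: serve small initial allocations from a fixed inline buffer, and only heap-allocate a full-size block once that buffer is exhausted.

// Minimal sketch of an "inline-first" arena; illustrative only.
#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

class InlineFirstArena {
 public:
  static constexpr size_t kInlineSize = 2048;  // small fixed inline buffer

  explicit InlineFirstArena(size_t block_size) : block_size_(block_size) {}

  // True while no heap block has been allocated yet (mirrors the idea of
  // Arena::IsInInlineBlock() in the diff below).
  bool IsInInlineBlock() const { return blocks_.empty(); }

  char* Allocate(size_t bytes) {
    if (IsInInlineBlock() && inline_used_ + bytes <= kInlineSize) {
      // Fast path: serve the request from the inline buffer, no heap block.
      char* result = inline_block_ + inline_used_;
      inline_used_ += bytes;
      return result;
    }
    // Slow path (simplified to one block per request): this is the path that
    // used to cost ~arena_block_size even for a nearly empty arena.
    blocks_.emplace_back(new char[block_size_]);
    return blocks_.back().get();
  }

  size_t MemoryAllocatedBytes() const {
    return kInlineSize + blocks_.size() * block_size_;
  }

 private:
  const size_t block_size_;
  size_t inline_used_ = 0;
  char inline_block_[kInlineSize];
  std::vector<std::unique_ptr<char[]>> blocks_;
};

int main() {
  InlineFirstArena arena(4 << 20);  // 4 MB blocks, as in the summary above
  arena.Allocate(1024);             // roughly what a new memtable needs
  assert(arena.IsInInlineBlock());  // the 1 KB came from the inline buffer
  assert(arena.MemoryAllocatedBytes() == InlineFirstArena::kInlineSize);
  return 0;
}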
@@ -77,6 +77,10 @@ class Arena : public Allocator {
   size_t BlockSize() const override { return kBlockSize; }
 
+  bool IsInInlineBlock() const {
+    return blocks_.empty();
+  }
+
  private:
   char inline_block_[kInlineSize] __attribute__((__aligned__(sizeof(void*))));
   // Number of bytes allocated in one block
@@ -91,9 +91,13 @@ static void ApproximateMemoryUsageTest(size_t huge_page_size) {
   ASSERT_EQ(kZero, arena.ApproximateMemoryUsage());
 
   // allocate inline bytes
+  EXPECT_TRUE(arena.IsInInlineBlock());
   arena.AllocateAligned(8);
+  EXPECT_TRUE(arena.IsInInlineBlock());
   arena.AllocateAligned(Arena::kInlineSize / 2 - 16);
+  EXPECT_TRUE(arena.IsInInlineBlock());
   arena.AllocateAligned(Arena::kInlineSize / 2);
+  EXPECT_TRUE(arena.IsInInlineBlock());
   ASSERT_EQ(arena.ApproximateMemoryUsage(), Arena::kInlineSize - 8);
   ASSERT_PRED2(CheckMemoryAllocated, arena.MemoryAllocatedBytes(),
                Arena::kInlineSize);
@@ -102,6 +106,7 @@ static void ApproximateMemoryUsageTest(size_t huge_page_size) {
 
   // first allocation
   arena.AllocateAligned(kEntrySize);
+  EXPECT_FALSE(arena.IsInInlineBlock());
   auto mem_usage = arena.MemoryAllocatedBytes();
   if (huge_page_size) {
     ASSERT_TRUE(
@@ -117,6 +122,7 @@ static void ApproximateMemoryUsageTest(size_t huge_page_size) {
     arena.AllocateAligned(kEntrySize);
     ASSERT_EQ(mem_usage, arena.MemoryAllocatedBytes());
     ASSERT_EQ(arena.ApproximateMemoryUsage(), usage + kEntrySize);
+    EXPECT_FALSE(arena.IsInInlineBlock());
     usage = arena.ApproximateMemoryUsage();
   }
   if (huge_page_size) {
@@ -164,6 +164,21 @@ class ConcurrentArena : public Allocator {
       // size, we adjust our request to avoid arena waste.
       auto exact = arena_allocated_and_unused_.load(std::memory_order_relaxed);
       assert(exact == arena_.AllocatedAndUnused());
+
+      if (exact >= bytes && arena_.IsInInlineBlock()) {
+        // If we haven't exhausted arena's inline block yet, allocate from arena
+        // directly. This ensures that we'll do the first few small allocations
+        // without allocating any blocks.
+        // In particular this prevents empty memtables from using
+        // disproportionately large amount of memory: a memtable allocates on
+        // the order of 1 KB of memory when created; we wouldn't want to
+        // allocate a full arena block (typically a few megabytes) for that,
+        // especially if there are thousands of empty memtables.
+        auto rv = func();
+        Fixup();
+        return rv;
+      }
+
       avail = exact >= shard_block_size_ / 2 && exact < shard_block_size_ * 2
                   ? exact
                   : shard_block_size_;