fix advance reservation of arena block addresses
Summary:
Calling `std::vector::reserve()` reallocates memory and moves the existing elements whenever the requested capacity exceeds the current one. Because it was called with `size() + 1` before adding every block, this reallocation could happen a huge number of times, e.g., for users with many large index blocks. Instead, we can simply use `std::vector::emplace_back()` in a way that preserves the no-memory-leak guarantee, while letting the vector decide when to reallocate space. Now I see reallocation/moving happen O(log N) times, rather than O(N) times, where N is the final size of the vector.

Closes https://github.com/facebook/rocksdb/pull/3508

Differential Revision: D6994228

Pulled By: ajkr

fbshipit-source-id: ab7c11e13ff37c8c6c8249be7a79566a4068cd27
commit 0454f781c2
parent 989d12313c
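The reallocation claim in the summary can be checked with a minimal standalone sketch (not RocksDB code; the constant `kN` and the counting loops are illustrative). On common `std::vector` implementations, calling `reserve(size() + 1)` before every insertion forces a reallocation on each insertion, while plain `emplace_back()` lets the capacity grow geometrically, so reallocation happens only O(log N) times:

#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t kN = 100000;

  // Old pattern: reserve exactly one more slot before every insertion.
  std::vector<int> with_reserve;
  size_t reallocs_with_reserve = 0;
  for (size_t i = 0; i < kN; ++i) {
    size_t old_cap = with_reserve.capacity();
    with_reserve.reserve(with_reserve.size() + 1);
    with_reserve.emplace_back(0);
    if (with_reserve.capacity() != old_cap) {
      ++reallocs_with_reserve;
    }
  }

  // New pattern: let the vector decide when to reallocate.
  std::vector<int> plain;
  size_t reallocs_plain = 0;
  for (size_t i = 0; i < kN; ++i) {
    size_t old_cap = plain.capacity();
    plain.emplace_back(0);
    if (plain.capacity() != old_cap) {
      ++reallocs_plain;
    }
  }

  std::printf("reserve(size() + 1) each time: %zu capacity changes\n",
              reallocs_with_reserve);
  std::printf("emplace_back() only:           %zu capacity changes\n",
              reallocs_plain);
  return 0;
}

With typical standard-library implementations the first counter is on the order of N and the second only a dozen or two, matching the O(N) vs. O(log N) figures above.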
@@ -79,6 +79,9 @@ Arena::~Arena() {
 #ifdef MAP_HUGETLB
   for (const auto& mmap_info : huge_blocks_) {
+    if (mmap_info.addr_ == nullptr) {
+      continue;
+    }
     auto ret = munmap(mmap_info.addr_, mmap_info.length_);
     if (ret != 0) {
       // TODO(sdong): Better handling
@@ -126,11 +129,15 @@ char* Arena::AllocateFromHugePage(size_t bytes) {
   if (hugetlb_size_ == 0) {
     return nullptr;
   }
-  // already reserve space in huge_blocks_ before calling mmap().
-  // this way the insertion into the vector below will not throw and we
-  // won't leak the mapping in that case. if reserve() throws, we
-  // won't leak either
-  huge_blocks_.reserve(huge_blocks_.size() + 1);
+  // Reserve space in `huge_blocks_` before calling `mmap`.
+  // Use `emplace_back()` instead of `reserve()` to let std::vector manage its
+  // own memory and do fewer reallocations.
+  //
+  // - If `emplace_back` throws, no memory leaks because we haven't called
+  //   `mmap` yet.
+  // - If `mmap` throws, no memory leaks because the vector will be cleaned up
+  //   via RAII.
+  huge_blocks_.emplace_back(nullptr /* addr */, 0 /* length */);
 
   void* addr = mmap(nullptr, bytes, (PROT_READ | PROT_WRITE),
                     (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), -1, 0);
@@ -138,8 +145,7 @@ char* Arena::AllocateFromHugePage(size_t bytes) {
   if (addr == MAP_FAILED) {
     return nullptr;
   }
-  // the following shouldn't throw because of the above reserve()
-  huge_blocks_.emplace_back(MmapInfo(addr, bytes));
+  huge_blocks_.back() = MmapInfo(addr, bytes);
   blocks_memory_ += bytes;
   if (tracker_ != nullptr) {
     tracker_->Allocate(bytes);
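Taken together with the destructor hunk, the two hunks above implement a record-a-placeholder-then-fill-it-in pattern. Below is a hedged sketch of that pattern outside RocksDB; `MappingRegistry`, `MappingInfo`, and `MapAnonymous` are made-up names, and the code assumes a POSIX system with `MAP_ANONYMOUS`:

#include <sys/mman.h>

#include <cstddef>
#include <vector>

struct MappingInfo {  // stand-in for the Arena's MmapInfo entries
  void* addr_;
  size_t length_;
};

class MappingRegistry {  // illustrative class, not part of RocksDB
 public:
  ~MappingRegistry() {
    for (const auto& m : mappings_) {
      if (m.addr_ == nullptr) {
        continue;  // placeholder left by a failed mmap; nothing to unmap
      }
      munmap(m.addr_, m.length_);
    }
  }

  char* MapAnonymous(size_t bytes) {
    // 1. Record a placeholder. This is the only statement that may throw
    //    (on reallocation), and at this point nothing has been mapped yet.
    mappings_.push_back(MappingInfo{nullptr, 0});
    // 2. Acquire the resource.
    void* addr = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (addr == MAP_FAILED) {
      return nullptr;  // placeholder stays behind; destructor skips it
    }
    // 3. Fill in the placeholder. Assigning this trivially copyable struct
    //    cannot throw, so a successful mapping is always recorded.
    mappings_.back() = MappingInfo{addr, bytes};
    return static_cast<char*>(addr);
  }

 private:
  std::vector<MappingInfo> mappings_;
};

This ordering is what makes the nullptr check in the destructor hunk necessary: a failed mmap leaves a {nullptr, 0} entry in the vector.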
@@ -193,11 +199,15 @@ char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
 }
 
 char* Arena::AllocateNewBlock(size_t block_bytes) {
-  // already reserve space in blocks_ before allocating memory via new.
-  // this way the insertion into the vector below will not throw and we
-  // won't leak the allocated memory in that case. if reserve() throws,
-  // we won't leak either
-  blocks_.reserve(blocks_.size() + 1);
+  // Reserve space in `blocks_` before allocating memory via new.
+  // Use `emplace_back()` instead of `reserve()` to let std::vector manage its
+  // own memory and do fewer reallocations.
+  //
+  // - If `emplace_back` throws, no memory leaks because we haven't called `new`
+  //   yet.
+  // - If `new` throws, no memory leaks because the vector will be cleaned up
+  //   via RAII.
+  blocks_.emplace_back(nullptr);
 
   char* block = new char[block_bytes];
   size_t allocated_size;
@@ -216,8 +226,7 @@ char* Arena::AllocateNewBlock(size_t block_bytes) {
   if (tracker_ != nullptr) {
     tracker_->Allocate(allocated_size);
   }
-  // the following shouldn't throw because of the above reserve()
-  blocks_.push_back(block);
+  blocks_.back() = block;
   return block;
 }
 
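The hunks in AllocateNewBlock apply the same pattern to heap blocks, where it is even simpler: `delete[]` on a null pointer is a no-op in standard C++, so (presumably for that reason, the commit does not say) the blocks_ cleanup loop needs no placeholder check analogous to the one added for huge_blocks_. An illustrative sketch, again with made-up names:

#include <cstddef>
#include <vector>

class BlockList {  // illustrative class, not RocksDB's Arena
 public:
  ~BlockList() {
    for (char* b : blocks_) {
      delete[] b;  // no-op when b is the nullptr placeholder
    }
  }

  char* AllocateNewBlock(size_t block_bytes) {
    // May throw on reallocation; nothing has been allocated yet, so no leak.
    blocks_.emplace_back(nullptr);
    // May throw std::bad_alloc; only the harmless nullptr placeholder remains.
    char* block = new char[block_bytes];
    // Cannot throw; the new block is now owned by the list.
    blocks_.back() = block;
    return block;
  }

 private:
  std::vector<char*> blocks_;
};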