Merge pull request #735 from jsteemann/fix-potential-leak-in-allocate
prevent potential memleaks in Arena::Allocate*()
commit 174e2be5f0
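The pattern in all three fixes below is the same: grow the bookkeeping vector before acquiring the raw resource, so that the only call that can throw runs while there is still nothing to leak. The diff applies this in AllocateFallback(), AllocateFromHugePage(), and AllocateNewBlock(). A minimal sketch of the before/after shape, assuming a plain std::vector<char*> of owned blocks (the function names are hypothetical, not RocksDB's code):

    #include <cstddef>
    #include <vector>

    // Before: if push_back() must grow its buffer and that allocation throws
    // std::bad_alloc, `block` is never recorded anywhere and is leaked.
    char* AllocateNewBlockLeaky(std::vector<char*>& blocks, std::size_t n) {
      char* block = new char[n];
      blocks.push_back(block);  // may reallocate and throw
      return block;
    }

    // After: reserve first. If reserve() throws, no block exists yet; once it
    // succeeds, push_back() cannot reallocate, and copying a char* is nothrow,
    // so no exception path remains after `new`.
    char* AllocateNewBlockFixed(std::vector<char*>& blocks, std::size_t n) {
      blocks.reserve(blocks.size() + 1);
      char* block = new char[n];
      blocks.push_back(block);
      return block;
    }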
@@ -84,10 +84,12 @@ char* Arena::AllocateFallback(size_t bytes, bool aligned) {
   // We waste the remaining space in the current block.
   size_t size;
   char* block_head = nullptr;
+#ifdef MAP_HUGETLB
   if (hugetlb_size_) {
     size = hugetlb_size_;
     block_head = AllocateFromHugePage(size);
   }
+#endif
   if (!block_head) {
     size = kBlockSize;
     block_head = AllocateNewBlock(size);
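The new #ifdef MAP_HUGETLB / #endif pair compiles the huge-page attempt out of AllocateFallback() on platforms that lack the flag; judging from the #else branch visible further down, AllocateFromHugePage() already returns nullptr there, so the guard removes a call that could never succeed rather than changing behavior.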
@@ -110,6 +112,11 @@ char* Arena::AllocateFromHugePage(size_t bytes) {
   if (hugetlb_size_ == 0) {
     return nullptr;
   }
+  // already reserve space in huge_blocks_ before calling mmap().
+  // this way the insertion into the vector below will not throw and we
+  // won't leak the mapping in that case. if reserve() throws, we
+  // won't leak either
+  huge_blocks_.reserve(huge_blocks_.size() + 1);
 
   void* addr = mmap(nullptr, bytes, (PROT_READ | PROT_WRITE),
                     (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), 0, 0);
@@ -117,7 +124,8 @@ char* Arena::AllocateFromHugePage(size_t bytes) {
   if (addr == MAP_FAILED) {
     return nullptr;
   }
-  huge_blocks_.push_back(MmapInfo(addr, bytes));
+  // the following shouldn't throw because of the above reserve()
+  huge_blocks_.emplace_back(MmapInfo(addr, bytes));
   blocks_memory_ += bytes;
   return reinterpret_cast<char*>(addr);
 #else
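In AllocateFromHugePage() the ordering matters even more, because the resource is a raw mmap() region that only munmap() can reclaim; a throwing vector insert would strand the mapping permanently. A self-contained sketch of the fixed shape, with MAP_HUGETLB dropped so it runs on any POSIX system (MmapInfoSketch and AllocateFromPage are illustrative stand-ins, not RocksDB's types):

    #include <cstddef>
    #include <sys/mman.h>
    #include <vector>

    struct MmapInfoSketch {  // stand-in for the MmapInfo in the diff
      void* addr_;
      std::size_t length_;
      MmapInfoSketch(void* addr, std::size_t length)
          : addr_(addr), length_(length) {}
    };

    char* AllocateFromPage(std::vector<MmapInfoSketch>& huge_blocks,
                           std::size_t bytes) {
      // Reserve before mmap(): a std::bad_alloc here loses nothing.
      huge_blocks.reserve(huge_blocks.size() + 1);
      void* addr = mmap(nullptr, bytes, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (addr == MAP_FAILED) {
        return nullptr;
      }
      // Cannot throw: capacity was reserved above, and the constructor only
      // copies a pointer and a size.
      huge_blocks.emplace_back(addr, bytes);
      return static_cast<char*>(addr);
    }

One aside on the fd argument: the diff passes 0, which Linux ignores for anonymous mappings; -1 is the portable convention used in the sketch.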
@@ -167,6 +175,12 @@ char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
 }
 
 char* Arena::AllocateNewBlock(size_t block_bytes) {
+  // already reserve space in blocks_ before allocating memory via new.
+  // this way the insertion into the vector below will not throw and we
+  // won't leak the allocated memory in that case. if reserve() throws,
+  // we won't leak either
+  blocks_.reserve(blocks_.size() + 1);
+
   char* block = new char[block_bytes];
 
 #ifdef ROCKSDB_MALLOC_USABLE_SIZE
@@ -174,6 +188,7 @@ char* Arena::AllocateNewBlock(size_t block_bytes) {
 #else
   blocks_memory_ += block_bytes;
 #endif // ROCKSDB_MALLOC_USABLE_SIZE
+  // the following shouldn't throw because of the above reserve()
   blocks_.push_back(block);
   return block;
 }
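Two details are worth noting. First, the push_back to emplace_back switch in AllocateFromHugePage() is cosmetic: as written it still constructs a MmapInfo temporary and moves it in, and in any case it is the reserve() call that removes the throwing path, since the standard guarantees that a vector insert which does not exceed capacity() performs no reallocation. Second, the "shouldn't throw" comments are exact for these element types: copying a char*, or a pointer plus a length, cannot throw, so after reserve() succeeds the insert has no failure mode left. A compilable distillation of that guarantee:

    #include <vector>

    // After reserve(), an insert that stays within capacity only has to
    // construct the new element; for a pointer that construction is nothrow.
    void ReserveThenPush() {
      std::vector<char*> v;
      v.reserve(v.size() + 1);  // may throw std::bad_alloc -- nothing to leak yet
      v.push_back(nullptr);     // cannot throw: no reallocation, trivial copy
    }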