// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "util/arena.h"
#ifdef ROCKSDB_MALLOC_USABLE_SIZE
#include <malloc.h>
#endif
#ifndef OS_WIN
#include <sys/mman.h>
#endif
#include "port/port.h"
#include <algorithm>
#include "rocksdb/env.h"
namespace rocksdb {

// MSVC complains that it is already defined since it is static in the header.
#ifndef OS_WIN
const size_t Arena::kInlineSize;
#endif

const size_t Arena::kMinBlockSize = 4096;
const size_t Arena::kMaxBlockSize = 2 << 30;
static const int kAlignUnit = sizeof(void*);

size_t OptimizeBlockSize(size_t block_size) {
  // Make sure block_size is in the optimal range
  block_size = std::max(Arena::kMinBlockSize, block_size);
  block_size = std::min(Arena::kMaxBlockSize, block_size);

  // Make sure block_size is a multiple of kAlignUnit
  if (block_size % kAlignUnit != 0) {
    block_size = (1 + block_size / kAlignUnit) * kAlignUnit;
  }

  return block_size;
}
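
// Example: with 8-byte pointers (kAlignUnit == 8), OptimizeBlockSize(100) is
// clamped up to kMinBlockSize (4096); OptimizeBlockSize(5000) is returned
// unchanged (already a multiple of 8); OptimizeBlockSize(5001) is rounded up
// to 5008.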

Arena::Arena(size_t block_size, size_t huge_page_size)
    : kBlockSize(OptimizeBlockSize(block_size)) {
  assert(kBlockSize >= kMinBlockSize && kBlockSize <= kMaxBlockSize &&
         kBlockSize % kAlignUnit == 0);
  alloc_bytes_remaining_ = sizeof(inline_block_);
  blocks_memory_ += alloc_bytes_remaining_;
  aligned_alloc_ptr_ = inline_block_;
  unaligned_alloc_ptr_ = inline_block_ + alloc_bytes_remaining_;
#ifdef MAP_HUGETLB
  hugetlb_size_ = huge_page_size;
  if (hugetlb_size_ && kBlockSize > hugetlb_size_) {
    hugetlb_size_ = ((kBlockSize - 1U) / hugetlb_size_ + 1U) * hugetlb_size_;
  }
#endif
}
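
// The constructor seeds both bump pointers from inline_block_, a buffer that
// lives inside the Arena object itself, so the first sizeof(inline_block_)
// bytes of allocations never touch the heap. When huge pages are requested,
// hugetlb_size_ is rounded up to the smallest multiple of huge_page_size that
// can hold kBlockSize, so each mapped block is a whole number of huge pages.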

Arena::~Arena() {
  for (const auto& block : blocks_) {
    delete[] block;
  }

#ifdef MAP_HUGETLB
  for (const auto& mmap_info : huge_blocks_) {
    auto ret = munmap(mmap_info.addr_, mmap_info.length_);
    if (ret != 0) {
      // TODO(sdong): Better handling
    }
  }
#endif
}
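
// There is no per-allocation free: the destructor releases every new[]-ed
// block and munmap()s every huge-page mapping in one pass, which is what
// keeps the allocation fast path a simple pointer bump.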

char* Arena::AllocateFallback(size_t bytes, bool aligned) {
  if (bytes > kBlockSize / 4) {
    ++irregular_block_num;
    // Object is more than a quarter of our block size. Allocate it separately
    // to avoid wasting too much space in leftover bytes.
    return AllocateNewBlock(bytes);
  }

  // We waste the remaining space in the current block.
  size_t size;
  char* block_head = nullptr;
  if (hugetlb_size_) {
    size = hugetlb_size_;
    block_head = AllocateFromHugePage(size);
  }
  if (!block_head) {
    size = kBlockSize;
    block_head = AllocateNewBlock(size);
  }
  alloc_bytes_remaining_ = size - bytes;

  if (aligned) {
    aligned_alloc_ptr_ = block_head + bytes;
    unaligned_alloc_ptr_ = block_head + size;
    return block_head;
  } else {
    aligned_alloc_ptr_ = block_head;
    unaligned_alloc_ptr_ = block_head + size - bytes;
    return unaligned_alloc_ptr_;
  }
}
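
// AllocateFallback grabs a fresh block and then serves requests from its two
// ends: aligned requests advance aligned_alloc_ptr_ from the front, while
// unaligned requests pull unaligned_alloc_ptr_ back from the tail. Both ends
// draw from the shared alloc_bytes_remaining_ budget, so one block serves
// both kinds of request without extra padding.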

char* Arena::AllocateFromHugePage(size_t bytes) {
#ifdef MAP_HUGETLB
  if (hugetlb_size_ == 0) {
    return nullptr;
  }

  void* addr = mmap(nullptr, bytes, (PROT_READ | PROT_WRITE),
                    (MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB), 0, 0);

  if (addr == MAP_FAILED) {
    return nullptr;
  }
  huge_blocks_.push_back(MmapInfo(addr, bytes));
  blocks_memory_ += bytes;
  return reinterpret_cast<char*>(addr);
#else
  return nullptr;
#endif
}
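
// Note: MAP_HUGETLB mappings succeed only when the kernel has huge pages
// reserved (e.g. via vm.nr_hugepages), so huge-page allocation is
// best-effort: on failure the callers fall back to ordinary new[]-ed blocks.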

char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
                             Logger* logger) {
  assert((kAlignUnit & (kAlignUnit - 1)) ==
         0);  // Pointer size should be a power of 2

#ifdef MAP_HUGETLB
  if (huge_page_size > 0 && bytes > 0) {
    // Allocate from huge pages so the mapping uses fewer TLB entries.
    assert(logger != nullptr);  // logger needs to be passed in.
    size_t reserved_size =
        ((bytes - 1U) / huge_page_size + 1U) * huge_page_size;
    assert(reserved_size >= bytes);

    char* addr = AllocateFromHugePage(reserved_size);
    if (addr == nullptr) {
      Warn(logger, "AllocateAligned fails to allocate huge TLB pages: %s",
           strerror(errno));
      // fall back to malloc
    } else {
      return addr;
    }
  }
#endif

  size_t current_mod =
      reinterpret_cast<uintptr_t>(aligned_alloc_ptr_) & (kAlignUnit - 1);
  size_t slop = (current_mod == 0 ? 0 : kAlignUnit - current_mod);
  size_t needed = bytes + slop;
  char* result;
  if (needed <= alloc_bytes_remaining_) {
    result = aligned_alloc_ptr_ + slop;
    aligned_alloc_ptr_ += needed;
    alloc_bytes_remaining_ -= needed;
  } else {
    // AllocateFallback always returns aligned memory
    result = AllocateFallback(bytes, true /* aligned */);
  }
  assert((reinterpret_cast<uintptr_t>(result) & (kAlignUnit - 1)) == 0);
  return result;
}
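
// Alignment arithmetic example: with kAlignUnit == 8, if aligned_alloc_ptr_
// currently ends in 0x..0d, current_mod is 5 and slop is 3, so an 8-byte
// request consumes 11 bytes and the returned pointer ends in 0x..10.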

char* Arena::AllocateNewBlock(size_t block_bytes) {
  char* block = new char[block_bytes];

#ifdef ROCKSDB_MALLOC_USABLE_SIZE
  blocks_memory_ += malloc_usable_size(block);
#else
  blocks_memory_ += block_bytes;
#endif  // ROCKSDB_MALLOC_USABLE_SIZE
  blocks_.push_back(block);
  return block;
}
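
// blocks_memory_ tracks what the process actually pays for each block: with
// ROCKSDB_MALLOC_USABLE_SIZE it records malloc_usable_size(block), which can
// exceed block_bytes because the allocator rounds sizes up; otherwise it
// falls back to the requested size.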

}  // namespace rocksdb