1cf5deb8fd
Summary:
This is a conceptually simple change, but it touches many files to
pass the allocator through function calls.
We introduce CacheAllocator, which clients can use to configure a custom
allocator for cache blocks. Our motivation is to hook this up with folly's
`JemallocNodumpAllocator`
(f43ce6d686/folly/experimental/JemallocNodumpAllocator.h), but there are
many other possible use cases.
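For illustration, here is a minimal sketch of a client-side allocator. It
assumes the `CacheAllocator` interface exposes `Name()`, `Allocate()`, and
`Deallocate()`; check `include/rocksdb/cache.h` in this PR for the exact
signatures:

```cpp
// Sketch only: a trivial allocator that counts cache-block allocations.
// The class name and counting behavior are illustrative, not part of the PR.
#include <atomic>
#include <cstdlib>

#include "rocksdb/cache.h"

class CountingCacheAllocator : public rocksdb::CacheAllocator {
 public:
  const char* Name() const override { return "CountingCacheAllocator"; }

  void* Allocate(size_t size) override {
    count_.fetch_add(1, std::memory_order_relaxed);
    return std::malloc(size);
  }

  void Deallocate(void* p) override { std::free(p); }

  size_t allocation_count() const { return count_.load(); }

 private:
  std::atomic<size_t> count_{0};
};
```

A client passes a `std::shared_ptr` to such an allocator when constructing
the cache, exactly as `ShardedCache` receives one in the file below.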
Additionally, this commit cleans up memory allocation in
`util/compression.h`, making sure that all allocations are wrapped in a
unique_ptr as soon as possible.
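The cleanup follows the standard RAII pattern; a minimal sketch (the
function below is illustrative, not one of the actual `util/compression.h`
helpers):

```cpp
#include <cstring>
#include <memory>

// Illustrative only: wrap the raw allocation in a unique_ptr on the same
// line it is made, so no early return or exception between allocation and
// use can leak it.
std::unique_ptr<char[]> CopyBlock(const char* src, size_t n) {
  std::unique_ptr<char[]> buf(new char[n]);
  std::memcpy(buf.get(), src, n);
  return buf;  // ownership transfers to the caller
}
```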
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4437
Differential Revision: D10132814
Pulled By: yiwu-arbug
fbshipit-source-id: be1343a4b69f6048df127939fea9bbc96969f564
cache/sharded_cache.cc (167 lines, 5.0 KiB, C++)
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include "cache/sharded_cache.h"

#include <string>

#include "util/mutexlock.h"

namespace rocksdb {

ShardedCache::ShardedCache(size_t capacity, int num_shard_bits,
                           bool strict_capacity_limit,
                           std::shared_ptr<CacheAllocator> allocator)
    : Cache(std::move(allocator)),
      num_shard_bits_(num_shard_bits),
      capacity_(capacity),
      strict_capacity_limit_(strict_capacity_limit),
      last_id_(1) {}

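// Distribute the requested capacity evenly across shards, rounding the
// per-shard capacity up so the shards never sum to less than the total.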
void ShardedCache::SetCapacity(size_t capacity) {
  int num_shards = 1 << num_shard_bits_;
  const size_t per_shard = (capacity + (num_shards - 1)) / num_shards;
  MutexLock l(&capacity_mutex_);
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->SetCapacity(per_shard);
  }
  capacity_ = capacity;
}

void ShardedCache::SetStrictCapacityLimit(bool strict_capacity_limit) {
  int num_shards = 1 << num_shard_bits_;
  MutexLock l(&capacity_mutex_);
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->SetStrictCapacityLimit(strict_capacity_limit);
  }
  strict_capacity_limit_ = strict_capacity_limit;
}

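// Per-key operations hash the key once, then forward the call to the
// shard selected by Shard(hash).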
Status ShardedCache::Insert(const Slice& key, void* value, size_t charge,
                            void (*deleter)(const Slice& key, void* value),
                            Handle** handle, Priority priority) {
  uint32_t hash = HashSlice(key);
  return GetShard(Shard(hash))
      ->Insert(key, hash, value, charge, deleter, handle, priority);
}

Cache::Handle* ShardedCache::Lookup(const Slice& key, Statistics* /*stats*/) {
  uint32_t hash = HashSlice(key);
  return GetShard(Shard(hash))->Lookup(key, hash);
}

bool ShardedCache::Ref(Handle* handle) {
  uint32_t hash = GetHash(handle);
  return GetShard(Shard(hash))->Ref(handle);
}

bool ShardedCache::Release(Handle* handle, bool force_erase) {
  uint32_t hash = GetHash(handle);
  return GetShard(Shard(hash))->Release(handle, force_erase);
}

void ShardedCache::Erase(const Slice& key) {
  uint32_t hash = HashSlice(key);
  GetShard(Shard(hash))->Erase(key, hash);
}

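// Relaxed ordering is sufficient: NewId() only needs to hand out unique
// ids, not to synchronize with any other memory operation.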
uint64_t ShardedCache::NewId() {
  return last_id_.fetch_add(1, std::memory_order_relaxed);
}

size_t ShardedCache::GetCapacity() const {
  MutexLock l(&capacity_mutex_);
  return capacity_;
}

bool ShardedCache::HasStrictCapacityLimit() const {
  MutexLock l(&capacity_mutex_);
  return strict_capacity_limit_;
}

size_t ShardedCache::GetUsage() const {
  // We will not lock the cache when getting the usage from shards.
  int num_shards = 1 << num_shard_bits_;
  size_t usage = 0;
  for (int s = 0; s < num_shards; s++) {
    usage += GetShard(s)->GetUsage();
  }
  return usage;
}

size_t ShardedCache::GetUsage(Handle* handle) const {
  return GetCharge(handle);
}

size_t ShardedCache::GetPinnedUsage() const {
  // We will not lock the cache when getting the usage from shards.
  int num_shards = 1 << num_shard_bits_;
  size_t usage = 0;
  for (int s = 0; s < num_shards; s++) {
    usage += GetShard(s)->GetPinnedUsage();
  }
  return usage;
}

void ShardedCache::ApplyToAllCacheEntries(void (*callback)(void*, size_t),
                                          bool thread_safe) {
  int num_shards = 1 << num_shard_bits_;
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->ApplyToAllCacheEntries(callback, thread_safe);
  }
}

void ShardedCache::EraseUnRefEntries() {
  int num_shards = 1 << num_shard_bits_;
  for (int s = 0; s < num_shards; s++) {
    GetShard(s)->EraseUnRefEntries();
  }
}

std::string ShardedCache::GetPrintableOptions() const {
  std::string ret;
  ret.reserve(20000);
  const int kBufferSize = 200;
  char buffer[kBufferSize];
  {
    MutexLock l(&capacity_mutex_);
    snprintf(buffer, kBufferSize, "    capacity : %" ROCKSDB_PRIszt "\n",
             capacity_);
    ret.append(buffer);
    snprintf(buffer, kBufferSize, "    num_shard_bits : %d\n",
             num_shard_bits_);
    ret.append(buffer);
    snprintf(buffer, kBufferSize, "    strict_capacity_limit : %d\n",
             strict_capacity_limit_);
    ret.append(buffer);
  }
  snprintf(buffer, kBufferSize, "    cache_allocator : %s\n",
           cache_allocator() ? cache_allocator()->Name() : "None");
  ret.append(buffer);
  ret.append(GetShard(0)->GetPrintableOptions());
  return ret;
}

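// Pick a default shard count from the capacity: use enough shards that
// each holds at least 512 KB, but never more than 2^6 = 64 shards.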
int GetDefaultCacheShardBits(size_t capacity) {
  int num_shard_bits = 0;
  size_t min_shard_size = 512L * 1024L;  // Every shard is at least 512KB.
  size_t num_shards = capacity / min_shard_size;
  while (num_shards >>= 1) {
    if (++num_shard_bits >= 6) {
      // No more than 6.
      return num_shard_bits;
    }
  }
  return num_shard_bits;
}

}  // namespace rocksdb