rocksdb/util/dynamic_bloom.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "dynamic_bloom.h"
#include <algorithm>
#include "port/port.h"
#include "rocksdb/slice.h"
#include "util/allocator.h"
#include "util/hash.h"
namespace rocksdb {

namespace {
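
// Rounds the requested number of bits up to a whole number of
// cache-line-sized blocks; used when locality > 0 (see SetTotalBits below)
// so that the probes for a key can stay within a single cache line.
// Worked example (assuming CACHE_LINE_SIZE == 64, i.e. 512 bits per block):
//   total_bits = 10000 -> ceil(10000 / 512) = 20 blocks -> bumped to 21
//   (odd) -> 21 * 512 = 10752 bits.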
uint32_t GetTotalBitsForLocality(uint32_t total_bits) {
  uint32_t num_blocks =
      (total_bits + CACHE_LINE_SIZE * 8 - 1) / (CACHE_LINE_SIZE * 8);

  // Make num_blocks an odd number to make sure more bits are involved
  // when determining which block.
  if (num_blocks % 2 == 0) {
    num_blocks++;
  }

  return num_blocks * (CACHE_LINE_SIZE * 8);
}
}  // namespace
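
// Allocating constructor: delegates to the two-argument constructor to set
// the probe count and hash function, then sizes and zeroes the bit array via
// SetTotalBits().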
DynamicBloom::DynamicBloom(Allocator* allocator, uint32_t total_bits,
                           uint32_t locality, uint32_t num_probes,
                           uint32_t (*hash_func)(const Slice& key),
                           size_t huge_page_tlb_size, Logger* logger)
    : DynamicBloom(num_probes, hash_func) {
  SetTotalBits(allocator, total_bits, locality, huge_page_tlb_size, logger);
}
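
// Storage-free constructor: records the probe count and hash function
// (defaulting to BloomHash when hash_func is nullptr) but leaves data_ null
// until SetTotalBits() or SetRawData() provides backing memory.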
DynamicBloom::DynamicBloom(uint32_t num_probes,
                           uint32_t (*hash_func)(const Slice& key))
    : kTotalBits(0),
      kNumBlocks(0),
      kNumProbes(num_probes),
      hash_func_(hash_func == nullptr ? &BloomHash : hash_func),
      data_(nullptr) {}
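
// Adopts caller-owned, already-sized memory instead of allocating; the caller
// is responsible for keeping raw_data alive and sized to match total_bits.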
void DynamicBloom::SetRawData(unsigned char* raw_data, uint32_t total_bits,
                              uint32_t num_blocks) {
  data_ = reinterpret_cast<std::atomic<uint8_t>*>(raw_data);
  kTotalBits = total_bits;
  kNumBlocks = num_blocks;
}
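
// Allocates and zeroes the bit array. When locality > 0 the size is rounded
// up to whole cache lines and the start of the array is aligned to a cache
// line boundary (hence the CACHE_LINE_SIZE - 1 slack added to sz).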
void DynamicBloom::SetTotalBits(Allocator* allocator, uint32_t total_bits,
                                uint32_t locality, size_t huge_page_tlb_size,
                                Logger* logger) {
  kTotalBits = (locality > 0) ? GetTotalBitsForLocality(total_bits)
                              : (total_bits + 7) / 8 * 8;
  kNumBlocks = (locality > 0) ? (kTotalBits / (CACHE_LINE_SIZE * 8)) : 0;

  assert(kNumBlocks > 0 || kTotalBits > 0);
  assert(kNumProbes > 0);

  uint32_t sz = kTotalBits / 8;
  if (kNumBlocks > 0) {
    sz += CACHE_LINE_SIZE - 1;
  }
  assert(allocator);

  char* raw = allocator->AllocateAligned(sz, huge_page_tlb_size, logger);
  memset(raw, 0, sz);
  auto cache_line_offset = reinterpret_cast<uintptr_t>(raw) % CACHE_LINE_SIZE;
  if (kNumBlocks > 0 && cache_line_offset > 0) {
    raw += CACHE_LINE_SIZE - cache_line_offset;
  }
  data_ = reinterpret_cast<std::atomic<uint8_t>*>(raw);
}

}  // namespace rocksdb
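
// Usage sketch (not part of the original file): a minimal example of building
// and querying the filter, assuming the Add()/MayContain() interface declared
// in util/dynamic_bloom.h and an Arena used as the Allocator.
//
//   #include "util/arena.h"
//   #include "util/dynamic_bloom.h"
//
//   rocksdb::Arena arena;
//   rocksdb::DynamicBloom bloom(&arena, /*total_bits=*/8 * 1024,
//                               /*locality=*/1, /*num_probes=*/6);
//   bloom.Add("some_key");
//   assert(bloom.MayContain("some_key"));  // no false negatives
//   // MayContain("other_key") may occasionally return a false positive.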