Compare commits

8 commits (SHA1):

- df4d3cf6fd
- 70a68ddc06
- c79005a6b6
- 0500c49f62
- 0cf2c6aa3b
- b0e248e2a4
- fdb3125547
- 128e36ca53

@@ -24,6 +24,13 @@ commands:
       command: |
         HOMEBREW_NO_AUTO_UPDATE=1 brew install cmake
 
+  install-jdk8-on-macos:
+    steps:
+      - run:
+          name: Install JDK 8 on macos
+          command: |
+            brew install --cask adoptopenjdk/openjdk/adoptopenjdk8
+
   increase-max-open-files-on-macos:
     steps:
       - run:

@@ -55,7 +62,7 @@ commands:
   pre-steps-macos:
     steps:
       - pre-steps:
-          python-version: "3.6.0"
+          python-version: "3.7.8"
 
   post-steps:
     steps:

@@ -144,8 +151,10 @@ executors:
 jobs:
   build-macos:
     macos:
-      xcode: 11.3.0
+      xcode: 12.5.1
     resource_class: large
+    environment:
+      DISABLE_JEMALLOC: 1 # jemalloc cause env_test hang, disable it for now
     steps:
       - increase-max-open-files-on-macos
       - install-pyenv-on-macos

@@ -156,7 +165,7 @@ jobs:
 
   build-macos-cmake:
     macos:
-      xcode: 11.3.0
+      xcode: 12.5.1
     resource_class: large
     steps:
       - increase-max-open-files-on-macos

@@ -522,14 +531,16 @@ jobs:
 
   build-macos-java:
     macos:
-      xcode: 11.3.0
+      xcode: 12.5.1
     resource_class: medium
     environment:
       JAVA_HOME: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home
+      DISABLE_JEMALLOC: 1 # jemalloc causes java 8 crash, maybe a known issue
     steps:
       - increase-max-open-files-on-macos
       - install-pyenv-on-macos
       - install-gflags-on-macos
+      - install-jdk8-on-macos
       - pre-steps-macos
       - run:
           name: "Set Java Environment"

@@ -548,7 +559,7 @@ jobs:
 
   build-macos-java-static:
     macos:
-      xcode: 11.3.0
+      xcode: 12.5.1
     resource_class: medium
     environment:
       JAVA_HOME: /Library/Java/JavaVirtualMachines/adoptopenjdk-8.jdk/Contents/Home

@@ -556,6 +567,7 @@ jobs:
       - increase-max-open-files-on-macos
       - install-pyenv-on-macos
       - install-gflags-on-macos
+      - install-jdk8-on-macos
       - install-cmake-on-macos
       - pre-steps-macos
       - run:

@@ -1,4 +1,12 @@
 # Rocksdb Change Log
+## 6.28.2 (2022-01-31)
+### Bug Fixes
+* Fixed a major bug in which batched MultiGet could return old values for keys deleted by DeleteRange when memtable Bloom filter is enabled (memtable_prefix_bloom_size_ratio > 0). (The fix includes a substantial MultiGet performance improvement in the unusual case of both memtable_whole_key_filtering and prefix_extractor.)
+
+## 6.28.1 (2022-01-10)
+### Bug Fixes
+* Fixed compilation errors on newer compiler, e.g. clang-12
+
 ## 6.28.0 (2021-12-17)
 ### New Features
 * Introduced 'CommitWithTimestamp' as a new tag. Currently, there is no API for user to trigger a write with this tag to the WAL. This is part of the efforts to support write-commited transactions with user-defined timestamps.

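To make the 6.28.2 note concrete, here is a minimal sketch (not part of this PR) of the access pattern it describes, against the public API; the database path and key values are invented, and one of the batched MultiGet overloads is used for illustration:

```cpp
#include <iostream>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  ROCKSDB_NAMESPACE::Options options;
  options.create_if_missing = true;
  // Memtable Bloom filter enabled: the precondition named in the 6.28.2 note.
  options.memtable_prefix_bloom_size_ratio = 0.02;
  options.memtable_whole_key_filtering = true;

  ROCKSDB_NAMESPACE::DB* db = nullptr;
  ROCKSDB_NAMESPACE::Status s = ROCKSDB_NAMESPACE::DB::Open(
      options, "/tmp/multiget_deleterange_demo", &db);  // path is made up
  if (!s.ok()) return 1;

  ROCKSDB_NAMESPACE::WriteOptions wo;
  s = db->Put(wo, "BB", "v2");
  if (!s.ok()) return 1;
  // Range-delete the key while it is still in the memtable.
  s = db->DeleteRange(wo, db->DefaultColumnFamily(), "BA", "CZ");
  if (!s.ok()) return 1;

  // Batched MultiGet: before the fix, this path could still report "BB".
  ROCKSDB_NAMESPACE::Slice keys[1] = {"BB"};
  ROCKSDB_NAMESPACE::PinnableSlice values[1];
  ROCKSDB_NAMESPACE::Status statuses[1];
  db->MultiGet(ROCKSDB_NAMESPACE::ReadOptions(), db->DefaultColumnFamily(),
               /*num_keys=*/1, keys, values, statuses);
  std::cout << "BB -> " << statuses[0].ToString() << std::endl;  // expect NotFound

  delete db;
  return 0;
}
```

The new regression test in the next hunk exercises the same scenario inside the test harness.
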
@@ -1502,6 +1502,63 @@ TEST_F(DBBloomFilterTest, MemtableWholeKeyBloomFilter) {
   ASSERT_EQ(1, get_perf_context()->bloom_memtable_hit_count);
 }
 
+TEST_F(DBBloomFilterTest, MemtableWholeKeyBloomFilterMultiGet) {
+  Options options = CurrentOptions();
+  options.memtable_prefix_bloom_size_ratio = 0.015;
+  options.memtable_whole_key_filtering = true;
+  Reopen(options);
+  std::string key1("AA");
+  std::string key2("BB");
+  std::string key3("CC");
+  std::string key4("DD");
+  std::string key_not("EE");
+  std::string value1("Value1");
+  std::string value2("Value2");
+  std::string value3("Value3");
+  std::string value4("Value4");
+
+  ASSERT_OK(Put(key1, value1, WriteOptions()));
+  ASSERT_OK(Put(key2, value2, WriteOptions()));
+  ASSERT_OK(Flush());
+  ASSERT_OK(Put(key3, value3, WriteOptions()));
+  const Snapshot* snapshot = db_->GetSnapshot();
+  ASSERT_OK(Put(key4, value4, WriteOptions()));
+
+  // Delete key2 and key3
+  ASSERT_OK(
+      db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(), "BA", "CZ"));
+
+  // Read without snapshot
+  auto results = MultiGet({key_not, key1, key2, key3, key4});
+  ASSERT_EQ(results[0], "NOT_FOUND");
+  ASSERT_EQ(results[1], value1);
+  ASSERT_EQ(results[2], "NOT_FOUND");
+  ASSERT_EQ(results[3], "NOT_FOUND");
+  ASSERT_EQ(results[4], value4);
+
+  // Also check Get
+  ASSERT_EQ(Get(key1), value1);
+  ASSERT_EQ(Get(key2), "NOT_FOUND");
+  ASSERT_EQ(Get(key3), "NOT_FOUND");
+  ASSERT_EQ(Get(key4), value4);
+
+  // Read with snapshot
+  results = MultiGet({key_not, key1, key2, key3, key4}, snapshot);
+  ASSERT_EQ(results[0], "NOT_FOUND");
+  ASSERT_EQ(results[1], value1);
+  ASSERT_EQ(results[2], value2);
+  ASSERT_EQ(results[3], value3);
+  ASSERT_EQ(results[4], "NOT_FOUND");
+
+  // Also check Get
+  ASSERT_EQ(Get(key1, snapshot), value1);
+  ASSERT_EQ(Get(key2, snapshot), value2);
+  ASSERT_EQ(Get(key3, snapshot), value3);
+  ASSERT_EQ(Get(key4, snapshot), "NOT_FOUND");
+
+  db_->ReleaseSnapshot(snapshot);
+}
+
 TEST_F(DBBloomFilterTest, MemtablePrefixBloomOutOfDomain) {
   constexpr size_t kPrefixSize = 8;
   const std::string kKey = "key";

@@ -33,6 +33,7 @@
 #include "rocksdb/iterator.h"
 #include "rocksdb/merge_operator.h"
 #include "rocksdb/slice_transform.h"
+#include "rocksdb/types.h"
 #include "rocksdb/write_buffer_manager.h"
 #include "table/internal_iterator.h"
 #include "table/iterator_wrapper.h"

@@ -447,11 +448,13 @@ FragmentedRangeTombstoneIterator* MemTable::NewRangeTombstoneIterator(
       is_range_del_table_empty_.load(std::memory_order_relaxed)) {
     return nullptr;
   }
+  return NewRangeTombstoneIteratorInternal(read_options, read_seq);
+}
+
+FragmentedRangeTombstoneIterator* MemTable::NewRangeTombstoneIteratorInternal(
+    const ReadOptions& read_options, SequenceNumber read_seq) {
   auto* unfragmented_iter = new MemTableIterator(
       *this, read_options, nullptr /* arena */, true /* use_range_del_table */);
-  if (unfragmented_iter == nullptr) {
-    return nullptr;
-  }
   auto fragmented_tombstone_list =
       std::make_shared<FragmentedRangeTombstoneList>(
           std::unique_ptr<InternalIterator>(unfragmented_iter),

@@ -960,53 +963,58 @@ void MemTable::MultiGet(const ReadOptions& read_options, MultiGetRange* range,
   }
   PERF_TIMER_GUARD(get_from_memtable_time);
 
+  // For now, memtable Bloom filter is effectively disabled if there are any
+  // range tombstones. This is the simplest way to ensure range tombstones are
+  // handled. TODO: allow Bloom checks where max_covering_tombstone_seq==0
+  bool no_range_del = read_options.ignore_range_deletions ||
+                      is_range_del_table_empty_.load(std::memory_order_relaxed);
   MultiGetRange temp_range(*range, range->begin(), range->end());
-  if (bloom_filter_) {
-    std::array<Slice*, MultiGetContext::MAX_BATCH_SIZE> keys;
-    std::array<bool, MultiGetContext::MAX_BATCH_SIZE> may_match = {{true}};
-    autovector<Slice, MultiGetContext::MAX_BATCH_SIZE> prefixes;
+  if (bloom_filter_ && no_range_del) {
+    bool whole_key =
+        !prefix_extractor_ || moptions_.memtable_whole_key_filtering;
+    std::array<Slice, MultiGetContext::MAX_BATCH_SIZE> bloom_keys;
+    std::array<bool, MultiGetContext::MAX_BATCH_SIZE> may_match;
+    std::array<size_t, MultiGetContext::MAX_BATCH_SIZE> range_indexes;
     int num_keys = 0;
     for (auto iter = temp_range.begin(); iter != temp_range.end(); ++iter) {
-      if (!prefix_extractor_) {
-        keys[num_keys++] = &iter->ukey_without_ts;
+      if (whole_key) {
+        bloom_keys[num_keys] = iter->ukey_without_ts;
+        range_indexes[num_keys++] = iter.index();
       } else if (prefix_extractor_->InDomain(iter->ukey_without_ts)) {
-        prefixes.emplace_back(
-            prefix_extractor_->Transform(iter->ukey_without_ts));
-        keys[num_keys++] = &prefixes.back();
+        bloom_keys[num_keys] =
+            prefix_extractor_->Transform(iter->ukey_without_ts);
+        range_indexes[num_keys++] = iter.index();
+      } else {
+        // TODO: consider not counting these as Bloom hits to more closely
+        // match bloom_sst_hit_count
+        PERF_COUNTER_ADD(bloom_memtable_hit_count, 1);
       }
     }
-    bloom_filter_->MayContain(num_keys, &keys[0], &may_match[0]);
-    int idx = 0;
-    for (auto iter = temp_range.begin(); iter != temp_range.end(); ++iter) {
-      if (prefix_extractor_ &&
-          !prefix_extractor_->InDomain(iter->ukey_without_ts)) {
-        PERF_COUNTER_ADD(bloom_memtable_hit_count, 1);
-        continue;
-      }
-      if (!may_match[idx]) {
-        temp_range.SkipKey(iter);
+    bloom_filter_->MayContain(num_keys, &bloom_keys[0], &may_match[0]);
+    for (int i = 0; i < num_keys; ++i) {
+      if (!may_match[i]) {
+        temp_range.SkipIndex(range_indexes[i]);
         PERF_COUNTER_ADD(bloom_memtable_miss_count, 1);
       } else {
        PERF_COUNTER_ADD(bloom_memtable_hit_count, 1);
       }
-      idx++;
     }
   }
   for (auto iter = temp_range.begin(); iter != temp_range.end(); ++iter) {
-    SequenceNumber seq = kMaxSequenceNumber;
     bool found_final_value{false};
     bool merge_in_progress = iter->s->IsMergeInProgress();
-    std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter(
-        NewRangeTombstoneIterator(
-            read_options, GetInternalKeySeqno(iter->lkey->internal_key())));
-    if (range_del_iter != nullptr) {
+    if (!no_range_del) {
+      std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter(
+          NewRangeTombstoneIteratorInternal(
+              read_options, GetInternalKeySeqno(iter->lkey->internal_key())));
       iter->max_covering_tombstone_seq = std::max(
           iter->max_covering_tombstone_seq,
           range_del_iter->MaxCoveringTombstoneSeqnum(iter->lkey->user_key()));
     }
+    SequenceNumber dummy_seq;
     GetFromTable(*(iter->lkey), iter->max_covering_tombstone_seq, true,
                  callback, &iter->is_blob_index, iter->value->GetSelf(),
-                 iter->timestamp, iter->s, &(iter->merge_context), &seq,
+                 iter->timestamp, iter->s, &(iter->merge_context), &dummy_seq,
                  &found_final_value, &merge_in_progress);
 
     if (!found_final_value && merge_in_progress) {

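Why the fix tracks range_indexes: keys outside the prefix extractor's domain are never appended to bloom_keys, so slot i of the batched filter result no longer lines up with position i of the MultiGet batch. A small standalone sketch of that parallel-array bookkeeping, with a hypothetical filter and domain check standing in for the real ones:

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Stand-in for the batched filter probe (bloom_filter_->MayContain above):
// pretends every probed key may be present.
void MayContainBatch(const std::vector<std::string>& probes,
                     std::vector<bool>* may_match) {
  may_match->assign(probes.size(), true);
}

int main() {
  // A MultiGet-style batch where only some keys get probed in the filter
  // (here: keys shorter than 3 bytes count as "out of the prefix domain").
  std::vector<std::string> batch = {"aa1", "x", "aa2", "bb7"};
  auto in_domain = [](const std::string& k) { return k.size() >= 3; };

  std::vector<std::string> probes;    // keys actually handed to the filter
  std::vector<size_t> batch_indexes;  // where each probe came from in `batch`
  for (size_t i = 0; i < batch.size(); ++i) {
    if (in_domain(batch[i])) {
      probes.push_back(batch[i].substr(0, 2));  // the "prefix"
      batch_indexes.push_back(i);
    }
  }

  std::vector<bool> may_match;
  MayContainBatch(probes, &may_match);

  // Probe results are compacted: result i belongs to batch_indexes[i], not to
  // position i of the batch -- hence SkipIndex(range_indexes[i]) in the fix.
  std::vector<bool> skip(batch.size(), false);
  for (size_t i = 0; i < probes.size(); ++i) {
    if (!may_match[i]) {
      skip[batch_indexes[i]] = true;
    }
  }
  return skip[1] ? 1 : 0;  // "x" was never probed, so it is never skipped
}
```
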
@@ -600,6 +600,10 @@ class MemTable {
                     std::string* value, std::string* timestamp, Status* s,
                     MergeContext* merge_context, SequenceNumber* seq,
                     bool* found_final_value, bool* merge_in_progress);
+
+  // Always returns non-null and assumes certain pre-checks are done
+  FragmentedRangeTombstoneIterator* NewRangeTombstoneIteratorInternal(
+      const ReadOptions& read_options, SequenceNumber read_seq);
 };
 
 extern const char* EncodeKey(std::string* scratch, const Slice& target);

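A hypothetical sketch (invented class names) of the contract split that this declaration documents: the public method performs the cheap pre-checks and may return nullptr, while the Internal variant assumes those checks already passed and always returns non-null.

```cpp
#include <memory>

struct RangeTombstoneIter {};  // stand-in for FragmentedRangeTombstoneIterator

class TombstoneSource {
 public:
  // Public entry point: does the cheap pre-checks and may return nullptr.
  std::unique_ptr<RangeTombstoneIter> NewIterator(bool ignore_range_deletions) const {
    if (ignore_range_deletions || empty_) {
      return nullptr;
    }
    return NewIteratorInternal();
  }

 private:
  // Assumes the pre-checks above already passed; always returns non-null, so
  // callers (like the MultiGet loop in the earlier hunk) can skip a null check.
  std::unique_ptr<RangeTombstoneIter> NewIteratorInternal() const {
    return std::make_unique<RangeTombstoneIter>();
  }

  bool empty_ = false;
};

int main() {
  TombstoneSource src;
  return src.NewIterator(/*ignore_range_deletions=*/false) ? 0 : 1;
}
```
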
@@ -366,10 +366,16 @@ struct AdvancedColumnFamilyOptions {
                                              Slice delta_value,
                                              std::string* merged_value) = nullptr;
 
-  // if prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
-  // create prefix bloom for memtable with the size of
+  // Should really be called `memtable_bloom_size_ratio`. Enables a dynamic
+  // Bloom filter in memtable to optimize many queries that must go beyond
+  // the memtable. The size in bytes of the filter is
   // write_buffer_size * memtable_prefix_bloom_size_ratio.
-  // If it is larger than 0.25, it is sanitized to 0.25.
+  // * If prefix_extractor is set, the filter includes prefixes.
+  // * If memtable_whole_key_filtering, the filter includes whole keys.
+  // * If both, the filter includes both.
+  // * If neither, the feature is disabled.
+  //
+  // If this value is larger than 0.25, it is sanitized to 0.25.
   //
   // Default: 0 (disable)
   //

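As a usage sketch of the semantics in the rewritten comment (the numbers are arbitrary examples, not recommendations, and NewFixedPrefixTransform is just one of the stock prefix extractors):

```cpp
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"

// With write_buffer_size of 64 MB and a ratio of 0.02, the memtable Bloom
// filter is about 1.3 MB; a ratio above 0.25 would be sanitized down to 0.25.
ROCKSDB_NAMESPACE::Options MemtableBloomOptions() {
  ROCKSDB_NAMESPACE::Options options;
  options.write_buffer_size = 64 << 20;
  options.memtable_prefix_bloom_size_ratio = 0.02;
  // Prefix extractor set => the filter indexes 4-byte prefixes; enabling
  // memtable_whole_key_filtering as well makes it index whole keys too.
  options.prefix_extractor.reset(ROCKSDB_NAMESPACE::NewFixedPrefixTransform(4));
  options.memtable_whole_key_filtering = true;
  return options;
}
```
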
@@ -75,7 +75,7 @@ class Comparator : public Customizable {
   //
   // Names starting with "rocksdb." are reserved and should not be used
   // by any clients of this package.
-  virtual const char* Name() const override = 0;
+  const char* Name() const override = 0;
 
   // Advanced functions: these are used to reduce the space requirements
   // for internal data structures like index blocks.

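For illustration, a hypothetical Comparator subclass written in the same style this cleanup applies (`override` already implies `virtual`, so the redundant keyword is dropped), with a name that respects the reserved "rocksdb." prefix rule above:

```cpp
#include <string>

#include "rocksdb/comparator.h"
#include "rocksdb/slice.h"

// Illustrative comparator, not part of this PR.
class ReverseBytewiseishComparator : public ROCKSDB_NAMESPACE::Comparator {
 public:
  const char* Name() const override { return "example.ReverseBytewiseish"; }

  int Compare(const ROCKSDB_NAMESPACE::Slice& a,
              const ROCKSDB_NAMESPACE::Slice& b) const override {
    return -a.compare(b);  // reverse of bytewise order
  }

  // Not tightening key ranges in this sketch; no-ops are valid here.
  void FindShortestSeparator(std::string* /*start*/,
                             const ROCKSDB_NAMESPACE::Slice& /*limit*/) const override {}
  void FindShortSuccessor(std::string* /*key*/) const override {}
};
```

The same `virtual`-removal idiom appears in the next few hunks for FileChecksumGenFactory, MemTableRepFactory, SstPartitionerFactory, and TablePropertiesCollectorFactory.
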
@@ -85,7 +85,7 @@ class FileChecksumGenFactory : public Customizable {
       const FileChecksumGenContext& context) = 0;
 
   // Return the name of this FileChecksumGenFactory.
-  virtual const char* Name() const override = 0;
+  const char* Name() const override = 0;
 };
 
 // FileChecksumList stores the checksum information of a list of files (e.g.,

@@ -311,7 +311,7 @@ class MemTableRepFactory : public Customizable {
     return CreateMemTableRep(key_cmp, allocator, slice_transform, logger);
   }
 
-  virtual const char* Name() const override = 0;
+  const char* Name() const override = 0;
 
   // Return true if the current MemTableRep supports concurrent inserts
   // Default: false

@@ -93,7 +93,7 @@ class SstPartitionerFactory : public Customizable {
       const SstPartitioner::Context& context) const = 0;
 
   // Returns a name that identifies this partitioner factory.
-  virtual const char* Name() const override = 0;
+  const char* Name() const override = 0;
 };
 
 /*

@@ -162,7 +162,7 @@ class TablePropertiesCollectorFactory : public Customizable {
       TablePropertiesCollectorFactory::Context context) = 0;
 
   // The name of the properties collector can be used for debugging purpose.
-  virtual const char* Name() const override = 0;
+  const char* Name() const override = 0;
 
   // Can be overridden by sub-classes to return the Name, followed by
   // configuration info that will // be logged to the info log when the

@@ -11,7 +11,7 @@
 
 #define ROCKSDB_MAJOR 6
 #define ROCKSDB_MINOR 28
-#define ROCKSDB_PATCH 0
+#define ROCKSDB_PATCH 2
 
 // Do not use these. We made the mistake of declaring macros starting with
 // double underscore. Now we have to live with our choice. We'll deprecate these

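A hedged example of consuming the bumped version macros at compile time; ROCKSDB_VERSION_CODE is a local helper defined here for illustration, not something the version header provides:

```cpp
#include "rocksdb/version.h"

// Pack the three components so they can be compared as a single number.
#define ROCKSDB_VERSION_CODE \
  (ROCKSDB_MAJOR * 10000 + ROCKSDB_MINOR * 100 + ROCKSDB_PATCH)

#if ROCKSDB_VERSION_CODE < 62802
#error "need RocksDB >= 6.28.2 (batched MultiGet + DeleteRange fix)"
#endif
```
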
@@ -202,22 +202,31 @@ struct SimpleOptions {
   TestCustomizable* cp = nullptr;
 };
 
+static SimpleOptions dummy_simple_options;
+template <typename T1>
+int offset_of(T1 SimpleOptions::*member) {
+  return static_cast<int>(
+      reinterpret_cast<uintptr_t>(
+          std::addressof(dummy_simple_options.*member)) -
+      reinterpret_cast<uintptr_t>(std::addressof(dummy_simple_options)));
+}
+
 static std::unordered_map<std::string, OptionTypeInfo> simple_option_info = {
 #ifndef ROCKSDB_LITE
     {"bool",
-     {offsetof(struct SimpleOptions, b), OptionType::kBoolean,
+     {offset_of(&SimpleOptions::b), OptionType::kBoolean,
       OptionVerificationType::kNormal, OptionTypeFlags::kNone}},
     {"unique",
      OptionTypeInfo::AsCustomUniquePtr<TestCustomizable>(
-         offsetof(struct SimpleOptions, cu), OptionVerificationType::kNormal,
+         offset_of(&SimpleOptions::cu), OptionVerificationType::kNormal,
          OptionTypeFlags::kAllowNull)},
     {"shared",
      OptionTypeInfo::AsCustomSharedPtr<TestCustomizable>(
-         offsetof(struct SimpleOptions, cs), OptionVerificationType::kNormal,
+         offset_of(&SimpleOptions::cs), OptionVerificationType::kNormal,
          OptionTypeFlags::kAllowNull)},
     {"pointer",
      OptionTypeInfo::AsCustomRawPtr<TestCustomizable>(
-         offsetof(struct SimpleOptions, cp), OptionVerificationType::kNormal,
+         offset_of(&SimpleOptions::cp), OptionVerificationType::kNormal,
          OptionTypeFlags::kAllowNull)},
 #endif // ROCKSDB_LITE
 };

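The offset_of helper replaces offsetof presumably because SimpleOptions holds smart-pointer members and is therefore not a standard-layout type, where offsetof is only conditionally supported and compilers commonly warn (-Winvalid-offsetof). A standalone sketch of the same idiom on a hypothetical struct:

```cpp
#include <cstdint>
#include <memory>

// Hypothetical struct mirroring the problem: a smart-pointer member makes it
// non-standard-layout, so offsetof() is conditionally supported at best.
struct Opts {
  bool b = true;
  std::unique_ptr<int> cu;
};

static Opts dummy_opts;

// Same idiom as the diff: take the address of the member inside a dummy
// object and subtract the address of the object itself.
template <typename T>
int offset_of(T Opts::*member) {
  return static_cast<int>(
      reinterpret_cast<uintptr_t>(std::addressof(dummy_opts.*member)) -
      reinterpret_cast<uintptr_t>(std::addressof(dummy_opts)));
}

int main() {
  return offset_of(&Opts::cu) > 0 ? 0 : 1;  // byte offset of `cu` within Opts
}
```
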
@@ -235,9 +235,9 @@ class MultiGetContext {
 
     bool empty() const { return RemainingMask() == 0; }
 
-    void SkipKey(const Iterator& iter) {
-      skip_mask_ |= uint64_t{1} << iter.index_;
-    }
+    void SkipIndex(size_t index) { skip_mask_ |= uint64_t{1} << index; }
+
+    void SkipKey(const Iterator& iter) { SkipIndex(iter.index_); }
 
     bool IsKeySkipped(const Iterator& iter) const {
       return skip_mask_ & (uint64_t{1} << iter.index_);

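A minimal sketch (hypothetical standalone struct, not the real MultiGetContext) of the bookkeeping behind SkipIndex()/SkipKey(): skip_mask_ is a uint64_t, so each key in a batch owns one bit and can now be skipped by absolute index without holding its iterator.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>

struct SkipMask {
  uint64_t bits = 0;
  void SkipIndex(size_t index) { bits |= uint64_t{1} << index; }
  bool IsSkipped(size_t index) const { return (bits >> index) & 1; }
};

int main() {
  SkipMask m;
  m.SkipIndex(3);
  assert(m.IsSkipped(3) && !m.IsSkipped(2));
  return 0;
}
```
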
@@ -87,6 +87,7 @@ default_params = {
     "partition_filters": lambda: random.randint(0, 1),
     "partition_pinning": lambda: random.randint(0, 3),
     "pause_background_one_in": 1000000,
+    "prefix_size" : lambda: random.choice([-1, 1, 5, 7, 8]),
     "prefixpercent": 5,
     "progress_reports": 0,
     "readpercent": 45,

@@ -156,6 +157,8 @@ default_params = {
     "user_timestamp_size": 0,
     "secondary_cache_fault_one_in" : lambda: random.choice([0, 0, 32]),
     "prepopulate_block_cache" : lambda: random.choice([0, 1]),
+    "memtable_prefix_bloom_size_ratio": lambda: random.choice([0.001, 0.01, 0.1, 0.5]),
+    "memtable_whole_key_filtering": lambda: random.randint(0, 1),
 }
 
 _TEST_DIR_ENV_VAR = 'TEST_TMPDIR'

@@ -233,9 +236,6 @@ simple_default_params = {
     "max_background_compactions": 1,
     "max_bytes_for_level_base": 67108864,
     "memtablerep": "skip_list",
-    "prefixpercent": 0,
-    "readpercent": 50,
-    "prefix_size" : -1,
     "target_file_size_base": 16777216,
     "target_file_size_multiplier": 1,
     "test_batches_snapshots": 0,

@@ -386,8 +386,15 @@ def finalize_and_sanitize(src_params):
         dest_params["readpercent"] += dest_params.get("iterpercent", 10)
         dest_params["iterpercent"] = 0
         dest_params["test_batches_snapshots"] = 0
+    if dest_params.get("prefix_size") == -1:
+        dest_params["readpercent"] += dest_params.get("prefixpercent", 20)
+        dest_params["prefixpercent"] = 0
+        dest_params["test_batches_snapshots"] = 0
     if dest_params.get("test_batches_snapshots") == 0:
         dest_params["batch_protection_bytes_per_key"] = 0
+    if (dest_params.get("prefix_size") == -1 and
+            dest_params.get("memtable_whole_key_filtering") == 0):
+        dest_params["memtable_prefix_bloom_size_ratio"] = 0
     return dest_params
 
 def gen_cmd_params(args):

@@ -65,7 +65,7 @@ class DynamicBloom {
   // Multithreaded access to this function is OK
   bool MayContain(const Slice& key) const;
 
-  void MayContain(int num_keys, Slice** keys, bool* may_match) const;
+  void MayContain(int num_keys, Slice* keys, bool* may_match) const;
 
   // Multithreaded access to this function is OK
   bool MayContainHash(uint32_t hash) const;

@@ -120,12 +120,12 @@ inline bool DynamicBloom::MayContain(const Slice& key) const {
   return (MayContainHash(BloomHash(key)));
 }
 
-inline void DynamicBloom::MayContain(int num_keys, Slice** keys,
+inline void DynamicBloom::MayContain(int num_keys, Slice* keys,
                                      bool* may_match) const {
   std::array<uint32_t, MultiGetContext::MAX_BATCH_SIZE> hashes;
   std::array<size_t, MultiGetContext::MAX_BATCH_SIZE> byte_offsets;
   for (int i = 0; i < num_keys; ++i) {
-    hashes[i] = BloomHash(*keys[i]);
+    hashes[i] = BloomHash(keys[i]);
     size_t a = FastRange32(kLen, hashes[i]);
     PREFETCH(data_ + a, 0, 3);
     byte_offsets[i] = a;

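The signature change (Slice** to Slice*) means callers pass a contiguous array of Slices, as the new bloom_keys array does, and the inline body hashes every key and prefetches the word it will probe before doing any membership test. A standalone sketch of that two-pass pattern with a made-up hash and bit layout (not DynamicBloom's actual probing scheme):

```cpp
#include <array>
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

constexpr int kMaxBatch = 32;

// Made-up 32-bit range mapper in the spirit of FastRange32 used above.
inline uint32_t MapToRange(uint32_t range, uint32_t hash) {
  return static_cast<uint32_t>((uint64_t{range} * hash) >> 32);
}

// Pass 1 hashes each key and prefetches the word it will touch; pass 2 does
// the membership tests, by which time those words are likely in cache.
void MayContainBatch(const std::vector<uint64_t>& bits, int num_keys,
                     const std::string* keys, bool* may_match) {
  std::array<uint32_t, kMaxBatch> hashes;
  std::array<size_t, kMaxBatch> word_index;
  const uint32_t len = static_cast<uint32_t>(bits.size());
  for (int i = 0; i < num_keys; ++i) {
    hashes[i] = static_cast<uint32_t>(std::hash<std::string>{}(keys[i]));
    word_index[i] = MapToRange(len, hashes[i]);
    __builtin_prefetch(&bits[word_index[i]]);  // GCC/Clang; PREFETCH() in RocksDB
  }
  for (int i = 0; i < num_keys; ++i) {
    may_match[i] = (bits[word_index[i]] >> (hashes[i] & 63)) & 1;
  }
}

int main() {
  std::vector<uint64_t> bits(1024, ~uint64_t{0});  // all bits set: always "may match"
  std::string keys[2] = {"AA", "BB"};
  bool may_match[2];
  MayContainBatch(bits, 2, keys, may_match);
  return may_match[0] && may_match[1] ? 0 : 1;
}
```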