// Copyright (c) 2014, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.

#include <algorithm>
#include <map>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

#include "table/meta_blocks.h"
#include "table/cuckoo_table_builder.h"
#include "util/testharness.h"
#include "util/testutil.h"

namespace rocksdb {
extern const uint64_t kCuckooTableMagicNumber;

namespace {
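// Each test fixes the full sequence of candidate bucket indices for every
// key in this map, making GetSliceHash below fully deterministic and
// test-controlled.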
std::unordered_map<std::string, std::vector<uint64_t>> hash_map;

uint64_t GetSliceHash(const Slice& s, uint32_t index,
                      uint64_t max_num_buckets) {
  return hash_map[s.ToString()][index];
}
}  // namespace

class CuckooBuilderTest : public testing::Test {
 public:
  CuckooBuilderTest() {
    env_ = Env::Default();
    Options options;
    options.allow_mmap_reads = true;
    env_options_ = EnvOptions(options);
  }

  void CheckFileContents(const std::vector<std::string>& keys,
      const std::vector<std::string>& values,
      const std::vector<uint64_t>& expected_locations,
      std::string expected_unused_bucket, uint64_t expected_table_size,
      uint32_t expected_num_hash_func, bool expected_is_last_level,
      uint32_t expected_cuckoo_block_size = 1) {
    // Read file
    unique_ptr<RandomAccessFile> read_file;
    ASSERT_OK(env_->NewRandomAccessFile(fname, &read_file, env_options_));
    uint64_t read_file_size;
    ASSERT_OK(env_->GetFileSize(fname, &read_file_size));

    // Assert Table Properties.
    TableProperties* props = nullptr;
    ASSERT_OK(ReadTableProperties(read_file.get(), read_file_size,
        kCuckooTableMagicNumber, env_, nullptr, &props));
    // Check unused bucket.
    std::string unused_key = props->user_collected_properties[
        CuckooTablePropertyNames::kEmptyKey];
    ASSERT_EQ(expected_unused_bucket.substr(0, props->fixed_key_len),
        unused_key);

    uint32_t value_len_found =
        *reinterpret_cast<const uint32_t*>(props->user_collected_properties[
            CuckooTablePropertyNames::kValueLength].data());
    ASSERT_EQ(values.empty() ? 0 : values[0].size(), value_len_found);
    ASSERT_EQ(props->raw_value_size, values.size() * value_len_found);
    const uint64_t table_size =
        *reinterpret_cast<const uint64_t*>(props->user_collected_properties[
            CuckooTablePropertyNames::kHashTableSize].data());
    ASSERT_EQ(expected_table_size, table_size);
    const uint32_t num_hash_func_found =
        *reinterpret_cast<const uint32_t*>(props->user_collected_properties[
            CuckooTablePropertyNames::kNumHashFunc].data());
    ASSERT_EQ(expected_num_hash_func, num_hash_func_found);
    const uint32_t cuckoo_block_size =
        *reinterpret_cast<const uint32_t*>(props->user_collected_properties[
            CuckooTablePropertyNames::kCuckooBlockSize].data());
    ASSERT_EQ(expected_cuckoo_block_size, cuckoo_block_size);
    const bool is_last_level_found =
        *reinterpret_cast<const bool*>(props->user_collected_properties[
            CuckooTablePropertyNames::kIsLastLevel].data());
    ASSERT_EQ(expected_is_last_level, is_last_level_found);

    ASSERT_EQ(props->num_entries, keys.size());
    ASSERT_EQ(props->fixed_key_len, keys.empty() ? 0 : keys[0].size());
    ASSERT_EQ(props->data_size, expected_unused_bucket.size() *
        (expected_table_size + expected_cuckoo_block_size - 1));
    ASSERT_EQ(props->raw_key_size, keys.size() * props->fixed_key_len);
    delete props;

    // Check contents of the buckets.
    std::vector<bool> keys_found(keys.size(), false);
    size_t bucket_size = expected_unused_bucket.size();
    for (uint32_t i = 0; i < table_size + cuckoo_block_size - 1; ++i) {
      Slice read_slice;
      ASSERT_OK(read_file->Read(i * bucket_size, bucket_size,
          &read_slice, nullptr));
      size_t key_idx =
          std::find(expected_locations.begin(), expected_locations.end(), i) -
          expected_locations.begin();
      if (key_idx == keys.size()) {
        // i is not one of the expected locations. Empty bucket.
        ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
      } else {
        keys_found[key_idx] = true;
        ASSERT_EQ(read_slice.compare(keys[key_idx] + values[key_idx]), 0);
      }
    }
    // Check that all keys were found.
    for (auto key_found : keys_found) {
      ASSERT_TRUE(key_found);
    }
  }

  std::string GetInternalKey(Slice user_key, bool zero_seqno) {
    IterKey ikey;
    ikey.SetInternalKey(user_key, zero_seqno ? 0 : 1000, kTypeValue);
    return ikey.GetKey().ToString();
  }

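  // Returns the smallest power of two strictly greater than num. The builder
  // keeps the number of buckets a power of two so that a bucket index can be
  // computed with a bitwise AND instead of a mod.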
  uint64_t NextPowOf2(uint64_t num) {
    uint64_t n = 2;
    while (n <= num) {
      n *= 2;
    }
    return n;
  }

  Env* env_;
  EnvOptions env_options_;
  std::string fname;
  const double kHashTableRatio = 0.9;
};

TEST_F(CuckooBuilderTest, SuccessWithEmptyFile) {
  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/EmptyFile";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      4, 100, BytewiseComparator(), 1, false, false, GetSliceHash);
  ASSERT_OK(builder.status());
  ASSERT_EQ(0UL, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
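  // Even an empty table records a hash table size of 2 and two hash
  // functions in its properties, which is what is verified here.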
  CheckFileContents({}, {}, {}, "", 2, 2, false);
}

TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  // Need to have a temporary variable here as the VS compiler does not
  // currently support operator= with an initializer_list as a parameter.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {1, 2, 3, 4}},
      {user_keys[2], {2, 3, 4, 5}},
      {user_keys[3], {3, 4, 5, 6}}};
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/NoCollisionFullKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 2, false);
}

TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  // Need to have a temporary variable here as the VS compiler does not
  // currently support operator= with an initializer_list as a parameter.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {0, 1, 2, 3}},
      {user_keys[2], {0, 1, 2, 3}},
      {user_keys[3], {0, 1, 2, 3}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionFullKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 4, false);
}

TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  // Need to have a temporary variable here as the VS compiler does not
  // currently support operator= with an initializer_list as a parameter.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {0, 1, 2, 3}},
      {user_keys[2], {0, 1, 2, 3}},
      {user_keys[3], {0, 1, 2, 3}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  uint32_t cuckoo_block_size = 2;
  fname = test::TmpDir() + "/WithCollisionFullKey2";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), cuckoo_block_size,
      false, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 3, false, cuckoo_block_size);
}

TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) {
  // Have two hash functions. Insert elements with overlapping hashes.
  // Finally, insert an element with a hash value somewhere in the middle
  // so that it displaces all the elements after it.
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03",
                                        "key04", "key05"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
  // Need to have a temporary variable here as the VS compiler does not
  // currently support operator= with an initializer_list as a parameter.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1}},
      {user_keys[1], {1, 2}},
      {user_keys[2], {2, 3}},
      {user_keys[3], {3, 4}},
      {user_keys[4], {0, 2}},
  };
  hash_map = std::move(hm);

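  // key05 hashes to buckets {0, 2}, which are both occupied: it evicts key03
  // from bucket 2, which evicts key04 from bucket 3, which moves into the
  // empty bucket 4, matching the locations expected below.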
  std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionPathFullKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 2, false);
}

TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) {
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03",
                                        "key04", "key05"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
  // Need to have a temporary variable here as the VS compiler does not
  // currently support operator= with an initializer_list as a parameter.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1}},
      {user_keys[1], {1, 2}},
      {user_keys[2], {3, 4}},
      {user_keys[3], {4, 5}},
      {user_keys[4], {0, 3}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {2, 1, 3, 4, 0};
  std::vector<std::string> keys;
  for (auto& user_key : user_keys) {
    keys.push_back(GetInternalKey(user_key, false));
  }
  uint64_t expected_table_size = NextPowOf2(keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionPathFullKeyAndCuckooBlock";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 2, false, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(keys[i]), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = GetInternalKey("key00", true);
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 2, false, 2);
}

TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  // Need to have a temporary variable here as the VS compiler does not
  // currently support operator= with an initializer_list as a parameter.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {1, 2, 3, 4}},
      {user_keys[2], {2, 3, 4, 5}},
      {user_keys[3], {3, 4, 5, 6}}};
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  uint64_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/NoCollisionUserKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = user_keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = "key00";
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(user_keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 2, true);
}

TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) {
  uint32_t num_hash_fun = 4;
  std::vector<std::string> user_keys = {"key01", "key02", "key03", "key04"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04"};
  // Need to have a temporary variable here as the VS compiler does not
  // currently support operator= with an initializer_list as a parameter.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1, 2, 3}},
      {user_keys[1], {0, 1, 2, 3}},
      {user_keys[2], {0, 1, 2, 3}},
      {user_keys[3], {0, 1, 2, 3}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 2, 3};
  uint64_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionUserKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = user_keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = "key00";
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(user_keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 4, true);
}

TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) {
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03",
                                        "key04", "key05"};
  std::vector<std::string> values = {"v01", "v02", "v03", "v04", "v05"};
  // Need to have a temporary variable here as the VS compiler does not
  // currently support operator= with an initializer_list as a parameter.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1}},
      {user_keys[1], {1, 2}},
      {user_keys[2], {2, 3}},
      {user_keys[3], {3, 4}},
      {user_keys[4], {0, 2}},
  };
  hash_map = std::move(hm);

  std::vector<uint64_t> expected_locations = {0, 1, 3, 4, 2};
  uint64_t expected_table_size = NextPowOf2(user_keys.size() / kHashTableRatio);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionPathUserKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 2, BytewiseComparator(), 1, false, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i]));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  size_t bucket_size = user_keys[0].size() + values[0].size();
  ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize());
  ASSERT_OK(builder.Finish());
  ASSERT_OK(writable_file->Close());
  ASSERT_LE(expected_table_size * bucket_size, builder.FileSize());

  std::string expected_unused_bucket = "key00";
  expected_unused_bucket += std::string(values[0].size(), 'a');
  CheckFileContents(user_keys, values, expected_locations,
      expected_unused_bucket, expected_table_size, 2, true);
}

TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) {
  // Have two hash functions. Insert elements with overlapping hashes.
  // Finally, try inserting an element with a hash value somewhere in the
  // middle; it should fail because the number of elements to displace is
  // too high.
  uint32_t num_hash_fun = 2;
  std::vector<std::string> user_keys = {"key01", "key02", "key03",
                                        "key04", "key05"};
  // Need to have a temporary variable here as the VS compiler does not
  // currently support operator= with an initializer_list as a parameter.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {user_keys[0], {0, 1}},
      {user_keys[1], {1, 2}},
      {user_keys[2], {2, 3}},
      {user_keys[3], {3, 4}},
      {user_keys[4], {0, 1}},
  };
  hash_map = std::move(hm);

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/WithCollisionPathUserKey";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 2, BytewiseComparator(), 1, false, false, GetSliceHash);
  ASSERT_OK(builder.status());
  for (uint32_t i = 0; i < user_keys.size(); i++) {
    builder.Add(Slice(GetInternalKey(user_keys[i], false)), Slice("value"));
    ASSERT_EQ(builder.NumEntries(), i + 1);
    ASSERT_OK(builder.status());
  }
  ASSERT_TRUE(builder.Finish().IsNotSupported());
  ASSERT_OK(writable_file->Close());
}

TEST_F(CuckooBuilderTest, FailWhenSameKeyInserted) {
  // Need to have a temporary variable here as the VS compiler does not
  // currently support operator= with an initializer_list as a parameter.
  std::unordered_map<std::string, std::vector<uint64_t>> hm = {
      {"repeatedkey", {0, 1, 2, 3}}};
  hash_map = std::move(hm);
  uint32_t num_hash_fun = 4;
  std::string user_key = "repeatedkey";

  unique_ptr<WritableFile> writable_file;
  fname = test::TmpDir() + "/FailWhenSameKeyInserted";
  ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_));
  CuckooTableBuilder builder(writable_file.get(), kHashTableRatio,
      num_hash_fun, 100, BytewiseComparator(), 1, false, false, GetSliceHash);
  ASSERT_OK(builder.status());

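  // Add the same user key twice, under different sequence numbers. Both Adds
  // succeed, but Finish() must report NotSupported: a cuckoo table cannot
  // hold duplicate user keys.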
  builder.Add(Slice(GetInternalKey(user_key, false)), Slice("value1"));
  ASSERT_EQ(builder.NumEntries(), 1u);
  ASSERT_OK(builder.status());
  builder.Add(Slice(GetInternalKey(user_key, true)), Slice("value2"));
  ASSERT_EQ(builder.NumEntries(), 2u);
  ASSERT_OK(builder.status());

  ASSERT_TRUE(builder.Finish().IsNotSupported());
  ASSERT_OK(writable_file->Close());
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}