rocksdb/table/cuckoo_table_reader_test.cc

// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#ifndef ROCKSDB_LITE
#ifndef GFLAGS
#include <cstdio>
int main() {
  fprintf(stderr, "Please install gflags to run this test... Skipping...\n");
  return 0;
}
#else
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include <gflags/gflags.h>
#include <vector>
#include <string>
#include <map>
#include <unordered_map>
#include "table/meta_blocks.h"
#include "table/cuckoo_table_builder.h"
#include "table/cuckoo_table_reader.h"
#include "table/cuckoo_table_factory.h"
#include "table/get_context.h"
#include "util/arena.h"
#include "util/random.h"
#include "util/string_util.h"
#include "util/testharness.h"
#include "util/testutil.h"
using GFLAGS::ParseCommandLineFlags;
using GFLAGS::SetUsageMessage;
DEFINE_string(file_dir, "", "Directory where the files will be created"
              " for benchmark. Added for using tmpfs.");
DEFINE_bool(enable_perf, false, "Run Benchmark Tests too.");
DEFINE_bool(write, false,
            "Should write new values to file in performance tests?");
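// Use the identity function as the first hash; this is cheaper than
// MurmurHash when user keys are already well-distributed integers.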
DEFINE_bool(identity_as_first_hash, true, "use identity as first hash");
namespace rocksdb {
namespace {
const uint32_t kNumHashFunc = 10;
// Methods, variables related to Hash functions.
std::unordered_map<std::string, std::vector<uint64_t>> hash_map;
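// Records num_hash_fun consecutive bucket ids (starting at bucket_id) as the
// precomputed hash values for key s, letting tests control key placement.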
void AddHashLookups(const std::string& s, uint64_t bucket_id,
                    uint32_t num_hash_fun) {
  std::vector<uint64_t> v;
  for (uint32_t i = 0; i < num_hash_fun; i++) {
    v.push_back(bucket_id + i);
  }
  hash_map[s] = v;
}

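// Hash callback handed to the builder/reader; returns the value precomputed
// by AddHashLookups (max_num_buckets is unused here).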
uint64_t GetSliceHash(const Slice& s, uint32_t index,
                      uint64_t max_num_buckets) {
  return hash_map[s.ToString()][index];
}
} // namespace

class CuckooReaderTest : public testing::Test {
 public:
  using testing::Test::SetUp;

  CuckooReaderTest() {
    options.allow_mmap_reads = true;
    env = options.env;
    env_options = EnvOptions(options);
  }

  void SetUp(int num) {
    num_items = num;
    hash_map.clear();
    keys.clear();
    keys.resize(num_items);
    user_keys.clear();
    user_keys.resize(num_items);
    values.clear();
    values.resize(num_items);
  }

  std::string NumToStr(int64_t i) {
    return std::string(reinterpret_cast<char*>(&i), sizeof(i));
  }

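  // Builds a cuckoo table file from keys/values, then opens it with
  // CuckooTableReader and verifies every key returns its value via Get().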
  void CreateCuckooFileAndCheckReader(
      const Comparator* ucomp = BytewiseComparator()) {
    std::unique_ptr<WritableFile> writable_file;
    ASSERT_OK(env->NewWritableFile(fname, &writable_file, env_options));
    unique_ptr<WritableFileWriter> file_writer(
        new WritableFileWriter(std::move(writable_file), env_options));
    CuckooTableBuilder builder(
        file_writer.get(), 0.9, kNumHashFunc, 100, ucomp, 2, false, false,
        GetSliceHash, 0 /* column_family_id */, kDefaultColumnFamilyName);
    ASSERT_OK(builder.status());
    for (uint32_t key_idx = 0; key_idx < num_items; ++key_idx) {
      builder.Add(Slice(keys[key_idx]), Slice(values[key_idx]));
      ASSERT_OK(builder.status());
      ASSERT_EQ(builder.NumEntries(), key_idx + 1);
    }
    ASSERT_OK(builder.Finish());
    ASSERT_EQ(num_items, builder.NumEntries());
    file_size = builder.FileSize();
    ASSERT_OK(file_writer->Close());

    // Check reader now.
    std::unique_ptr<RandomAccessFile> read_file;
    ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
    unique_ptr<RandomAccessFileReader> file_reader(
        new RandomAccessFileReader(std::move(read_file), fname));
    const ImmutableCFOptions ioptions(options);
    CuckooTableReader reader(ioptions, std::move(file_reader), file_size,
                             ucomp, GetSliceHash);
    ASSERT_OK(reader.status());
    // Assume no merge/deletion.
    for (uint32_t i = 0; i < num_items; ++i) {
      PinnableSlice value;
      GetContext get_context(ucomp, nullptr, nullptr, nullptr,
                             GetContext::kNotFound, Slice(user_keys[i]),
                             &value, nullptr, nullptr, nullptr, nullptr);
      ASSERT_OK(reader.Get(ReadOptions(), Slice(keys[i]), &get_context));
      ASSERT_STREQ(values[i].c_str(), value.data());
    }
  }

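  // Regenerates the internal keys; a zero sequence number mimics keys in a
  // bottommost-level file.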
  void UpdateKeys(bool with_zero_seqno) {
    for (uint32_t i = 0; i < num_items; i++) {
      ParsedInternalKey ikey(user_keys[i],
                             with_zero_seqno ? 0 : i + 1000, kTypeValue);
      keys[i].clear();
      AppendInternalKey(&keys[i], ikey);
    }
  }

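  // Exercises the reader's iterator: full forward and backward scans, Seek
  // from the middle, then the same Seek check with an arena-allocated
  // iterator.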
  void CheckIterator(const Comparator* ucomp = BytewiseComparator()) {
    std::unique_ptr<RandomAccessFile> read_file;
    ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
    unique_ptr<RandomAccessFileReader> file_reader(
        new RandomAccessFileReader(std::move(read_file), fname));
    const ImmutableCFOptions ioptions(options);
    CuckooTableReader reader(ioptions, std::move(file_reader), file_size,
                             ucomp, GetSliceHash);
    ASSERT_OK(reader.status());
    InternalIterator* it = reader.NewIterator(ReadOptions(), nullptr);
    ASSERT_OK(it->status());
    ASSERT_TRUE(!it->Valid());
    it->SeekToFirst();
    int cnt = 0;
    while (it->Valid()) {
      ASSERT_OK(it->status());
      ASSERT_TRUE(Slice(keys[cnt]) == it->key());
      ASSERT_TRUE(Slice(values[cnt]) == it->value());
      ++cnt;
      it->Next();
    }
    ASSERT_EQ(static_cast<uint32_t>(cnt), num_items);

    it->SeekToLast();
    cnt = static_cast<int>(num_items) - 1;
    ASSERT_TRUE(it->Valid());
    while (it->Valid()) {
      ASSERT_OK(it->status());
      ASSERT_TRUE(Slice(keys[cnt]) == it->key());
      ASSERT_TRUE(Slice(values[cnt]) == it->value());
      --cnt;
      it->Prev();
    }
    ASSERT_EQ(cnt, -1);

    cnt = static_cast<int>(num_items) / 2;
    it->Seek(keys[cnt]);
    while (it->Valid()) {
      ASSERT_OK(it->status());
      ASSERT_TRUE(Slice(keys[cnt]) == it->key());
      ASSERT_TRUE(Slice(values[cnt]) == it->value());
      ++cnt;
      it->Next();
    }
    ASSERT_EQ(static_cast<uint32_t>(cnt), num_items);
    delete it;

    Arena arena;
    it = reader.NewIterator(ReadOptions(), &arena);
    ASSERT_OK(it->status());
    ASSERT_TRUE(!it->Valid());
    it->Seek(keys[num_items / 2]);
    ASSERT_TRUE(it->Valid());
    ASSERT_OK(it->status());
    ASSERT_TRUE(keys[num_items / 2] == it->key());
    ASSERT_TRUE(values[num_items / 2] == it->value());
    ASSERT_OK(it->status());
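    // The iterator was allocated from the arena, so destroy it in place
    // rather than calling delete.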
    it->~InternalIterator();
  }

  std::vector<std::string> keys;
  std::vector<std::string> user_keys;
  std::vector<std::string> values;
  uint64_t num_items;
  std::string fname;
  uint64_t file_size;
  Options options;
  Env* env;
  EnvOptions env_options;
};

TEST_F(CuckooReaderTest, WhenKeyExists) {
  SetUp(kNumHashFunc);
  fname = test::TmpDir() + "/CuckooReader_WhenKeyExists";
  for (uint64_t i = 0; i < num_items; i++) {
    user_keys[i] = "key" + NumToStr(i);
    ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue);
    AppendInternalKey(&keys[i], ikey);
    values[i] = "value" + NumToStr(i);
    // Give disjoint hash values.
    AddHashLookups(user_keys[i], i, kNumHashFunc);
  }
  CreateCuckooFileAndCheckReader();
  // Last level file.
  UpdateKeys(true);
  CreateCuckooFileAndCheckReader();
  // Test with collision. Make all hash values collide.
  hash_map.clear();
  for (uint32_t i = 0; i < num_items; i++) {
    AddHashLookups(user_keys[i], 0, kNumHashFunc);
  }
  UpdateKeys(false);
  CreateCuckooFileAndCheckReader();
  // Last level file.
  UpdateKeys(true);
  CreateCuckooFileAndCheckReader();
}

TEST_F(CuckooReaderTest, WhenKeyExistsWithUint64Comparator) {
  SetUp(kNumHashFunc);
  fname = test::TmpDir() + "/CuckooReaderUint64_WhenKeyExists";
  for (uint64_t i = 0; i < num_items; i++) {
    user_keys[i].resize(8);
    memcpy(&user_keys[i][0], static_cast<void*>(&i), 8);
    ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue);
    AppendInternalKey(&keys[i], ikey);
    values[i] = "value" + NumToStr(i);
    // Give disjoint hash values.
    AddHashLookups(user_keys[i], i, kNumHashFunc);
  }
  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
  // Last level file.
  UpdateKeys(true);
  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
  // Test with collision. Make all hash values collide.
  hash_map.clear();
  for (uint32_t i = 0; i < num_items; i++) {
    AddHashLookups(user_keys[i], 0, kNumHashFunc);
  }
  UpdateKeys(false);
  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
  // Last level file.
  UpdateKeys(true);
  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
}

TEST_F(CuckooReaderTest, CheckIterator) {
  SetUp(2 * kNumHashFunc);
  fname = test::TmpDir() + "/CuckooReader_CheckIterator";
  for (uint64_t i = 0; i < num_items; i++) {
    user_keys[i] = "key" + NumToStr(i);
    ParsedInternalKey ikey(user_keys[i], 1000, kTypeValue);
    AppendInternalKey(&keys[i], ikey);
    values[i] = "value" + NumToStr(i);
    // Give disjoint hash values, in reverse order.
    AddHashLookups(user_keys[i], num_items - i - 1, kNumHashFunc);
  }
  CreateCuckooFileAndCheckReader();
  CheckIterator();
  // Last level file.
  UpdateKeys(true);
  CreateCuckooFileAndCheckReader();
  CheckIterator();
}

TEST_F(CuckooReaderTest, CheckIteratorUint64) {
  SetUp(2 * kNumHashFunc);
  fname = test::TmpDir() + "/CuckooReader_CheckIterator";
  for (uint64_t i = 0; i < num_items; i++) {
    user_keys[i].resize(8);
    memcpy(&user_keys[i][0], static_cast<void*>(&i), 8);
    ParsedInternalKey ikey(user_keys[i], 1000, kTypeValue);
    AppendInternalKey(&keys[i], ikey);
    values[i] = "value" + NumToStr(i);
    // Give disjoint hash values, in reverse order.
    AddHashLookups(user_keys[i], num_items - i - 1, kNumHashFunc);
  }
  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
  CheckIterator(test::Uint64Comparator());
  // Last level file.
  UpdateKeys(true);
  CreateCuckooFileAndCheckReader(test::Uint64Comparator());
  CheckIterator(test::Uint64Comparator());
}

TEST_F(CuckooReaderTest, WhenKeyNotFound) {
  // Add keys with colliding hash values.
  SetUp(kNumHashFunc);
  fname = test::TmpDir() + "/CuckooReader_WhenKeyNotFound";
  for (uint64_t i = 0; i < num_items; i++) {
    user_keys[i] = "key" + NumToStr(i);
    ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue);
    AppendInternalKey(&keys[i], ikey);
    values[i] = "value" + NumToStr(i);
    // Make all hash values collide.
    AddHashLookups(user_keys[i], 0, kNumHashFunc);
  }
  auto* ucmp = BytewiseComparator();
  CreateCuckooFileAndCheckReader();
  std::unique_ptr<RandomAccessFile> read_file;
  ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
  unique_ptr<RandomAccessFileReader> file_reader(
      new RandomAccessFileReader(std::move(read_file), fname));
  const ImmutableCFOptions ioptions(options);
  CuckooTableReader reader(ioptions, std::move(file_reader), file_size, ucmp,
                           GetSliceHash);
  ASSERT_OK(reader.status());
  // Search for a key with colliding hash values.
  std::string not_found_user_key = "key" + NumToStr(num_items);
  std::string not_found_key;
  AddHashLookups(not_found_user_key, 0, kNumHashFunc);
  ParsedInternalKey ikey(not_found_user_key, 1000, kTypeValue);
  AppendInternalKey(&not_found_key, ikey);
  PinnableSlice value;
  GetContext get_context(ucmp, nullptr, nullptr, nullptr,
                         GetContext::kNotFound, Slice(not_found_key), &value,
                         nullptr, nullptr, nullptr, nullptr);
  ASSERT_OK(reader.Get(ReadOptions(), Slice(not_found_key), &get_context));
  ASSERT_TRUE(value.empty());
  ASSERT_OK(reader.status());
  // Search for a key with an independent hash value.
  std::string not_found_user_key2 = "key" + NumToStr(num_items + 1);
  AddHashLookups(not_found_user_key2, kNumHashFunc, kNumHashFunc);
  ParsedInternalKey ikey2(not_found_user_key2, 1000, kTypeValue);
  std::string not_found_key2;
  AppendInternalKey(&not_found_key2, ikey2);
  value.Reset();
  GetContext get_context2(ucmp, nullptr, nullptr, nullptr,
                          GetContext::kNotFound, Slice(not_found_key2), &value,
                          nullptr, nullptr, nullptr, nullptr);
  ASSERT_OK(reader.Get(ReadOptions(), Slice(not_found_key2), &get_context2));
  ASSERT_TRUE(value.empty());
  ASSERT_OK(reader.status());
  // Test read when key is unused key.
  std::string unused_key =
      reader.GetTableProperties()->user_collected_properties.at(
          CuckooTablePropertyNames::kEmptyKey);
  // Add hash values that map to empty buckets.
  AddHashLookups(ExtractUserKey(unused_key).ToString(),
                 kNumHashFunc, kNumHashFunc);
  value.Reset();
  GetContext get_context3(ucmp, nullptr, nullptr, nullptr,
                          GetContext::kNotFound, Slice(unused_key), &value,
                          nullptr, nullptr, nullptr, nullptr);
  ASSERT_OK(reader.Get(ReadOptions(), Slice(unused_key), &get_context3));
  ASSERT_TRUE(value.empty());
  ASSERT_OK(reader.status());
}

// Performance tests
namespace {
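// Generates the lookup keys used by the read-performance benchmark.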
void GetKeys(uint64_t num, std::vector<std::string>* keys) {
keys->clear();
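// Build the fixed 8-byte internal key suffix (sequence number 0, type
// kTypeValue) once; it is appended to every 8-byte user key generated below.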
IterKey k;
k.SetInternalKey("", 0, kTypeValue);
std::string internal_key_suffix = k.GetInternalKey().ToString();
ASSERT_EQ(static_cast<size_t>(8), internal_key_suffix.size());
for (uint64_t key_idx = 0; key_idx < num; ++key_idx) {
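// Each key is a 64-bit integer in native byte order. Using 2 * key_idx keeps
// all stored keys even, so odd integers are guaranteed to be absent from the
// table.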
uint64_t value = 2 * key_idx;
std::string new_key(reinterpret_cast<char*>(&value), sizeof(value));
new_key += internal_key_suffix;
keys->push_back(new_key);
}
}
std::string GetFileName(uint64_t num) {
if (FLAGS_file_dir.empty()) {
FLAGS_file_dir = test::TmpDir();
}
return FLAGS_file_dir + "/cuckoo_read_benchmark" +
ToString(num / 1000000) + "Mkeys";
}
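// For example, GetFileName(100000000) returns
// FLAGS_file_dir + "/cuckoo_read_benchmark100Mkeys".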
// Create a file at the last level, since we are only interested in measuring
// the performance of last-level files.
void WriteFile(const std::vector<std::string>& keys,
const uint64_t num, double hash_ratio) {
Options options;
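// CuckooTableReader requires mmaped reads and refuses to open the file
// otherwise, so enable them explicitly.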
options.allow_mmap_reads = true;
Env* env = options.env;
EnvOptions env_options = EnvOptions(options);
std::string fname = GetFileName(num);
std::unique_ptr<WritableFile> writable_file;
ASSERT_OK(env->NewWritableFile(fname, &writable_file, env_options));
std::unique_ptr<WritableFileWriter> file_writer(
new WritableFileWriter(std::move(writable_file), env_options));
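// CuckooTableBuilder arguments, in order: target hash table ratio, max number
// of hash functions to try (64), max cuckoo displacement depth (1000),
// user-key comparator, cuckoo block size (5), use_module_hash (false, so the
// bucket count is a power of two and the modulo reduces to a bitwise AND),
// identity_as_first_hash, optional custom hash function (nullptr), column
// family id, and column family name.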
CuckooTableBuilder builder(
file_writer.get(), hash_ratio, 64, 1000, test::Uint64Comparator(), 5,
false, FLAGS_identity_as_first_hash, nullptr, 0 /* column_family_id */,
kDefaultColumnFamilyName);
ASSERT_OK(builder.status());
for (uint64_t key_idx = 0; key_idx < num; ++key_idx) {
// The value is just the first four bytes of the key.
builder.Add(Slice(keys[key_idx]), Slice(&keys[key_idx][0], 4));
ASSERT_EQ(builder.NumEntries(), key_idx + 1);
ASSERT_OK(builder.status());
}
ASSERT_OK(builder.Finish());
ASSERT_EQ(num, builder.NumEntries());
ASSERT_OK(file_writer->Close());
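// Re-open the file that was just written and verify it can be read back
// through CuckooTableReader.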
uint64_t file_size;
ASSERT_OK(env->GetFileSize(fname, &file_size));
std::unique_ptr<RandomAccessFile> read_file;
ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
std::unique_ptr<RandomAccessFileReader> file_reader(
new RandomAccessFileReader(std::move(read_file), fname));
const ImmutableCFOptions ioptions(options);
CuckooTableReader reader(ioptions, std::move(file_reader), file_size,
test::Uint64Comparator(), nullptr);
ASSERT_OK(reader.status());
ReadOptions r_options;
PinnableSlice value;
// Assume only the fast path is triggered
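// The context starts out as kNotFound; a successful Get() updates it and
// pins the result into `value`.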
GetContext get_context(nullptr, nullptr, nullptr, nullptr,
GetContext::kNotFound, Slice(), &value, nullptr,
nullptr, nullptr, nullptr);
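// Sanity-check the freshly written table: the value stored for each key is
// that key's first four bytes, so Get() must hand back exactly that prefix.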
for (uint64_t i = 0; i < num; ++i) {
value.Reset();
value.clear();
ASSERT_OK(reader.Get(r_options, Slice(keys[i]), &get_context));
ASSERT_TRUE(Slice(value) == Slice(&keys[i][0], 4));
}
}
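
// Benchmarks lookups against the pre-built table file. A typical invocation,
// as recorded in the original test plan (flags other than --enable_perf are
// optional):
//   ./cuckoo_table_reader_test --enable_perf --file_dir=/dev/shm --write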
void ReadKeys(uint64_t num, uint32_t batch_size) {
Options options;
options.allow_mmap_reads = true;
Env* env = options.env;
EnvOptions env_options = EnvOptions(options);
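// Open the pre-built table for `num` keys; allow_mmap_reads above keeps the
// benchmark reading through mmap rather than buffered I/O.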
std::string fname = GetFileName(num);
uint64_t file_size;
ASSERT_OK(env->GetFileSize(fname, &file_size));
std::unique_ptr<RandomAccessFile> read_file;
ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options));
std::unique_ptr<RandomAccessFileReader> file_reader(
new RandomAccessFileReader(std::move(read_file), fname));
const ImmutableCFOptions ioptions(options);
CuckooTableReader reader(ioptions, std::move(file_reader), file_size,
test::Uint64Comparator(), nullptr);
ASSERT_OK(reader.status());
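// Pull the cuckoo-specific properties out of the table so that utilization
// and the hash-function count can be reported next to the timings.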
const UserCollectedProperties user_props =
reader.GetTableProperties()->user_collected_properties;
const uint32_t num_hash_fun = *reinterpret_cast<const uint32_t*>(
user_props.at(CuckooTablePropertyNames::kNumHashFunc).data());
const uint64_t table_size = *reinterpret_cast<const uint64_t*>(
user_props.at(CuckooTablePropertyNames::kHashTableSize).data());
fprintf(stderr, "With %" PRIu64 " items, utilization is %.2f%%, number of"
" hash functions: %u.\n", num, num * 100.0 / (table_size), num_hash_fun);
ReadOptions r_options;
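// Regenerate the even keys (2 * i) the table was built from, then shuffle
// them so lookups arrive in random order.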
std::vector<uint64_t> keys;
keys.reserve(num);
for (uint64_t i = 0; i < num; ++i) {
keys.push_back(2 * i);
}
std::random_shuffle(keys.begin(), keys.end());
PinnableSlice value;
// Assume only the fast path is triggered
GetContext get_context(nullptr, nullptr, nullptr, nullptr,
GetContext::kNotFound, Slice(), &value, nullptr,
nullptr, nullptr, nullptr);
uint64_t start_time = env->NowMicros();
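// Batched mode: keys are processed in windows of batch_size so the reader
// can prefetch each window's buckets (via Prepare) ahead of the Gets.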
if (batch_size > 0) {
for (uint64_t i = 0; i < num; i += batch_size) {
for (uint64_t j = i; j < i + batch_size && j < num; ++j) {
reader.Prepare(Slice(reinterpret_cast<char*>(&keys[j]), 16));
}
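// Second pass over the same batch: the Prepare() calls above prefetch the
// buckets for these keys, so the Get() calls should mostly hit warm data.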
for (uint64_t j = i; j < i+batch_size && j < num; ++j) {
reader.Get(r_options, Slice(reinterpret_cast<char*>(&keys[j]), 16),
&get_context);
}
}
} else {
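// batch_size == 0: plain lookups, one Get() per key with no Prepare()
// prefetch pass.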
for (uint64_t i = 0; i < num; i++) {
reader.Get(r_options, Slice(reinterpret_cast<char*>(&keys[i]), 16),
&get_context);
}
}
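// NowMicros() returns microseconds, so time_per_op is in us/op and its
// reciprocal (printed below) is Mqps.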
float time_per_op = (env->NowMicros() - start_time) * 1.0f / num;
fprintf(stderr,
"Time taken per op is %.3fus (%.1f Mqps) with batch size of %u\n",
time_per_op, 1.0 / time_per_op, batch_size);
}
} // namespace.
TEST_F(CuckooReaderTest, TestReadPerformance) {
if (!FLAGS_enable_perf) {
return;
}
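// The benchmark only runs with --enable_perf; a typical invocation (per the
// benchmark notes) is:
//   ./cuckoo_table_reader_test --enable_perf --file_dir=/dev/shm --write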
double hash_ratio = 0.95;
// These numbers are chosen to have a hash table utilization close to
// 0.9, 0.75, 0.6 and 0.5 respectively.
// They all create 128 M buckets.
std::vector<uint64_t> nums = {120*1024*1024, 100*1024*1024, 80*1024*1024,
70*1024*1024};
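// With 128M buckets, these counts give utilizations of 93.75%, 78.12%,
// 62.50% and 54.69% respectively.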
#ifndef NDEBUG
  fprintf(stdout,
      "WARNING: Not compiled with -DNDEBUG. Performance tests may be slow.\n");
#endif
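  // Build (or reuse) one table file per size. A file is rewritten only when
  // --write is passed or no file from a previous run exists, so repeated
  // benchmark runs can skip the expensive build step.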
  for (uint64_t num : nums) {
    if (FLAGS_write ||
        Env::Default()->FileExists(GetFileName(num)).IsNotFound()) {
      std::vector<std::string> all_keys;
      GetKeys(num, &all_keys);
      WriteFile(all_keys, num, hash_ratio);
    }
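    // Measure point-lookup throughput at several Prepare() batch sizes;
    // a batch size of 0 means plain lookups with no batching or prefetching.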
    ReadKeys(num, 0);
    ReadKeys(num, 10);
    ReadKeys(num, 25);
    ReadKeys(num, 50);
    ReadKeys(num, 100);
    fprintf(stderr, "\n");
  }
}
}  // namespace rocksdb
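
// The cuckoo table format only supports little-endian platforms, so the
// whole test binary is skipped elsewhere.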
int main(int argc, char** argv) {
  if (rocksdb::port::kLittleEndian) {
    ::testing::InitGoogleTest(&argc, argv);
    ParseCommandLineFlags(&argc, &argv, true);
    return RUN_ALL_TESTS();
  } else {
    fprintf(stderr, "SKIPPED as Cuckoo table doesn't support Big Endian\n");
    return 0;
  }
}
#endif  // GFLAGS
#else
#include <stdio.h>
int main(int argc, char** argv) {
  fprintf(stderr,
          "SKIPPED as Cuckoo table is not supported in ROCKSDB_LITE\n");
  return 0;
}
#endif  // ROCKSDB_LITE