Add microbenchmarks for DB::GetMergeOperands()
(#9971)
Summary: The new microbenchmarks, DBGetMergeOperandsInMemtable and DBGetMergeOperandsInSstFile, correspond to the two different LSMs tested: all data in one memtable and all data in one SST file, respectively. Both cases are parameterized by thread count (1 or 8) and merge operands per key (1, 32, or 1024). The SST file case is additionally parameterized by whether data is in block cache or mmap'd memory. Pull Request resolved: https://github.com/facebook/rocksdb/pull/9971 Test Plan: ``` $ TEST_TMPDIR=/dev/shm/db_basic_bench/ ./db_basic_bench --benchmark_filter=DBGetMergeOperands The number of inputs is very large. DBGet will be repeated at least 192 times. The number of inputs is very large. DBGet will be repeated at least 192 times. 2022-05-09T13:15:40-07:00 Running ./db_basic_bench Run on (36 X 2570.91 MHz CPU s) CPU Caches: L1 Data 32 KiB (x18) L1 Instruction 32 KiB (x18) L2 Unified 1024 KiB (x18) L3 Unified 25344 KiB (x1) Load Average: 4.50, 4.33, 4.37 ---------------------------------------------------------------------------------------------------------------------------- Benchmark Time CPU Iterations UserCounters... 
---------------------------------------------------------------------------------------------------------------------------- DBGetMergeOperandsInMemtable/entries_per_key:1/threads:1 846 ns 846 ns 849893 db_size=0 DBGetMergeOperandsInMemtable/entries_per_key:32/threads:1 2436 ns 2436 ns 305779 db_size=0 DBGetMergeOperandsInMemtable/entries_per_key:1024/threads:1 77226 ns 77224 ns 8152 db_size=0 DBGetMergeOperandsInMemtable/entries_per_key:1/threads:8 116 ns 929 ns 779368 db_size=0 DBGetMergeOperandsInMemtable/entries_per_key:32/threads:8 330 ns 2644 ns 280824 db_size=0 DBGetMergeOperandsInMemtable/entries_per_key:1024/threads:8 12466 ns 99718 ns 7200 db_size=0 DBGetMergeOperandsInSstFile/entries_per_key:1/mmap:0/threads:1 1640 ns 1640 ns 461262 db_size=21.7826M DBGetMergeOperandsInSstFile/entries_per_key:1/mmap:1/threads:1 1693 ns 1693 ns 439936 db_size=21.7826M DBGetMergeOperandsInSstFile/entries_per_key:32/mmap:0/threads:1 3999 ns 3999 ns 172881 db_size=19.6981M DBGetMergeOperandsInSstFile/entries_per_key:32/mmap:1/threads:1 5544 ns 5543 ns 135657 db_size=19.6981M DBGetMergeOperandsInSstFile/entries_per_key:1024/mmap:0/threads:1 78767 ns 78761 ns 8395 db_size=19.6389M DBGetMergeOperandsInSstFile/entries_per_key:1024/mmap:1/threads:1 157242 ns 157238 ns 4495 db_size=19.6389M DBGetMergeOperandsInSstFile/entries_per_key:1/mmap:0/threads:8 231 ns 1848 ns 347768 db_size=21.7826M DBGetMergeOperandsInSstFile/entries_per_key:1/mmap:1/threads:8 214 ns 1715 ns 393312 db_size=21.7826M DBGetMergeOperandsInSstFile/entries_per_key:32/mmap:0/threads:8 596 ns 4767 ns 142088 db_size=19.6981M DBGetMergeOperandsInSstFile/entries_per_key:32/mmap:1/threads:8 720 ns 5757 ns 118200 db_size=19.6981M DBGetMergeOperandsInSstFile/entries_per_key:1024/mmap:0/threads:8 11613 ns 92460 ns 7344 db_size=19.6389M DBGetMergeOperandsInSstFile/entries_per_key:1024/mmap:1/threads:8 19989 ns 159908 ns 4440 db_size=19.6389M ``` Reviewed By: jay-zhuang Differential Revision: D36258861 Pulled By: ajkr 
fbshipit-source-id: 04b733e1cc3a4a70ed9baa894c50fdf96c0d6064
This commit is contained in:
parent
c5c58708db
commit
7b7a37c069
@ -15,6 +15,7 @@
|
||||
#include "table/block_based/block.h"
|
||||
#include "table/block_based/block_builder.h"
|
||||
#include "util/random.h"
|
||||
#include "utilities/merge_operators.h"
|
||||
|
||||
namespace ROCKSDB_NAMESPACE {
|
||||
|
||||
@ -32,7 +33,11 @@ class KeyGenerator {
|
||||
if (is_sequential_) {
|
||||
assert(next_sequential_key_ < max_key_);
|
||||
k = (next_sequential_key_ % max_key_) * MULTIPLIER + offset;
|
||||
next_sequential_key_++;
|
||||
if (next_sequential_key_ + 1 == max_key_) {
|
||||
next_sequential_key_ = 0;
|
||||
} else {
|
||||
next_sequential_key_++;
|
||||
}
|
||||
} else {
|
||||
k = (rnd_->Next() % max_key_) * MULTIPLIER + offset;
|
||||
}
|
||||
@ -786,6 +791,191 @@ static void SimpleGetWithPerfContext(benchmark::State& state) {
|
||||
|
||||
BENCHMARK(SimpleGetWithPerfContext)->Iterations(1000000);
|
||||
|
||||
static void DBGetMergeOperandsInMemtable(benchmark::State& state) {
  // Benchmark DB::GetMergeOperands() when every operand still resides in a
  // single memtable (nothing has been flushed). Parameterized by the number
  // of merge operands written per key (state.range(0)).
  constexpr uint64_t kDataLen = 16 << 20;  // 16MB
  constexpr uint64_t kValueLen = 64;
  constexpr uint64_t kNumEntries = kDataLen / kValueLen;
  const uint64_t kNumEntriesPerKey = state.range(0);
  const uint64_t kNumKeys = kNumEntries / kNumEntriesPerKey;

  // setup DB
  static std::unique_ptr<DB> db;

  Options options;
  options.merge_operator = MergeOperators::CreateStringAppendOperator();
  // Make memtable large enough that automatic flush will not be triggered.
  options.write_buffer_size = 2 * kDataLen;

  KeyGenerator sequential_key_gen(kNumKeys);
  auto rnd = Random(301 + state.thread_index());

  // Only the first thread creates and loads the shared DB; the other threads
  // proceed to the measurement loop (google-benchmark synchronizes threads at
  // loop entry).
  if (state.thread_index() == 0) {
    SetupDB(state, options, &db, "DBGetMergeOperandsInMemtable");

    // load db
    WriteOptions write_opts;
    write_opts.disableWAL = true;
    for (uint64_t entry = 0; entry < kNumEntries; ++entry) {
      Status s = db->Merge(write_opts, sequential_key_gen.Next(),
                           rnd.RandomString(static_cast<int>(kValueLen)));
      if (!s.ok()) {
        state.SkipWithError(s.ToString().c_str());
      }
    }
  }

  // Measurement loop: fetch all operands for a random key each iteration.
  KeyGenerator random_key_gen(kNumKeys);
  std::vector<PinnableSlice> value_operands(kNumEntriesPerKey);
  GetMergeOperandsOptions get_merge_ops_opts;
  get_merge_ops_opts.expected_max_number_of_operands =
      static_cast<int>(kNumEntriesPerKey);
  for (auto _ : state) {
    int num_value_operands = 0;
    Status s = db->GetMergeOperands(
        ReadOptions(), db->DefaultColumnFamily(), random_key_gen.Next(),
        value_operands.data(), &get_merge_ops_opts, &num_value_operands);
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
    }
    if (num_value_operands != static_cast<int>(kNumEntriesPerKey)) {
      state.SkipWithError("Unexpected number of merge operands found for key");
    }
  }

  if (state.thread_index() == 0) {
    TeardownDB(state, db, options, random_key_gen);
  }
}
|
||||
|
||||
static void DBGetMergeOperandsInSstFile(benchmark::State& state) {
  // Benchmark DB::GetMergeOperands() when all operands live in a single L0
  // SST file. Parameterized by merge operands per key (state.range(0)) and by
  // whether reads go through mmap'd file memory or the block cache
  // (state.range(1)).
  const uint64_t kDataLen = 16 << 20;  // 16MB
  const uint64_t kValueLen = 64;
  const uint64_t kNumEntries = kDataLen / kValueLen;
  const uint64_t kNumEntriesPerKey = state.range(0);
  const uint64_t kNumKeys = kNumEntries / kNumEntriesPerKey;
  // Explicit comparison avoids implicit int64 -> bool narrowing.
  const bool kMmap = state.range(1) != 0;

  // setup DB
  static std::unique_ptr<DB> db;

  BlockBasedTableOptions table_options;
  if (kMmap) {
    table_options.no_block_cache = true;
  } else {
    // Make block cache large enough that eviction will not be triggered.
    table_options.block_cache = NewLRUCache(2 * kDataLen);
  }

  Options options;
  if (kMmap) {
    options.allow_mmap_reads = true;
  }
  options.compression = kNoCompression;
  options.merge_operator = MergeOperators::CreateStringAppendOperator();
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  // Make memtable large enough that automatic flush will not be triggered.
  options.write_buffer_size = 2 * kDataLen;

  KeyGenerator sequential_key_gen(kNumKeys);
  auto rnd = Random(301 + state.thread_index());

  // Only the first thread creates and loads the shared DB.
  if (state.thread_index() == 0) {
    // DB dir name matches the benchmark name (was
    // "DBGetMergeOperandsInBlockCache", a leftover from an earlier name).
    SetupDB(state, options, &db, "DBGetMergeOperandsInSstFile");

    // load db
    //
    // Take a snapshot after each cycle of merges to ensure flush cannot
    // merge any entries.
    std::vector<const Snapshot*> snapshots;
    snapshots.resize(kNumEntriesPerKey);
    auto write_opts = WriteOptions();
    write_opts.disableWAL = true;
    for (uint64_t i = 0; i < kNumEntriesPerKey; i++) {
      for (uint64_t j = 0; j < kNumKeys; j++) {
        Status s = db->Merge(write_opts, sequential_key_gen.Next(),
                             rnd.RandomString(static_cast<int>(kValueLen)));
        if (!s.ok()) {
          state.SkipWithError(s.ToString().c_str());
        }
      }
      snapshots[i] = db->GetSnapshot();
    }

    // Flush to an L0 file; read back to prime the cache/mapped memory.
    // Check the flush Status like every other DB call here (it was silently
    // ignored before), since a failed flush invalidates the measurement.
    Status flush_status = db->Flush(FlushOptions());
    if (!flush_status.ok()) {
      state.SkipWithError(flush_status.ToString().c_str());
    }
    for (uint64_t i = 0; i < kNumKeys; ++i) {
      std::string value;
      Status s = db->Get(ReadOptions(), sequential_key_gen.Next(), &value);
      if (!s.ok()) {
        state.SkipWithError(s.ToString().c_str());
      }
    }

    // Snapshots are no longer needed once the flush has completed. (This
    // branch already runs only on thread 0, so the previous nested
    // thread_index() check was redundant.)
    for (uint64_t i = 0; i < kNumEntriesPerKey; ++i) {
      db->ReleaseSnapshot(snapshots[i]);
    }
  }

  // Measurement loop: fetch all operands for a random key each iteration.
  KeyGenerator random_key_gen(kNumKeys);
  std::vector<PinnableSlice> value_operands;
  value_operands.resize(kNumEntriesPerKey);
  GetMergeOperandsOptions get_merge_ops_opts;
  get_merge_ops_opts.expected_max_number_of_operands =
      static_cast<int>(kNumEntriesPerKey);
  for (auto _ : state) {
    int num_value_operands = 0;
    ReadOptions read_opts;
    // Skip checksum verification so the benchmark measures lookup/merge cost,
    // not CRC computation.
    read_opts.verify_checksums = false;
    Status s = db->GetMergeOperands(
        read_opts, db->DefaultColumnFamily(), random_key_gen.Next(),
        value_operands.data(), &get_merge_ops_opts, &num_value_operands);
    if (!s.ok()) {
      state.SkipWithError(s.ToString().c_str());
    }
    if (num_value_operands != static_cast<int>(kNumEntriesPerKey)) {
      state.SkipWithError("Unexpected number of merge operands found for key");
    }
  }

  if (state.thread_index() == 0) {
    TeardownDB(state, db, options, random_key_gen);
  }
}
|
||||
|
||||
static void DBGetMergeOperandsInMemtableArguments(
    benchmark::internal::Benchmark* b) {
  // Register one benchmark instance per merge-operand count.
  for (const int num_operands : {1, 32, 1024}) {
    b->Args({num_operands});
  }
  b->ArgNames({"entries_per_key"});
}
|
||||
|
||||
static void DBGetMergeOperandsInSstFileArguments(
    benchmark::internal::Benchmark* b) {
  // Cross product of merge-operand count and mmap on/off.
  for (const int num_operands : {1, 32, 1024}) {
    b->Args({num_operands, /*mmap=*/0});
    b->Args({num_operands, /*mmap=*/1});
  }
  b->ArgNames({"entries_per_key", "mmap"});
}
|
||||
|
||||
// Register each GetMergeOperands benchmark at 1 and 8 threads, sweeping the
// argument combinations produced by the *Arguments helpers above.
BENCHMARK(DBGetMergeOperandsInMemtable)
    ->Threads(1)
    ->Apply(DBGetMergeOperandsInMemtableArguments);
BENCHMARK(DBGetMergeOperandsInMemtable)
    ->Threads(8)
    ->Apply(DBGetMergeOperandsInMemtableArguments);
BENCHMARK(DBGetMergeOperandsInSstFile)
    ->Threads(1)
    ->Apply(DBGetMergeOperandsInSstFileArguments);
BENCHMARK(DBGetMergeOperandsInSstFile)
    ->Threads(8)
    ->Apply(DBGetMergeOperandsInSstFileArguments);
|
||||
|
||||
std::string GenerateKey(int primary_key, int secondary_key, int padding_size,
|
||||
Random* rnd) {
|
||||
char buf[50];
|
||||
|
Loading…
Reference in New Issue
Block a user