Improvements to benchmark.sh script (#8346)

Summary:
1.  Fix printing of stats when there are no writes (wamp=0). Previously this caused a divide-by-zero error in bc.

2.  Added the multireadrandom command as a valid benchmark target.

3.  Added the ability to pass additional command-line options through to db_bench. For example, running benchmark.sh readrandom --mmap_read forwards the --mmap_read option to db_bench.
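
A hypothetical invocation showing the pass-through (DB_DIR, WAL_DIR, and NUM_KEYS are environment variables the script already reads; everything after the job name is forwarded verbatim to db_bench):

    # readrandom is the benchmark job; the trailing flags go straight to db_bench.
    DB_DIR=/tmp/rocksdb-bench WAL_DIR=/tmp/rocksdb-bench-wal NUM_KEYS=10000000 \
      ./benchmark.sh readrandom --mmap_read=1 --cache_size=$((16 * 1024 * 1024 * 1024))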

Pull Request resolved: https://github.com/facebook/rocksdb/pull/8346

Reviewed By: zhichao-cao

Differential Revision: D29500436

Pulled By: mrambacher

fbshipit-source-id: 54e90708aae9133be3a903e35efdf8f8abbd86fa
Author: mrambacher
Date: 2021-07-12 12:17:32 -07:00
Committer: Facebook GitHub Bot
Parent: 955b80e84f
Commit: da90e23998
2 changed files with 53 additions and 18 deletions

tools/benchmark.sh

@@ -2,17 +2,20 @@
 # Copyright (c) Facebook, Inc. and its affiliates.  All Rights Reserved.
 # REQUIRE: db_bench binary exists in the current directory
-if [ $# -ne 1 ]; then
+if [ $# -lt 1 ]; then
   echo -n "./benchmark.sh [bulkload/fillseq/overwrite/filluniquerandom/"
   echo "readrandom/readwhilewriting/readwhilemerging/updaterandom/"
-  echo "mergerandom/randomtransaction/compact]"
+  echo "mergerandom/randomtransaction/compact/multireadrandom]"
   exit 0
 fi
+bench_cmd=$1
+shift
+bench_args=$*
 
 # Make it easier to run only the compaction test. Getting valid data requires
 # a number of iterations and having an ability to run the test separately from
 # rest of the benchmarks helps.
-if [ "$COMPACTION_TEST" == "1" -a "$1" != "universal_compaction" ]; then
+if [ "$COMPACTION_TEST" == "1" -a "$bench_cmd" != "universal_compaction" ]; then
   echo "Skipping $1 because it's not a compaction test."
   exit 0
 fi
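
A quick stand-alone sketch (not from the script) of the argument handling the hunk above introduces: the first positional argument is consumed as the job name, and whatever remains after shift becomes the pass-through string for db_bench.

    # Hypothetical demo:  ./demo.sh readrandom --mmap_read=1 --duration=300
    bench_cmd=$1    # -> readrandom
    shift           # drop the job name
    bench_args=$*   # -> --mmap_read=1 --duration=300
    echo "job=$bench_cmd extra_db_bench_args=$bench_args"
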
@@ -97,7 +100,9 @@ const_params="
   \
   --memtablerep=skip_list \
   --bloom_bits=10 \
-  --open_files=-1"
+  --open_files=-1 \
+  \
+  $bench_args"
 
 l0_config="
   --level0_file_num_compaction_trigger=4 \
@@ -107,23 +112,24 @@ if [ $duration -gt 0 ]; then
   const_params="$const_params --duration=$duration"
 fi
 
-params_w="$const_params \
-          $l0_config \
+params_w="$l0_config \
           --max_background_compactions=16 \
           --max_write_buffer_number=8 \
-          --max_background_flushes=7"
+          --max_background_flushes=7 \
+          $const_params"
 
-params_bulkload="$const_params \
-                 --max_background_compactions=16 \
+params_bulkload="--max_background_compactions=16 \
                  --max_write_buffer_number=8 \
                  --allow_concurrent_memtable_write=false \
                  --max_background_flushes=7 \
                  --level0_file_num_compaction_trigger=$((10 * M)) \
                  --level0_slowdown_writes_trigger=$((10 * M)) \
-                 --level0_stop_writes_trigger=$((10 * M))"
+                 --level0_stop_writes_trigger=$((10 * M)) \
+                 $const_params "
 
-params_fillseq="$params_w \
-                --allow_concurrent_memtable_write=false"
+params_fillseq="--allow_concurrent_memtable_write=false \
+                $params_w "
 
 #
 # Tune values for level and universal compaction.
 # For universal compaction, these level0_* options mean total sorted of runs in
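
The reordering above is what lets the pass-through options act as overrides: $bench_args is appended at the end of $const_params, and $const_params now comes last in params_w, params_bulkload, and params_fillseq, so user-supplied flags land after the script defaults on the db_bench command line. Assuming gflags' usual last-occurrence-wins handling of repeated flags, a hypothetical example of the effect:

    # const_params already contains --bloom_bits=10, but the pass-through flag
    # appears later on the db_bench command line, so it wins.
    DB_DIR=/tmp/db WAL_DIR=/tmp/wal ./benchmark.sh readrandom --bloom_bits=16
    # db_bench effectively sees: ... --bloom_bits=10 ... --bloom_bits=16
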
@@ -160,7 +166,14 @@ function summarize_result {
   lo_wgb=$( grep "^ L0" $test_out | tail -1 | awk '{ print $9 }' )
   sum_wgb=$( grep "^ Sum" $test_out | tail -1 | awk '{ print $9 }' )
   sum_size=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.1f", $3 / 1024.0 }' )
+  if [ "$lo_wgb" = "" ]; then
+    lo_wgb="0.0"
+  fi
+  if [ "$lo_wgb" = "0.0" ]; then
+    wamp="0.0"
+  else
   wamp=$( echo "scale=1; $sum_wgb / $lo_wgb" | bc )
+  fi
   wmb_ps=$( echo "scale=1; ( $sum_wgb * 1024.0 ) / $uptime" | bc )
   usecs_op=$( grep ^${bench_name} $test_out | awk '{ printf "%.1f", $3 }' )
   p50=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $3 }' )
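
For reference, the failure mode the new guard avoids: read-only benchmarks write no L0 data, so lo_wgb comes back empty from the grep, and feeding that straight into bc blows up. A small stand-alone repro (not part of the script):

    lo_wgb=""                              # what a read-only run used to produce
    echo "scale=1; 3.5 / $lo_wgb" | bc     # expands to "3.5 / " -> bc syntax error
    lo_wgb="0.0"
    echo "scale=1; 3.5 / $lo_wgb" | bc     # bc runtime error: divide by zero
    # The hunk above maps both cases to wamp="0.0" instead of calling bc.
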
@@ -377,6 +390,21 @@ function run_readrandom {
   summarize_result $output_dir/${out_name} readrandom.t${num_threads} readrandom
 }
 
+function run_multireadrandom {
+  echo "Multi-Reading $num_keys random keys"
+
+  out_name="benchmark_multireadrandom.t${num_threads}.log"
+  cmd="./db_bench --benchmarks=multireadrandom \
+       --use_existing_db=1 \
+       --threads=$num_threads \
+       --batch_size=10 \
+       $params_w \
+       --seed=$( date +%s ) \
+       2>&1 | tee -a $output_dir/${out_name}"
+  echo $cmd | tee $output_dir/${out_name}
+  eval $cmd
+  summarize_result $output_dir/${out_name} multireadrandom.t${num_threads} multireadrandom
+}
+
 function run_readwhile {
   operation=$1
   echo "Reading $num_keys random keys while $operation"
@@ -455,7 +483,7 @@ schedule="$output_dir/schedule.txt"
 echo "===== Benchmark ====="
 
 # Run!!!
-IFS=',' read -a jobs <<< $1
+IFS=',' read -a jobs <<< $bench_cmd
 # shellcheck disable=SC2068
 for job in ${jobs[@]}; do
@@ -486,6 +514,8 @@ for job in ${jobs[@]}; do
     run_filluniquerandom
   elif [ $job = readrandom ]; then
     run_readrandom
+  elif [ $job = multireadrandom ]; then
+    run_multireadrandom
   elif [ $job = fwdrange ]; then
     run_range $job false
   elif [ $job = revrange ]; then
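
Because the dispatch loop splits $bench_cmd on commas, the new job can be chained with a load step in a single invocation; a hypothetical example (paths and key count are illustrative):

    # Load unique random keys, then run the new multi-read benchmark on them.
    DB_DIR=/data/bench WAL_DIR=/data/bench-wal NUM_KEYS=20000000 \
      ./benchmark.sh filluniquerandom,multireadrandom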

tools/db_bench_tool.cc

@@ -5461,6 +5461,7 @@ class Benchmark {
   // Returns the total number of keys found.
   void MultiReadRandom(ThreadState* thread) {
     int64_t read = 0;
+    int64_t bytes = 0;
     int64_t num_multireads = 0;
     int64_t found = 0;
     ReadOptions options(FLAGS_verify_checksum, true);
@@ -5511,6 +5512,7 @@ class Benchmark {
         num_multireads++;
         for (int64_t i = 0; i < entries_per_batch_; ++i) {
           if (statuses[i].ok()) {
+            bytes += keys[i].size() + values[i].size() + user_timestamp_size_;
             ++found;
           } else if (!statuses[i].IsNotFound()) {
             fprintf(stderr, "MultiGet returned an error: %s\n",
@@ -5526,6 +5528,8 @@ class Benchmark {
         num_multireads++;
         for (int64_t i = 0; i < entries_per_batch_; ++i) {
           if (stat_list[i].ok()) {
+            bytes +=
+                keys[i].size() + pin_values[i].size() + user_timestamp_size_;
             ++found;
           } else if (!stat_list[i].IsNotFound()) {
             fprintf(stderr, "MultiGet returned an error: %s\n",
@@ -5548,6 +5552,7 @@ class Benchmark {
     char msg[100];
     snprintf(msg, sizeof(msg), "(%" PRIu64 " of %" PRIu64 " found)",
              found, read);
+    thread->stats.AddBytes(bytes);
     thread->stats.AddMessage(msg);
   }
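
With bytes accumulated and reported through thread->stats.AddBytes, the multireadrandom stats line now carries a throughput figure alongside ops/sec. An illustrative direct db_bench run that exercises this path (flag values are arbitrary; --batch_size controls how many keys each MultiGet call fetches):

    # Fill the DB, then issue batched MultiGets across 8 threads.
    ./db_bench --benchmarks=fillseq,multireadrandom \
               --num=1000000 \
               --batch_size=10 \
               --threads=8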