#!/bin/bash
# REQUIRE: db_bench binary exists in the current directory

if [ $# -ne 1 ]; then
  echo -n "./benchmark.sh [bulkload/fillseq_disable_wal/fillseq_enable_wal/overwrite/"
  echo -n "filluniquerandom/readrandom/fwdrange/revrange/readwhilewriting/"
  echo -n "readwhilemerging/fwdrangewhilewriting/revrangewhilewriting/"
  echo -n "fwdrangewhilemerging/revrangewhilemerging/updaterandom/mergerandom/"
  echo "randomtransaction/universal_compaction/debug]"
  exit 0
fi
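
# Example invocation (paths are illustrative only):
#   DB_DIR=/tmp WAL_DIR=/tmp OUTPUT_DIR=/tmp/b \
#     ./benchmark.sh bulkload,readrandom,readwhilewriting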

# size constants
K=1024
M=$((1024 * K))
G=$((1024 * M))

if [ -z "$DB_DIR" ]; then
  echo "DB_DIR is not defined"
  exit 1
fi

if [ -z "$WAL_DIR" ]; then
  echo "WAL_DIR is not defined"
  exit 1
fi

output_dir=${OUTPUT_DIR:-/tmp/}
if [ ! -d "$output_dir" ]; then
  mkdir -p "$output_dir"
fi

# all multithreaded tests run with sync=1 unless
# $DB_BENCH_NO_SYNC is defined
syncval="1"
if [ ! -z "$DB_BENCH_NO_SYNC" ]; then
  echo "Turning sync off for all multithreaded tests"
  syncval="0";
fi

num_threads=${NUM_THREADS:-16}
mb_written_per_sec=${MB_WRITE_PER_SEC:-0}
# Only for tests that do range scans
num_nexts_per_seek=${NUM_NEXTS_PER_SEEK:-10}
cache_size=${CACHE_SIZE:-$((1 * G))}
duration=${DURATION:-0}

num_keys=${NUM_KEYS:-$((1 * G))}
key_size=20
value_size=${VALUE_SIZE:-400}
block_size=${BLOCK_SIZE:-8192}
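
# All of the knobs above can be overridden from the environment, for example
# (values are illustrative only):
#   NUM_KEYS=10000000 VALUE_SIZE=100 NUM_THREADS=8 DURATION=300 \
#     DB_DIR=/tmp WAL_DIR=/tmp ./benchmark.sh readwhilewriting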

const_params="
  --db=$DB_DIR \
  --wal_dir=$WAL_DIR \
  --disable_data_sync=0 \
  \
  --num=$num_keys \
  --num_levels=6 \
  --key_size=$key_size \
  --value_size=$value_size \
  --block_size=$block_size \
  --cache_size=$cache_size \
  --cache_numshardbits=6 \
  --compression_type=snappy \
  --min_level_to_compress=3 \
  --compression_ratio=0.5 \
  --level_compaction_dynamic_level_bytes=true \
  --bytes_per_sync=$((8 * M)) \
  --cache_index_and_filter_blocks=0 \
  --benchmark_write_rate_limit=$(( 1024 * 1024 * $mb_written_per_sec )) \
  \
  --hard_rate_limit=3 \
  --rate_limit_delay_max_milliseconds=1000000 \
  --write_buffer_size=$((128 * M)) \
  --max_write_buffer_number=8 \
  --target_file_size_base=$((128 * M)) \
  --max_bytes_for_level_base=$((1 * G)) \
  \
  --verify_checksum=1 \
  --delete_obsolete_files_period_micros=$((60 * M)) \
  --max_grandparent_overlap_factor=8 \
  --max_bytes_for_level_multiplier=8 \
  \
  --statistics=0 \
  --stats_per_interval=1 \
  --stats_interval_seconds=60 \
  --histogram=1 \
  \
  --memtablerep=skip_list \
  --bloom_bits=10 \
  --open_files=-1"
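
# const_params is included in every db_bench invocation below (via params_w or
# params_bulkload); the per-test functions layer workload-specific flags on top.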

l0_config="
  --level0_file_num_compaction_trigger=4 \
  --level0_slowdown_writes_trigger=12 \
  --level0_stop_writes_trigger=20"

if [ $duration -gt 0 ]; then
  const_params="$const_params --duration=$duration"
fi

params_w="$const_params $l0_config --max_background_compactions=16 --max_background_flushes=7"
params_bulkload="$const_params --max_background_compactions=16 --max_background_flushes=7 \
                 --level0_file_num_compaction_trigger=$((10 * M)) \
                 --level0_slowdown_writes_trigger=$((10 * M)) \
                 --level0_stop_writes_trigger=$((10 * M))"
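
# params_bulkload sets the L0 trigger thresholds so high that L0-based write
# slowdowns and stops are effectively disabled during the load phase;
# run_bulkload issues an explicit compaction afterwards.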

function summarize_result {
  test_out=$1
  test_name=$2
  bench_name=$3

  # Note that this function assumes that the benchmark executes long enough so
  # that "Compaction Stats" is written to stdout at least once. If that doesn't
  # happen then the empty output from grep when searching for "Sum" will cause
  # syntax errors.
  uptime=$( grep ^Uptime\(secs $test_out | tail -1 | awk '{ printf "%.0f", $2 }' )
  stall_time=$( grep "^Cumulative stall" $test_out | tail -1 | awk '{ print $3 }' )
  stall_pct=$( grep "^Cumulative stall" $test_out | tail -1 | awk '{ print $5 }' )
  ops_sec=$( grep ^${bench_name} $test_out | awk '{ print $5 }' )
  mb_sec=$( grep ^${bench_name} $test_out | awk '{ print $7 }' )
  lo_wgb=$( grep "^ L0" $test_out | tail -1 | awk '{ print $8 }' )
  sum_wgb=$( grep "^ Sum" $test_out | tail -1 | awk '{ print $8 }' )
  sum_size=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.1f", $3 / 1024.0 }' )
  wamp=$( echo "scale=1; $sum_wgb / $lo_wgb" | bc )
  wmb_ps=$( echo "scale=1; ( $sum_wgb * 1024.0 ) / $uptime" | bc )
  usecs_op=$( grep ^${bench_name} $test_out | awk '{ printf "%.1f", $3 }' )
  p50=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $3 }' )
  p75=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $5 }' )
  p99=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $7 }' )
  p999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $9 }' )
  p9999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $11 }' )
  echo -e "$ops_sec\t$mb_sec\t$sum_size\t$lo_wgb\t$sum_wgb\t$wamp\t$wmb_ps\t$usecs_op\t$p50\t$p75\t$p99\t$p999\t$p9999\t$uptime\t$stall_time\t$stall_pct\t$test_name" \
    >> $output_dir/report.txt
}
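
# Each call appends one tab-separated line to $output_dir/report.txt whose
# columns match the header echoed at the end of this script:
#   ops/sec mb/sec Size-GB L0_MB Sum_GB W-Amp W-MB/s usec/op p50 p75 p99
#   p99.9 p99.99 Uptime Stall-time Stall% Test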

function run_bulkload {
  # This runs with a vector memtable and the WAL disabled to load faster.
  # It is still crash safe and the client can discover where to restart a
  # load after a crash. I think this is a good way to load.
  echo "Bulk loading $num_keys random keys"
  cmd="./db_bench --benchmarks=fillrandom \
       --use_existing_db=0 \
       --disable_auto_compactions=1 \
       --sync=0 \
       $params_bulkload \
       --threads=1 \
       --memtablerep=vector \
       --disable_wal=1 \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/benchmark_bulkload_fillrandom.log"
  echo $cmd | tee $output_dir/benchmark_bulkload_fillrandom.log
  eval $cmd
  summarize_result $output_dir/benchmark_bulkload_fillrandom.log bulkload fillrandom
  echo "Compacting..."
  cmd="./db_bench --benchmarks=compact \
       --use_existing_db=1 \
       --disable_auto_compactions=1 \
       --sync=0 \
       $params_w \
       --threads=1 \
       2>&1 | tee -a $output_dir/benchmark_bulkload_compact.log"
  echo $cmd | tee $output_dir/benchmark_bulkload_compact.log
  eval $cmd
}

function run_univ_compaction_worker {
  # Worker function intended to be called from run_univ_compaction.
  echo -e "\nCompacting ...\n"

  compact_output_file=$output_dir/benchmark_univ_compact_sub_$3.t${num_threads}.s${syncval}.log

  # The essence of the command is borrowed from run_change overwrite with
  # compaction specific options being added.
  cmd="./db_bench --benchmarks=overwrite \
       --use_existing_db=1 \
       --sync=$syncval \
       $params_w \
       --threads=$num_threads \
       --merge_operator=\"put\" \
       --seed=$( date +%s ) \
       --compaction_measure_io_stats=$1 \
       --compaction_style=$2 \
       --subcompactions=$3 \
       2>&1 | tee -a $compact_output_file"
  echo $cmd | tee $compact_output_file
  eval $cmd

  summarize_result $compact_output_file univ_compact_sub_comp_$3 overwrite
}
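
# run_univ_compaction below calls the worker with $1=compaction_measure_io_stats,
# $2=compaction_style and $3=number of subcompactions (also part of the log
# file name).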

function run_univ_compaction {
  # Always ask for I/O statistics to be measured.
  io_stats=1

  # Values: kCompactionStyleLevel = 0x0, kCompactionStyleUniversal = 0x1.
  compaction_style=1

  # Get a basic understanding of the impact of scaling out the subcompactions
  # by allowing the usage of { 1, 2, 4, 8, 16 } threads for different runs.
  subcompactions=("1" "2" "4" "8" "16")

  # Have a separate suffix for each experiment so that separate results will
  # be persisted.
  log_suffix=1

  # Do the real work of running various experiments.
  for ((i=0; i < ${#subcompactions[@]}; i++))
  do
    run_univ_compaction_worker $io_stats $compaction_style ${subcompactions[$i]} $log_suffix
    ((log_suffix++))
  done
}

function run_fillseq {
  # This runs with a vector memtable. WAL can be either disabled or enabled
  # depending on the input parameter (1 for disabled, 0 for enabled). The main
  # benefit behind disabling WAL is to make loading faster. It is still crash
  # safe and the client can discover where to restart a load after a crash. I
  # think this is a good way to load.

  # Make sure that we'll have unique names for all the files so that data
  # won't be overwritten.
  if [ $1 == 1 ]; then
    log_file_name=$output_dir/benchmark_fillseq.wal_disabled.v${value_size}.log
    test_name=fillseq.wal_disabled.v${value_size}
  else
    log_file_name=$output_dir/benchmark_fillseq.wal_enabled.v${value_size}.log
    test_name=fillseq.wal_enabled.v${value_size}
  fi

  echo "Loading $num_keys keys sequentially"
  cmd="./db_bench --benchmarks=fillseq \
       --use_existing_db=0 \
       --sync=0 \
       $params_w \
       --min_level_to_compress=0 \
       --threads=1 \
       --memtablerep=vector \
       --disable_wal=$1 \
       --seed=$( date +%s ) \
       2>&1 | tee -a $log_file_name"
  echo $cmd | tee $log_file_name
  eval $cmd

  # The constant "fillseq" which we pass to db_bench is the benchmark name.
  summarize_result $log_file_name $test_name fillseq
}
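
# The fillseq_disable_wal job runs "run_fillseq 1" and fillseq_enable_wal runs
# "run_fillseq 0" (see the job dispatch loop at the bottom of the script).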

function run_change {
  operation=$1
  echo "Do $num_keys random $operation"
  out_name="benchmark_${operation}.t${num_threads}.s${syncval}.log"
  cmd="./db_bench --benchmarks=$operation \
       --use_existing_db=1 \
       --sync=$syncval \
       $params_w \
       --threads=$num_threads \
       --merge_operator=\"put\" \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} ${operation}.t${num_threads}.s${syncval} $operation
}
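
# run_change drives the overwrite, updaterandom and mergerandom jobs; the
# operation name doubles as the db_bench benchmark name.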

function run_filluniquerandom {
  echo "Loading $num_keys unique keys randomly"
  cmd="./db_bench --benchmarks=filluniquerandom \
       --use_existing_db=0 \
       --sync=0 \
       $params_w \
       --threads=1 \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/benchmark_filluniquerandom.log"
  echo $cmd | tee $output_dir/benchmark_filluniquerandom.log
  eval $cmd
  summarize_result $output_dir/benchmark_filluniquerandom.log filluniquerandom filluniquerandom
}

function run_readrandom {
  echo "Reading $num_keys random keys"
  out_name="benchmark_readrandom.t${num_threads}.log"
  cmd="./db_bench --benchmarks=readrandom \
       --use_existing_db=1 \
       $params_w \
       --threads=$num_threads \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} readrandom.t${num_threads} readrandom
}

function run_readwhile {
  operation=$1
  echo "Reading $num_keys random keys while $operation"
  out_name="benchmark_readwhile${operation}.t${num_threads}.log"
  cmd="./db_bench --benchmarks=readwhile${operation} \
       --use_existing_db=1 \
       --sync=$syncval \
       $params_w \
       --threads=$num_threads \
       --merge_operator=\"put\" \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} readwhile${operation}.t${num_threads} readwhile${operation}
}
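
# Used by the readwhilewriting and readwhilemerging jobs.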

function run_rangewhile {
  operation=$1
  full_name=$2
  reverse_arg=$3
  out_name="benchmark_${full_name}.t${num_threads}.log"
  echo "Range scan $num_keys random keys while ${operation} for reverse_iter=${reverse_arg}"
  cmd="./db_bench --benchmarks=seekrandomwhile${operation} \
       --use_existing_db=1 \
       --sync=$syncval \
       $params_w \
       --threads=$num_threads \
       --merge_operator=\"put\" \
       --seek_nexts=$num_nexts_per_seek \
       --reverse_iterator=$reverse_arg \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} ${full_name}.t${num_threads} seekrandomwhile${operation}
}

function run_range {
  full_name=$1
  reverse_arg=$2
  out_name="benchmark_${full_name}.t${num_threads}.log"
  echo "Range scan $num_keys random keys for reverse_iter=${reverse_arg}"
  cmd="./db_bench --benchmarks=seekrandom \
       --use_existing_db=1 \
       $params_w \
       --threads=$num_threads \
       --seek_nexts=$num_nexts_per_seek \
       --reverse_iterator=$reverse_arg \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} ${full_name}.t${num_threads} seekrandom
}
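
# run_range backs the fwdrange and revrange jobs; run_rangewhile backs the
# fwdrangewhile*/revrangewhile* jobs. The reverse_arg parameter is passed
# through as --reverse_iterator (false = forward scan, true = reverse scan).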

function run_randomtransaction {
  echo "Running randomtransaction with $num_keys keys"
  cmd="./db_bench $params_w --benchmarks=randomtransaction \
       --num=$num_keys \
       --transaction_db \
       --threads=5 \
       --transaction_sets=5 \
       2>&1 | tee -a $output_dir/benchmark_randomtransaction.log"
  echo $cmd | tee $output_dir/benchmark_randomtransaction.log
  eval $cmd
}

function now() {
  echo `date +"%s"`
}

report="$output_dir/report.txt"
schedule="$output_dir/schedule.txt"
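
# summarize_result appends per-test results to $report; per-job start and
# finish times are appended to $schedule below.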

echo "===== Benchmark ====="

# Run!!!
IFS=',' read -a jobs <<< $1
for job in ${jobs[@]}; do

  if [ $job != debug ]; then
    echo "Start $job at `date`" | tee -a $schedule
  fi

  start=$(now)
  if [ $job = bulkload ]; then
    run_bulkload
  elif [ $job = fillseq_disable_wal ]; then
    run_fillseq 1
  elif [ $job = fillseq_enable_wal ]; then
    run_fillseq 0
  elif [ $job = overwrite ]; then
    run_change overwrite
  elif [ $job = updaterandom ]; then
    run_change updaterandom
  elif [ $job = mergerandom ]; then
    run_change mergerandom
  elif [ $job = filluniquerandom ]; then
    run_filluniquerandom
  elif [ $job = readrandom ]; then
    run_readrandom
  elif [ $job = fwdrange ]; then
    run_range $job false
  elif [ $job = revrange ]; then
    run_range $job true
  elif [ $job = readwhilewriting ]; then
    run_readwhile writing
  elif [ $job = readwhilemerging ]; then
    run_readwhile merging
  elif [ $job = fwdrangewhilewriting ]; then
    run_rangewhile writing $job false
  elif [ $job = revrangewhilewriting ]; then
    run_rangewhile writing $job true
  elif [ $job = fwdrangewhilemerging ]; then
    run_rangewhile merging $job false
  elif [ $job = revrangewhilemerging ]; then
    run_rangewhile merging $job true
  elif [ $job = randomtransaction ]; then
    run_randomtransaction
  elif [ $job = universal_compaction ]; then
    run_univ_compaction
  elif [ $job = debug ]; then
    num_keys=1000; # debug
    echo "Setting num_keys to $num_keys"
  else
    echo "unknown job $job"
    exit
  fi

  end=$(now)

  if [ $job != debug ]; then
    echo "Complete $job in $((end-start)) seconds" | tee -a $schedule
  fi

  echo -e "ops/sec\tmb/sec\tSize-GB\tL0_MB\tSum_GB\tW-Amp\tW-MB/s\tusec/op\tp50\tp75\tp99\tp99.9\tp99.99\tUptime\tStall-time\tStall%\tTest"
  tail -1 $output_dir/report.txt
done