#!/usr/bin/env bash
# REQUIRE: db_bench binary exists in the current directory
|
# Require exactly one argument: a comma-separated list of benchmarks to run.
# Print usage and bail out otherwise. (Exit code 0 on usage is historical
# behavior; callers may depend on it — NOTE(review): consider exit 1.)
if [ $# -ne 1 ]; then
  echo -n "./benchmark.sh [bulkload/fillseq/overwrite/filluniquerandom/"
  echo "readrandom/readwhilewriting/readwhilemerging/updaterandom/"
  echo "mergerandom/randomtransaction/compact]"
  exit 0
fi
|
|
|
|
|
2016-03-04 21:32:11 +01:00
|
|
|
# Make it easier to run only the compaction test. Getting valid data requires
# a number of iterations and having an ability to run the test separately from
# rest of the benchmarks helps.
# Use two [ ] tests joined by && instead of the deprecated/ambiguous -a.
if [ "$COMPACTION_TEST" == "1" ] && [ "$1" != "universal_compaction" ]; then
  echo "Skipping $1 because it's not a compaction test."
  exit 0
fi
|
|
|
|
|
2014-09-13 01:25:35 +02:00
|
|
|
# size constants (powers of 1024)
K=1024
M=$((1024 * K))
G=$((1024 * M))
# BUG FIX: was T=$((1024 * T)), a self-reference that evaluates to 0
# because T is unset at this point. A terabyte is 1024 gigabytes.
T=$((1024 * G))
|
2014-09-13 01:25:35 +02:00
|
|
|
|
|
|
|
# Abort early when the database directory is not configured.
# Quoted expansion so the test is well-formed even if DB_DIR has spaces.
# NOTE(review): exit 0 on a missing prerequisite matches the original
# behavior — confirm callers don't need a failure code here.
if [ -z "$DB_DIR" ]; then
  echo "DB_DIR is not defined"
  exit 0
fi
|
|
|
|
|
|
|
|
# Abort early when the WAL directory is not configured (quoted expansion,
# same rationale as the DB_DIR check above-in-file).
if [ -z "$WAL_DIR" ]; then
  echo "WAL_DIR is not defined"
  exit 0
fi
|
|
|
|
|
|
|
|
# All logs and the summary report go under $OUTPUT_DIR (default /tmp/).
output_dir=${OUTPUT_DIR:-/tmp/}
if [ ! -d "$output_dir" ]; then
  mkdir -p "$output_dir"
fi
|
|
|
|
|
Single threaded tests -> sync=0 Multi threaded tests -> sync=1 by default unless DB_BENCH_NO_SYNC is defined
Summary:
Single threaded tests -> sync=0 Multi threaded tests -> sync=1 by default unless DB_BENCH_NO_SYNC is defined.
Also added updaterandom and mergerandom with putOperator. I am waiting for some results from udb on this.
Test Plan:
DB_BENCH_NO_SYNC=1 WAL_DIR=/tmp OUTPUT_DIR=/tmp/b DB_DIR=/tmp ./tools/benchmark.sh debug,bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting,updaterandom,mergerandom
WAL_DIR=/tmp OUTPUT_DIR=/tmp/b DB_DIR=/tmp ./tools/benchmark.sh debug,bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting,updaterandom,mergerandom
Verify sync settings
Reviewers: sdong, MarkCallaghan, igor, rven
Reviewed By: igor, rven
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D34185
2015-03-06 23:12:53 +01:00
|
|
|
# all multithreaded tests run with sync=1 unless
# $DB_BENCH_NO_SYNC is defined
syncval="1"
if [ ! -z "$DB_BENCH_NO_SYNC" ]; then
  echo "Turning sync off for all multithreaded tests"
  syncval="0";
fi
|
|
|
|
|
2018-12-18 01:27:08 +01:00
|
|
|
# Tunables, each overridable from the environment.
num_threads=${NUM_THREADS:-64}
# Rate limit for writers, in MB/s; 0 disables the limit.
mb_written_per_sec=${MB_WRITE_PER_SEC:-0}
# Only for tests that do range scans
num_nexts_per_seek=${NUM_NEXTS_PER_SEEK:-10}
# Block cache size in bytes (default 16 GB).
cache_size=${CACHE_SIZE:-$((17179869184))}
compression_max_dict_bytes=${COMPRESSION_MAX_DICT_BYTES:-0}
compression_type=${COMPRESSION_TYPE:-zstd}
# Wall-clock duration per test in seconds; 0 means run to completion.
duration=${DURATION:-0}
num_keys=${NUM_KEYS:-8000000000}
key_size=${KEY_SIZE:-20}
value_size=${VALUE_SIZE:-400}
block_size=${BLOCK_SIZE:-8192}
|
2014-09-13 01:25:35 +02:00
|
|
|
|
|
|
|
# Flags shared by every db_bench invocation in this script.
const_params="
  --db=$DB_DIR \
  --wal_dir=$WAL_DIR \
  \
  --num=$num_keys \
  --num_levels=6 \
  --key_size=$key_size \
  --value_size=$value_size \
  --block_size=$block_size \
  --cache_size=$cache_size \
  --cache_numshardbits=6 \
  --compression_max_dict_bytes=$compression_max_dict_bytes \
  --compression_ratio=0.5 \
  --compression_type=$compression_type \
  --level_compaction_dynamic_level_bytes=true \
  --bytes_per_sync=$((8 * M)) \
  --cache_index_and_filter_blocks=0 \
  --pin_l0_filter_and_index_blocks_in_cache=1 \
  --benchmark_write_rate_limit=$(( 1024 * 1024 * $mb_written_per_sec )) \
  \
  --hard_rate_limit=3 \
  --rate_limit_delay_max_milliseconds=1000000 \
  --write_buffer_size=$((128 * M)) \
  --target_file_size_base=$((128 * M)) \
  --max_bytes_for_level_base=$((1 * G)) \
  \
  --verify_checksum=1 \
  --delete_obsolete_files_period_micros=$((60 * M)) \
  --max_bytes_for_level_multiplier=8 \
  \
  --statistics=0 \
  --stats_per_interval=1 \
  --stats_interval_seconds=60 \
  --histogram=1 \
  \
  --memtablerep=skip_list \
  --bloom_bits=10 \
  --open_files=-1"
|
2014-09-13 01:25:35 +02:00
|
|
|
|
|
|
|
# Default L0 trigger thresholds; overridden per-test where needed.
l0_config="
  --level0_file_num_compaction_trigger=4 \
  --level0_stop_writes_trigger=20"
|
2014-09-13 01:25:35 +02:00
|
|
|
|
|
|
|
# Pass --duration through only when a positive duration was requested.
# Quoted so the test stays well-formed ($duration is set above-in-file).
if [ "$duration" -gt 0 ]; then
  const_params="$const_params --duration=$duration"
fi
|
|
|
|
|
2016-03-17 18:14:23 +01:00
|
|
|
# Flags for the general write workloads.
params_w="$const_params \
  $l0_config \
  --max_background_compactions=16 \
  --max_write_buffer_number=8 \
  --max_background_flushes=7"
|
2016-03-17 18:14:23 +01:00
|
|
|
|
|
|
|
# Bulk-load flags: effectively disable L0 triggers (huge thresholds) so the
# load is not throttled; compaction happens in a separate pass afterwards.
params_bulkload="$const_params \
  --max_background_compactions=16 \
  --max_write_buffer_number=8 \
  --allow_concurrent_memtable_write=false \
  --max_background_flushes=7 \
  --level0_file_num_compaction_trigger=$((10 * M)) \
  --level0_slowdown_writes_trigger=$((10 * M)) \
  --level0_stop_writes_trigger=$((10 * M))"
|
2014-09-13 01:25:35 +02:00
|
|
|
|
2018-12-18 01:27:08 +01:00
|
|
|
# fillseq uses the write params but must disable concurrent memtable writes
# (the vector memtable it runs with does not support them).
params_fillseq="$params_w \
  --allow_concurrent_memtable_write=false"
|
2016-03-04 21:32:11 +01:00
|
|
|
#
# Tune values for level and universal compaction.
# For universal compaction, these level0_* options mean total sorted of runs in
# LSM. In level-based compaction, it means number of L0 files.
#
params_level_compact="$const_params \
  --max_background_flushes=4 \
  --max_write_buffer_number=4 \
  --level0_file_num_compaction_trigger=4 \
  --level0_slowdown_writes_trigger=16 \
  --level0_stop_writes_trigger=20"
|
|
|
|
|
2016-03-15 07:09:04 +01:00
|
|
|
# Universal compaction allows more sorted runs before triggering (8 vs 4).
params_univ_compact="$const_params \
  --max_background_flushes=4 \
  --max_write_buffer_number=4 \
  --level0_file_num_compaction_trigger=8 \
  --level0_slowdown_writes_trigger=16 \
  --level0_stop_writes_trigger=20"
|
|
|
|
|
2015-03-30 20:28:25 +02:00
|
|
|
#######################################
# Parse one db_bench log and append a single TSV row to $output_dir/report.txt.
# Globals:   output_dir (read)
# Arguments: $1 - path to the db_bench log file
#            $2 - human-readable test name for the report row
#            $3 - db_bench benchmark name to grep for (e.g. fillseq)
#######################################
function summarize_result {
  test_out=$1
  test_name=$2
  bench_name=$3

  # Note that this function assumes that the benchmark executes long enough so
  # that "Compaction Stats" is written to stdout at least once. If it won't
  # happen then empty output from grep when searching for "Sum" will cause
  # syntax errors.
  uptime=$( grep ^Uptime\(secs $test_out | tail -1 | awk '{ printf "%.0f", $2 }' )
  stall_time=$( grep "^Cumulative stall" $test_out | tail -1 | awk '{ print $3 }' )
  stall_pct=$( grep "^Cumulative stall" $test_out| tail -1 | awk '{ print $5 }' )
  ops_sec=$( grep ^${bench_name} $test_out | awk '{ print $5 }' )
  mb_sec=$( grep ^${bench_name} $test_out | awk '{ print $7 }' )
  lo_wgb=$( grep "^ L0" $test_out | tail -1 | awk '{ print $9 }' )
  sum_wgb=$( grep "^ Sum" $test_out | tail -1 | awk '{ print $9 }' )
  sum_size=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.1f", $3 / 1024.0 }' )
  # Write amplification = total bytes written / bytes written to L0.
  wamp=$( echo "scale=1; $sum_wgb / $lo_wgb" | bc )
  wmb_ps=$( echo "scale=1; ( $sum_wgb * 1024.0 ) / $uptime" | bc )
  usecs_op=$( grep ^${bench_name} $test_out | awk '{ printf "%.1f", $3 }' )
  p50=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $3 }' )
  p75=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $5 }' )
  p99=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $7 }' )
  p999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $9 }' )
  p9999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $11 }' )
  echo -e "$ops_sec\t$mb_sec\t$sum_size\t$lo_wgb\t$sum_wgb\t$wamp\t$wmb_ps\t$usecs_op\t$p50\t$p75\t$p99\t$p999\t$p9999\t$uptime\t$stall_time\t$stall_pct\t$test_name" \
    >> $output_dir/report.txt
}
|
|
|
|
|
2014-09-13 01:25:35 +02:00
|
|
|
#######################################
# Bulk-load $num_keys random keys, then run a full manual compaction.
# Globals: num_keys, params_bulkload, params_w, output_dir (read)
#######################################
function run_bulkload {
  # This runs with a vector memtable and the WAL disabled to load faster. It is still crash safe and the
  # client can discover where to restart a load after a crash. I think this is a good way to load.
  echo "Bulk loading $num_keys random keys"
  cmd="./db_bench --benchmarks=fillrandom \
       --use_existing_db=0 \
       --disable_auto_compactions=1 \
       --sync=0 \
       $params_bulkload \
       --threads=1 \
       --memtablerep=vector \
       --allow_concurrent_memtable_write=false \
       --disable_wal=1 \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/benchmark_bulkload_fillrandom.log"
  echo $cmd | tee $output_dir/benchmark_bulkload_fillrandom.log
  eval $cmd
  summarize_result $output_dir/benchmark_bulkload_fillrandom.log bulkload fillrandom
  echo "Compacting..."
  cmd="./db_bench --benchmarks=compact \
       --use_existing_db=1 \
       --disable_auto_compactions=1 \
       --sync=0 \
       $params_w \
       --threads=1 \
       2>&1 | tee -a $output_dir/benchmark_bulkload_compact.log"
  echo $cmd | tee $output_dir/benchmark_bulkload_compact.log
  eval $cmd
}
|
|
|
|
|
2016-03-15 07:09:04 +01:00
|
|
|
#
# Parameter description:
#
# $1 - 1 if I/O statistics should be collected.
# $2 - compaction type to use (level=0, universal=1).
# $3 - number of subcompactions.
# $4 - number of maximum background compactions.
#
function run_manual_compaction_worker {
  # This runs with a vector memtable and the WAL disabled to load faster.
  # It is still crash safe and the client can discover where to restart a
  # load after a crash. I think this is a good way to load.
  echo "Bulk loading $num_keys random keys for manual compaction."

  fillrandom_output_file=$output_dir/benchmark_man_compact_fillrandom_$3.log
  man_compact_output_log=$output_dir/benchmark_man_compact_$3.log

  if [ "$2" == "1" ]; then
    extra_params=$params_univ_compact
  else
    extra_params=$params_level_compact
  fi

  # Make sure that fillrandom uses the same compaction options as compact.
  cmd="./db_bench --benchmarks=fillrandom \
       --use_existing_db=0 \
       --disable_auto_compactions=0 \
       --sync=0 \
       $extra_params \
       --threads=$num_threads \
       --compaction_measure_io_stats=$1 \
       --compaction_style=$2 \
       --subcompactions=$3 \
       --memtablerep=vector \
       --allow_concurrent_memtable_write=false \
       --disable_wal=1 \
       --max_background_compactions=$4 \
       --seed=$( date +%s ) \
       2>&1 | tee -a $fillrandom_output_file"

  echo $cmd | tee $fillrandom_output_file
  eval $cmd

  summarize_result $fillrandom_output_file man_compact_fillrandom_$3 fillrandom

  echo "Compacting with $3 subcompactions specified ..."

  # This is the part we're really interested in. Given that compact benchmark
  # doesn't output regular statistics then we'll just use the time command to
  # measure how long this step takes.
  cmd="{ \
       time ./db_bench --benchmarks=compact \
       --use_existing_db=1 \
       --disable_auto_compactions=0 \
       --sync=0 \
       $extra_params \
       --threads=$num_threads \
       --compaction_measure_io_stats=$1 \
       --compaction_style=$2 \
       --subcompactions=$3 \
       --max_background_compactions=$4 \
       ;}
       2>&1 | tee -a $man_compact_output_log"

  echo $cmd | tee $man_compact_output_log
  eval $cmd

  # Can't use summarize_result here. One way to analyze the results is to run
  # "grep real" on the resulting log files.
}
|
|
|
|
|
|
|
|
#######################################
# Run the universal-compaction benchmark matrix: each subcompaction count is
# paired with a max-background-compactions value by index.
#######################################
function run_univ_compaction {
  # Always ask for I/O statistics to be measured.
  io_stats=1

  # Values: kCompactionStyleLevel = 0x0, kCompactionStyleUniversal = 0x1.
  compaction_style=1

  # Define a set of benchmarks.
  subcompactions=(1 2 4 8 16)
  max_background_compactions=(16 16 8 4 2)

  i=0
  total=${#subcompactions[@]}

  # Execute a set of benchmarks to cover variety of scenarios.
  while [ "$i" -lt "$total" ]
  do
    run_manual_compaction_worker $io_stats $compaction_style ${subcompactions[$i]} \
      ${max_background_compactions[$i]}
    ((i++))
  done
}
|
|
|
|
|
2014-09-13 01:25:35 +02:00
|
|
|
#######################################
# Load $num_keys keys sequentially with a single thread.
# Arguments: $1 - 1 to disable the WAL, 0 to keep it enabled.
#######################################
function run_fillseq {
  # This runs with a vector memtable. WAL can be either disabled or enabled
  # depending on the input parameter (1 for disabled, 0 for enabled). The main
  # benefit behind disabling WAL is to make loading faster. It is still crash
  # safe and the client can discover where to restart a load after a crash. I
  # think this is a good way to load.

  # Make sure that we'll have unique names for all the files so that data won't
  # be overwritten.
  if [ $1 == 1 ]; then
    log_file_name=$output_dir/benchmark_fillseq.wal_disabled.v${value_size}.log
    test_name=fillseq.wal_disabled.v${value_size}
  else
    log_file_name=$output_dir/benchmark_fillseq.wal_enabled.v${value_size}.log
    test_name=fillseq.wal_enabled.v${value_size}
  fi

  echo "Loading $num_keys keys sequentially"
  cmd="./db_bench --benchmarks=fillseq \
       --use_existing_db=0 \
       --sync=0 \
       $params_fillseq \
       --min_level_to_compress=0 \
       --threads=1 \
       --memtablerep=vector \
       --allow_concurrent_memtable_write=false \
       --disable_wal=$1 \
       --seed=$( date +%s ) \
       2>&1 | tee -a $log_file_name"
  echo $cmd | tee $log_file_name
  eval $cmd

  # The constant "fillseq" which we pass to db_bench is the benchmark name.
  summarize_result $log_file_name $test_name fillseq
}
|
|
|
|
|
2015-03-30 20:28:25 +02:00
|
|
|
#######################################
# Run a multithreaded mutation benchmark (overwrite/updaterandom/mergerandom).
# Arguments: $1 - db_bench benchmark name to run.
#######################################
function run_change {
  operation=$1
  echo "Do $num_keys random $operation"
  out_name="benchmark_${operation}.t${num_threads}.s${syncval}.log"
  cmd="./db_bench --benchmarks=$operation \
       --use_existing_db=1 \
       --sync=$syncval \
       $params_w \
       --threads=$num_threads \
       --merge_operator=\"put\" \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} ${operation}.t${num_threads}.s${syncval} $operation
}
|
|
|
|
|
|
|
|
#######################################
# Load $num_keys unique keys in random order, single-threaded, no sync.
#######################################
function run_filluniquerandom {
  echo "Loading $num_keys unique keys randomly"
  cmd="./db_bench --benchmarks=filluniquerandom \
       --use_existing_db=0 \
       --sync=0 \
       $params_w \
       --threads=1 \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/benchmark_filluniquerandom.log"
  echo $cmd | tee $output_dir/benchmark_filluniquerandom.log
  eval $cmd
  summarize_result $output_dir/benchmark_filluniquerandom.log filluniquerandom filluniquerandom
}
|
|
|
|
|
|
|
|
#######################################
# Multithreaded point-lookup benchmark against an existing database.
#######################################
function run_readrandom {
  echo "Reading $num_keys random keys"
  out_name="benchmark_readrandom.t${num_threads}.log"
  cmd="./db_bench --benchmarks=readrandom \
       --use_existing_db=1 \
       $params_w \
       --threads=$num_threads \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} readrandom.t${num_threads} readrandom
}
|
|
|
|
|
2015-03-30 20:28:25 +02:00
|
|
|
#######################################
# Point lookups concurrent with a background mutation workload.
# Arguments: $1 - "writing" or "merging" (suffix of the readwhile* benchmark).
#######################################
function run_readwhile {
  operation=$1
  echo "Reading $num_keys random keys while $operation"
  out_name="benchmark_readwhile${operation}.t${num_threads}.log"
  cmd="./db_bench --benchmarks=readwhile${operation} \
       --use_existing_db=1 \
       --sync=$syncval \
       $params_w \
       --threads=$num_threads \
       --merge_operator=\"put\" \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} readwhile${operation}.t${num_threads} readwhile${operation}
}
|
|
|
|
|
2015-03-30 20:28:25 +02:00
|
|
|
#######################################
# Range scans concurrent with a background mutation workload.
# Arguments: $1 - "writing" or "merging"
#            $2 - full test name used for log/report naming
#            $3 - 0/1 passed to --reverse_iterator
#######################################
function run_rangewhile {
  operation=$1
  full_name=$2
  reverse_arg=$3
  out_name="benchmark_${full_name}.t${num_threads}.log"
  echo "Range scan $num_keys random keys while ${operation} for reverse_iter=${reverse_arg}"
  cmd="./db_bench --benchmarks=seekrandomwhile${operation} \
       --use_existing_db=1 \
       --sync=$syncval \
       $params_w \
       --threads=$num_threads \
       --merge_operator=\"put\" \
       --seek_nexts=$num_nexts_per_seek \
       --reverse_iterator=$reverse_arg \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} ${full_name}.t${num_threads} seekrandomwhile${operation}
}
|
|
|
|
|
2015-03-30 20:28:25 +02:00
|
|
|
#######################################
# Pure range-scan benchmark (seekrandom) against an existing database.
# Arguments: $1 - full test name used for log/report naming
#            $2 - 0/1 passed to --reverse_iterator
#######################################
function run_range {
  full_name=$1
  reverse_arg=$2
  out_name="benchmark_${full_name}.t${num_threads}.log"
  echo "Range scan $num_keys random keys for reverse_iter=${reverse_arg}"
  cmd="./db_bench --benchmarks=seekrandom \
       --use_existing_db=1 \
       $params_w \
       --threads=$num_threads \
       --seek_nexts=$num_nexts_per_seek \
       --reverse_iterator=$reverse_arg \
       --seed=$( date +%s ) \
       2>&1 | tee -a $output_dir/${out_name}"
  echo $cmd | tee $output_dir/${out_name}
  eval $cmd
  summarize_result $output_dir/${out_name} ${full_name}.t${num_threads} seekrandom
}
|
|
|
|
|
2015-05-29 23:36:35 +02:00
|
|
|
#######################################
# Transactional random read-modify-write benchmark.
# NOTE(review): $params_r is not defined in this view of the file — presumably
# declared in a part of the script outside this chunk; confirm.
#######################################
function run_randomtransaction {
  echo "..."
  cmd="./db_bench $params_r --benchmarks=randomtransaction \
       --num=$num_keys \
       --transaction_db \
       --threads=5 \
       --transaction_sets=5 \
       2>&1 | tee $output_dir/benchmark_randomtransaction.log"
  # BUG FIX: the command line was previously echoed into
  # benchmark_rangescanwhilewriting.log (a copy/paste slip), clobbering an
  # unrelated benchmark's log. Echo into this benchmark's own log instead.
  echo $cmd | tee $output_dir/benchmark_randomtransaction.log
  eval $cmd
}
|
|
|
|
|
2014-09-13 01:25:35 +02:00
|
|
|
# Print the current time as epoch seconds (used to time each benchmark job).
# The original `echo \`date +"%s"\`` was a useless use of echo; date already
# writes the value followed by a newline.
function now() {
  date +"%s"
}
|
|
|
|
|
|
|
|
# Paths for the cumulative TSV report and the start-time schedule log.
report="$output_dir/report.txt"
schedule="$output_dir/schedule.txt"

echo "===== Benchmark ====="

# Run!!!
# Split the comma-separated job list (argument 1) into an array.
IFS=',' read -a jobs <<< $1
# shellcheck disable=SC2068
|
2014-09-13 01:25:35 +02:00
|
|
|
for job in ${jobs[@]}; do
|
benchmark.sh won't run through all tests properly if one specifies wal_dir to be different than db directory.
Summary:
A command line like this to run all the tests:
source benchmark.config.sh && nohup ./benchmark.sh 'bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting'
where
benchmark.config.sh is:
export DB_DIR=/data/mysql/rocksdata
export WAL_DIR=/txlogs/rockswal
export OUTPUT_DIR=/root/rocks_benchmarking/output
Will fail for the tests that need a new DB .
Also 1) set disable_data_sync=0 and 2) add debug mode to run through all the tests more quickly
Test Plan: run ./benchmark.sh 'debug,bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting' and verify that there are no complaints about WAL dir not being empty.
Reviewers: sdong, yhchiang, rven, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D30909
2015-01-06 00:36:47 +01:00
|
|
|
|
|
|
|
if [ $job != debug ]; then
|
2015-03-30 20:28:25 +02:00
|
|
|
echo "Start $job at `date`" | tee -a $schedule
|
benchmark.sh won't run through all tests properly if one specifies wal_dir to be different than db directory.
Summary:
A command line like this to run all the tests:
source benchmark.config.sh && nohup ./benchmark.sh 'bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting'
where
benchmark.config.sh is:
export DB_DIR=/data/mysql/rocksdata
export WAL_DIR=/txlogs/rockswal
export OUTPUT_DIR=/root/rocks_benchmarking/output
Will fail for the tests that need a new DB .
Also 1) set disable_data_sync=0 and 2) add debug mode to run through all the tests more quickly
Test Plan: run ./benchmark.sh 'debug,bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting' and verify that there are no complaints about WAL dir not being empty.
Reviewers: sdong, yhchiang, rven, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D30909
2015-01-06 00:36:47 +01:00
|
|
|
fi
|
|
|
|
|
2014-09-13 01:25:35 +02:00
|
|
|
start=$(now)
|
|
|
|
if [ $job = bulkload ]; then
|
|
|
|
run_bulkload
|
2016-02-05 22:20:56 +01:00
|
|
|
elif [ $job = fillseq_disable_wal ]; then
|
|
|
|
run_fillseq 1
|
|
|
|
elif [ $job = fillseq_enable_wal ]; then
|
|
|
|
run_fillseq 0
|
2014-09-13 01:25:35 +02:00
|
|
|
elif [ $job = overwrite ]; then
|
2018-12-18 01:27:08 +01:00
|
|
|
syncval="0"
|
|
|
|
params_w="$params_w \
|
|
|
|
--writes=125000000 \
|
|
|
|
--subcompactions=4 \
|
|
|
|
--soft_pending_compaction_bytes_limit=$((1 * T)) \
|
|
|
|
--hard_pending_compaction_bytes_limit=$((4 * T)) "
|
2015-03-30 20:28:25 +02:00
|
|
|
run_change overwrite
|
|
|
|
elif [ $job = updaterandom ]; then
|
|
|
|
run_change updaterandom
|
|
|
|
elif [ $job = mergerandom ]; then
|
|
|
|
run_change mergerandom
|
2014-09-13 01:25:35 +02:00
|
|
|
elif [ $job = filluniquerandom ]; then
|
|
|
|
run_filluniquerandom
|
|
|
|
elif [ $job = readrandom ]; then
|
|
|
|
run_readrandom
|
2015-03-30 20:28:25 +02:00
|
|
|
elif [ $job = fwdrange ]; then
|
|
|
|
run_range $job false
|
|
|
|
elif [ $job = revrange ]; then
|
|
|
|
run_range $job true
|
2014-09-13 01:25:35 +02:00
|
|
|
elif [ $job = readwhilewriting ]; then
|
2015-03-30 20:28:25 +02:00
|
|
|
run_readwhile writing
|
2015-03-18 21:50:52 +01:00
|
|
|
elif [ $job = readwhilemerging ]; then
|
2015-03-30 20:28:25 +02:00
|
|
|
run_readwhile merging
|
|
|
|
elif [ $job = fwdrangewhilewriting ]; then
|
|
|
|
run_rangewhile writing $job false
|
|
|
|
elif [ $job = revrangewhilewriting ]; then
|
|
|
|
run_rangewhile writing $job true
|
|
|
|
elif [ $job = fwdrangewhilemerging ]; then
|
|
|
|
run_rangewhile merging $job false
|
|
|
|
elif [ $job = revrangewhilemerging ]; then
|
|
|
|
run_rangewhile merging $job true
|
2015-05-29 23:36:35 +02:00
|
|
|
elif [ $job = randomtransaction ]; then
|
|
|
|
run_randomtransaction
|
2016-02-11 00:30:47 +01:00
|
|
|
elif [ $job = universal_compaction ]; then
|
|
|
|
run_univ_compaction
|
benchmark.sh won't run through all tests properly if one specifies wal_dir to be different than db directory.
Summary:
A command line like this to run all the tests:
source benchmark.config.sh && nohup ./benchmark.sh 'bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting'
where
benchmark.config.sh is:
export DB_DIR=/data/mysql/rocksdata
export WAL_DIR=/txlogs/rockswal
export OUTPUT_DIR=/root/rocks_benchmarking/output
Will fail for the tests that need a new DB .
Also 1) set disable_data_sync=0 and 2) add debug mode to run through all the tests more quickly
Test Plan: run ./benchmark.sh 'debug,bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting' and verify that there are no complaints about WAL dir not being empty.
Reviewers: sdong, yhchiang, rven, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D30909
2015-01-06 00:36:47 +01:00
|
|
|
elif [ $job = debug ]; then
|
Single threaded tests -> sync=0 Multi threaded tests -> sync=1 by default unless DB_BENCH_NO_SYNC is defined
Summary:
Single threaded tests -> sync=0 Multi threaded tests -> sync=1 by default unless DB_BENCH_NO_SYNC is defined.
Also added updaterandom and mergerandom with putOperator. I am waiting for some results from udb on this.
Test Plan:
DB_BENCH_NO_SYNC=1 WAL_DIR=/tmp OUTPUT_DIR=/tmp/b DB_DIR=/tmp ./tools/benchmark.sh debug,bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting,updaterandom,mergerandom
WAL_DIR=/tmp OUTPUT_DIR=/tmp/b DB_DIR=/tmp ./tools/benchmark.sh debug,bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting,updaterandom,mergerandom
Verify sync settings
Reviewers: sdong, MarkCallaghan, igor, rven
Reviewed By: igor, rven
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D34185
2015-03-06 23:12:53 +01:00
|
|
|
num_keys=1000; # debug
|
benchmark.sh won't run through all tests properly if one specifies wal_dir to be different than db directory.
Summary:
A command line like this to run all the tests:
source benchmark.config.sh && nohup ./benchmark.sh 'bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting'
where
benchmark.config.sh is:
export DB_DIR=/data/mysql/rocksdata
export WAL_DIR=/txlogs/rockswal
export OUTPUT_DIR=/root/rocks_benchmarking/output
Will fail for the tests that need a new DB .
Also 1) set disable_data_sync=0 and 2) add debug mode to run through all the tests more quickly
Test Plan: run ./benchmark.sh 'debug,bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting' and verify that there are no complaints about WAL dir not being empty.
Reviewers: sdong, yhchiang, rven, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D30909
2015-01-06 00:36:47 +01:00
|
|
|
echo "Setting num_keys to $num_keys"
|
2014-09-13 01:25:35 +02:00
|
|
|
else
|
|
|
|
echo "unknown job $job"
|
|
|
|
exit
|
|
|
|
fi
|
|
|
|
end=$(now)
|
|
|
|
|
benchmark.sh won't run through all tests properly if one specifies wal_dir to be different than db directory.
Summary:
A command line like this to run all the tests:
source benchmark.config.sh && nohup ./benchmark.sh 'bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting'
where
benchmark.config.sh is:
export DB_DIR=/data/mysql/rocksdata
export WAL_DIR=/txlogs/rockswal
export OUTPUT_DIR=/root/rocks_benchmarking/output
Will fail for the tests that need a new DB .
Also 1) set disable_data_sync=0 and 2) add debug mode to run through all the tests more quickly
Test Plan: run ./benchmark.sh 'debug,bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting' and verify that there are no complaints about WAL dir not being empty.
Reviewers: sdong, yhchiang, rven, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D30909
2015-01-06 00:36:47 +01:00
|
|
|
if [ $job != debug ]; then
|
2015-03-30 20:28:25 +02:00
|
|
|
echo "Complete $job in $((end-start)) seconds" | tee -a $schedule
|
benchmark.sh won't run through all tests properly if one specifies wal_dir to be different than db directory.
Summary:
A command line like this to run all the tests:
source benchmark.config.sh && nohup ./benchmark.sh 'bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting'
where
benchmark.config.sh is:
export DB_DIR=/data/mysql/rocksdata
export WAL_DIR=/txlogs/rockswal
export OUTPUT_DIR=/root/rocks_benchmarking/output
Will fail for the tests that need a new DB .
Also 1) set disable_data_sync=0 and 2) add debug mode to run through all the tests more quickly
Test Plan: run ./benchmark.sh 'debug,bulkload,fillseq,overwrite,filluniquerandom,readrandom,readwhilewriting' and verify that there are no complaints about WAL dir not being empty.
Reviewers: sdong, yhchiang, rven, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D30909
2015-01-06 00:36:47 +01:00
|
|
|
fi
|
|
|
|
|
2016-04-18 23:34:45 +02:00
|
|
|
echo -e "ops/sec\tmb/sec\tSize-GB\tL0_GB\tSum_GB\tW-Amp\tW-MB/s\tusec/op\tp50\tp75\tp99\tp99.9\tp99.99\tUptime\tStall-time\tStall%\tTest"
|
2015-03-30 20:28:25 +02:00
|
|
|
tail -1 $output_dir/report.txt
|
|
|
|
|
2014-09-13 01:25:35 +02:00
|
|
|
done
|