2019-07-09 19:47:31 +02:00
|
|
|
#!/usr/bin/env python2
|
2019-04-18 19:51:19 +02:00
|
|
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
|
2013-03-13 07:20:14 +01:00
|
|
|
import os
|
|
|
|
import sys
|
|
|
|
import time
|
2013-06-08 21:29:43 +02:00
|
|
|
import random
|
2013-04-05 22:44:59 +02:00
|
|
|
import tempfile
|
2013-03-13 07:20:14 +01:00
|
|
|
import subprocess
|
2014-03-20 19:11:08 +01:00
|
|
|
import shutil
|
2015-10-19 20:43:14 +02:00
|
|
|
import argparse
|
2013-03-13 07:20:14 +01:00
|
|
|
|
2015-10-19 20:43:14 +02:00
|
|
|
# params overwrite priority:
#   for default:
#       default_params < {blackbox,whitebox}_default_params < args
#   for simple:
#       default_params < {blackbox,whitebox}_default_params <
#       simple_default_params <
#       {blackbox,whitebox}_simple_default_params < args
#   for cf_consistency:
#       default_params < {blackbox,whitebox}_default_params <
#       cf_consistency_params < args
|
2013-03-13 07:20:14 +01:00
|
|
|
|
2018-04-30 21:23:45 +02:00
|
|
|
# Scratch file db_stress uses to persist expected values across crashes and
# restarts of the same database (passed via --expected_values_path below).
expected_values_file = tempfile.NamedTemporaryFile()


# Base parameter set shared by every test flavor. A value may be a
# zero-argument callable, in which case it is sampled once per run by
# finalize_and_sanitize() to randomize the configuration.
default_params = {
    "acquire_snapshot_one_in": 10000,
    "block_size": 16384,
    "cache_index_and_filter_blocks": lambda: random.randint(0, 1),
    "cache_size": 1048576,
    "checkpoint_one_in": 1000000,
    "compression_type": "snappy",
    "compression_max_dict_bytes": lambda: 16384 * random.randint(0, 1),
    "compression_zstd_max_train_bytes": lambda: 65536 * random.randint(0, 1),
    "clear_column_family_one_in": 0,
    "compact_files_one_in": 1000000,
    "compact_range_one_in": 1000000,
    "delpercent": 4,
    "delrangepercent": 1,
    "destroy_db_initially": 0,
    # Temporarily disable it until its concurrency issue are fixed
    "enable_pipelined_write": 0,
    "expected_values_path": expected_values_file.name,
    "flush_one_in": 1000000,
    # Temporarily disable hash index
    "index_type": lambda: random.choice([0, 2]),
    "max_background_compactions": 20,
    "max_bytes_for_level_base": 10485760,
    "max_key": 100000000,
    "max_write_buffer_number": 3,
    "mmap_read": lambda: random.randint(0, 1),
    "nooverwritepercent": 1,
    "open_files": lambda: random.choice([-1, 500000]),
    "partition_filters": lambda: random.randint(0, 1),
    "prefixpercent": 5,
    "progress_reports": 0,
    "readpercent": 45,
    "recycle_log_file_num": lambda: random.randint(0, 1),
    "reopen": 20,
    "snapshot_hold_ops": 100000,
    "subcompactions": lambda: random.randint(1, 4),
    "target_file_size_base": 2097152,
    "target_file_size_multiplier": 2,
    "use_direct_reads": lambda: random.randint(0, 1),
    "use_direct_io_for_flush_and_compaction": lambda: random.randint(0, 1),
    "use_full_merge_v1": lambda: random.randint(0, 1),
    "use_merge": lambda: random.randint(0, 1),
    "verify_checksum": 1,
    "write_buffer_size": 4 * 1024 * 1024,
    "writepercent": 35,
    "format_version": lambda: random.randint(2, 4),
    "index_block_restart_interval": lambda: random.choice(range(1, 16)),
    "use_multiget": lambda: random.randint(0, 1),
    # 0 is repeated so "disabled" is the most likely outcome.
    "periodic_compaction_seconds":
        lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
    "compaction_ttl": lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
}
|
2013-03-13 07:20:14 +01:00
|
|
|
|
2018-06-04 06:28:41 +02:00
|
|
|
# Environment variable that, when set, names the parent scratch directory.
_TEST_DIR_ENV_VAR = 'TEST_TMPDIR'


def get_dbname(test_name):
    """Create and return an empty database directory for `test_name`.

    If the TEST_TMPDIR environment variable is set and non-empty, the
    directory is created inside it and any leftovers from a previous run
    are removed first. Otherwise a fresh directory is created under the
    system temp dir via tempfile.mkdtemp.
    """
    test_tmpdir = os.environ.get(_TEST_DIR_ENV_VAR)
    if not test_tmpdir:
        dbname = tempfile.mkdtemp(prefix='rocksdb_crashtest_' + test_name)
    else:
        dbname = os.path.join(test_tmpdir, 'rocksdb_crashtest_' + test_name)
        # Start from a clean slate; ignore_errors covers the first run
        # when the directory does not exist yet.
        shutil.rmtree(dbname, True)
        os.mkdir(dbname)
    return dbname
|
|
|
|
|
2018-06-02 01:33:07 +02:00
|
|
|
|
|
|
|
def is_direct_io_supported(dbname):
    """Return True if a file under `dbname` can be opened with O_DIRECT.

    Probes by opening a throwaway temp file with os.O_DIRECT. Returns
    False when the filesystem rejects the flag (OSError) or the platform
    does not define os.O_DIRECT at all (AttributeError, e.g. macOS).
    """
    with tempfile.NamedTemporaryFile(dir=dbname) as f:
        try:
            fd = os.open(f.name, os.O_DIRECT)
        except (OSError, IOError, AttributeError):
            return False
        # Close the probe descriptor so it does not leak.
        os.close(fd)
    return True
|
|
|
|
|
|
|
|
|
2015-10-19 20:43:14 +02:00
|
|
|
# Overrides applied on top of default_params for blackbox runs.
blackbox_default_params = {
    # Wall-clock budget (seconds) for the whole blackbox script.
    "duration": 6000,
    # Seconds each db_stress instance runs before the driver kills it.
    "interval": 120,
    # The process is killed from outside, so make the per-thread op
    # budget effectively unlimited.
    "ops_per_thread": 100000000,
    "set_options_one_in": 10000,
    "test_batches_snapshots": 1,
}
|
|
|
|
|
|
|
|
# Overrides applied on top of default_params for whitebox runs.
whitebox_default_params = {
    # Wall-clock budget (seconds) for the whole whitebox script.
    "duration": 10000,
    "log2_keys_per_lock": 10,
    # Bounded: whitebox runs exit on their own (or self-kill).
    "ops_per_thread": 200000,
    # Base odds fed to db_stress's kill points; scaled per kill mode
    # in whitebox_crash_main.
    "random_kill_odd": 888887,
    "test_batches_snapshots": lambda: random.randint(0, 1),
}
|
|
|
|
|
|
|
|
# Overrides used when --simple is given: one column family and a
# deliberately plain configuration.
simple_default_params = {
    "allow_concurrent_memtable_write": lambda: random.randint(0, 1),
    "column_families": 1,
    "max_background_compactions": 1,
    "max_bytes_for_level_base": 67108864,
    "memtablerep": "skip_list",
    "prefixpercent": 25,
    "readpercent": 25,
    "target_file_size_base": 16777216,
    "target_file_size_multiplier": 1,
    "test_batches_snapshots": 0,
    "write_buffer_size": 32 * 1024 * 1024,
}
|
|
|
|
|
|
|
|
# Extra blackbox overrides applied only in --simple mode.
blackbox_simple_default_params = {
    "open_files": -1,
    "set_options_one_in": 0,
}
|
|
|
|
|
2018-05-09 22:32:03 +02:00
|
|
|
# No extra whitebox overrides for --simple mode (kept for symmetry with
# blackbox_simple_default_params in gen_cmd_params).
whitebox_simple_default_params = {}
|
2015-10-19 20:43:14 +02:00
|
|
|
|
2019-08-23 01:30:30 +02:00
|
|
|
# Overrides for --cf_consistency runs (cross-column-family consistency).
cf_consistency_params = {
    "disable_wal": lambda: random.randint(0, 1),
    "reopen": 0,
    "test_cf_consistency": 1,
    # use small value for write_buffer_size so that RocksDB triggers flush
    # more frequently
    "write_buffer_size": 1024 * 1024,
    # disable pipelined write when test_atomic_flush is true
    "enable_pipelined_write": 0,
    "snap_refresh_nanos": 0,
}
|
|
|
|
|
2015-10-19 20:43:14 +02:00
|
|
|
|
2016-02-05 00:21:32 +01:00
|
|
|
def finalize_and_sanitize(src_params):
    """Resolve callable params and enforce cross-option constraints.

    Any value in `src_params` may be a zero-argument callable (used for
    randomized choices); it is evaluated here. Afterwards, option
    combinations that db_stress does not support are patched up.
    """
    out = {}
    for key, value in src_params.items():
        out[key] = value() if callable(value) else value
    # zstd dictionary training is meaningless without zstd + a dictionary.
    if out.get("compression_type") != "zstd" or \
            out.get("compression_max_dict_bytes") == 0:
        out["compression_zstd_max_train_bytes"] = 0
    # Concurrent memtable writes require the skip_list memtable.
    if out.get("allow_concurrent_memtable_write", 1) == 1:
        out["memtablerep"] = "skip_list"
    # Direct I/O is incompatible with mmap reads and needs filesystem
    # support (note: the probe is short-circuited when mmap_read is on).
    if out["mmap_read"] == 1 or not is_direct_io_supported(
            out["db"]):
        out["use_direct_io_for_flush_and_compaction"] = 0
        out["use_direct_reads"] = 0
    # Batched-snapshot tests don't do range deletions; fold that budget
    # into plain deletes so the percentages still add up.
    if out.get("test_batches_snapshots") == 1:
        out["delpercent"] += out["delrangepercent"]
        out["delrangepercent"] = 0
    # Running without a WAL forces atomic flush.
    if out.get("disable_wal", 0) == 1:
        out["atomic_flush"] = 1
    if out.get("open_files", 1) != -1:
        # Compaction TTL and periodic compactions are only compatible
        # with open_files = -1
        out["compaction_ttl"] = 0
        out["periodic_compaction_seconds"] = 0
    if out.get("compaction_style", 0) == 2:
        # Disable compaction TTL in FIFO compaction, because right
        # now assertion failures are triggered.
        out["compaction_ttl"] = 0
    # Partitioned filters require the two-level index and full filters.
    if out["partition_filters"] == 1:
        out["index_type"] = 2
        out["use_block_based_filter"] = 0
    return out
|
|
|
|
|
|
|
|
|
2015-10-19 20:43:14 +02:00
|
|
|
def gen_cmd_params(args):
    """Assemble the effective parameter dict for one test run.

    Layering, later entries winning: default_params, the per-test-type
    defaults, the --simple overlays, the --cf_consistency overlay, and
    finally any explicitly supplied command-line arguments.
    """
    type_overlays = {
        'blackbox': (blackbox_default_params, blackbox_simple_default_params),
        'whitebox': (whitebox_default_params, whitebox_simple_default_params),
    }
    per_type, per_type_simple = type_overlays.get(args.test_type, ({}, {}))

    params = {}
    params.update(default_params)
    params.update(per_type)
    if args.simple:
        params.update(simple_default_params)
        params.update(per_type_simple)
    if args.cf_consistency:
        params.update(cf_consistency_params)

    # Explicit CLI arguments (anything not None) override everything.
    for name, value in vars(args).items():
        if value is not None:
            params[name] = value
    return params
|
|
|
|
|
|
|
|
|
2018-05-09 22:32:03 +02:00
|
|
|
def gen_cmd(params, unknown_params):
    """Turn a parameter dict into the db_stress argv list.

    Options consumed by this driver script (rather than by db_stress
    itself) are filtered out, as are params whose value is None; any
    unrecognized command-line tokens are appended verbatim.
    """
    driver_only = set(['test_type', 'simple', 'duration', 'interval',
                       'random_kill_odd', 'cf_consistency'])
    flags = []
    for key, value in finalize_and_sanitize(params).items():
        if key in driver_only or value is None:
            continue
        flags.append('--{0}={1}'.format(key, value))
    return ['./db_stress'] + flags + unknown_params
|
|
|
|
|
|
|
|
|
|
|
|
# This script runs and kills db_stress multiple times. It checks consistency
# in case of unsafe crashes in RocksDB.
def blackbox_crash_main(args, unknown_args):
    """Repeatedly run db_stress and kill it from the outside.

    Each iteration launches db_stress against the same database, lets it
    run for cmd_params['interval'] seconds, kills it, and scans the
    child's stderr for non-WARNING output. Loops until
    cmd_params['duration'] seconds have elapsed; exits with status 2 on
    any stderr error. The db directory is removed only on success.
    """
    cmd_params = gen_cmd_params(args)
    dbname = get_dbname('blackbox')
    exit_time = time.time() + cmd_params['duration']

    print("Running blackbox-crash-test with \n"
          + "interval_between_crash=" + str(cmd_params['interval']) + "\n"
          + "total-duration=" + str(cmd_params['duration']) + "\n")

    while time.time() < exit_time:
        run_had_errors = False
        killtime = time.time() + cmd_params['interval']

        # NOTE: dict(a.items() + b.items()) is Python-2-only syntax.
        cmd = gen_cmd(dict(
            cmd_params.items() +
            {'db': dbname}.items()), unknown_args)

        child = subprocess.Popen(cmd, stderr=subprocess.PIPE)
        print("Running db_stress with pid=%d: %s\n\n"
              % (child.pid, ' '.join(cmd)))

        # Poll once a second until it is time to kill the child (or it
        # exits on its own, which only produces a warning).
        stop_early = False
        while time.time() < killtime:
            if child.poll() is not None:
                print("WARNING: db_stress ended before kill: exitcode=%d\n"
                      % child.returncode)
                stop_early = True
                break
            time.sleep(1)

        if not stop_early:
            if child.poll() is not None:
                print("WARNING: db_stress ended before kill: exitcode=%d\n"
                      % child.returncode)
            else:
                child.kill()
                print("KILLED %d\n" % child.pid)
                time.sleep(1)  # time to stabilize after a kill

        # Anything on stderr that is not a WARNING line counts as an error.
        while True:
            line = child.stderr.readline().strip()
            if line == '':
                break
            elif not line.startswith('WARNING'):
                run_had_errors = True
                print('stderr has error message:')
                print('***' + line + '***')

        if run_had_errors:
            sys.exit(2)

        time.sleep(1)  # time to stabilize before the next run

    # we need to clean up after ourselves -- only do this on test success
    shutil.rmtree(dbname, True)
|
2014-03-20 19:11:08 +01:00
|
|
|
|
2015-10-19 20:43:14 +02:00
|
|
|
|
|
|
|
# This python script runs db_stress multiple times. Some runs with
# kill_random_test that causes rocksdb to crash at various points in code.
def whitebox_crash_main(args, unknown_args):
    """Run db_stress repeatedly, cycling through four check modes.

    Check mode 0 passes kill_random_test so db_stress crashes itself at
    random kill points (rotating through three kill-point subsets);
    modes 1-3 are plain runs under universal, FIFO, and default
    compaction styles respectively. Exits non-zero when a run returns an
    unexpected status or its output contains 'error' or 'fail'.
    """
    cmd_params = gen_cmd_params(args)
    dbname = get_dbname('whitebox')

    cur_time = time.time()
    exit_time = cur_time + cmd_params['duration']
    # First half of the budget stays in kill testing (check mode 0).
    half_time = cur_time + cmd_params['duration'] / 2

    print("Running whitebox-crash-test with \n"
          + "total-duration=" + str(cmd_params['duration']) + "\n")

    total_check_mode = 4
    check_mode = 0
    kill_random_test = cmd_params['random_kill_odd']
    kill_mode = 0

    while time.time() < exit_time:
        if check_mode == 0:
            additional_opts = {
                # use large ops per thread since we will kill it anyway
                "ops_per_thread": 100 * cmd_params['ops_per_thread'],
            }
            # run with kill_random_test, with three modes.
            # Mode 0 covers all kill points. Mode 1 covers less kill points but
            # increases change of triggering them. Mode 2 covers even less
            # frequent kill points and further increases triggering change.
            if kill_mode == 0:
                additional_opts.update({
                    "kill_random_test": kill_random_test,
                })
            elif kill_mode == 1:
                # Lower odds => more frequent kills (Python 2 integer division).
                if cmd_params.get('disable_wal', 0) == 1:
                    my_kill_odd = kill_random_test / 50 + 1
                else:
                    my_kill_odd = kill_random_test / 10 + 1
                additional_opts.update({
                    "kill_random_test": my_kill_odd,
                    "kill_prefix_blacklist": "WritableFileWriter::Append,"
                    + "WritableFileWriter::WriteBuffered",
                })
            elif kill_mode == 2:
                # TODO: May need to adjust random odds if kill_random_test
                # is too small.
                additional_opts.update({
                    "kill_random_test": (kill_random_test / 5000 + 1),
                    "kill_prefix_blacklist": "WritableFileWriter::Append,"
                    "WritableFileWriter::WriteBuffered,"
                    "PosixMmapFile::Allocate,WritableFileWriter::Flush",
                })
            # Run kill mode 0, 1 and 2 by turn.
            kill_mode = (kill_mode + 1) % 3
        elif check_mode == 1:
            # normal run with universal compaction mode
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'],
                "compaction_style": 1,
            }
        elif check_mode == 2:
            # normal run with FIFO compaction mode
            # ops_per_thread is divided by 5 because FIFO compaction
            # style is quite a bit slower on reads with lot of files
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'] / 5,
                "compaction_style": 2,
            }
        else:
            # normal run
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'],
            }

        # NOTE: dict(a.items() + b.items()) is Python-2-only syntax.
        cmd = gen_cmd(dict(cmd_params.items() + additional_opts.items()
                           + {'db': dbname}.items()), unknown_args)

        print "Running:" + ' '.join(cmd) + "\n" # noqa: E999 T25377293 Grandfathered in

        popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
        stdoutdata, stderrdata = popen.communicate()
        retncode = popen.returncode
        msg = ("check_mode={0}, kill option={1}, exitcode={2}\n".format(
            check_mode, additional_opts['kill_random_test'], retncode))
        print msg
        print stdoutdata

        expected = False
        if additional_opts['kill_random_test'] is None and (retncode == 0):
            # we expect zero retncode if no kill option
            expected = True
        elif additional_opts['kill_random_test'] is not None and retncode <= 0:
            # When kill option is given, the test MIGHT kill itself.
            # If it does, negative retncode is expected. Otherwise 0.
            expected = True

        if not expected:
            print "TEST FAILED. See kill option and exit code above!!!\n"
            sys.exit(1)

        stdoutdata = stdoutdata.lower()
        errorcount = (stdoutdata.count('error') -
                      stdoutdata.count('got errors 0 times'))
        print "#times error occurred in output is " + str(errorcount) + "\n"

        if (errorcount > 0):
            print "TEST FAILED. Output has 'error'!!!\n"
            sys.exit(2)
        if (stdoutdata.find('fail') >= 0):
            print "TEST FAILED. Output has 'fail'!!!\n"
            sys.exit(2)

        # First half of the duration, keep doing kill test. For the next half,
        # try different modes.
        if time.time() > half_time:
            # we need to clean up after ourselves -- only do this on test
            # success
            shutil.rmtree(dbname, True)
            os.mkdir(dbname)
            cmd_params.pop('expected_values_path', None)
            check_mode = (check_mode + 1) % total_check_mode

        time.sleep(1)  # time to stabilize after a kill
|
|
|
|
|
|
|
|
|
|
|
|
def main():
    """Parse arguments and dispatch to the blackbox or whitebox driver."""
    parser = argparse.ArgumentParser(description="This script runs and kills \
db_stress multiple times")
    parser.add_argument("test_type", choices=["blackbox", "whitebox"])
    parser.add_argument("--simple", action="store_true")
    parser.add_argument("--cf_consistency", action='store_true')

    # Expose every known stress parameter as a CLI flag whose argparse type
    # matches its default value (callables are sampled once just to learn
    # the type). FIX: include cf_consistency_params so those options can be
    # overridden from the command line too; dict() de-duplicates keys that
    # appear in several of the default dicts.
    all_params = dict(default_params.items()
                      + blackbox_default_params.items()
                      + whitebox_default_params.items()
                      + simple_default_params.items()
                      + blackbox_simple_default_params.items()
                      + whitebox_simple_default_params.items()
                      + cf_consistency_params.items())

    for k, v in all_params.items():
        parser.add_argument("--" + k, type=type(v() if callable(v) else v))
    # unknown_args are passed directly to db_stress
    args, unknown_args = parser.parse_known_args()

    # Fail fast if the scratch-dir env var points somewhere that doesn't
    # exist; get_dbname() would otherwise fail later in a less obvious way.
    test_tmpdir = os.environ.get(_TEST_DIR_ENV_VAR)
    if test_tmpdir is not None and not os.path.isdir(test_tmpdir):
        print('%s env var is set to a non-existent directory: %s' %
              (_TEST_DIR_ENV_VAR, test_tmpdir))
        sys.exit(1)

    if args.test_type == 'blackbox':
        blackbox_crash_main(args, unknown_args)
    if args.test_type == 'whitebox':
        whitebox_crash_main(args, unknown_args)
|
2015-10-19 20:43:14 +02:00
|
|
|
|
|
|
|
# Entry point: only run when executed as a script, not when imported.
if __name__ == '__main__':
    main()
|