#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from __future__ import absolute_import, division, print_function, unicode_literals

import os
import sys
import time
import random
import re
import tempfile
import subprocess
import shutil
import argparse

# params overwrite priority:
#   for default:
#       default_params < {blackbox,whitebox}_default_params < args
#   for simple:
#       default_params < {blackbox,whitebox}_default_params <
#       simple_default_params <
#       {blackbox,whitebox}_simple_default_params < args
#   for cf_consistency:
#       default_params < {blackbox,whitebox}_default_params <
#       cf_consistency_params < args
#   for txn:
#       default_params < {blackbox,whitebox}_default_params < txn_params < args
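#
# For example: default_params below sets "reopen": 20 while
# cf_consistency_params sets "reopen": 0, so a --cf_consistency run uses
# reopen=0 unless --reopen is given on the command line, which outranks
# every dict (see gen_cmd_params()).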


default_params = {
    "acquire_snapshot_one_in": 10000,
    "backup_max_size": 100 * 1024 * 1024,
    # Consider a larger number once backups are considered more stable
    "backup_one_in": 100000,
    "block_size": 16384,
    "bloom_bits": lambda: random.choice([random.randint(0, 19),
                                         random.lognormvariate(2.3, 1.3)]),
    "cache_index_and_filter_blocks": lambda: random.randint(0, 1),
    "cache_size": 1048576,
    "checkpoint_one_in": 1000000,
    "compression_type": lambda: random.choice(
        ["none", "snappy", "zlib", "bzip2", "lz4", "lz4hc", "xpress", "zstd"]),
    "bottommost_compression_type": lambda:
        "disable" if random.randint(0, 1) == 0 else
        random.choice(
            ["none", "snappy", "zlib", "bzip2", "lz4", "lz4hc", "xpress",
             "zstd"]),
    "checksum_type": lambda: random.choice(["kCRC32c", "kxxHash", "kxxHash64"]),
    "compression_max_dict_bytes": lambda: 16384 * random.randint(0, 1),
    "compression_zstd_max_train_bytes": lambda: 65536 * random.randint(0, 1),
    # Disabled compression_parallel_threads as the feature is not stable
    # lambda: random.choice([1] * 9 + [4])
    "compression_parallel_threads": 1,
    "clear_column_family_one_in": 0,
    "compact_files_one_in": 1000000,
    "compact_range_one_in": 1000000,
    "delpercent": 4,
    "delrangepercent": 1,
    "destroy_db_initially": 0,
    "enable_pipelined_write": lambda: random.randint(0, 1),
    "enable_compaction_filter": lambda: random.choice([0, 0, 0, 1]),
    "expected_values_path": lambda: setup_expected_values_file(),
    "flush_one_in": 1000000,
    "file_checksum_impl": lambda: random.choice(["none", "crc32c", "xxh64", "big"]),
    "get_live_files_one_in": 1000000,
    # Note: the following two are intentionally disabled as the corresponding
    # APIs are not guaranteed to succeed.
    "get_sorted_wal_files_one_in": 0,
    "get_current_wal_file_one_in": 0,
    # Temporarily disable hash index
    "index_type": lambda: random.choice([0, 0, 0, 2, 2, 3]),
    "iterpercent": 10,
    "mark_for_compaction_one_file_in": lambda: 10 * random.randint(0, 1),
    "max_background_compactions": 20,
    "max_bytes_for_level_base": 10485760,
    "max_key": 100000000,
    "max_write_buffer_number": 3,
    "mmap_read": lambda: random.randint(0, 1),
    "nooverwritepercent": 1,
    "open_files": lambda: random.choice([-1, -1, 100, 500000]),
    "optimize_filters_for_memory": lambda: random.randint(0, 1),
    "partition_filters": lambda: random.randint(0, 1),
    "partition_pinning": lambda: random.randint(0, 3),
    "pause_background_one_in": 1000000,
    "prefixpercent": 5,
    "progress_reports": 0,
    "readpercent": 45,
    "recycle_log_file_num": lambda: random.randint(0, 1),
    "reopen": 20,
    "snapshot_hold_ops": 100000,
    "sst_file_manager_bytes_per_sec": lambda: random.choice([0, 104857600]),
    "sst_file_manager_bytes_per_truncate": lambda: random.choice([0, 1048576]),
    "long_running_snapshots": lambda: random.randint(0, 1),
    "subcompactions": lambda: random.randint(1, 4),
    "target_file_size_base": 2097152,
    "target_file_size_multiplier": 2,
    "top_level_index_pinning": lambda: random.randint(0, 3),
    "unpartitioned_pinning": lambda: random.randint(0, 3),
    "use_direct_reads": lambda: random.randint(0, 1),
    "use_direct_io_for_flush_and_compaction": lambda: random.randint(0, 1),
    "mock_direct_io": False,
    "use_full_merge_v1": lambda: random.randint(0, 1),
    "use_merge": lambda: random.randint(0, 1),
    "verify_checksum": 1,
    "write_buffer_size": 4 * 1024 * 1024,
    "writepercent": 35,
    "format_version": lambda: random.choice([2, 3, 4, 5, 5]),
    "index_block_restart_interval": lambda: random.choice(range(1, 16)),
    "use_multiget": lambda: random.randint(0, 1),
    "periodic_compaction_seconds":
        lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
    "compaction_ttl": lambda: random.choice([0, 0, 1, 2, 10, 100, 1000]),
    # Test a small max_manifest_file_size with low probability, as most of the
    # time we want manifest history to be preserved to help debugging
    "max_manifest_file_size": lambda: random.choice(
        [t * 16384 if t < 3 else 1024 * 1024 * 1024 for t in range(1, 30)]),
    # Sync mode might make test runs slower, so run it with low probability
    "sync": lambda: random.choice(
        [1 if t == 0 else 0 for t in range(0, 20)]),
    # Disable compaction_readahead_size because the test is not passing.
    # "compaction_readahead_size": lambda: random.choice(
    #     [0, 0, 1024 * 1024]),
    "db_write_buffer_size": lambda: random.choice(
        [0, 0, 0, 1024 * 1024, 8 * 1024 * 1024, 128 * 1024 * 1024]),
    "avoid_unnecessary_blocking_io": random.randint(0, 1),
    "write_dbid_to_manifest": random.randint(0, 1),
    "avoid_flush_during_recovery": random.choice(
        [1 if t == 0 else 0 for t in range(0, 8)]),
    "max_write_batch_group_size_bytes": lambda: random.choice(
        [16, 64, 1024 * 1024, 16 * 1024 * 1024]),
    "level_compaction_dynamic_level_bytes": True,
    "verify_checksum_one_in": 1000000,
    "verify_db_one_in": 100000,
    "continuous_verification_interval": 0,
    "max_key_len": 3,
    "key_len_percent_dist": "1,30,69",
    "read_fault_one_in": lambda: random.choice([0, 1000]),
    "sync_fault_injection": False,
    "get_property_one_in": 1000000,
    "paranoid_file_checks": lambda: random.choice([0, 1, 1, 1]),
    "max_write_buffer_size_to_maintain": lambda: random.choice(
        [0, 1024 * 1024, 2 * 1024 * 1024, 4 * 1024 * 1024, 8 * 1024 * 1024]),
}
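
# Note: values above that are callables (lambdas) are re-sampled for every
# db_stress invocation by finalize_and_sanitize(), while plain values such as
# "avoid_unnecessary_blocking_io" are drawn once at import time and stay fixed
# for the whole crash test.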


_TEST_DIR_ENV_VAR = 'TEST_TMPDIR'
_DEBUG_LEVEL_ENV_VAR = 'DEBUG_LEVEL'


def is_release_mode():
    return os.environ.get(_DEBUG_LEVEL_ENV_VAR) == "0"


def get_dbname(test_name):
    test_dir_name = "rocksdb_crashtest_" + test_name
    test_tmpdir = os.environ.get(_TEST_DIR_ENV_VAR)
    if test_tmpdir is None or test_tmpdir == "":
        dbname = tempfile.mkdtemp(prefix=test_dir_name)
    else:
        dbname = test_tmpdir + "/" + test_dir_name
        shutil.rmtree(dbname, True)
        os.mkdir(dbname)
    return dbname
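
# Illustrative example (actual paths vary): with TEST_TMPDIR unset,
# get_dbname("blackbox") creates a fresh directory such as
# /tmp/rocksdb_crashtest_blackboxXXXXXX via tempfile.mkdtemp(); with
# TEST_TMPDIR=/dev/shm it recreates /dev/shm/rocksdb_crashtest_blackbox
# from scratch.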


expected_values_file = None


def setup_expected_values_file():
    global expected_values_file
    if expected_values_file is not None:
        return expected_values_file
    expected_file_name = "rocksdb_crashtest_" + "expected"
    test_tmpdir = os.environ.get(_TEST_DIR_ENV_VAR)
    if test_tmpdir is None or test_tmpdir == "":
        expected_values_file = tempfile.NamedTemporaryFile(
            prefix=expected_file_name, delete=False).name
    else:
        # if tmpdir is specified, store the expected_values_file in the same dir
        expected_values_file = test_tmpdir + "/" + expected_file_name
        if os.path.exists(expected_values_file):
            os.remove(expected_values_file)
        open(expected_values_file, 'a').close()
    return expected_values_file


def is_direct_io_supported(dbname):
    with tempfile.NamedTemporaryFile(dir=dbname) as f:
        try:
            os.open(f.name, os.O_DIRECT)
        except BaseException:
            return False
        return True
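
# Note: os.O_DIRECT only exists on some platforms (e.g. Linux); where it is
# missing, the attribute lookup itself raises and is swallowed by the broad
# `except BaseException` above, so the probe reports "unsupported" instead of
# crashing.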


blackbox_default_params = {
    # total time for this script to test db_stress
    "duration": 6000,
    # time for one db_stress instance to run
    "interval": 120,
    # since we will be killing anyway, use a large value for ops_per_thread
    "ops_per_thread": 100000000,
    "set_options_one_in": 10000,
    "test_batches_snapshots": 1,
}


whitebox_default_params = {
    "duration": 10000,
    "log2_keys_per_lock": 10,
    "ops_per_thread": 200000,
    "random_kill_odd": 888887,
    "test_batches_snapshots": lambda: random.randint(0, 1),
}


simple_default_params = {
    "allow_concurrent_memtable_write": lambda: random.randint(0, 1),
    "column_families": 1,
    "max_background_compactions": 1,
    "max_bytes_for_level_base": 67108864,
    "memtablerep": "skip_list",
    "prefixpercent": 0,
    "readpercent": 50,
    "prefix_size": -1,
    "target_file_size_base": 16777216,
    "target_file_size_multiplier": 1,
    "test_batches_snapshots": 0,
    "write_buffer_size": 32 * 1024 * 1024,
    "level_compaction_dynamic_level_bytes": False,
    "paranoid_file_checks": lambda: random.choice([0, 1, 1, 1]),
}


blackbox_simple_default_params = {
    "open_files": -1,
    "set_options_one_in": 0,
}


whitebox_simple_default_params = {}


cf_consistency_params = {
    "disable_wal": lambda: random.randint(0, 1),
    "reopen": 0,
    "test_cf_consistency": 1,
    # use a small value for write_buffer_size so that RocksDB triggers flush
    # more frequently
    "write_buffer_size": 1024 * 1024,
    "enable_pipelined_write": lambda: random.randint(0, 1),
    # Snapshots are used heavily in this test mode, while they are incompatible
    # with compaction filter.
    "enable_compaction_filter": 0,
}


txn_params = {
    "use_txn": 1,
    # Avoid a lambda so that the write policy is chosen once for the entire test
    "txn_write_policy": random.randint(0, 2),
    "unordered_write": random.randint(0, 1),
    "disable_wal": 0,
    # OpenReadOnly after checkpoint is not currently compatible with WritePrepared txns
    "checkpoint_one_in": 0,
    # pipelined write is not currently compatible with WritePrepared txns
    "enable_pipelined_write": 0,
}


best_efforts_recovery_params = {
    "best_efforts_recovery": True,
    "skip_verifydb": True,
    "verify_db_one_in": 0,
    "continuous_verification_interval": 0,
}


def finalize_and_sanitize(src_params):
    dest_params = dict([(k, v() if callable(v) else v)
                        for (k, v) in src_params.items()])
    if dest_params.get("compression_type") != "zstd" or \
            dest_params.get("compression_max_dict_bytes") == 0:
        dest_params["compression_zstd_max_train_bytes"] = 0
    if dest_params.get("allow_concurrent_memtable_write", 1) == 1:
        dest_params["memtablerep"] = "skip_list"
    if dest_params["mmap_read"] == 1:
        dest_params["use_direct_io_for_flush_and_compaction"] = 0
        dest_params["use_direct_reads"] = 0
    if (dest_params["use_direct_io_for_flush_and_compaction"] == 1
            or dest_params["use_direct_reads"] == 1) and \
            not is_direct_io_supported(dest_params["db"]):
        if is_release_mode():
            print("{} does not support direct IO. Disabling use_direct_reads and "
                  "use_direct_io_for_flush_and_compaction.\n".format(
                      dest_params["db"]))
            dest_params["use_direct_reads"] = 0
            dest_params["use_direct_io_for_flush_and_compaction"] = 0
        else:
            dest_params["mock_direct_io"] = True

    # DeleteRange is not currently compatible with Txns
    if dest_params.get("test_batches_snapshots") == 1 or \
            dest_params.get("use_txn") == 1:
        dest_params["delpercent"] += dest_params["delrangepercent"]
        dest_params["delrangepercent"] = 0
    # Only under WritePrepared txns does unordered_write provide the same
    # guarantees as vanilla RocksDB
    if dest_params.get("unordered_write", 0) == 1:
        dest_params["txn_write_policy"] = 1
        dest_params["allow_concurrent_memtable_write"] = 1
    if dest_params.get("disable_wal", 0) == 1:
        dest_params["atomic_flush"] = 1
        dest_params["sync"] = 0
    if dest_params.get("open_files", 1) != -1:
        # Compaction TTL and periodic compactions are only compatible
        # with open_files = -1
        dest_params["compaction_ttl"] = 0
        dest_params["periodic_compaction_seconds"] = 0
    if dest_params.get("compaction_style", 0) == 2:
        # Disable compaction TTL in FIFO compaction, because right
        # now assertion failures are triggered.
        dest_params["compaction_ttl"] = 0
        dest_params["periodic_compaction_seconds"] = 0
    if dest_params["partition_filters"] == 1:
        if dest_params["index_type"] != 2:
            dest_params["partition_filters"] = 0
        else:
            dest_params["use_block_based_filter"] = 0
    if dest_params.get("atomic_flush", 0) == 1:
        # disable pipelined write when atomic flush is used.
        dest_params["enable_pipelined_write"] = 0
    if dest_params.get("sst_file_manager_bytes_per_sec", 0) == 0:
        dest_params["sst_file_manager_bytes_per_truncate"] = 0
    if dest_params.get("enable_compaction_filter", 0) == 1:
        # Compaction filter is incompatible with snapshots. Need to avoid taking
        # snapshots, as well as avoid operations that use snapshots for
        # verification.
        dest_params["acquire_snapshot_one_in"] = 0
        dest_params["compact_range_one_in"] = 0
        # Give the iterator ops away to reads.
        dest_params["readpercent"] += dest_params.get("iterpercent", 10)
        dest_params["iterpercent"] = 0
        dest_params["test_batches_snapshots"] = 0
    return dest_params
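
# For illustration: because the dict comprehension above calls each callable
# value, two consecutive db_stress runs driven by the same params dict can
# draw different values (say compression_type="zstd" in one run and "lz4" in
# the next), and the dependent knobs are sanitized to match each draw.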


def gen_cmd_params(args):
    params = {}

    params.update(default_params)
    if args.test_type == 'blackbox':
        params.update(blackbox_default_params)
    if args.test_type == 'whitebox':
        params.update(whitebox_default_params)
    if args.simple:
        params.update(simple_default_params)
        if args.test_type == 'blackbox':
            params.update(blackbox_simple_default_params)
        if args.test_type == 'whitebox':
            params.update(whitebox_simple_default_params)
    if args.cf_consistency:
        params.update(cf_consistency_params)
    if args.txn:
        params.update(txn_params)
    if args.test_best_efforts_recovery:
        params.update(best_efforts_recovery_params)

    for k, v in vars(args).items():
        if v is not None:
            params[k] = v
    return params


def gen_cmd(params, unknown_params):
    finalized_params = finalize_and_sanitize(params)
    cmd = ['./db_stress'] + [
        '--{0}={1}'.format(k, v)
        for k, v in [(k, finalized_params[k]) for k in sorted(finalized_params)]
        if k not in set(['test_type', 'simple', 'duration', 'interval',
                         'random_kill_odd', 'cf_consistency', 'txn',
                         'test_best_efforts_recovery'])
        and v is not None] + unknown_params
    return cmd
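
# For illustration (flag values are randomized per run), a generated command
# looks roughly like:
#   ./db_stress --acquire_snapshot_one_in=10000 --block_size=16384 \
#       ... --db=/path/to/dbdir
# with script-only keys such as 'duration' and 'interval' filtered out above.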


# Inject inconsistencies into the db directory.
def inject_inconsistencies_to_db_dir(dir_path):
    files = os.listdir(dir_path)
    file_num_rgx = re.compile(r'(?P<number>[0-9]{6})')
    largest_fnum = 0
    for f in files:
        m = file_num_rgx.search(f)
        if m and not f.startswith('LOG'):
            largest_fnum = max(largest_fnum, int(m.group('number')))

    candidates = [
        f for f in files if re.search(r'[0-9]+\.sst', f)
    ]
    deleted = 0
    corrupted = 0
    for f in candidates:
        rnd = random.randint(0, 99)
        f_path = os.path.join(dir_path, f)
        if rnd < 10:
            os.unlink(f_path)
            deleted += 1
        elif 10 <= rnd < 30:
            with open(f_path, "a") as fd:
                fd.write('12345678')
            corrupted += 1
    print('Removed %d table files' % deleted)
    print('Corrupted %d table files' % corrupted)

    # Add corrupted MANIFEST and SST
    for num in range(largest_fnum + 1, largest_fnum + 10):
        rnd = random.randint(0, 1)
        fname = ("MANIFEST-%06d" % num) if rnd == 0 else ("%06d.sst" % num)
        print('Write %s' % fname)
        with open(os.path.join(dir_path, fname), "w") as fd:
            fd.write("garbage")
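
# Net effect: roughly 10% of existing table files are deleted, another ~20%
# get eight junk bytes appended, and nine bogus MANIFEST/SST files numbered
# past the current maximum are planted, all of which best-efforts recovery is
# expected to tolerate.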


# This script runs and kills db_stress multiple times. It checks consistency
# in case of unsafe crashes in RocksDB.
def blackbox_crash_main(args, unknown_args):
    cmd_params = gen_cmd_params(args)
    dbname = get_dbname('blackbox')
    exit_time = time.time() + cmd_params['duration']

    print("Running blackbox-crash-test with \n"
          + "interval_between_crash=" + str(cmd_params['interval']) + "\n"
          + "total-duration=" + str(cmd_params['duration']) + "\n")

    while time.time() < exit_time:
        run_had_errors = False
        killtime = time.time() + cmd_params['interval']

        cmd = gen_cmd(dict(
            list(cmd_params.items())
            + list({'db': dbname}.items())), unknown_args)

        child = subprocess.Popen(cmd, stderr=subprocess.PIPE)
        print("Running db_stress with pid=%d: %s\n\n"
              % (child.pid, ' '.join(cmd)))

        stop_early = False
        while time.time() < killtime:
            if child.poll() is not None:
                print("WARNING: db_stress ended before kill: exitcode=%d\n"
                      % child.returncode)
                stop_early = True
                break
            time.sleep(1)

        if not stop_early:
            if child.poll() is not None:
                print("WARNING: db_stress ended before kill: exitcode=%d\n"
                      % child.returncode)
            else:
                child.kill()
                print("KILLED %d\n" % child.pid)
                time.sleep(1)  # time to stabilize after a kill

        while True:
            line = child.stderr.readline().strip().decode('utf-8')
            if line == '':
                break
            elif not line.startswith('WARNING'):
                run_had_errors = True
                print('stderr has error message:')
                print('***' + line + '***')

        if run_had_errors:
            sys.exit(2)

        time.sleep(1)  # time to stabilize before the next run

        if args.test_best_efforts_recovery:
            inject_inconsistencies_to_db_dir(dbname)

        time.sleep(1)  # time to stabilize before the next run

    # we need to clean up after ourselves -- only do this on test success
    shutil.rmtree(dbname, True)
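
# Note on the loop above: a clean kill is the expected outcome, so only
# non-WARNING output on db_stress's stderr fails the blackbox test; the
# process being killed mid-run does not.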


# This python script runs db_stress multiple times. Some runs use
# kill_random_test, which causes rocksdb to crash at various points in code.
def whitebox_crash_main(args, unknown_args):
    cmd_params = gen_cmd_params(args)
    dbname = get_dbname('whitebox')

    cur_time = time.time()
    exit_time = cur_time + cmd_params['duration']
    half_time = cur_time + cmd_params['duration'] // 2

    print("Running whitebox-crash-test with \n"
          + "total-duration=" + str(cmd_params['duration']) + "\n")

    total_check_mode = 4
    check_mode = 0
    kill_random_test = cmd_params['random_kill_odd']
    kill_mode = 0

    while time.time() < exit_time:
        if check_mode == 0:
            additional_opts = {
                # use large ops per thread since we will kill it anyway
                "ops_per_thread": 100 * cmd_params['ops_per_thread'],
            }
            # run with kill_random_test, with three modes.
            # Mode 0 covers all kill points. Mode 1 covers fewer kill points
            # but increases the chance of triggering them. Mode 2 covers even
            # less frequent kill points and further increases the triggering
            # chance.
            if kill_mode == 0:
                additional_opts.update({
                    "kill_random_test": kill_random_test,
                })
            elif kill_mode == 1:
                if cmd_params.get('disable_wal', 0) == 1:
                    my_kill_odd = kill_random_test // 50 + 1
                else:
                    my_kill_odd = kill_random_test // 10 + 1
                additional_opts.update({
                    "kill_random_test": my_kill_odd,
                    "kill_exclude_prefixes": "WritableFileWriter::Append,"
                    + "WritableFileWriter::WriteBuffered",
                })
            elif kill_mode == 2:
                # TODO: May need to adjust random odds if kill_random_test
                # is too small.
                additional_opts.update({
                    "kill_random_test": (kill_random_test // 5000 + 1),
                    "kill_exclude_prefixes": "WritableFileWriter::Append,"
                    "WritableFileWriter::WriteBuffered,"
                    "PosixMmapFile::Allocate,WritableFileWriter::Flush",
                })
            # Run kill modes 0, 1 and 2 in turn.
            kill_mode = (kill_mode + 1) % 3
        elif check_mode == 1:
            # normal run with universal compaction mode
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'],
                "compaction_style": 1,
            }
            # Single level universal has a lot of special logic. Ensure we cover
            # it sometimes.
            if random.randint(0, 1) == 1:
                additional_opts.update({
                    "num_levels": 1,
                })
        elif check_mode == 2:
            # normal run with FIFO compaction mode
            # ops_per_thread is divided by 5 because FIFO compaction
            # style is quite a bit slower on reads with a lot of files
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'] // 5,
                "compaction_style": 2,
            }
        else:
            # normal run
            additional_opts = {
                "kill_random_test": None,
                "ops_per_thread": cmd_params['ops_per_thread'],
            }

        cmd = gen_cmd(dict(list(cmd_params.items())
                           + list(additional_opts.items())
                           + list({'db': dbname}.items())), unknown_args)

        print("Running:" + ' '.join(cmd) + "\n")  # noqa: E999 T25377293 Grandfathered in

        popen = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                 stderr=subprocess.STDOUT)
        stdoutdata, stderrdata = popen.communicate()
        if stdoutdata:
            stdoutdata = stdoutdata.decode('utf-8')
        if stderrdata:
            stderrdata = stderrdata.decode('utf-8')
        retncode = popen.returncode
        msg = ("check_mode={0}, kill option={1}, exitcode={2}\n".format(
            check_mode, additional_opts['kill_random_test'], retncode))
        print(msg)
        print(stdoutdata)

        expected = False
        if additional_opts['kill_random_test'] is None and (retncode == 0):
            # we expect zero retncode if no kill option
            expected = True
        elif additional_opts['kill_random_test'] is not None and retncode <= 0:
            # When kill option is given, the test MIGHT kill itself.
            # If it does, a negative retncode is expected. Otherwise 0.
            expected = True

        if not expected:
            print("TEST FAILED. See kill option and exit code above!!!\n")
            sys.exit(1)

        stdoutdata = stdoutdata.lower()
        errorcount = (stdoutdata.count('error') -
                      stdoutdata.count('got errors 0 times'))
        print("#times error occurred in output is " + str(errorcount) + "\n")

        if (errorcount > 0):
            print("TEST FAILED. Output has 'error'!!!\n")
            sys.exit(2)
        if (stdoutdata.find('fail') >= 0):
            print("TEST FAILED. Output has 'fail'!!!\n")
            sys.exit(2)

        # First half of the duration, keep doing kill test. For the next half,
        # try different modes.
        if time.time() > half_time:
            # we need to clean up after ourselves -- only do this on test
            # success
            shutil.rmtree(dbname, True)
            os.mkdir(dbname)
            cmd_params.pop('expected_values_path', None)
            check_mode = (check_mode + 1) % total_check_mode

        time.sleep(1)  # time to stabilize after a kill
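
# Whitebox scheduling recap: for the first half of the total duration every
# iteration stays in check_mode 0, rotating kill modes 0-2; past half_time
# each iteration wipes the db and advances check_mode, also exercising
# universal, FIFO, and plain (no-kill) runs.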


def main():
    parser = argparse.ArgumentParser(description="This script runs and kills \
        db_stress multiple times")
    parser.add_argument("test_type", choices=["blackbox", "whitebox"])
    parser.add_argument("--simple", action="store_true")
    parser.add_argument("--cf_consistency", action='store_true')
    parser.add_argument("--txn", action='store_true')
    parser.add_argument("--test_best_efforts_recovery", action='store_true')

    all_params = dict(list(default_params.items())
                      + list(blackbox_default_params.items())
                      + list(whitebox_default_params.items())
                      + list(simple_default_params.items())
                      + list(blackbox_simple_default_params.items())
                      + list(whitebox_simple_default_params.items()))

    for k, v in all_params.items():
        parser.add_argument("--" + k, type=type(v() if callable(v) else v))
    # unknown_args are passed directly to db_stress
    args, unknown_args = parser.parse_known_args()

    test_tmpdir = os.environ.get(_TEST_DIR_ENV_VAR)
    if test_tmpdir is not None and not os.path.isdir(test_tmpdir):
        print('%s env var is set to a non-existent directory: %s' %
              (_TEST_DIR_ENV_VAR, test_tmpdir))
        sys.exit(1)

    if args.test_type == 'blackbox':
        blackbox_crash_main(args, unknown_args)
    if args.test_type == 'whitebox':
        whitebox_crash_main(args, unknown_args)
    # Only delete the `expected_values_file` if the test passes; guard against
    # it never having been set up (e.g. when --expected_values_path is given).
    if expected_values_file is not None and os.path.exists(expected_values_file):
        os.remove(expected_values_file)


if __name__ == '__main__':
    main()
|