Add an option for parallel compression in db_stress (#6722)
Summary: This commit adds a `compression_parallel_threads` option to db_stress. It also fixes the naming of the parallel compression option in db_bench to keep it aligned with the others.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/6722

Reviewed By: pdillinger

Differential Revision: D21091385

fbshipit-source-id: c9ba8c4e5cc327ff9e6094a6dc6a15fcff70f100
parent 8c694025e9
commit e619a20e93
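Both tools feed the flag into RocksDB's `CompressionOptions::parallel_threads`. As a rough illustration of the underlying API (not part of this change), a minimal standalone sketch might look like the following; the database path and the choice of ZSTD are arbitrary assumptions for the example:

    #include <cassert>

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;

      // Parallel compression applies to whichever algorithm is configured here.
      options.compression = rocksdb::kZSTD;

      // 1 (the default) compresses blocks on the flush/compaction thread itself;
      // values > 1 compress SST blocks with a pool of worker threads.
      options.compression_opts.parallel_threads = 4;

      rocksdb::DB* db = nullptr;
      rocksdb::Status s =
          rocksdb::DB::Open(options, "/tmp/parallel_compression_demo", &db);
      assert(s.ok());

      s = db->Put(rocksdb::WriteOptions(), "key", "value");
      assert(s.ok());

      delete db;
      return 0;
    }

With this patch applied, the same setting can be exercised from the command line, e.g. `db_stress --compression_parallel_threads=4` or `db_bench --compression_parallel_threads=4`.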
@@ -201,6 +201,7 @@ DECLARE_string(compression_type);
 DECLARE_string(bottommost_compression_type);
 DECLARE_int32(compression_max_dict_bytes);
 DECLARE_int32(compression_zstd_max_train_bytes);
+DECLARE_int32(compression_parallel_threads);
 DECLARE_string(checksum_type);
 DECLARE_string(hdfs);
 DECLARE_string(env_uri);
@@ -584,6 +584,9 @@ DEFINE_int32(compression_zstd_max_train_bytes, 0,
              "Maximum size of training data passed to zstd's dictionary "
              "trainer.");
 
+DEFINE_int32(compression_parallel_threads, 1,
+             "Number of threads for parallel compression.");
+
 DEFINE_string(bottommost_compression_type, "disable",
               "Algorithm to use to compress bottommost level of the database. "
               "\"disable\" means disabling the feature");
@@ -1803,6 +1803,8 @@ void StressTest::Open() {
   options_.compression_opts.max_dict_bytes = FLAGS_compression_max_dict_bytes;
   options_.compression_opts.zstd_max_train_bytes =
       FLAGS_compression_zstd_max_train_bytes;
+  options_.compression_opts.parallel_threads =
+      FLAGS_compression_parallel_threads;
   options_.create_if_missing = true;
   options_.max_manifest_file_size = FLAGS_max_manifest_file_size;
   options_.inplace_update_support = FLAGS_in_place_update;
@@ -926,8 +926,8 @@ DEFINE_int32(min_level_to_compress, -1, "If non-negative, compression starts"
             " not compressed. Otherwise, apply compression_type to "
             "all levels.");
 
-DEFINE_int32(compression_threads, 1,
-             "Number of concurrent compression threads to run.");
+DEFINE_int32(compression_parallel_threads, 1,
+             "Number of threads for parallel compression.");
 
 static bool ValidateTableCacheNumshardbits(const char* flagname,
                                            int32_t value) {
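(This renames db_bench's existing `--compression_threads` flag to `--compression_parallel_threads`, so any benchmark scripts that still pass the old name will need to be updated.)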
@@ -4030,7 +4030,8 @@ class Benchmark {
     options.compression_opts.max_dict_bytes = FLAGS_compression_max_dict_bytes;
     options.compression_opts.zstd_max_train_bytes =
         FLAGS_compression_zstd_max_train_bytes;
-    options.compression_opts.parallel_threads = FLAGS_compression_threads;
+    options.compression_opts.parallel_threads =
+        FLAGS_compression_parallel_threads;
     // If this is a block based table, set some related options
     if (options.table_factory->Name() == BlockBasedTableFactory::kName &&
         options.table_factory->GetOptions() != nullptr) {
@@ -44,6 +44,7 @@ default_params = {
     "checksum_type" : lambda: random.choice(["kCRC32c", "kxxHash", "kxxHash64"]),
     "compression_max_dict_bytes": lambda: 16384 * random.randint(0, 1),
     "compression_zstd_max_train_bytes": lambda: 65536 * random.randint(0, 1),
+    "compression_parallel_threads": lambda: random.choice([1] * 9 + [4]),
     "clear_column_family_one_in": 0,
     "compact_files_one_in": 1000000,
     "compact_range_one_in": 1000000,
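The crash-test default `random.choice([1] * 9 + [4])` picks 1 roughly 90% of the time and 4 roughly 10% of the time, so most runs keep single-threaded compression while a minority exercise the parallel path.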