rocksdb/tools/advisor/advisor/rules.ini
# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).
#
# FORMAT: very similar in syntax to the RocksDB OPTIONS file
# (refer to rocksdb/examples/rocksdb_option_file_example.ini)
#
# The Rules INI file is made up of multiple sections and each section is made
# up of multiple key-value pairs. The recognized section types are:
# Rule, Suggestion, Condition. Each section must have a name specified in ""
# in the section header. This name acts as an identifier in that section
# type's namespace. A section header looks like:
# [<section_type> "<section_name_identifier>"]
#
# There should be at least one Rule section in the file with its corresponding
# Condition and Suggestion sections. A Rule is triggered only when all of its
# conditions are triggered. The order in which a Rule's conditions and
# suggestions are specified has no significance.
#
# A Condition must be associated with a data source specified by the parameter
# 'source' and this must be the first parameter specified for the Condition.
# A Condition can be associated with one or more Rules.
#
# A Suggestion is an advised change to a RocksDB option, intended to improve
# the performance of the database in some way. Every Suggestion can be a part
# of one or more Rules.
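#
# For illustration, a minimal sketch of how the three section types tie
# together (the "example-*" names are placeholders; the sections below are
# the real rules):
#   [Rule "example-rule"]
#   conditions=example-cond
#   suggestions=example-sugg
#   [Condition "example-cond"]
#   source=LOG
#   regex=<pattern searched for in the LOG file>
#   [Suggestion "example-sugg"]
#   option=<DBOptions|CFOptions>.<option_name>
#   action=<increase|decrease|set>
#   suggested_values=<optional concrete value(s) to try>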
[Rule "stall-too-many-memtables"]
suggestions=inc-bg-flush:inc-write-buffer
conditions=stall-too-many-memtables
[Condition "stall-too-many-memtables"]
source=LOG
regex=Stopping writes because we have \d+ immutable memtables \(waiting for flush\), max_write_buffer_number is set to \d+
[Rule "stall-too-many-L0"]
suggestions=inc-max-subcompactions:inc-max-bg-compactions:inc-write-buffer-size:dec-max-bytes-for-level-base:inc-l0-slowdown-writes-trigger
conditions=stall-too-many-L0
[Condition "stall-too-many-L0"]
source=LOG
regex=Stalling writes because we have \d+ level-0 files
[Rule "stop-too-many-L0"]
suggestions=inc-max-bg-compactions:inc-write-buffer-size:inc-l0-stop-writes-trigger
conditions=stop-too-many-L0
[Condition "stop-too-many-L0"]
source=LOG
regex=Stopping writes because we have \d+ level-0 files
[Rule "stall-too-many-compaction-bytes"]
suggestions=inc-max-bg-compactions:inc-write-buffer-size:inc-hard-pending-compaction-bytes-limit:inc-soft-pending-compaction-bytes-limit
conditions=stall-too-many-compaction-bytes
[Condition "stall-too-many-compaction-bytes"]
source=LOG
regex=Stalling writes because of estimated pending compaction bytes \d+
[Suggestion "inc-bg-flush"]
option=DBOptions.max_background_flushes
action=increase
suggested_values=2
[Suggestion "inc-write-buffer"]
option=CFOptions.max_write_buffer_number
action=increase
[Suggestion "inc-max-subcompactions"]
option=DBOptions.max_subcompactions
action=increase
[Suggestion "inc-max-bg-compactions"]
option=DBOptions.max_background_compactions
action=increase
suggested_values=2
[Suggestion "inc-write-buffer-size"]
option=CFOptions.write_buffer_size
action=increase
[Suggestion "dec-max-bytes-for-level-base"]
option=CFOptions.max_bytes_for_level_base
action=decrease
[Suggestion "inc-l0-slowdown-writes-trigger"]
option=CFOptions.level0_slowdown_writes_trigger
action=increase
[Suggestion "inc-l0-stop-writes-trigger"]
option=CFOptions.level0_stop_writes_trigger
action=increase
[Suggestion "inc-hard-pending-compaction-bytes-limit"]
option=CFOptions.hard_pending_compaction_bytes_limit
action=increase
[Suggestion "inc-soft-pending-compaction-bytes-limit"]
option=CFOptions.soft_pending_compaction_bytes_limit
action=increase
[Rule "level0-level1-ratio"]
conditions=level0-level1-ratio
suggestions=inc-base-max-bytes
[Condition "level0-level1-ratio"]
source=OPTIONS
options=CFOptions.level0_file_num_compaction_trigger:CFOptions.write_buffer_size:CFOptions.max_bytes_for_level_base
evaluate=int(options[0])*int(options[1])-int(options[2])>=1 # should evaluate to a boolean, condition triggered if evaluates to true
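# i.e. level0_file_num_compaction_trigger * write_buffer_size (the approximate
# size of L0 when compaction triggers) already exceeds max_bytes_for_level_base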
[Suggestion "inc-base-max-bytes"]
option=CFOptions.max_bytes_for_level_base
action=increase
[Rules "tuning-iostat-burst"]
conditions=large-db-get-p99
suggestions=bytes-per-sync-non0:wal-bytes-per-sync-non0:set-rate-limiter
#overlap_time_period=10m
[Condition "write-burst"]
source=TIME_SERIES
keys=dyno.flash_write_bytes_per_sec
behavior=bursty
window_sec=300 # the smaller this window, the more sensitive the condition is to changes in the time series, so rate_threshold should be larger; at window_sec=60 this is equivalent to diff(%)
rate_threshold=20
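# bursty: triggered when the metric's rate of change over a window of
# window_sec seconds exceeds rate_threshold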
[Condition "large-p99-read-latency"]
source=TIME_SERIES
keys=[]rocksdb.read.block.get.micros.p99
behavior=bursty
window_sec=300
rate_threshold=10
[Condition "large-db-get-p99"]
source=TIME_SERIES
keys=[]rocksdb.db.get.micros.p50:[]rocksdb.db.get.micros.p99
behavior=evaluate_expression
evaluate=(keys[1]/keys[0])>5
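# triggered when the p99 Get latency is more than 5x the p50 (median),
# i.e. Get latency has a heavy tail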
[Suggestion "bytes-per-sync-non0"]
option=DBOptions.bytes_per_sync
action=set
suggested_values=1048576
[Suggestion "wal-bytes-per-sync-non0"]
option=DBOptions.wal_bytes_per_sync
action=set
suggested_values=1048576
[Suggestion "set-rate-limiter"]
option=rate_limiter_bytes_per_sec
action=set
suggested_values=1024000
[Rule "bloom-filter-percent-useful"]
conditions=bloom-filter-percent-useful
suggestions=inc-bloom-bits-per-key
[Condition "bloom-filter-percent-useful"]
source=TIME_SERIES
keys=[]rocksdb.bloom.filter.useful.count:[]rocksdb.bloom.filter.full.positive.count:[]rocksdb.bloom.filter.full.true.positive.count
behavior=evaluate_expression
evaluate=((keys[0]+keys[2])/(keys[0]+keys[1]))<0.9 # should evaluate to a boolean
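# i.e. triggered when fewer than 90% of bloom filter outcomes were useful
# (negatives avoided plus true positives, out of negatives avoided plus all
# full-filter positives)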
aggregation_op=latest
[Rule "bloom-not-enabled"]
conditions=bloom-not-enabled
suggestions=inc-bloom-bits-per-key
[Condition "bloom-not-enabled"]
source=TIME_SERIES
keys=[]rocksdb.bloom.filter.useful.count:[]rocksdb.bloom.filter.full.positive.count:[]rocksdb.bloom.filter.full.true.positive.count
behavior=evaluate_expression
evaluate=keys[0]+keys[1]+keys[2]==0
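# i.e. all bloom filter counters are zero, implying bloom filters are not in use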
aggregation_op=avg
[Suggestion "inc-bloom-bits-per-key"]
option=bloom_bits
action=increase
suggested_values=2
[Rule "small-l0-files"]
conditions=small-l0-files
suggestions=dec-max-bytes-for-level-base:inc-write-buffer-size
[Condition "small-l0-files"]
source=OPTIONS
options=CFOptions.max_bytes_for_level_base:CFOptions.level0_file_num_compaction_trigger:CFOptions.write_buffer_size
evaluate=int(options[0])>(10*int(options[1])*int(options[2]))
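# i.e. max_bytes_for_level_base is more than 10x the approximate size of L0 at
# the compaction trigger, so L0 files are small relative to the L1 target size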
[Rule "decompress-time-long"]
conditions=decompress-time-long
suggestions=dec-block-size:inc-block-cache-size:faster-compression-type
[Condition "decompress-time-long"]
source=TIME_SERIES
keys=block_decompress_time:block_read_time:block_checksum_time
behavior=evaluate_expression
evaluate=(keys[0]/(keys[0]+keys[1]+keys[2]))>0.3
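# i.e. decompression accounts for more than 30% of total block read time
# (read + checksum + decompress)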
[Suggestion "dec-block-size"]
option=TableOptions.BlockBasedTable.block_size
action=decrease
[Suggestion "inc-block-cache-size"]
option=cache_size
action=increase
suggested_values=16000000
[Suggestion "faster-compression-type"]
option=CFOptions.compression
action=set
suggested_values=kLZ4Compression