Remove deprecated option new_table_reader_for_compaction_inputs (#9443)
Summary: In RocksDB, the option new_table_reader_for_compaction_inputs has no effect on compaction or on the behavior of the RocksDB library. Therefore, we are removing it in the upcoming 7.0 release. Pull Request resolved: https://github.com/facebook/rocksdb/pull/9443 Test Plan: CircleCI Reviewed By: ajkr Differential Revision: D33788508 Pulled By: akankshamahajan15 fbshipit-source-id: 324ca6f12bfd019e9bd5e1b0cdac39be5c3cec7d
This commit is contained in:
parent
2ee25e8846
commit
9745c68eb1
@ -58,6 +58,7 @@
|
||||
* Rename `SizeApproximationOptions.include_memtabtles` to `SizeApproximationOptions.include_memtables`.
|
||||
* Remove deprecated option DBOptions::max_mem_compaction_level.
|
||||
* Remove deprecated overloads of API DB::GetApproximateSizes.
|
||||
* Remove deprecated option DBOptions::new_table_reader_for_compaction_inputs.
|
||||
|
||||
### Behavior Changes
|
||||
* Disallow the combination of DBOptions.use_direct_io_for_flush_and_compaction == true and DBOptions.writable_file_max_buffer_size == 0. This combination can cause WritableFileWriter::Append() to loop forever, and it does not make much sense in direct IO.
|
||||
|
@ -418,7 +418,6 @@ TEST_F(DBCompactionTest, SkipStatsUpdateTest) {
|
||||
TEST_F(DBCompactionTest, TestTableReaderForCompaction) {
|
||||
Options options = CurrentOptions();
|
||||
options.env = env_;
|
||||
options.new_table_reader_for_compaction_inputs = true;
|
||||
options.max_open_files = 20;
|
||||
options.level0_file_num_compaction_trigger = 3;
|
||||
// Avoid many shards with small max_open_files, where as little as
|
||||
|
@ -142,10 +142,6 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src,
|
||||
result.compaction_readahead_size = 1024 * 1024 * 2;
|
||||
}
|
||||
|
||||
if (result.compaction_readahead_size > 0 || result.use_direct_reads) {
|
||||
result.new_table_reader_for_compaction_inputs = true;
|
||||
}
|
||||
|
||||
// Force flush on DB open if 2PC is enabled, since with 2PC we have no
|
||||
// guarantee that consecutive log files have consecutive sequence id, which
|
||||
// make recovery complicated.
|
||||
|
@ -1003,7 +1003,6 @@ TEST_F(DBOptionsTest, CompactionReadaheadSizeChange) {
|
||||
options.env = &env;
|
||||
|
||||
options.compaction_readahead_size = 0;
|
||||
options.new_table_reader_for_compaction_inputs = true;
|
||||
options.level0_file_num_compaction_trigger = 2;
|
||||
const std::string kValue(1024, 'v');
|
||||
Reopen(options);
|
||||
|
@ -3946,7 +3946,6 @@ TEST_F(DBTest2, RateLimitedCompactionReads) {
|
||||
options.level0_file_num_compaction_trigger = kNumL0Files;
|
||||
options.memtable_factory.reset(
|
||||
test::NewSpecialSkipListFactory(kNumKeysPerFile));
|
||||
options.new_table_reader_for_compaction_inputs = true;
|
||||
// takes roughly one second, split into 100 x 10ms intervals. Each interval
|
||||
// permits 5.12KB, which is smaller than the block size, so this test
|
||||
// exercises the code for chunking reads.
|
||||
|
@ -426,7 +426,6 @@ Options DBTestBase::GetOptions(
|
||||
break;
|
||||
case kFullFilterWithNewTableReaderForCompactions:
|
||||
table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
|
||||
options.new_table_reader_for_compaction_inputs = true;
|
||||
options.compaction_readahead_size = 10 * 1024 * 1024;
|
||||
break;
|
||||
case kPartitionedFilterWithNewTableReaderForCompactions:
|
||||
@ -434,7 +433,6 @@ Options DBTestBase::GetOptions(
|
||||
table_options.partition_filters = true;
|
||||
table_options.index_type =
|
||||
BlockBasedTableOptions::IndexType::kTwoLevelIndexSearch;
|
||||
options.new_table_reader_for_compaction_inputs = true;
|
||||
options.compaction_readahead_size = 10 * 1024 * 1024;
|
||||
break;
|
||||
case kUncompressed:
|
||||
|
@ -2406,9 +2406,6 @@ void StressTest::Open() {
|
||||
10 /* fairness */,
|
||||
FLAGS_rate_limit_bg_reads ? RateLimiter::Mode::kReadsOnly
|
||||
: RateLimiter::Mode::kWritesOnly));
|
||||
if (FLAGS_rate_limit_bg_reads) {
|
||||
options_.new_table_reader_for_compaction_inputs = true;
|
||||
}
|
||||
}
|
||||
if (FLAGS_sst_file_manager_bytes_per_sec > 0 ||
|
||||
FLAGS_sst_file_manager_bytes_per_truncate > 0) {
|
||||
|
@ -782,7 +782,6 @@ struct DBOptions {
|
||||
// be used. Memory mapped files are not impacted by these parameters.
|
||||
|
||||
// Use O_DIRECT for user and compaction reads.
|
||||
// When true, we also force new_table_reader_for_compaction_inputs to true.
|
||||
// Default: false
|
||||
// Not supported in ROCKSDB_LITE mode!
|
||||
bool use_direct_reads = false;
|
||||
@ -890,27 +889,10 @@ struct DBOptions {
|
||||
enum AccessHint { NONE, NORMAL, SEQUENTIAL, WILLNEED };
|
||||
AccessHint access_hint_on_compaction_start = NORMAL;
|
||||
|
||||
// If true, always create a new file descriptor and new table reader
|
||||
// for compaction inputs. Turning this parameter on may introduce extra
|
||||
// memory usage in the table reader, if it allocates extra memory
|
||||
// for indexes. This will allow file descriptor prefetch options
|
||||
// to be set for compaction input files and not to impact file
|
||||
// descriptors for the same file used by user queries.
|
||||
// Suggest to enable BlockBasedTableOptions.cache_index_and_filter_blocks
|
||||
// for this mode if using block-based table.
|
||||
//
|
||||
// Default: false
|
||||
// This flag has no effect on the behavior of compaction and is planned for deletion
|
||||
// in the future.
|
||||
bool new_table_reader_for_compaction_inputs = false;
|
||||
|
||||
// If non-zero, we perform bigger reads when doing compaction. If you're
|
||||
// running RocksDB on spinning disks, you should set this to at least 2MB.
|
||||
// That way RocksDB's compaction is doing sequential instead of random reads.
|
||||
//
|
||||
// When non-zero, we also force new_table_reader_for_compaction_inputs to
|
||||
// true.
|
||||
//
|
||||
// Default: 0
|
||||
//
|
||||
// Dynamically changeable through SetDBOptions() API.
|
||||
|
@ -22,11 +22,10 @@ namespace ROCKSDB_NAMESPACE {
|
||||
class RateLimiter : public Customizable {
|
||||
public:
|
||||
enum class OpType {
|
||||
// Limitation: we currently only invoke Request() with OpType::kRead for
|
||||
// compactions when DBOptions::new_table_reader_for_compaction_inputs is set
|
||||
kRead,
|
||||
kWrite,
|
||||
};
|
||||
|
||||
enum class Mode {
|
||||
kReadsOnly,
|
||||
kWritesOnly,
|
||||
|
@ -1566,30 +1566,6 @@ jbyte Java_org_rocksdb_Options_accessHintOnCompactionStart(
|
||||
opt->access_hint_on_compaction_start);
|
||||
}
|
||||
|
||||
/*
|
||||
* Class: org_rocksdb_Options
|
||||
* Method: setNewTableReaderForCompactionInputs
|
||||
* Signature: (JZ)V
|
||||
*/
|
||||
void Java_org_rocksdb_Options_setNewTableReaderForCompactionInputs(
|
||||
JNIEnv*, jobject, jlong jhandle,
|
||||
jboolean jnew_table_reader_for_compaction_inputs) {
|
||||
auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
|
||||
opt->new_table_reader_for_compaction_inputs =
|
||||
static_cast<bool>(jnew_table_reader_for_compaction_inputs);
|
||||
}
|
||||
|
||||
/*
|
||||
* Class: org_rocksdb_Options
|
||||
* Method: newTableReaderForCompactionInputs
|
||||
* Signature: (J)Z
|
||||
*/
|
||||
jboolean Java_org_rocksdb_Options_newTableReaderForCompactionInputs(
|
||||
JNIEnv*, jobject, jlong jhandle) {
|
||||
auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::Options*>(jhandle);
|
||||
return static_cast<bool>(opt->new_table_reader_for_compaction_inputs);
|
||||
}
|
||||
|
||||
/*
|
||||
* Class: org_rocksdb_Options
|
||||
* Method: setCompactionReadaheadSize
|
||||
@ -6815,30 +6791,6 @@ jbyte Java_org_rocksdb_DBOptions_accessHintOnCompactionStart(
|
||||
opt->access_hint_on_compaction_start);
|
||||
}
|
||||
|
||||
/*
|
||||
* Class: org_rocksdb_DBOptions
|
||||
* Method: setNewTableReaderForCompactionInputs
|
||||
* Signature: (JZ)V
|
||||
*/
|
||||
void Java_org_rocksdb_DBOptions_setNewTableReaderForCompactionInputs(
|
||||
JNIEnv*, jobject, jlong jhandle,
|
||||
jboolean jnew_table_reader_for_compaction_inputs) {
|
||||
auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
|
||||
opt->new_table_reader_for_compaction_inputs =
|
||||
static_cast<bool>(jnew_table_reader_for_compaction_inputs);
|
||||
}
|
||||
|
||||
/*
|
||||
* Class: org_rocksdb_DBOptions
|
||||
* Method: newTableReaderForCompactionInputs
|
||||
* Signature: (J)Z
|
||||
*/
|
||||
jboolean Java_org_rocksdb_DBOptions_newTableReaderForCompactionInputs(
|
||||
JNIEnv*, jobject, jlong jhandle) {
|
||||
auto* opt = reinterpret_cast<ROCKSDB_NAMESPACE::DBOptions*>(jhandle);
|
||||
return static_cast<bool>(opt->new_table_reader_for_compaction_inputs);
|
||||
}
|
||||
|
||||
/*
|
||||
* Class: org_rocksdb_DBOptions
|
||||
* Method: setCompactionReadaheadSize
|
||||
|
@ -763,21 +763,6 @@ public class DBOptions extends RocksObject
|
||||
return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_));
|
||||
}
|
||||
|
||||
@Override
|
||||
public DBOptions setNewTableReaderForCompactionInputs(
|
||||
final boolean newTableReaderForCompactionInputs) {
|
||||
assert(isOwningHandle());
|
||||
setNewTableReaderForCompactionInputs(nativeHandle_,
|
||||
newTableReaderForCompactionInputs);
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean newTableReaderForCompactionInputs() {
|
||||
assert(isOwningHandle());
|
||||
return newTableReaderForCompactionInputs(nativeHandle_);
|
||||
}
|
||||
|
||||
@Override
|
||||
public DBOptions setCompactionReadaheadSize(final long compactionReadaheadSize) {
|
||||
assert(isOwningHandle());
|
||||
@ -1391,9 +1376,6 @@ public class DBOptions extends RocksObject
|
||||
private native void setAccessHintOnCompactionStart(final long handle,
|
||||
final byte accessHintOnCompactionStart);
|
||||
private native byte accessHintOnCompactionStart(final long handle);
|
||||
private native void setNewTableReaderForCompactionInputs(final long handle,
|
||||
final boolean newTableReaderForCompactionInputs);
|
||||
private native boolean newTableReaderForCompactionInputs(final long handle);
|
||||
private native void setCompactionReadaheadSize(final long handle,
|
||||
final long compactionReadaheadSize);
|
||||
private native long compactionReadaheadSize(final long handle);
|
||||
|
@ -947,43 +947,6 @@ public interface DBOptionsInterface<T extends DBOptionsInterface<T>> {
|
||||
*/
|
||||
AccessHint accessHintOnCompactionStart();
|
||||
|
||||
/**
|
||||
* If true, always create a new file descriptor and new table reader
|
||||
* for compaction inputs. Turn this parameter on may introduce extra
|
||||
* memory usage in the table reader, if it allocates extra memory
|
||||
* for indexes. This will allow file descriptor prefetch options
|
||||
* to be set for compaction input files and not to impact file
|
||||
* descriptors for the same file used by user queries.
|
||||
* Suggest to enable {@link BlockBasedTableConfig#cacheIndexAndFilterBlocks()}
|
||||
* for this mode if using block-based table.
|
||||
*
|
||||
* Default: false
|
||||
*
|
||||
* @param newTableReaderForCompactionInputs true if a new file descriptor and
|
||||
* table reader should be created for compaction inputs
|
||||
*
|
||||
* @return the reference to the current options.
|
||||
*/
|
||||
T setNewTableReaderForCompactionInputs(
|
||||
boolean newTableReaderForCompactionInputs);
|
||||
|
||||
/**
|
||||
* If true, always create a new file descriptor and new table reader
|
||||
* for compaction inputs. Turn this parameter on may introduce extra
|
||||
* memory usage in the table reader, if it allocates extra memory
|
||||
* for indexes. This will allow file descriptor prefetch options
|
||||
* to be set for compaction input files and not to impact file
|
||||
* descriptors for the same file used by user queries.
|
||||
* Suggest to enable {@link BlockBasedTableConfig#cacheIndexAndFilterBlocks()}
|
||||
* for this mode if using block-based table.
|
||||
*
|
||||
* Default: false
|
||||
*
|
||||
* @return true if a new file descriptor and table reader are created for
|
||||
* compaction inputs
|
||||
*/
|
||||
boolean newTableReaderForCompactionInputs();
|
||||
|
||||
/**
|
||||
* This is a maximum buffer size that is used by WinMmapReadableFile in
|
||||
* unbuffered disk I/O mode. We need to maintain an aligned buffer for
|
||||
|
@ -417,8 +417,6 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
|
||||
* running RocksDB on spinning disks, you should set this to at least 2MB.
|
||||
*
|
||||
* That way RocksDB's compaction is doing sequential instead of random reads.
|
||||
* When non-zero, we also force
|
||||
* {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true.
|
||||
*
|
||||
* Default: 0
|
||||
*
|
||||
@ -433,8 +431,6 @@ public interface MutableDBOptionsInterface<T extends MutableDBOptionsInterface<T
|
||||
* running RocksDB on spinning disks, you should set this to at least 2MB.
|
||||
*
|
||||
* That way RocksDB's compaction is doing sequential instead of random reads.
|
||||
* When non-zero, we also force
|
||||
* {@link DBOptionsInterface#newTableReaderForCompactionInputs()} to true.
|
||||
*
|
||||
* Default: 0
|
||||
*
|
||||
|
@ -854,21 +854,6 @@ public class Options extends RocksObject
|
||||
return AccessHint.getAccessHint(accessHintOnCompactionStart(nativeHandle_));
|
||||
}
|
||||
|
||||
@Override
|
||||
public Options setNewTableReaderForCompactionInputs(
|
||||
final boolean newTableReaderForCompactionInputs) {
|
||||
assert(isOwningHandle());
|
||||
setNewTableReaderForCompactionInputs(nativeHandle_,
|
||||
newTableReaderForCompactionInputs);
|
||||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean newTableReaderForCompactionInputs() {
|
||||
assert(isOwningHandle());
|
||||
return newTableReaderForCompactionInputs(nativeHandle_);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Options setCompactionReadaheadSize(final long compactionReadaheadSize) {
|
||||
assert(isOwningHandle());
|
||||
@ -2221,9 +2206,6 @@ public class Options extends RocksObject
|
||||
private native void setAccessHintOnCompactionStart(final long handle,
|
||||
final byte accessHintOnCompactionStart);
|
||||
private native byte accessHintOnCompactionStart(final long handle);
|
||||
private native void setNewTableReaderForCompactionInputs(final long handle,
|
||||
final boolean newTableReaderForCompactionInputs);
|
||||
private native boolean newTableReaderForCompactionInputs(final long handle);
|
||||
private native void setCompactionReadaheadSize(final long handle,
|
||||
final long compactionReadaheadSize);
|
||||
private native long compactionReadaheadSize(final long handle);
|
||||
|
@ -464,15 +464,6 @@ public class DBOptionsTest {
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void newTableReaderForCompactionInputs() {
|
||||
try(final DBOptions opt = new DBOptions()) {
|
||||
final boolean boolValue = rand.nextBoolean();
|
||||
opt.setNewTableReaderForCompactionInputs(boolValue);
|
||||
assertThat(opt.newTableReaderForCompactionInputs()).isEqualTo(boolValue);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void compactionReadaheadSize() {
|
||||
try(final DBOptions opt = new DBOptions()) {
|
||||
|
@ -703,15 +703,6 @@ public class OptionsTest {
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void newTableReaderForCompactionInputs() {
|
||||
try (final Options opt = new Options()) {
|
||||
final boolean boolValue = rand.nextBoolean();
|
||||
opt.setNewTableReaderForCompactionInputs(boolValue);
|
||||
assertThat(opt.newTableReaderForCompactionInputs()).isEqualTo(boolValue);
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void compactionReadaheadSize() {
|
||||
try (final Options opt = new Options()) {
|
||||
|
@ -241,9 +241,7 @@ static std::unordered_map<std::string, OptionTypeInfo>
|
||||
OptionType::kBoolean, OptionVerificationType::kNormal,
|
||||
OptionTypeFlags::kNone}},
|
||||
{"new_table_reader_for_compaction_inputs",
|
||||
{offsetof(struct ImmutableDBOptions,
|
||||
new_table_reader_for_compaction_inputs),
|
||||
OptionType::kBoolean, OptionVerificationType::kNormal,
|
||||
{0, OptionType::kBoolean, OptionVerificationType::kDeprecated,
|
||||
OptionTypeFlags::kNone}},
|
||||
{"random_access_max_buffer_size",
|
||||
{offsetof(struct ImmutableDBOptions, random_access_max_buffer_size),
|
||||
@ -698,8 +696,6 @@ ImmutableDBOptions::ImmutableDBOptions(const DBOptions& options)
|
||||
db_write_buffer_size(options.db_write_buffer_size),
|
||||
write_buffer_manager(options.write_buffer_manager),
|
||||
access_hint_on_compaction_start(options.access_hint_on_compaction_start),
|
||||
new_table_reader_for_compaction_inputs(
|
||||
options.new_table_reader_for_compaction_inputs),
|
||||
random_access_max_buffer_size(options.random_access_max_buffer_size),
|
||||
use_adaptive_mutex(options.use_adaptive_mutex),
|
||||
listeners(options.listeners),
|
||||
@ -839,8 +835,6 @@ void ImmutableDBOptions::Dump(Logger* log) const {
|
||||
write_buffer_manager.get());
|
||||
ROCKS_LOG_HEADER(log, " Options.access_hint_on_compaction_start: %d",
|
||||
static_cast<int>(access_hint_on_compaction_start));
|
||||
ROCKS_LOG_HEADER(log, " Options.new_table_reader_for_compaction_inputs: %d",
|
||||
new_table_reader_for_compaction_inputs);
|
||||
ROCKS_LOG_HEADER(
|
||||
log, " Options.random_access_max_buffer_size: %" ROCKSDB_PRIszt,
|
||||
random_access_max_buffer_size);
|
||||
|
@ -61,7 +61,6 @@ struct ImmutableDBOptions {
|
||||
size_t db_write_buffer_size;
|
||||
std::shared_ptr<WriteBufferManager> write_buffer_manager;
|
||||
DBOptions::AccessHint access_hint_on_compaction_start;
|
||||
bool new_table_reader_for_compaction_inputs;
|
||||
size_t random_access_max_buffer_size;
|
||||
bool use_adaptive_mutex;
|
||||
std::vector<std::shared_ptr<EventListener>> listeners;
|
||||
|
@ -123,8 +123,6 @@ DBOptions BuildDBOptions(const ImmutableDBOptions& immutable_db_options,
|
||||
options.write_buffer_manager = immutable_db_options.write_buffer_manager;
|
||||
options.access_hint_on_compaction_start =
|
||||
immutable_db_options.access_hint_on_compaction_start;
|
||||
options.new_table_reader_for_compaction_inputs =
|
||||
immutable_db_options.new_table_reader_for_compaction_inputs;
|
||||
options.compaction_readahead_size =
|
||||
mutable_db_options.compaction_readahead_size;
|
||||
options.random_access_max_buffer_size =
|
||||
|
@ -292,7 +292,6 @@ TEST_F(OptionsSettableTest, DBOptionsAllFieldsSettable) {
|
||||
"use_adaptive_mutex=false;"
|
||||
"max_total_wal_size=4295005604;"
|
||||
"compaction_readahead_size=0;"
|
||||
"new_table_reader_for_compaction_inputs=false;"
|
||||
"keep_log_file_num=4890;"
|
||||
"skip_stats_update_on_db_open=false;"
|
||||
"skip_checking_sst_file_sizes_on_db_open=false;"
|
||||
|
@ -148,7 +148,6 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
|
||||
{"advise_random_on_open", "true"},
|
||||
{"experimental_mempurge_threshold", "0.0"},
|
||||
{"use_adaptive_mutex", "false"},
|
||||
{"new_table_reader_for_compaction_inputs", "true"},
|
||||
{"compaction_readahead_size", "100"},
|
||||
{"random_access_max_buffer_size", "3145728"},
|
||||
{"writable_file_max_buffer_size", "314159"},
|
||||
@ -309,7 +308,6 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
|
||||
ASSERT_EQ(new_db_opt.advise_random_on_open, true);
|
||||
ASSERT_EQ(new_db_opt.experimental_mempurge_threshold, 0.0);
|
||||
ASSERT_EQ(new_db_opt.use_adaptive_mutex, false);
|
||||
ASSERT_EQ(new_db_opt.new_table_reader_for_compaction_inputs, true);
|
||||
ASSERT_EQ(new_db_opt.compaction_readahead_size, 100);
|
||||
ASSERT_EQ(new_db_opt.random_access_max_buffer_size, 3145728);
|
||||
ASSERT_EQ(new_db_opt.writable_file_max_buffer_size, 314159);
|
||||
@ -2302,7 +2300,6 @@ TEST_F(OptionsOldApiTest, GetOptionsFromMapTest) {
|
||||
{"advise_random_on_open", "true"},
|
||||
{"experimental_mempurge_threshold", "0.0"},
|
||||
{"use_adaptive_mutex", "false"},
|
||||
{"new_table_reader_for_compaction_inputs", "true"},
|
||||
{"compaction_readahead_size", "100"},
|
||||
{"random_access_max_buffer_size", "3145728"},
|
||||
{"writable_file_max_buffer_size", "314159"},
|
||||
@ -2457,7 +2454,6 @@ TEST_F(OptionsOldApiTest, GetOptionsFromMapTest) {
|
||||
ASSERT_EQ(new_db_opt.advise_random_on_open, true);
|
||||
ASSERT_EQ(new_db_opt.experimental_mempurge_threshold, 0.0);
|
||||
ASSERT_EQ(new_db_opt.use_adaptive_mutex, false);
|
||||
ASSERT_EQ(new_db_opt.new_table_reader_for_compaction_inputs, true);
|
||||
ASSERT_EQ(new_db_opt.compaction_readahead_size, 100);
|
||||
ASSERT_EQ(new_db_opt.random_access_max_buffer_size, 3145728);
|
||||
ASSERT_EQ(new_db_opt.writable_file_max_buffer_size, 314159);
|
||||
|
@ -662,9 +662,6 @@ DEFINE_int32(file_opening_threads,
|
||||
"If open_files is set to -1, this option set the number of "
|
||||
"threads that will be used to open files during DB::Open()");
|
||||
|
||||
DEFINE_bool(new_table_reader_for_compaction_inputs, true,
|
||||
"If true, uses a separate file handle for compaction inputs");
|
||||
|
||||
DEFINE_int32(compaction_readahead_size, 0, "Compaction readahead size");
|
||||
|
||||
DEFINE_int32(log_readahead_size, 0, "WAL and manifest readahead size");
|
||||
@ -3818,8 +3815,6 @@ class Benchmark {
|
||||
}
|
||||
options.bloom_locality = FLAGS_bloom_locality;
|
||||
options.max_file_opening_threads = FLAGS_file_opening_threads;
|
||||
options.new_table_reader_for_compaction_inputs =
|
||||
FLAGS_new_table_reader_for_compaction_inputs;
|
||||
options.compaction_readahead_size = FLAGS_compaction_readahead_size;
|
||||
options.log_readahead_size = FLAGS_log_readahead_size;
|
||||
options.random_access_max_buffer_size = FLAGS_random_access_max_buffer_size;
|
||||
@ -4252,13 +4247,6 @@ class Benchmark {
|
||||
}
|
||||
|
||||
if (FLAGS_rate_limiter_bytes_per_sec > 0) {
|
||||
if (FLAGS_rate_limit_bg_reads &&
|
||||
!FLAGS_new_table_reader_for_compaction_inputs) {
|
||||
fprintf(stderr,
|
||||
"rate limit compaction reads must have "
|
||||
"new_table_reader_for_compaction_inputs set\n");
|
||||
exit(1);
|
||||
}
|
||||
options.rate_limiter.reset(NewGenericRateLimiter(
|
||||
FLAGS_rate_limiter_bytes_per_sec, FLAGS_rate_limiter_refill_period_us,
|
||||
10 /* fairness */,
|
||||
|
@ -194,7 +194,6 @@ const std::string options_file_content = R"OPTIONS_FILE(
|
||||
use_adaptive_mutex=false
|
||||
max_total_wal_size=18446744073709551615
|
||||
compaction_readahead_size=0
|
||||
new_table_reader_for_compaction_inputs=false
|
||||
keep_log_file_num=10
|
||||
skip_stats_update_on_db_open=false
|
||||
max_manifest_file_size=18446744073709551615
|
||||
|
Loading…
Reference in New Issue
Block a user