Deprecate filter_deletes

Summary: filter_deletes is not a frequently used feature. Remove it.

Test Plan: Run all test suites.

Reviewers: igor, yhchiang, IslamAbdelRahman

Reviewed By: IslamAbdelRahman

Subscribers: leveldb, andrewkr, dhruba

Differential Revision: https://reviews.facebook.net/D59427
This commit is contained in:
sdong 2016-06-07 17:25:06 -07:00
parent 4939fc3892
commit 7b79238b65
29 changed files with 31 additions and 271 deletions

View File

@ -1,9 +1,9 @@
# Rocksdb Change Log # Rocksdb Change Log
## Unreleased ## Unreleased
### Public API Change ### Public API Change
* Deprecate BlockBaseTableOptions.hash_index_allow_collision=false
* options.memtable_prefix_bloom_bits changes to options.memtable_prefix_bloom_bits_ratio and deprecate options.memtable_prefix_bloom_probes * options.memtable_prefix_bloom_bits changes to options.memtable_prefix_bloom_bits_ratio and deprecate options.memtable_prefix_bloom_probes
* enum type PerfLevel changes from char to unsigned char. Value of all PerfLevel shift by one. * enum type PerfLevel changes from char to unsigned char. Value of all PerfLevel shift by one.
* Deprecate options.filter_deletes.
### New Features ### New Features
* Add avoid_flush_during_recovery option. * Add avoid_flush_during_recovery option.

View File

@ -1692,11 +1692,6 @@ void rocksdb_options_set_verify_checksums_in_compaction(
opt->rep.verify_checksums_in_compaction = v; opt->rep.verify_checksums_in_compaction = v;
} }
void rocksdb_options_set_filter_deletes(
rocksdb_options_t* opt, unsigned char v) {
opt->rep.filter_deletes = v;
}
void rocksdb_options_set_max_sequential_skip_in_iterations( void rocksdb_options_set_max_sequential_skip_in_iterations(
rocksdb_options_t* opt, uint64_t v) { rocksdb_options_t* opt, uint64_t v) {
opt->rep.max_sequential_skip_in_iterations = v; opt->rep.max_sequential_skip_in_iterations = v;

View File

@ -130,11 +130,6 @@ Status CheckConcurrentWritesSupported(const ColumnFamilyOptions& cf_options) {
"In-place memtable updates (inplace_update_support) is not compatible " "In-place memtable updates (inplace_update_support) is not compatible "
"with concurrent writes (allow_concurrent_memtable_write)"); "with concurrent writes (allow_concurrent_memtable_write)");
} }
if (cf_options.filter_deletes) {
return Status::InvalidArgument(
"Delete filtering (filter_deletes) is not compatible with concurrent "
"memtable writes (allow_concurrent_memtable_writes)");
}
if (!cf_options.memtable_factory->IsInsertConcurrentlySupported()) { if (!cf_options.memtable_factory->IsInsertConcurrentlySupported()) {
return Status::InvalidArgument( return Status::InvalidArgument(
"Memtable doesn't concurrent writes (allow_concurrent_memtable_write)"); "Memtable doesn't concurrent writes (allow_concurrent_memtable_write)");

View File

@ -101,46 +101,6 @@ TEST_P(DBBloomFilterTestWithParam, KeyMayExist) {
ChangeOptions(kSkipPlainTable | kSkipHashIndex | kSkipFIFOCompaction)); ChangeOptions(kSkipPlainTable | kSkipHashIndex | kSkipFIFOCompaction));
} }
// A delete is skipped for key if KeyMayExist(key) returns False
// Tests Writebatch consistency and proper delete behaviour
TEST_P(DBBloomFilterTestWithParam, FilterDeletes) {
do {
anon::OptionsOverride options_override;
options_override.filter_policy.reset(
NewBloomFilterPolicy(20, use_block_based_filter_));
Options options = CurrentOptions(options_override);
options.filter_deletes = true;
CreateAndReopenWithCF({"pikachu"}, options);
WriteBatch batch;
batch.Delete(handles_[1], "a");
dbfull()->Write(WriteOptions(), &batch);
ASSERT_EQ(AllEntriesFor("a", 1), "[ ]"); // Delete skipped
batch.Clear();
batch.Put(handles_[1], "a", "b");
batch.Delete(handles_[1], "a");
dbfull()->Write(WriteOptions(), &batch);
ASSERT_EQ(Get(1, "a"), "NOT_FOUND");
ASSERT_EQ(AllEntriesFor("a", 1), "[ DEL, b ]"); // Delete issued
batch.Clear();
batch.Delete(handles_[1], "c");
batch.Put(handles_[1], "c", "d");
dbfull()->Write(WriteOptions(), &batch);
ASSERT_EQ(Get(1, "c"), "d");
ASSERT_EQ(AllEntriesFor("c", 1), "[ d ]"); // Delete skipped
batch.Clear();
ASSERT_OK(Flush(1)); // A stray Flush
batch.Delete(handles_[1], "c");
dbfull()->Write(WriteOptions(), &batch);
ASSERT_EQ(AllEntriesFor("c", 1), "[ DEL, d ]"); // Delete issued
batch.Clear();
} while (ChangeCompactOptions());
}
TEST_F(DBBloomFilterTest, GetFilterByPrefixBloom) { TEST_F(DBBloomFilterTest, GetFilterByPrefixBloom) {
Options options = last_options_; Options options = last_options_;
options.prefix_extractor.reset(NewFixedPrefixTransform(8)); options.prefix_extractor.reset(NewFixedPrefixTransform(8));

View File

@ -1534,7 +1534,8 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
// That's why we set ignore missing column families to true // That's why we set ignore missing column families to true
status = WriteBatchInternal::InsertInto( status = WriteBatchInternal::InsertInto(
&batch, column_family_memtables_.get(), &flush_scheduler_, true, &batch, column_family_memtables_.get(), &flush_scheduler_, true,
log_number, this, true, false, next_sequence); log_number, this, false /* concurrent_memtable_writes */,
next_sequence);
MaybeIgnoreError(&status); MaybeIgnoreError(&status);
if (!status.ok()) { if (!status.ok()) {
// We are treating this as a failure while reading since we read valid // We are treating this as a failure while reading since we read valid
@ -4500,7 +4501,7 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
w.status = WriteBatchInternal::InsertInto( w.status = WriteBatchInternal::InsertInto(
&w, &column_family_memtables, &flush_scheduler_, &w, &column_family_memtables, &flush_scheduler_,
write_options.ignore_missing_column_families, 0 /*log_number*/, this, write_options.ignore_missing_column_families, 0 /*log_number*/, this,
true /*dont_filter_deletes*/, true /*concurrent_memtable_writes*/); true /*concurrent_memtable_writes*/);
} }
if (write_thread_.CompleteParallelWorker(&w)) { if (write_thread_.CompleteParallelWorker(&w)) {
@ -4794,7 +4795,7 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
status = WriteBatchInternal::InsertInto( status = WriteBatchInternal::InsertInto(
write_group, current_sequence, column_family_memtables_.get(), write_group, current_sequence, column_family_memtables_.get(),
&flush_scheduler_, write_options.ignore_missing_column_families, &flush_scheduler_, write_options.ignore_missing_column_families,
0 /*log_number*/, this, false /*dont_filter_deletes*/); 0 /*log_number*/, this);
if (status.ok()) { if (status.ok()) {
// There were no write failures. Set leader's status // There were no write failures. Set leader's status
@ -4821,8 +4822,7 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
w.status = WriteBatchInternal::InsertInto( w.status = WriteBatchInternal::InsertInto(
&w, &column_family_memtables, &flush_scheduler_, &w, &column_family_memtables, &flush_scheduler_,
write_options.ignore_missing_column_families, 0 /*log_number*/, write_options.ignore_missing_column_families, 0 /*log_number*/,
this, true /*dont_filter_deletes*/, this, true /*concurrent_memtable_writes*/);
true /*concurrent_memtable_writes*/);
} }
// CompleteParallelWorker returns true if this thread should // CompleteParallelWorker returns true if this thread should

View File

@ -91,10 +91,6 @@ bool DBTestBase::ShouldSkipOptions(int option_config, int skip_mask) {
} }
#endif #endif
if ((skip_mask & kSkipDeletesFilterFirst) &&
option_config == kDeletesFilterFirst) {
return true;
}
if ((skip_mask & kSkipUniversalCompaction) && if ((skip_mask & kSkipUniversalCompaction) &&
(option_config == kUniversalCompaction || (option_config == kUniversalCompaction ||
option_config == kUniversalCompactionMultiLevel)) { option_config == kUniversalCompactionMultiLevel)) {
@ -311,9 +307,6 @@ Options DBTestBase::CurrentOptions(
options.report_bg_io_stats = true; options.report_bg_io_stats = true;
// TODO(3.13) -- test more options // TODO(3.13) -- test more options
break; break;
case kDeletesFilterFirst:
options.filter_deletes = true;
break;
case kUniversalCompaction: case kUniversalCompaction:
options.compaction_style = kCompactionStyleUniversal; options.compaction_style = kCompactionStyleUniversal;
options.num_levels = 1; options.num_levels = 1;

View File

@ -550,19 +550,18 @@ class DBTestBase : public testing::Test {
kWalDirAndMmapReads = 16, kWalDirAndMmapReads = 16,
kManifestFileSize = 17, kManifestFileSize = 17,
kPerfOptions = 18, kPerfOptions = 18,
kDeletesFilterFirst = 19, kHashSkipList = 19,
kHashSkipList = 20, kUniversalCompaction = 20,
kUniversalCompaction = 21, kUniversalCompactionMultiLevel = 21,
kUniversalCompactionMultiLevel = 22, kCompressedBlockCache = 22,
kCompressedBlockCache = 23, kInfiniteMaxOpenFiles = 23,
kInfiniteMaxOpenFiles = 24, kxxHashChecksum = 24,
kxxHashChecksum = 25, kFIFOCompaction = 25,
kFIFOCompaction = 26, kOptimizeFiltersForHits = 26,
kOptimizeFiltersForHits = 27, kRowCache = 27,
kRowCache = 28, kRecycleLogFiles = 28,
kRecycleLogFiles = 29, kConcurrentSkipList = 29,
kConcurrentSkipList = 30, kEnd = 30,
kEnd = 31,
kLevelSubcompactions = 31, kLevelSubcompactions = 31,
kUniversalSubcompactions = 32, kUniversalSubcompactions = 32,
kBlockBasedTableWithIndexRestartInterval = 33, kBlockBasedTableWithIndexRestartInterval = 33,

View File

@ -50,7 +50,6 @@ MemTableOptions::MemTableOptions(const ImmutableCFOptions& ioptions,
inplace_update_num_locks(mutable_cf_options.inplace_update_num_locks), inplace_update_num_locks(mutable_cf_options.inplace_update_num_locks),
inplace_callback(ioptions.inplace_callback), inplace_callback(ioptions.inplace_callback),
max_successive_merges(mutable_cf_options.max_successive_merges), max_successive_merges(mutable_cf_options.max_successive_merges),
filter_deletes(mutable_cf_options.filter_deletes),
statistics(ioptions.statistics), statistics(ioptions.statistics),
merge_operator(ioptions.merge_operator), merge_operator(ioptions.merge_operator),
info_log(ioptions.info_log) {} info_log(ioptions.info_log) {}

View File

@ -50,7 +50,6 @@ struct MemTableOptions {
Slice delta_value, Slice delta_value,
std::string* merged_value); std::string* merged_value);
size_t max_successive_merges; size_t max_successive_merges;
bool filter_deletes;
Statistics* statistics; Statistics* statistics;
MergeOperator* merge_operator; MergeOperator* merge_operator;
Logger* info_log; Logger* info_log;

View File

@ -692,7 +692,6 @@ class MemTableInserter : public WriteBatch::Handler {
// log number that all Memtables inserted into should reference // log number that all Memtables inserted into should reference
uint64_t log_number_ref_; uint64_t log_number_ref_;
DBImpl* db_; DBImpl* db_;
const bool dont_filter_deletes_;
const bool concurrent_memtable_writes_; const bool concurrent_memtable_writes_;
// current recovered transaction we are rebuilding (recovery) // current recovered transaction we are rebuilding (recovery)
WriteBatch* rebuilding_trx_; WriteBatch* rebuilding_trx_;
@ -702,7 +701,6 @@ class MemTableInserter : public WriteBatch::Handler {
FlushScheduler* flush_scheduler, FlushScheduler* flush_scheduler,
bool ignore_missing_column_families, bool ignore_missing_column_families,
uint64_t recovering_log_number, DB* db, uint64_t recovering_log_number, DB* db,
const bool dont_filter_deletes,
bool concurrent_memtable_writes) bool concurrent_memtable_writes)
: sequence_(sequence), : sequence_(sequence),
cf_mems_(cf_mems), cf_mems_(cf_mems),
@ -711,13 +709,9 @@ class MemTableInserter : public WriteBatch::Handler {
recovering_log_number_(recovering_log_number), recovering_log_number_(recovering_log_number),
log_number_ref_(0), log_number_ref_(0),
db_(reinterpret_cast<DBImpl*>(db)), db_(reinterpret_cast<DBImpl*>(db)),
dont_filter_deletes_(dont_filter_deletes),
concurrent_memtable_writes_(concurrent_memtable_writes), concurrent_memtable_writes_(concurrent_memtable_writes),
rebuilding_trx_(nullptr) { rebuilding_trx_(nullptr) {
assert(cf_mems_); assert(cf_mems_);
if (!dont_filter_deletes_) {
assert(db_);
}
} }
void set_log_number_ref(uint64_t log) { log_number_ref_ = log; } void set_log_number_ref(uint64_t log) { log_number_ref_ = log; }
@ -827,23 +821,6 @@ class MemTableInserter : public WriteBatch::Handler {
Status DeleteImpl(uint32_t column_family_id, const Slice& key, Status DeleteImpl(uint32_t column_family_id, const Slice& key,
ValueType delete_type) { ValueType delete_type) {
MemTable* mem = cf_mems_->GetMemTable(); MemTable* mem = cf_mems_->GetMemTable();
auto* moptions = mem->GetMemTableOptions();
if (!dont_filter_deletes_ && moptions->filter_deletes) {
assert(!concurrent_memtable_writes_);
SnapshotImpl read_from_snapshot;
read_from_snapshot.number_ = sequence_;
ReadOptions ropts;
ropts.snapshot = &read_from_snapshot;
std::string value;
auto cf_handle = cf_mems_->GetColumnFamilyHandle();
if (cf_handle == nullptr) {
cf_handle = db_->DefaultColumnFamily();
}
if (!db_->KeyMayExist(ropts, cf_handle, key, &value)) {
RecordTick(moptions->statistics, NUMBER_FILTERED_DELETES);
return Status::OK();
}
}
mem->Add(sequence_, delete_type, key, Slice(), concurrent_memtable_writes_); mem->Add(sequence_, delete_type, key, Slice(), concurrent_memtable_writes_);
sequence_++; sequence_++;
CheckMemtableFull(); CheckMemtableFull();
@ -1080,10 +1057,10 @@ Status WriteBatchInternal::InsertInto(
const autovector<WriteThread::Writer*>& writers, SequenceNumber sequence, const autovector<WriteThread::Writer*>& writers, SequenceNumber sequence,
ColumnFamilyMemTables* memtables, FlushScheduler* flush_scheduler, ColumnFamilyMemTables* memtables, FlushScheduler* flush_scheduler,
bool ignore_missing_column_families, uint64_t log_number, DB* db, bool ignore_missing_column_families, uint64_t log_number, DB* db,
const bool dont_filter_deletes, bool concurrent_memtable_writes) { bool concurrent_memtable_writes) {
MemTableInserter inserter(sequence, memtables, flush_scheduler, MemTableInserter inserter(sequence, memtables, flush_scheduler,
ignore_missing_column_families, log_number, db, ignore_missing_column_families, log_number, db,
dont_filter_deletes, concurrent_memtable_writes); concurrent_memtable_writes);
for (size_t i = 0; i < writers.size(); i++) { for (size_t i = 0; i < writers.size(); i++) {
auto w = writers[i]; auto w = writers[i];
if (!w->ShouldWriteToMemtable()) { if (!w->ShouldWriteToMemtable()) {
@ -1103,26 +1080,26 @@ Status WriteBatchInternal::InsertInto(WriteThread::Writer* writer,
FlushScheduler* flush_scheduler, FlushScheduler* flush_scheduler,
bool ignore_missing_column_families, bool ignore_missing_column_families,
uint64_t log_number, DB* db, uint64_t log_number, DB* db,
const bool dont_filter_deletes,
bool concurrent_memtable_writes) { bool concurrent_memtable_writes) {
MemTableInserter inserter(WriteBatchInternal::Sequence(writer->batch), MemTableInserter inserter(WriteBatchInternal::Sequence(writer->batch),
memtables, flush_scheduler, memtables, flush_scheduler,
ignore_missing_column_families, log_number, db, ignore_missing_column_families, log_number, db,
dont_filter_deletes, concurrent_memtable_writes); concurrent_memtable_writes);
assert(writer->ShouldWriteToMemtable()); assert(writer->ShouldWriteToMemtable());
inserter.set_log_number_ref(writer->log_ref); inserter.set_log_number_ref(writer->log_ref);
return writer->batch->Iterate(&inserter); return writer->batch->Iterate(&inserter);
} }
Status WriteBatchInternal::InsertInto( Status WriteBatchInternal::InsertInto(const WriteBatch* batch,
const WriteBatch* batch, ColumnFamilyMemTables* memtables, ColumnFamilyMemTables* memtables,
FlushScheduler* flush_scheduler, bool ignore_missing_column_families, FlushScheduler* flush_scheduler,
uint64_t log_number, DB* db, const bool dont_filter_deletes, bool ignore_missing_column_families,
bool concurrent_memtable_writes, SequenceNumber* last_seq_used) { uint64_t log_number, DB* db,
bool concurrent_memtable_writes,
SequenceNumber* last_seq_used) {
MemTableInserter inserter(WriteBatchInternal::Sequence(batch), memtables, MemTableInserter inserter(WriteBatchInternal::Sequence(batch), memtables,
flush_scheduler, ignore_missing_column_families, flush_scheduler, ignore_missing_column_families,
log_number, db, dont_filter_deletes, log_number, db, concurrent_memtable_writes);
concurrent_memtable_writes);
Status s = batch->Iterate(&inserter); Status s = batch->Iterate(&inserter);
if (last_seq_used != nullptr) { if (last_seq_used != nullptr) {
*last_seq_used = inserter.get_final_sequence(); *last_seq_used = inserter.get_final_sequence();

View File

@ -129,9 +129,6 @@ class WriteBatchInternal {
// Inserts batches[i] into memtable, for i in 0..num_batches-1 inclusive. // Inserts batches[i] into memtable, for i in 0..num_batches-1 inclusive.
// //
// If dont_filter_deletes is false AND options.filter_deletes is true
// AND db->KeyMayExist is false, then a Delete won't modify the memtable.
//
// If ignore_missing_column_families == true. WriteBatch // If ignore_missing_column_families == true. WriteBatch
// referencing non-existing column family will be ignored. // referencing non-existing column family will be ignored.
// If ignore_missing_column_families == false, processing of the // If ignore_missing_column_families == false, processing of the
@ -153,7 +150,6 @@ class WriteBatchInternal {
FlushScheduler* flush_scheduler, FlushScheduler* flush_scheduler,
bool ignore_missing_column_families = false, bool ignore_missing_column_families = false,
uint64_t log_number = 0, DB* db = nullptr, uint64_t log_number = 0, DB* db = nullptr,
const bool dont_filter_deletes = true,
bool concurrent_memtable_writes = false); bool concurrent_memtable_writes = false);
// Convenience form of InsertInto when you have only one batch // Convenience form of InsertInto when you have only one batch
@ -163,7 +159,6 @@ class WriteBatchInternal {
FlushScheduler* flush_scheduler, FlushScheduler* flush_scheduler,
bool ignore_missing_column_families = false, bool ignore_missing_column_families = false,
uint64_t log_number = 0, DB* db = nullptr, uint64_t log_number = 0, DB* db = nullptr,
const bool dont_filter_deletes = true,
bool concurrent_memtable_writes = false, bool concurrent_memtable_writes = false,
SequenceNumber* last_seq_used = nullptr); SequenceNumber* last_seq_used = nullptr);
@ -172,7 +167,6 @@ class WriteBatchInternal {
FlushScheduler* flush_scheduler, FlushScheduler* flush_scheduler,
bool ignore_missing_column_families = false, bool ignore_missing_column_families = false,
uint64_t log_number = 0, DB* db = nullptr, uint64_t log_number = 0, DB* db = nullptr,
const bool dont_filter_deletes = true,
bool concurrent_memtable_writes = false); bool concurrent_memtable_writes = false);
static void Append(WriteBatch* dst, const WriteBatch* src); static void Append(WriteBatch* dst, const WriteBatch* src);

View File

@ -632,16 +632,6 @@ struct ColumnFamilyOptions {
// The options for FIFO compaction style // The options for FIFO compaction style
CompactionOptionsFIFO compaction_options_fifo; CompactionOptionsFIFO compaction_options_fifo;
// Use KeyMayExist API to filter deletes when this is true.
// If KeyMayExist returns false, i.e. the key definitely does not exist, then
// the delete is a noop. KeyMayExist only incurs in-memory look up.
// This optimization avoids writing the delete to storage when appropriate.
//
// Default: false
//
// Dynamically changeable through SetOptions() API
bool filter_deletes;
// An iteration->Next() sequentially skips over keys with the same // An iteration->Next() sequentially skips over keys with the same
// user-key unless this option is set. This number specifies the number // user-key unless this option is set. This number specifies the number
// of keys (with the same userkey) that will be sequentially // of keys (with the same userkey) that will be sequentially

View File

@ -598,8 +598,6 @@ public class DbBenchmark {
(Boolean)flags_.get(Flag.disable_auto_compactions)); (Boolean)flags_.get(Flag.disable_auto_compactions));
options.setSourceCompactionFactor( options.setSourceCompactionFactor(
(Integer)flags_.get(Flag.source_compaction_factor)); (Integer)flags_.get(Flag.source_compaction_factor));
options.setFilterDeletes(
(Boolean)flags_.get(Flag.filter_deletes));
options.setMaxSuccessiveMerges( options.setMaxSuccessiveMerges(
(Integer)flags_.get(Flag.max_successive_merges)); (Integer)flags_.get(Flag.max_successive_merges));
options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds)); options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds));

View File

@ -1632,27 +1632,6 @@ void Java_org_rocksdb_Options_setVerifyChecksumsInCompaction(
static_cast<bool>(jverify_checksums_in_compaction); static_cast<bool>(jverify_checksums_in_compaction);
} }
/*
* Class: org_rocksdb_Options
* Method: filterDeletes
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_Options_filterDeletes(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::Options*>(jhandle)->filter_deletes;
}
/*
* Class: org_rocksdb_Options
* Method: setFilterDeletes
* Signature: (JZ)V
*/
void Java_org_rocksdb_Options_setFilterDeletes(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean jfilter_deletes) {
reinterpret_cast<rocksdb::Options*>(jhandle)->filter_deletes =
static_cast<bool>(jfilter_deletes);
}
/* /*
* Class: org_rocksdb_Options * Class: org_rocksdb_Options
* Method: maxSequentialSkipInIterations * Method: maxSequentialSkipInIterations
@ -2791,28 +2770,6 @@ void Java_org_rocksdb_ColumnFamilyOptions_setVerifyChecksumsInCompaction(
static_cast<bool>(jverify_checksums_in_compaction); static_cast<bool>(jverify_checksums_in_compaction);
} }
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: filterDeletes
* Signature: (J)Z
*/
jboolean Java_org_rocksdb_ColumnFamilyOptions_filterDeletes(
JNIEnv* env, jobject jobj, jlong jhandle) {
return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
filter_deletes;
}
/*
* Class: org_rocksdb_ColumnFamilyOptions
* Method: setFilterDeletes
* Signature: (JZ)V
*/
void Java_org_rocksdb_ColumnFamilyOptions_setFilterDeletes(
JNIEnv* env, jobject jobj, jlong jhandle, jboolean jfilter_deletes) {
reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->filter_deletes =
static_cast<bool>(jfilter_deletes);
}
/* /*
* Class: org_rocksdb_ColumnFamilyOptions * Class: org_rocksdb_ColumnFamilyOptions
* Method: maxSequentialSkipInIterations * Method: maxSequentialSkipInIterations

View File

@ -511,18 +511,6 @@ public class ColumnFamilyOptions extends RocksObject
return verifyChecksumsInCompaction(nativeHandle_); return verifyChecksumsInCompaction(nativeHandle_);
} }
@Override
public ColumnFamilyOptions setFilterDeletes(
final boolean filterDeletes) {
setFilterDeletes(nativeHandle_, filterDeletes);
return this;
}
@Override
public boolean filterDeletes() {
return filterDeletes(nativeHandle_);
}
@Override @Override
public ColumnFamilyOptions setMaxSequentialSkipInIterations( public ColumnFamilyOptions setMaxSequentialSkipInIterations(
final long maxSequentialSkipInIterations) { final long maxSequentialSkipInIterations) {
@ -758,9 +746,6 @@ public class ColumnFamilyOptions extends RocksObject
private native void setVerifyChecksumsInCompaction( private native void setVerifyChecksumsInCompaction(
long handle, boolean verifyChecksumsInCompaction); long handle, boolean verifyChecksumsInCompaction);
private native boolean verifyChecksumsInCompaction(long handle); private native boolean verifyChecksumsInCompaction(long handle);
private native void setFilterDeletes(
long handle, boolean filterDeletes);
private native boolean filterDeletes(long handle);
private native void setMaxSequentialSkipInIterations( private native void setMaxSequentialSkipInIterations(
long handle, long maxSequentialSkipInIterations); long handle, long maxSequentialSkipInIterations);
private native long maxSequentialSkipInIterations(long handle); private native long maxSequentialSkipInIterations(long handle);

View File

@ -881,29 +881,6 @@ public interface ColumnFamilyOptionsInterface {
*/ */
boolean verifyChecksumsInCompaction(); boolean verifyChecksumsInCompaction();
/**
* Use KeyMayExist API to filter deletes when this is true.
* If KeyMayExist returns false, i.e. the key definitely does not exist, then
* the delete is a noop. KeyMayExist only incurs in-memory look up.
* This optimization avoids writing the delete to storage when appropriate.
* Default: false
*
* @param filterDeletes true if filter-deletes behavior is on.
* @return the reference to the current option.
*/
Object setFilterDeletes(boolean filterDeletes);
/**
* Use KeyMayExist API to filter deletes when this is true.
* If KeyMayExist returns false, i.e. the key definitely does not exist, then
* the delete is a noop. KeyMayExist only incurs in-memory look up.
* This optimization avoids writing the delete to storage when appropriate.
* Default: false
*
* @return true if filter-deletes behavior is on.
*/
boolean filterDeletes();
/** /**
* An iteration-&gt;Next() sequentially skips over keys with the same * An iteration-&gt;Next() sequentially skips over keys with the same
* user-key unless this option is set. This number specifies the number * user-key unless this option is set. This number specifies the number

View File

@ -955,18 +955,6 @@ public class Options extends RocksObject
return this; return this;
} }
@Override
public boolean filterDeletes() {
return filterDeletes(nativeHandle_);
}
@Override
public Options setFilterDeletes(
final boolean filterDeletes) {
setFilterDeletes(nativeHandle_, filterDeletes);
return this;
}
@Override @Override
public long maxSequentialSkipInIterations() { public long maxSequentialSkipInIterations() {
return maxSequentialSkipInIterations(nativeHandle_); return maxSequentialSkipInIterations(nativeHandle_);
@ -1263,9 +1251,6 @@ public class Options extends RocksObject
private native void setVerifyChecksumsInCompaction( private native void setVerifyChecksumsInCompaction(
long handle, boolean verifyChecksumsInCompaction); long handle, boolean verifyChecksumsInCompaction);
private native boolean verifyChecksumsInCompaction(long handle); private native boolean verifyChecksumsInCompaction(long handle);
private native void setFilterDeletes(
long handle, boolean filterDeletes);
private native boolean filterDeletes(long handle);
private native void setMaxSequentialSkipInIterations( private native void setMaxSequentialSkipInIterations(
long handle, long maxSequentialSkipInIterations); long handle, long maxSequentialSkipInIterations);
private native long maxSequentialSkipInIterations(long handle); private native long maxSequentialSkipInIterations(long handle);

View File

@ -268,15 +268,6 @@ public class ColumnFamilyOptionsTest {
} }
} }
@Test
public void filterDeletes() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
final boolean boolValue = rand.nextBoolean();
opt.setFilterDeletes(boolValue);
assertThat(opt.filterDeletes()).isEqualTo(boolValue);
}
}
@Test @Test
public void maxSequentialSkipInIterations() { public void maxSequentialSkipInIterations() {
try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {

View File

@ -231,15 +231,6 @@ public class OptionsTest {
} }
} }
@Test
public void filterDeletes() {
try (final Options opt = new Options()) {
final boolean boolValue = rand.nextBoolean();
opt.setFilterDeletes(boolValue);
assertThat(opt.filterDeletes()).isEqualTo(boolValue);
}
}
@Test @Test
public void maxSequentialSkipInIterations() { public void maxSequentialSkipInIterations() {
try (final Options opt = new Options()) { try (final Options opt = new Options()) {

View File

@ -766,9 +766,6 @@ DEFINE_uint64(wal_bytes_per_sync, rocksdb::Options().wal_bytes_per_sync,
" being written, in the background. Issue one request for every" " being written, in the background. Issue one request for every"
" wal_bytes_per_sync written. 0 turns it off."); " wal_bytes_per_sync written. 0 turns it off.");
DEFINE_bool(filter_deletes, false, " On true, deletes use bloom-filter and drop"
" the delete if key not present");
DEFINE_bool(use_single_deletes, true, DEFINE_bool(use_single_deletes, true,
"Use single deletes (used in RandomReplaceKeys only)."); "Use single deletes (used in RandomReplaceKeys only).");
@ -2480,7 +2477,6 @@ class Benchmark {
FLAGS_level_compaction_dynamic_level_bytes; FLAGS_level_compaction_dynamic_level_bytes;
options.max_bytes_for_level_multiplier = options.max_bytes_for_level_multiplier =
FLAGS_max_bytes_for_level_multiplier; FLAGS_max_bytes_for_level_multiplier;
options.filter_deletes = FLAGS_filter_deletes;
if ((FLAGS_prefix_size == 0) && (FLAGS_rep_factory == kPrefixHash || if ((FLAGS_prefix_size == 0) && (FLAGS_rep_factory == kPrefixHash ||
FLAGS_rep_factory == kHashLinkedList)) { FLAGS_rep_factory == kHashLinkedList)) {
fprintf(stderr, "prefix_size should be non-zero if PrefixHash or " fprintf(stderr, "prefix_size should be non-zero if PrefixHash or "

View File

@ -411,9 +411,6 @@ DEFINE_uint64(log2_keys_per_lock, 2, "Log2 of number of keys per lock");
static const bool FLAGS_log2_keys_per_lock_dummy __attribute__((unused)) = static const bool FLAGS_log2_keys_per_lock_dummy __attribute__((unused)) =
RegisterFlagValidator(&FLAGS_log2_keys_per_lock, &ValidateUint32Range); RegisterFlagValidator(&FLAGS_log2_keys_per_lock, &ValidateUint32Range);
DEFINE_bool(filter_deletes, false, "On true, deletes use KeyMayExist to drop"
" the delete if key not present");
DEFINE_bool(in_place_update, false, "On true, does inplace update in memtable"); DEFINE_bool(in_place_update, false, "On true, does inplace update in memtable");
enum RepFactory { enum RepFactory {
@ -1048,7 +1045,6 @@ class StressTest {
{"memtable_prefix_bloom_huge_page_tlb_size", {"memtable_prefix_bloom_huge_page_tlb_size",
{"0", ToString(2 * 1024 * 1024)}}, {"0", ToString(2 * 1024 * 1024)}},
{"max_successive_merges", {"0", "2", "4"}}, {"max_successive_merges", {"0", "2", "4"}},
{"filter_deletes", {"0", "1"}},
{"inplace_update_num_locks", {"100", "200", "300"}}, {"inplace_update_num_locks", {"100", "200", "300"}},
// TODO(ljin): enable test for this option // TODO(ljin): enable test for this option
// {"disable_auto_compactions", {"100", "200", "300"}}, // {"disable_auto_compactions", {"100", "200", "300"}},
@ -1998,7 +1994,6 @@ class StressTest {
fprintf(stdout, "Num times DB reopens : %d\n", FLAGS_reopen); fprintf(stdout, "Num times DB reopens : %d\n", FLAGS_reopen);
fprintf(stdout, "Batches/snapshots : %d\n", fprintf(stdout, "Batches/snapshots : %d\n",
FLAGS_test_batches_snapshots); FLAGS_test_batches_snapshots);
fprintf(stdout, "Deletes use filter : %d\n", FLAGS_filter_deletes);
fprintf(stdout, "Do update in place : %d\n", FLAGS_in_place_update); fprintf(stdout, "Do update in place : %d\n", FLAGS_in_place_update);
fprintf(stdout, "Num keys per lock : %d\n", fprintf(stdout, "Num keys per lock : %d\n",
1 << FLAGS_log2_keys_per_lock); 1 << FLAGS_log2_keys_per_lock);
@ -2074,7 +2069,6 @@ class StressTest {
options_.compression = FLAGS_compression_type_e; options_.compression = FLAGS_compression_type_e;
options_.create_if_missing = true; options_.create_if_missing = true;
options_.max_manifest_file_size = 10 * 1024; options_.max_manifest_file_size = 10 * 1024;
options_.filter_deletes = FLAGS_filter_deletes;
options_.inplace_update_support = FLAGS_in_place_update; options_.inplace_update_support = FLAGS_in_place_update;
options_.max_subcompactions = static_cast<uint32_t>(FLAGS_subcompactions); options_.max_subcompactions = static_cast<uint32_t>(FLAGS_subcompactions);
options_.allow_concurrent_memtable_write = options_.allow_concurrent_memtable_write =

View File

@ -76,8 +76,6 @@ void MutableCFOptions::Dump(Logger* log) const {
memtable_prefix_bloom_huge_page_tlb_size); memtable_prefix_bloom_huge_page_tlb_size);
Log(log, " max_successive_merges: %" ROCKSDB_PRIszt, Log(log, " max_successive_merges: %" ROCKSDB_PRIszt,
max_successive_merges); max_successive_merges);
Log(log, " filter_deletes: %d",
filter_deletes);
Log(log, " disable_auto_compactions: %d", Log(log, " disable_auto_compactions: %d",
disable_auto_compactions); disable_auto_compactions);
Log(log, " soft_pending_compaction_bytes_limit: %" PRIu64, Log(log, " soft_pending_compaction_bytes_limit: %" PRIu64,

View File

@ -22,7 +22,6 @@ struct MutableCFOptions {
memtable_prefix_bloom_huge_page_tlb_size( memtable_prefix_bloom_huge_page_tlb_size(
options.memtable_prefix_bloom_huge_page_tlb_size), options.memtable_prefix_bloom_huge_page_tlb_size),
max_successive_merges(options.max_successive_merges), max_successive_merges(options.max_successive_merges),
filter_deletes(options.filter_deletes),
inplace_update_num_locks(options.inplace_update_num_locks), inplace_update_num_locks(options.inplace_update_num_locks),
disable_auto_compactions(options.disable_auto_compactions), disable_auto_compactions(options.disable_auto_compactions),
soft_pending_compaction_bytes_limit( soft_pending_compaction_bytes_limit(
@ -61,7 +60,6 @@ struct MutableCFOptions {
memtable_prefix_bloom_size_ratio(0), memtable_prefix_bloom_size_ratio(0),
memtable_prefix_bloom_huge_page_tlb_size(0), memtable_prefix_bloom_huge_page_tlb_size(0),
max_successive_merges(0), max_successive_merges(0),
filter_deletes(false),
inplace_update_num_locks(0), inplace_update_num_locks(0),
disable_auto_compactions(false), disable_auto_compactions(false),
soft_pending_compaction_bytes_limit(0), soft_pending_compaction_bytes_limit(0),
@ -112,7 +110,6 @@ struct MutableCFOptions {
double memtable_prefix_bloom_size_ratio; double memtable_prefix_bloom_size_ratio;
size_t memtable_prefix_bloom_huge_page_tlb_size; size_t memtable_prefix_bloom_huge_page_tlb_size;
size_t max_successive_merges; size_t max_successive_merges;
bool filter_deletes;
size_t inplace_update_num_locks; size_t inplace_update_num_locks;
// Compaction related options // Compaction related options

View File

@ -113,7 +113,6 @@ ColumnFamilyOptions::ColumnFamilyOptions()
compaction_style(kCompactionStyleLevel), compaction_style(kCompactionStyleLevel),
compaction_pri(kByCompensatedSize), compaction_pri(kByCompensatedSize),
verify_checksums_in_compaction(true), verify_checksums_in_compaction(true),
filter_deletes(false),
max_sequential_skip_in_iterations(8), max_sequential_skip_in_iterations(8),
memtable_factory(std::shared_ptr<SkipListFactory>(new SkipListFactory)), memtable_factory(std::shared_ptr<SkipListFactory>(new SkipListFactory)),
table_factory( table_factory(
@ -179,7 +178,6 @@ ColumnFamilyOptions::ColumnFamilyOptions(const Options& options)
verify_checksums_in_compaction(options.verify_checksums_in_compaction), verify_checksums_in_compaction(options.verify_checksums_in_compaction),
compaction_options_universal(options.compaction_options_universal), compaction_options_universal(options.compaction_options_universal),
compaction_options_fifo(options.compaction_options_fifo), compaction_options_fifo(options.compaction_options_fifo),
filter_deletes(options.filter_deletes),
max_sequential_skip_in_iterations( max_sequential_skip_in_iterations(
options.max_sequential_skip_in_iterations), options.max_sequential_skip_in_iterations),
memtable_factory(options.memtable_factory), memtable_factory(options.memtable_factory),
@ -561,8 +559,6 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
rate_limit_delay_max_milliseconds); rate_limit_delay_max_milliseconds);
Header(log, " Options.disable_auto_compactions: %d", Header(log, " Options.disable_auto_compactions: %d",
disable_auto_compactions); disable_auto_compactions);
Header(log, " Options.filter_deletes: %d",
filter_deletes);
Header(log, " Options.verify_checksums_in_compaction: %d", Header(log, " Options.verify_checksums_in_compaction: %d",
verify_checksums_in_compaction); verify_checksums_in_compaction);
Header(log, " Options.compaction_style: %d", Header(log, " Options.compaction_style: %d",

View File

@ -548,7 +548,7 @@ bool ParseMemtableOptions(const std::string& name, const std::string& value,
} else if (name == "max_successive_merges") { } else if (name == "max_successive_merges") {
new_options->max_successive_merges = ParseSizeT(value); new_options->max_successive_merges = ParseSizeT(value);
} else if (name == "filter_deletes") { } else if (name == "filter_deletes") {
new_options->filter_deletes = ParseBoolean(name, value); // Deprecated
} else if (name == "max_write_buffer_number") { } else if (name == "max_write_buffer_number") {
new_options->max_write_buffer_number = ParseInt(value); new_options->max_write_buffer_number = ParseInt(value);
} else if (name == "inplace_update_num_locks") { } else if (name == "inplace_update_num_locks") {
@ -1446,7 +1446,6 @@ ColumnFamilyOptions BuildColumnFamilyOptions(
cf_opts.memtable_prefix_bloom_huge_page_tlb_size = cf_opts.memtable_prefix_bloom_huge_page_tlb_size =
mutable_cf_options.memtable_prefix_bloom_huge_page_tlb_size; mutable_cf_options.memtable_prefix_bloom_huge_page_tlb_size;
cf_opts.max_successive_merges = mutable_cf_options.max_successive_merges; cf_opts.max_successive_merges = mutable_cf_options.max_successive_merges;
cf_opts.filter_deletes = mutable_cf_options.filter_deletes;
cf_opts.inplace_update_num_locks = cf_opts.inplace_update_num_locks =
mutable_cf_options.inplace_update_num_locks; mutable_cf_options.inplace_update_num_locks;

View File

@ -348,8 +348,7 @@ static std::unordered_map<std::string, OptionTypeInfo> cf_options_type_info = {
{offsetof(struct ColumnFamilyOptions, disable_auto_compactions), {offsetof(struct ColumnFamilyOptions, disable_auto_compactions),
OptionType::kBoolean, OptionVerificationType::kNormal}}, OptionType::kBoolean, OptionVerificationType::kNormal}},
{"filter_deletes", {"filter_deletes",
{offsetof(struct ColumnFamilyOptions, filter_deletes), {0, OptionType::kBoolean, OptionVerificationType::kDeprecated}},
OptionType::kBoolean, OptionVerificationType::kNormal}},
{"inplace_update_support", {"inplace_update_support",
{offsetof(struct ColumnFamilyOptions, inplace_update_support), {offsetof(struct ColumnFamilyOptions, inplace_update_support),
OptionType::kBoolean, OptionVerificationType::kNormal}}, OptionType::kBoolean, OptionVerificationType::kNormal}},

View File

@ -423,7 +423,6 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) {
"inplace_update_support=false;" "inplace_update_support=false;"
"compaction_style=kCompactionStyleFIFO;" "compaction_style=kCompactionStyleFIFO;"
"purge_redundant_kvs_while_flush=true;" "purge_redundant_kvs_while_flush=true;"
"filter_deletes=false;"
"hard_pending_compaction_bytes_limit=0;" "hard_pending_compaction_bytes_limit=0;"
"disable_auto_compactions=false;" "disable_auto_compactions=false;"
"report_bg_io_stats=true;", "report_bg_io_stats=true;",

View File

@ -80,7 +80,6 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
{"compaction_style", "kCompactionStyleLevel"}, {"compaction_style", "kCompactionStyleLevel"},
{"verify_checksums_in_compaction", "false"}, {"verify_checksums_in_compaction", "false"},
{"compaction_options_fifo", "23"}, {"compaction_options_fifo", "23"},
{"filter_deletes", "0"},
{"max_sequential_skip_in_iterations", "24"}, {"max_sequential_skip_in_iterations", "24"},
{"inplace_update_support", "true"}, {"inplace_update_support", "true"},
{"report_bg_io_stats", "true"}, {"report_bg_io_stats", "true"},
@ -181,7 +180,6 @@ TEST_F(OptionsTest, GetOptionsFromMapTest) {
ASSERT_EQ(new_cf_opt.verify_checksums_in_compaction, false); ASSERT_EQ(new_cf_opt.verify_checksums_in_compaction, false);
ASSERT_EQ(new_cf_opt.compaction_options_fifo.max_table_files_size, ASSERT_EQ(new_cf_opt.compaction_options_fifo.max_table_files_size,
static_cast<uint64_t>(23)); static_cast<uint64_t>(23));
ASSERT_EQ(new_cf_opt.filter_deletes, false);
ASSERT_EQ(new_cf_opt.max_sequential_skip_in_iterations, ASSERT_EQ(new_cf_opt.max_sequential_skip_in_iterations,
static_cast<uint64_t>(24)); static_cast<uint64_t>(24));
ASSERT_EQ(new_cf_opt.inplace_update_support, true); ASSERT_EQ(new_cf_opt.inplace_update_support, true);

View File

@ -296,7 +296,6 @@ void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, Random* rnd) {
// boolean options // boolean options
cf_opt->report_bg_io_stats = rnd->Uniform(2); cf_opt->report_bg_io_stats = rnd->Uniform(2);
cf_opt->disable_auto_compactions = rnd->Uniform(2); cf_opt->disable_auto_compactions = rnd->Uniform(2);
cf_opt->filter_deletes = rnd->Uniform(2);
cf_opt->inplace_update_support = rnd->Uniform(2); cf_opt->inplace_update_support = rnd->Uniform(2);
cf_opt->level_compaction_dynamic_level_bytes = rnd->Uniform(2); cf_opt->level_compaction_dynamic_level_bytes = rnd->Uniform(2);
cf_opt->optimize_filters_for_hits = rnd->Uniform(2); cf_opt->optimize_filters_for_hits = rnd->Uniform(2);