Change is_range_del_table_empty_ flag to atomic (#4801)

Summary:
To avoid a data race on the flag, make it a std::atomic_bool. This
doesn't seem to significantly affect benchmarks.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4801

Differential Revision: D13523845

Pulled By: abhimadan

fbshipit-source-id: 3bc29f53c50a4e06cd9f8c6232a4bb221868e055
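
A minimal sketch of the pattern the summary describes, with illustrative names (RangeDelFlag, MarkNonEmpty, and IsEmpty are invented for this sketch, not RocksDB identifiers): a flag that one thread writes while another reads concurrently is a data race, and undefined behavior, as a plain bool, but is well-defined as a std::atomic_bool, with no lock required.

#include <atomic>

// Illustrative sketch, not the RocksDB implementation: one thread may set
// the flag (the Add() path) while another reads it concurrently (the
// NewRangeTombstoneIterator() path). As a plain bool this is a data race;
// as a std::atomic_bool both accesses are well-defined.
class RangeDelFlag {
 public:
  RangeDelFlag() : empty_(true) {}

  // Writer side: mark the range deletion table non-empty.
  void MarkNonEmpty() { empty_.store(false, std::memory_order_relaxed); }

  // Reader side: check whether the table is still empty.
  bool IsEmpty() const { return empty_.load(std::memory_order_relaxed); }

 private:
  std::atomic_bool empty_;
};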
Abhishek Madan 2018-12-19 17:18:44 -08:00 committed by Facebook Github Bot
parent 8bf73208a4
commit 02bfc5831e
2 changed files with 5 additions and 4 deletions

db/memtable.cc

@@ -414,7 +414,8 @@ InternalIterator* MemTable::NewIterator(const ReadOptions& read_options,
 FragmentedRangeTombstoneIterator* MemTable::NewRangeTombstoneIterator(
     const ReadOptions& read_options, SequenceNumber read_seq) {
-  if (read_options.ignore_range_deletions || is_range_del_table_empty_) {
+  if (read_options.ignore_range_deletions ||
+      is_range_del_table_empty_.load(std::memory_order_relaxed)) {
     return nullptr;
   }
   auto* unfragmented_iter = new MemTableIterator(
@@ -563,8 +564,8 @@ bool MemTable::Add(SequenceNumber s, ValueType type,
          !first_seqno_.compare_exchange_weak(cur_earliest_seqno, s)) {
     }
   }
-  if (is_range_del_table_empty_ && type == kTypeRangeDeletion) {
-    is_range_del_table_empty_ = false;
+  if (type == kTypeRangeDeletion) {
+    is_range_del_table_empty_.store(false, std::memory_order_relaxed);
   }
   UpdateOldestKeyTime();
   return true;

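Note that the write side above does more than swap in an atomic: the old code tested the flag before writing it, while the new code stores false unconditionally whenever a range deletion is added, so that path needs no read of the flag at all. A self-contained toy (invented here for illustration, not RocksDB code) showing what the relaxed accesses above do and do not guarantee:

#include <atomic>
#include <iostream>
#include <thread>

// Toy demonstration of the concurrent access this diff makes safe: one
// thread stores to the flag while another loads it, mirroring a concurrent
// Add() and NewRangeTombstoneIterator(). std::memory_order_relaxed still
// guarantees the accesses are atomic -- the reader observes either true or
// false, never a torn value -- but imposes no ordering on surrounding
// memory operations, which is why it is the cheapest ordering available.
int main() {
  std::atomic_bool is_range_del_table_empty{true};

  std::thread writer([&] {
    // Mirrors the Add() path: unconditional relaxed store.
    is_range_del_table_empty.store(false, std::memory_order_relaxed);
  });
  std::thread reader([&] {
    // Mirrors the NewRangeTombstoneIterator() path: relaxed load.
    bool empty = is_range_del_table_empty.load(std::memory_order_relaxed);
    std::cout << (empty ? "empty" : "non-empty") << "\n";
  });

  writer.join();
  reader.join();
  return 0;
}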
db/memtable.h

@@ -409,7 +409,7 @@ class MemTable {
   ConcurrentArena arena_;
   std::unique_ptr<MemTableRep> table_;
   std::unique_ptr<MemTableRep> range_del_table_;
-  bool is_range_del_table_empty_;
+  std::atomic_bool is_range_del_table_empty_;
   // Total data size of all data inserted
   std::atomic<uint64_t> data_size_;
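
One caveat the header change implies but this diff does not show: before C++20, a default-constructed std::atomic_bool holds an indeterminate value, so the member still has to be initialized explicitly. A reduced, hypothetical sketch (assuming the real MemTable constructor initializes the flag to true, as the old plain bool presumably was):

#include <atomic>

// Hypothetical reduction of the declaration change. Pre-C++20, a
// default-constructed std::atomic_bool is not value-initialized, so the
// constructor must set the flag; the real MemTable constructor (outside
// this diff) is assumed to initialize it to true.
class MemTableSketch {
 public:
  MemTableSketch() : is_range_del_table_empty_(true) {}

 private:
  std::atomic_bool is_range_del_table_empty_;
};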