From 885b1c682e85391beb7f26a842e2128e3653bd4a Mon Sep 17 00:00:00 2001
From: Siying Dong
Date: Thu, 14 Sep 2017 15:41:19 -0700
Subject: [PATCH] Two small refactorings for better inlining

Summary:
Move uncommon code paths in RangeDelAggregator::ShouldDelete() and
IterKey::EnlargeBufferIfNeeded() into separate functions, so that the code
that stays inline can be better optimized. It is worth optimizing because
these places show up in CPU profiles, though only minimally. The performance
gain is really hard to measure. I ran db_bench with the readseq benchmark
against an in-memory DB many times. The variation is big, but it seems to
show about a 1% improvement.
Closes https://github.com/facebook/rocksdb/pull/2877

Differential Revision: D5828123

Pulled By: siying

fbshipit-source-id: 41a49e229f91e9f8409f85cc6f0dc70e31334e4b
---
 db/dbformat.cc             |  9 +++++++++
 db/dbformat.h              |  7 +++----
 db/range_del_aggregator.cc | 12 ++++--------
 db/range_del_aggregator.h  | 19 +++++++++++++++++--
 4 files changed, 33 insertions(+), 14 deletions(-)

diff --git a/db/dbformat.cc b/db/dbformat.cc
index 20c54495a..d76b28e60 100644
--- a/db/dbformat.cc
+++ b/db/dbformat.cc
@@ -174,4 +174,13 @@ LookupKey::LookupKey(const Slice& _user_key, SequenceNumber s) {
   end_ = dst;
 }
 
+void IterKey::EnlargeBuffer(size_t key_size) {
+  // If size is smaller than buffer size, continue using current buffer,
+  // or the static allocated one, as default
+  assert(key_size > buf_size_);
+  // Need to enlarge the buffer.
+  ResetBuffer();
+  buf_ = new char[key_size];
+  buf_size_ = key_size;
+}
 }  // namespace rocksdb
diff --git a/db/dbformat.h b/db/dbformat.h
index 187069cd0..8e176807d 100644
--- a/db/dbformat.h
+++ b/db/dbformat.h
@@ -504,13 +504,12 @@ class IterKey {
     // If size is smaller than buffer size, continue using current buffer,
     // or the static allocated one, as default
     if (key_size > buf_size_) {
-      // Need to enlarge the buffer.
-      ResetBuffer();
-      buf_ = new char[key_size];
-      buf_size_ = key_size;
+      EnlargeBuffer(key_size);
     }
   }
 
+  void EnlargeBuffer(size_t key_size);
+
   // No copying allowed
   IterKey(const IterKey&) = delete;
   void operator=(const IterKey&) = delete;
diff --git a/db/range_del_aggregator.cc b/db/range_del_aggregator.cc
index c83f5a88c..e80b53494 100644
--- a/db/range_del_aggregator.cc
+++ b/db/range_del_aggregator.cc
@@ -43,11 +43,9 @@ void RangeDelAggregator::InitRep(const std::vector<SequenceNumber>& snapshots) {
   rep_->pinned_iters_mgr_.StartPinning();
 }
 
-bool RangeDelAggregator::ShouldDelete(
+bool RangeDelAggregator::ShouldDeleteImpl(
     const Slice& internal_key, RangeDelAggregator::RangePositioningMode mode) {
-  if (rep_ == nullptr) {
-    return false;
-  }
+  assert(rep_ != nullptr);
   ParsedInternalKey parsed;
   if (!ParseInternalKey(internal_key, &parsed)) {
     assert(false);
@@ -55,13 +53,11 @@ bool RangeDelAggregator::ShouldDelete(
   return ShouldDelete(parsed, mode);
 }
 
-bool RangeDelAggregator::ShouldDelete(
+bool RangeDelAggregator::ShouldDeleteImpl(
     const ParsedInternalKey& parsed,
     RangeDelAggregator::RangePositioningMode mode) {
   assert(IsValueType(parsed.type));
-  if (rep_ == nullptr) {
-    return false;
-  }
+  assert(rep_ != nullptr);
   auto& positional_tombstone_map = GetPositionalTombstoneMap(parsed.sequence);
   const auto& tombstone_map = positional_tombstone_map.raw_map;
   if (tombstone_map.empty()) {
diff --git a/db/range_del_aggregator.h b/db/range_del_aggregator.h
index 9d4b8ca16..76fe1870a 100644
--- a/db/range_del_aggregator.h
+++ b/db/range_del_aggregator.h
@@ -74,9 +74,24 @@ class RangeDelAggregator {
   //    the deletion whose interval contains this key. Otherwise, its
   //    value must be kFullScan indicating linear scan from beginning..
   bool ShouldDelete(const ParsedInternalKey& parsed,
-                    RangePositioningMode mode = kFullScan);
+                    RangePositioningMode mode = kFullScan) {
+    if (rep_ == nullptr) {
+      return false;
+    }
+    return ShouldDeleteImpl(parsed, mode);
+  }
   bool ShouldDelete(const Slice& internal_key,
-                    RangePositioningMode mode = kFullScan);
+                    RangePositioningMode mode = kFullScan) {
+    if (rep_ == nullptr) {
+      return false;
+    }
+    return ShouldDeleteImpl(internal_key, mode);
+  }
+  bool ShouldDeleteImpl(const ParsedInternalKey& parsed,
+                        RangePositioningMode mode = kFullScan);
+  bool ShouldDeleteImpl(const Slice& internal_key,
+                        RangePositioningMode mode = kFullScan);
+
   bool ShouldAddTombstones(bool bottommost_level = false);
 
   // Adds tombstones to the tombstone aggregation structure maintained by this
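
The sketch below is a minimal, self-contained illustration of the pattern both
refactorings rely on: keep the cheap, common-case check in a small function the
compiler can inline at every call site, and move the rare, heavier path into an
out-of-line helper so its body does not bloat the inlined code. The names here
(SmallKeyBuffer, space_, the noinline attribute) are hypothetical stand-ins for
illustration only, not the actual RocksDB types or build settings.

#include <cassert>
#include <cstddef>
#include <cstring>

// Hypothetical, stripped-down buffer holder mirroring the split applied to
// IterKey: the "buffer already big enough" check stays inline, while the
// rare reallocation lives in an out-of-line helper.
class SmallKeyBuffer {
 public:
  SmallKeyBuffer() : buf_(space_), buf_size_(sizeof(space_)) {}
  ~SmallKeyBuffer() { ResetBuffer(); }

  // Hot path: a single comparison, small enough to inline everywhere.
  // The expensive branch is delegated to EnlargeBuffer().
  void EnlargeBufferIfNeeded(size_t key_size) {
    if (key_size > buf_size_) {
      EnlargeBuffer(key_size);  // uncommon path, kept out of line
    }
  }

  void SetKey(const char* data, size_t size) {
    EnlargeBufferIfNeeded(size);
    std::memcpy(buf_, data, size);
    key_size_ = size;
  }

 private:
  // Cold path: marked noinline (GCC/Clang) so it does not grow the callers
  // of EnlargeBufferIfNeeded().
#if defined(__GNUC__) || defined(__clang__)
  __attribute__((noinline))
#endif
  void EnlargeBuffer(size_t key_size) {
    assert(key_size > buf_size_);
    ResetBuffer();
    buf_ = new char[key_size];
    buf_size_ = key_size;
  }

  void ResetBuffer() {
    if (buf_ != space_) {
      delete[] buf_;
    }
    buf_ = space_;
    buf_size_ = sizeof(space_);
  }

  char* buf_;
  size_t buf_size_;
  size_t key_size_ = 0;
  char space_[32];  // inline storage used for short keys
};

int main() {
  SmallKeyBuffer key;
  key.SetKey("short", 5);                  // stays on the static buffer
  char long_key[128] = {};
  key.SetKey(long_key, sizeof(long_key));  // triggers the out-of-line enlarge
  return 0;
}

The ShouldDelete() change in db/range_del_aggregator.h follows the same shape:
the inline wrapper keeps only the rep_ == nullptr early return and forwards
everything else to the out-of-line ShouldDeleteImpl().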