// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#pragma once

#include <assert.h>

#include "table/internal_iterator.h"
#include "port/port.h"
namespace rocksdb {
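
// ScopedArenaIterator is an RAII wrapper for an InternalIterator that was
// allocated in an Arena. Because the memory belongs to the arena, the wrapper
// never calls delete; it only invokes the iterator's destructor when it is
// reset, reassigned, or destroyed. Ownership can be transferred via move
// construction/assignment or release(), much like unique_ptr is used for
// malloc'd data.
//
// Typical usage (sketch; NewArenaIterator stands in for any factory that
// placement-constructs an InternalIterator inside the arena):
//
//   Arena arena;
//   ScopedArenaIterator iter(NewArenaIterator(&arena));
//   iter->SeekToFirst();
//   // The iterator's destructor runs when `iter` goes out of scope; the
//   // arena still owns and later frees the underlying memory.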
class ScopedArenaIterator {
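  // Destroy the currently owned iterator, if any, and take ownership of
  // `iter`. Only the destructor is invoked; the memory itself is owned by
  // the arena and is not freed here.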
void reset(InternalIterator* iter) ROCKSDB_NOEXCEPT {
if (iter_ != nullptr) {
iter_->~InternalIterator();
}
iter_ = iter;
}
public:
explicit ScopedArenaIterator(InternalIterator* iter = nullptr)
: iter_(iter) {}
ScopedArenaIterator(const ScopedArenaIterator&) = delete;
ScopedArenaIterator& operator=(const ScopedArenaIterator&) = delete;
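  // Moving transfers ownership of the iterator; the moved-from object is
  // left holding nullptr and can be safely destroyed or reassigned.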
ScopedArenaIterator(ScopedArenaIterator&& o) ROCKSDB_NOEXCEPT {
iter_ = o.iter_;
o.iter_ = nullptr;
}
ScopedArenaIterator& operator=(ScopedArenaIterator&& o) ROCKSDB_NOEXCEPT {
reset(o.iter_);
o.iter_ = nullptr;
return *this;
}
InternalIterator* operator->() { return iter_; }
InternalIterator* get() { return iter_; }
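  // Replace the owned iterator; the previously owned iterator, if any, is
  // destroyed (same as reset()).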
void set(InternalIterator* iter) { reset(iter); }
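  // Relinquish ownership without destroying the iterator. The caller becomes
  // responsible for eventually invoking its destructor (the arena still owns
  // the memory).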
InternalIterator* release() {
assert(iter_ != nullptr);
auto* res = iter_;
iter_ = nullptr;
return res;
}
~ScopedArenaIterator() {
reset(nullptr);
}
private:
InternalIterator* iter_;
};
} // namespace rocksdb