// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#pragma once

#include <algorithm>
#include <cinttypes>
#include <deque>
#include <string>
#include <unordered_set>
#include <vector>

#include "db/compaction/compaction.h"
#include "db/compaction/compaction_iteration_stats.h"
#include "db/merge_helper.h"
#include "db/pinned_iterators_manager.h"
#include "db/range_del_aggregator.h"
#include "db/snapshot_checker.h"
#include "options/cf_options.h"
#include "rocksdb/compaction_filter.h"

namespace ROCKSDB_NAMESPACE {

class BlobFileBuilder;

// A wrapper of an internal iterator whose purpose is to count how
// many entries there are in the iterator.
class SequenceIterWrapper : public InternalIterator {
 public:
  SequenceIterWrapper(InternalIterator* iter, const Comparator* cmp,
                      bool need_count_entries)
      : icmp_(cmp, /*named=*/false),
        inner_iter_(iter),
        need_count_entries_(need_count_entries) {}
  bool Valid() const override { return inner_iter_->Valid(); }
  Status status() const override { return inner_iter_->status(); }
  void Next() override {
    num_itered_++;
    inner_iter_->Next();
  }
  void Seek(const Slice& target) override {
    if (!need_count_entries_) {
      inner_iter_->Seek(target);
    } else {
      // For flush cases, we need to count the total number of entries, so we
      // do Next() rather than Seek().
      while (inner_iter_->Valid() &&
             icmp_.Compare(inner_iter_->key(), target) < 0) {
        Next();
      }
    }
  }
  Slice key() const override { return inner_iter_->key(); }
  Slice value() const override { return inner_iter_->value(); }

  // Unused InternalIterator methods
  void SeekToFirst() override { assert(false); }
  void Prev() override { assert(false); }
  void SeekForPrev(const Slice& /* target */) override { assert(false); }
  void SeekToLast() override { assert(false); }

  uint64_t num_itered() const { return num_itered_; }

 private:
  InternalKeyComparator icmp_;
  InternalIterator* inner_iter_;  // not owned
  uint64_t num_itered_ = 0;
  bool need_count_entries_;
};
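
// Illustrative sketch (not part of the API): how a caller might use the
// wrapper to count the entries consumed from an input iterator. `raw_iter`,
// `ucmp`, and `start_key` are hypothetical names.
//
//   SequenceIterWrapper wrapped(raw_iter, ucmp, /*need_count_entries=*/true);
//   wrapped.Seek(start_key);  // when counting, skipped entries go via Next()
//   while (wrapped.Valid()) {
//     wrapped.Next();
//   }
//   uint64_t total_entries = wrapped.num_itered();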

class CompactionIterator {
 public:
  // A wrapper around Compaction. Has a much smaller interface, only what
  // CompactionIterator uses. Tests can override it, as sketched below.
  class CompactionProxy {
   public:
    virtual ~CompactionProxy() = default;

    virtual int level() const = 0;

    virtual bool KeyNotExistsBeyondOutputLevel(
        const Slice& user_key, std::vector<size_t>* level_ptrs) const = 0;

    virtual bool bottommost_level() const = 0;

    virtual int number_levels() const = 0;

    virtual Slice GetLargestUserKey() const = 0;

    virtual bool allow_ingest_behind() const = 0;

    virtual bool preserve_deletes() const = 0;

    virtual bool enable_blob_garbage_collection() const = 0;

    virtual double blob_garbage_collection_age_cutoff() const = 0;

    virtual Version* input_version() const = 0;

    virtual bool DoesInputReferenceBlobFiles() const = 0;

    virtual const Compaction* real_compaction() const = 0;
  };
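
  // Illustrative sketch (hypothetical names, loosely modeled on the test
  // code): a minimal stand-in a test could supply instead of RealCompaction:
  //
  //   class FakeCompaction : public CompactionIterator::CompactionProxy {
  //    public:
  //     int level() const override { return 0; }
  //     bool KeyNotExistsBeyondOutputLevel(
  //         const Slice& /*user_key*/,
  //         std::vector<size_t>* /*level_ptrs*/) const override {
  //       return false;
  //     }
  //     bool bottommost_level() const override { return false; }
  //     int number_levels() const override { return 1; }
  //     Slice GetLargestUserKey() const override { return Slice(); }
  //     bool allow_ingest_behind() const override { return false; }
  //     bool preserve_deletes() const override { return false; }
  //     bool enable_blob_garbage_collection() const override { return false; }
  //     double blob_garbage_collection_age_cutoff() const override {
  //       return 0.0;
  //     }
  //     Version* input_version() const override { return nullptr; }
  //     bool DoesInputReferenceBlobFiles() const override { return false; }
  //     const Compaction* real_compaction() const override { return nullptr; }
  //   };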

  class RealCompaction : public CompactionProxy {
   public:
    explicit RealCompaction(const Compaction* compaction)
        : compaction_(compaction) {
      assert(compaction_);
      assert(compaction_->immutable_options());
      assert(compaction_->mutable_cf_options());
    }

    int level() const override { return compaction_->level(); }

    bool KeyNotExistsBeyondOutputLevel(
        const Slice& user_key, std::vector<size_t>* level_ptrs) const override {
      return compaction_->KeyNotExistsBeyondOutputLevel(user_key, level_ptrs);
    }

    bool bottommost_level() const override {
      return compaction_->bottommost_level();
    }

    int number_levels() const override { return compaction_->number_levels(); }

    Slice GetLargestUserKey() const override {
      return compaction_->GetLargestUserKey();
    }

    bool allow_ingest_behind() const override {
      return compaction_->immutable_options()->allow_ingest_behind;
    }

    bool preserve_deletes() const override {
      return compaction_->immutable_options()->preserve_deletes;
    }

    bool enable_blob_garbage_collection() const override {
      return compaction_->mutable_cf_options()->enable_blob_garbage_collection;
    }

    double blob_garbage_collection_age_cutoff() const override {
      return compaction_->mutable_cf_options()
          ->blob_garbage_collection_age_cutoff;
    }

    Version* input_version() const override {
      return compaction_->input_version();
    }

    bool DoesInputReferenceBlobFiles() const override {
      return compaction_->DoesInputReferenceBlobFiles();
    }

    const Compaction* real_compaction() const override { return compaction_; }

   private:
    const Compaction* compaction_;
  };

  CompactionIterator(
      InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
      SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
      SequenceNumber earliest_write_conflict_snapshot,
      const SnapshotChecker* snapshot_checker, Env* env,
      bool report_detailed_time, bool expect_valid_internal_key,
      CompactionRangeDelAggregator* range_del_agg,
      BlobFileBuilder* blob_file_builder, bool allow_data_in_errors,
      const Compaction* compaction = nullptr,
      const CompactionFilter* compaction_filter = nullptr,
      const std::atomic<bool>* shutting_down = nullptr,
      const SequenceNumber preserve_deletes_seqnum = 0,
      const std::atomic<int>* manual_compaction_paused = nullptr,
      const std::atomic<bool>* manual_compaction_canceled = nullptr,
      const std::shared_ptr<Logger> info_log = nullptr,
      const std::string* full_history_ts_low = nullptr);
|
2015-09-10 23:35:25 +02:00
|
|
|
|
2016-12-01 16:00:17 +01:00
|
|
|
// Constructor with custom CompactionProxy, used for tests.
|
2021-06-07 20:40:31 +02:00
|
|
|
CompactionIterator(
|
|
|
|
InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
|
|
|
|
SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
|
|
|
|
SequenceNumber earliest_write_conflict_snapshot,
|
|
|
|
const SnapshotChecker* snapshot_checker, Env* env,
|
|
|
|
bool report_detailed_time, bool expect_valid_internal_key,
|
|
|
|
CompactionRangeDelAggregator* range_del_agg,
|
|
|
|
BlobFileBuilder* blob_file_builder, bool allow_data_in_errors,
|
|
|
|
std::unique_ptr<CompactionProxy> compaction,
|
|
|
|
const CompactionFilter* compaction_filter = nullptr,
|
|
|
|
const std::atomic<bool>* shutting_down = nullptr,
|
|
|
|
const SequenceNumber preserve_deletes_seqnum = 0,
|
|
|
|
const std::atomic<int>* manual_compaction_paused = nullptr,
|
|
|
|
const std::atomic<bool>* manual_compaction_canceled = nullptr,
|
|
|
|
const std::shared_ptr<Logger> info_log = nullptr,
|
|
|
|
const std::string* full_history_ts_low = nullptr);

  ~CompactionIterator();

  void ResetRecordCounts();

  // Seek to the beginning of the compaction iterator output.
  //
  // REQUIRED: Call only once.
  void SeekToFirst();

  // Produces the next record in the compaction.
  //
  // REQUIRED: SeekToFirst() has been called.
  void Next();

  // Getters
  const Slice& key() const { return key_; }
  const Slice& value() const { return value_; }
  const Status& status() const { return status_; }
  const ParsedInternalKey& ikey() const { return ikey_; }
  bool Valid() const { return valid_; }
  const Slice& user_key() const { return current_user_key_; }
  const CompactionIterationStats& iter_stats() const { return iter_stats_; }
  uint64_t num_input_entry_scanned() const { return input_.num_itered(); }
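
  // Illustrative sketch: the expected driving loop, assuming `c_iter` is a
  // fully constructed CompactionIterator and `emit` is a hypothetical
  // callback that consumes each output record:
  //
  //   c_iter.SeekToFirst();
  //   while (c_iter.Valid()) {
  //     emit(c_iter.key(), c_iter.value());
  //     c_iter.Next();
  //   }
  //   Status s = c_iter.status();  // check for errors once the loop exits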
|
2015-09-10 23:35:25 +02:00
|
|
|
|
|
|
|
private:
|
|
|
|
// Processes the input stream to find the next output
|
|
|
|
void NextFromInput();

  // Do final preparations before presenting the output to the caller.
  void PrepareOutput();

  // Passes the output value to the blob file builder (if any), and replaces it
  // with the corresponding blob reference if it has actually been written to a
  // blob file (i.e. if it passed the value size check). Returns true if the
  // value got extracted to a blob file, false otherwise.
  bool ExtractLargeValueIfNeededImpl();

  // Extracts large values as described above, and updates the internal key's
  // type to kTypeBlobIndex if the value got extracted. Should only be called
  // for regular values (kTypeValue).
  void ExtractLargeValueIfNeeded();

  // Relocates valid blobs residing in the oldest blob files if garbage
  // collection is enabled. Relocated blobs are written to new blob files or
  // inlined in the LSM tree depending on the current settings (i.e.
  // enable_blob_files and min_blob_size). Should only be called for blob
  // references (kTypeBlobIndex).
  //
  // Note: the stacked BlobDB implementation's compaction filter based GC
  // algorithm is also called from here.
  void GarbageCollectBlobIfNeeded();
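
  // Since a relocated blob passes through the regular large-value extraction
  // logic above, it may be re-inlined if the settings have changed. For
  // example, one way to re-inline all blob values is a full manual compaction
  // with `enable_blob_files` set to `false`, `enable_blob_garbage_collection`
  // set to `true`, and `blob_garbage_collection_age_cutoff` set to `1.0`.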

  // Invoke the compaction filter if needed.
  // Returns true on success, false on failure (e.g. kIOError).
  bool InvokeFilterIfNeeded(bool* need_skip, Slice* skip_until);
|
2017-10-06 19:26:38 +02:00
|
|
|
|
2015-09-10 23:35:25 +02:00
|
|
|
// Given a sequence number, return the sequence number of the
|
|
|
|
// earliest snapshot that this sequence number is visible in.
|
|
|
|
// The snapshots themselves are arranged in ascending order of
|
|
|
|
// sequence numbers.
|
|
|
|
// Employ a sequential search because the total number of
|
|
|
|
// snapshots are typically small.
|
|
|
|
inline SequenceNumber findEarliestVisibleSnapshot(
|
|
|
|
SequenceNumber in, SequenceNumber* prev_snapshot);
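
  // For example, given snapshots_ = {5, 10, 20} and in = 7,
  // findEarliestVisibleSnapshot returns 10 (the earliest snapshot in which
  // sequence number 7 is visible) and sets *prev_snapshot to 5.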

  // Checks whether the currently seen ikey_ is needed for an
  // incremental (differential) snapshot and hence can't be dropped
  // or have its seqnum zeroed out, even if all other conditions for
  // dropping it are met.
  inline bool ikeyNotNeededForIncrementalSnapshot();

  inline bool KeyCommitted(SequenceNumber sequence) {
    return snapshot_checker_ == nullptr ||
           snapshot_checker_->CheckInSnapshot(sequence, kMaxSequenceNumber) ==
               SnapshotCheckerResult::kInSnapshot;
  }

  bool DefinitelyInSnapshot(SequenceNumber seq, SequenceNumber snapshot);

  bool DefinitelyNotInSnapshot(SequenceNumber seq, SequenceNumber snapshot);

  // Extract user-defined timestamp from user key if possible and compare it
  // with *full_history_ts_low_ if applicable.
  inline void UpdateTimestampAndCompareWithFullHistoryLow() {
    if (!timestamp_size_) {
      return;
    }
    Slice ts = ExtractTimestampFromUserKey(ikey_.user_key, timestamp_size_);
    curr_ts_.assign(ts.data(), ts.size());
    if (full_history_ts_low_) {
      cmp_with_history_ts_low_ =
          cmp_->CompareTimestamp(ts, *full_history_ts_low_);
    }
  }
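
  // After the call above, cmp_with_history_ts_low_ < 0 indicates that the
  // key's timestamp is older than *full_history_ts_low_, i.e. the key falls
  // outside the retained full history and *may* be eligible for GC.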

  static uint64_t ComputeBlobGarbageCollectionCutoffFileNumber(
      const CompactionProxy* compaction);
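
  // Rough sketch of the intended semantics (not a normative example): if the
  // input version has blob files {10, 11, 12, 13} (oldest first) and
  // blob_garbage_collection_age_cutoff is 0.5, the computed cutoff is file
  // number 12, so valid blobs residing in files 10 and 11 are relocated.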
|
|
|
|
|
2021-05-21 01:06:12 +02:00
|
|
|
SequenceIterWrapper input_;
|
2015-09-10 23:35:25 +02:00
|
|
|
const Comparator* cmp_;
|
|
|
|
MergeHelper* merge_helper_;
|
2019-09-19 05:24:17 +02:00
|
|
|
const std::vector<SequenceNumber>* snapshots_;
|
WritePrepared: fix two versions in compaction see different status for released snapshots (#4890)
Summary:
Fix how CompactionIterator::findEarliestVisibleSnapshots handles released snapshot. It fixing the two scenarios:
Scenario 1:
key1 has two values v1 and v2. There're two snapshots s1 and s2 taken after v1 and v2 are committed. Right after compaction output v2, s1 is released. Now findEarliestVisibleSnapshot may see s1 being released, and return the next snapshot, which is s2. That's larger than v2's earliest visible snapshot, which was s1.
The fix: the only place we check against last snapshot and current key snapshot is when we decide whether to compact out a value if it is hidden by a later value. In the check if we see current snapshot is even larger than last snapshot, we know last snapshot is released, and we are safe to compact out current key.
Scenario 2:
key1 has two values v1 and v2. there are two snapshots s1 and s2 taken after v1 and v2 are committed. During compaction before we process the key, s1 is released. When compaction process v2, snapshot checker may return kSnapshotReleased, and the earliest visible snapshot for v2 become s2. When compaction process v1, snapshot checker may return kIsInSnapshot (for WritePrepared transaction, it could be because v1 is still in commit cache). The result will become inconsistent here.
The fix: remember the set of released snapshots ever reported by snapshot checker, and ignore them when finding result for findEarliestVisibleSnapshot.
Pull Request resolved: https://github.com/facebook/rocksdb/pull/4890
Differential Revision: D13705538
Pulled By: maysamyabandeh
fbshipit-source-id: e577f0d9ee1ff5a6035f26859e56902ecc85a5a4
2019-01-19 02:20:13 +01:00
|
|
|
// List of snapshots released during compaction.
|
|
|
|
// findEarliestVisibleSnapshot() find them out from return of
|
|
|
|
// snapshot_checker, and make sure they will not be returned as
|
|
|
|
// earliest visible snapshot of an older value.
|
|
|
|
// See WritePreparedTransactionTest::ReleaseSnapshotDuringCompaction3.
|
|
|
|
std::unordered_set<SequenceNumber> released_snapshots_;
|
2019-01-16 18:48:10 +01:00
|
|
|
std::vector<SequenceNumber>::const_iterator earliest_snapshot_iter_;
|
2015-12-08 21:25:48 +01:00
|
|
|
const SequenceNumber earliest_write_conflict_snapshot_;
|
2017-10-06 19:26:38 +02:00
|
|
|
const SnapshotChecker* const snapshot_checker_;
|
2015-09-10 23:35:25 +02:00
|
|
|
Env* env_;
|
2021-03-15 12:32:24 +01:00
|
|
|
SystemClock* clock_;
|
2018-06-22 06:16:49 +02:00
|
|
|
bool report_detailed_time_;
|
2015-09-11 00:16:32 +02:00
|
|
|
bool expect_valid_internal_key_;
|
2018-12-18 02:26:56 +01:00
|
|
|
CompactionRangeDelAggregator* range_del_agg_;
|
2020-09-15 06:10:09 +02:00
|
|
|
BlobFileBuilder* blob_file_builder_;
|
2016-12-01 16:00:17 +01:00
|
|
|
std::unique_ptr<CompactionProxy> compaction_;
|
2015-09-10 23:35:25 +02:00
|
|
|
const CompactionFilter* compaction_filter_;
|
2017-01-12 00:01:21 +01:00
|
|
|
const std::atomic<bool>* shutting_down_;
|
2020-08-14 20:28:12 +02:00
|
|
|
const std::atomic<int>* manual_compaction_paused_;
|
2021-06-07 20:40:31 +02:00
|
|
|
const std::atomic<bool>* manual_compaction_canceled_;
|
Added support for differential snapshots
Summary:
The motivation for this PR is to add support to RocksDB for differential (incremental) snapshots, i.e. a snapshot of the DB's changes between two points in time (one can think of it as a diff between two sequence numbers, or as a diff D, which can be thought of as an SST file or just a set of KVs that can be applied to the state at sequence number S1 to bring the database to the state at sequence number S2).
This feature would be useful for various distributed storage layers built on top of RocksDB, as it should help reduce the resources (time and network bandwidth) needed to recover and rebuild DB instances used as replicas in the context of distributed storage.
From the API standpoint, this would look like a client app requesting an iterator between (start seqnum) and the current DB state, and reading the "diff".
This is a very draft PR for initial review and discussion of the approach; I'm going to rework some parts and keep updating the PR.
For now, what's done here according to initial discussions:
Preserving deletes:
- We want to be able to optionally preserve recent deletes for some defined period of time, so that if a delete came in recently and might need to be included in the next incremental snapshot, it wouldn't get dropped by a compaction. This is done by adding a new param to Options (preserve deletes flag) and a new variable to DBImpl where we keep track of the sequence number after which we don't want to drop tombstones, even if they are otherwise eligible for deletion.
- I also added a new API call for clients to be able to advance this cutoff seqnum after which we drop deletes; I assume it's more flexible to let clients control this, since otherwise we'd need to keep some kind of timestamp <--> seqnum mapping inside the DB, which sounds messy and painful to support. Clients could make use of it by periodically calling GetLatestSequenceNumber(), noting the timestamp, doing some calculation and figuring out by how much we need to advance the cutoff seqnum (see the sketch after this list).
- The compaction codepath in compaction_iterator.cc has been modified to avoid dropping tombstones with seqnum > cutoff seqnum.
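A sketch of that client-driven advance, assuming the API this patch proposes (an Options::preserve_deletes flag plus DB::SetPreserveDeletesSequenceNumber(); names as proposed here, not guaranteed in later releases):
```cpp
#include "rocksdb/db.h"

// Sketch: a client periodically advancing the tombstone-preservation
// cutoff, using the API proposed in this patch.
void AdvanceDeleteCutoff(rocksdb::DB* db) {
  // Everything at or below the current tail of the log has presumably been
  // consumed by the incremental-snapshot machinery by now...
  rocksdb::SequenceNumber cutoff = db->GetLatestSequenceNumber();
  // ...so compactions may drop tombstones with seqnum <= cutoff; deletes
  // written after this call stay preserved for the next diff.
  db->SetPreserveDeletesSequenceNumber(cutoff);
}
```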
Iterator changes:
- a couple of params were added to ReadOptions to optionally allow the client to request internal keys instead of user keys (so that the client can get the latest value of a key, be it a delete marker or a put), as well as a min timestamp and a min seqnum (sketched below).
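A hedged sketch of a client reading such a diff, assuming a ReadOptions::iter_start_seqnum field along the lines described here (the field and helper names are assumptions):
```cpp
#include <memory>

#include "rocksdb/db.h"

// Hypothetical callback that applies one diff entry to a replica.
void ProcessDiffEntry(const rocksdb::Slice& key, const rocksdb::Slice& value);

// Iterate only over entries newer than `since_seqnum`.
void ReadDiff(rocksdb::DB* db, rocksdb::SequenceNumber since_seqnum) {
  rocksdb::ReadOptions ropts;
  ropts.iter_start_seqnum = since_seqnum + 1;  // field proposed in this PR
  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(ropts));
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // With this option set, keys come back as internal keys, so the caller
    // can tell puts from delete markers when applying the diff.
    ProcessDiffEntry(it->key(), it->value());
  }
}
```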
TableCache changes:
- I modified the table_cache code to be able to quickly exclude SST files from the iterator heap if creation_time on the file is less than iter_start_ts as passed in ReadOptions. That would help a lot in some DB settings (like reading very recent data only or using FIFO compactions), but not so much for universal compaction with a more or less long iterator time span.
What's left:
- Still looking at how to best plug this into the DBIter codepath. So far it seems that FindNextUserKeyInternal only parses values as user keys, and the iter->key() call generally returns a user key. Can we add a new API to DBIter such as internal_key(), and modify this internal method to optionally set saved_key_ to point to the full internal key? I don't need to store the actual seqnum there, but I do need to store the type.
Closes https://github.com/facebook/rocksdb/pull/2999
Differential Revision: D6175602
Pulled By: mikhail-antonov
fbshipit-source-id: c779a6696ee2d574d86c69cec866a3ae095aa900
2017-11-02 02:43:29 +01:00
|
|
|
const SequenceNumber preserve_deletes_seqnum_;
|
2015-09-10 23:35:25 +02:00
|
|
|
bool bottommost_level_;
|
|
|
|
bool valid_ = false;
|
2016-10-13 19:49:06 +02:00
|
|
|
bool visible_at_tip_;
|
2015-09-10 23:35:25 +02:00
|
|
|
SequenceNumber earliest_snapshot_;
|
|
|
|
SequenceNumber latest_snapshot_;
|
2019-01-16 18:48:10 +01:00
|
|
|
|
2020-11-10 03:19:42 +01:00
|
|
|
std::shared_ptr<Logger> info_log_;
|
|
|
|
|
|
|
|
bool allow_data_in_errors_;
|
|
|
|
|
|
|
|
// Comes from comparator.
|
|
|
|
const size_t timestamp_size_;
|
|
|
|
|
|
|
|
// Lower bound timestamp to retain full history in terms of user-defined
|
|
|
|
// timestamp. If a key's timestamp is older than full_history_ts_low_, then
|
|
|
|
// the key *may* be eligible for garbage collection (GC). The skipping logic
|
|
|
|
// is in `NextFromInput()` and `PrepareOutput()`.
|
|
|
|
// If nullptr, NO GC will be performed and all history will be preserved.
|
|
|
|
const std::string* const full_history_ts_low_;
|
|
|
|
|
2015-09-10 23:35:25 +02:00
|
|
|
// State
|
Support for SingleDelete()
Summary:
This patch fixes #7460559. It introduces SingleDelete as a new database
operation. This operation can be used to delete keys that were never
overwritten (no put following another put of the same key). If an overwritten
key is single deleted the behavior is undefined. Single deletion of a
non-existent key has no effect but multiple consecutive single deletions are
not allowed (see limitations).
In contrast to the conventional Delete() operation, the deletion entry is
removed along with the value when the two are lined up in a compaction. Note:
The semantics are similar to @igor's prototype that allowed having this
behavior at the granularity of a column family (
https://reviews.facebook.net/D42093 ). This new patch, however, is more
aggressive when it comes to removing tombstones: It removes the SingleDelete
together with the value whenever there is no snapshot between them while the
older patch only did this when the sequence number of the deletion was older
than the earliest snapshot.
Most of the complex additions are in the Compaction Iterator, all other changes
should be relatively straightforward. The patch also includes basic support for
single deletions in db_stress and db_bench.
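For illustration, a minimal usage sketch (assuming an already-open DB handle):
```cpp
#include <cassert>

#include "rocksdb/db.h"

void SingleDeleteExample(rocksdb::DB* db) {
  rocksdb::WriteOptions wo;
  // A key written exactly once...
  rocksdb::Status s = db->Put(wo, "key1", "value1");
  assert(s.ok());
  // ...may be removed with SingleDelete(); during compaction the tombstone
  // and the value annihilate each other once no snapshot separates them.
  s = db->SingleDelete(wo, "key1");
  assert(s.ok());
  // Note: overwriting a key and then calling SingleDelete() on it is
  // undefined behavior; use Delete() for keys that may be overwritten.
}
```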
Limitations:
- Not compatible with cuckoo hash tables
- Single deletions cannot be used in combination with merges and normal
deletions on the same key (other keys are not affected by this)
- Consecutive single deletions are currently not allowed (an older version of
this patch supported this, so it could be resurrected if needed)
Test Plan: make all check
Reviewers: yhchiang, sdong, rven, anthony, yoshinorim, igor
Reviewed By: igor
Subscribers: maykov, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D43179
2015-09-17 20:42:56 +02:00
|
|
|
//
|
|
|
|
// Points to a copy of the current compaction iterator output (current_key_)
|
|
|
|
// if valid_.
|
2015-09-10 23:35:25 +02:00
|
|
|
Slice key_;
|
2015-09-17 20:42:56 +02:00
|
|
|
// Points to the value in the underlying iterator that corresponds to the
|
|
|
|
// current output.
|
2015-09-10 23:35:25 +02:00
|
|
|
Slice value_;
|
2015-09-17 20:42:56 +02:00
|
|
|
// The status is OK unless the compaction iterator encounters a merge operand
|
|
|
|
// while no merge operator is defined.
|
2015-09-10 23:35:25 +02:00
|
|
|
Status status_;
|
2015-09-17 20:42:56 +02:00
|
|
|
// Stores the user key, sequence number and type of the current compaction
|
|
|
|
// iterator output (or current key in the underlying iterator during
|
|
|
|
// NextFromInput()).
|
2015-09-10 23:35:25 +02:00
|
|
|
ParsedInternalKey ikey_;
|
2015-09-17 20:42:56 +02:00
|
|
|
// Stores whether ikey_.user_key is valid. If set to false, the user key is
|
|
|
|
// not compared against the current key in the underlying iterator.
|
2015-09-10 23:35:25 +02:00
|
|
|
bool has_current_user_key_ = false;
|
Allow compaction iterator to perform garbage collection (#7556)
Summary:
Add a threshold timestamp, full_history_ts_low_ of type `std::string*` to
`CompactionIterator`, so that RocksDB can also perform garbage collection during
compaction.
* If full_history_ts_low_ is nullptr, then compaction iterator does not perform
GC, preserving all timestamp history for all keys. Compaction iterator will
treat user key with different timestamps as different user keys.
* If full_history_ts_low_ is not nullptr, then compaction iterator performs
GC. GC will look at keys older than `*full_history_ts_low_` and determine their
eligibility based on factors including snapshots (see the sketch after the rules below).
Current rules of GC:
* If an internal key is in the same snapshot as a previous counterpart
with the same user key, and this key is eligible for GC, and the key is
not single-delete or merge operand, then this key can be dropped. Note
that the previous internal key cannot be a merge operand either.
* If a tombstone is the most recent one in the earliest snapshot and it
is eligible for GC, and keyNotExistsBeyondLevel() is true, then this
tombstone can be dropped.
* If a tombstone is the most recent one in a snapshot and it is eligible
for GC, and the compaction is at bottommost level, then all other older
internal keys of the same user key must also be eligible for GC, thus
can be dropped.
* Single-delete, delete-range and merge are not currently supported.
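A hedged sketch of the timestamp-eligibility test these rules share (simplified; the real logic also consults snapshots and lives in NextFromInput()/PrepareOutput()):
```cpp
#include <string>

#include "rocksdb/comparator.h"
#include "rocksdb/slice.h"

// A key is even a candidate for GC only if a threshold is configured and
// the key's user timestamp falls strictly below it.
bool EligibleForGC(const rocksdb::Comparator* ucmp, const rocksdb::Slice& ts,
                   const std::string* full_history_ts_low) {
  if (full_history_ts_low == nullptr) {
    return false;  // GC disabled: preserve all timestamp history
  }
  return ucmp->CompareTimestamp(ts, *full_history_ts_low) < 0;
}
```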
Pull Request resolved: https://github.com/facebook/rocksdb/pull/7556
Test Plan: make check
Reviewed By: ltamasi
Differential Revision: D24507728
Pulled By: riversand963
fbshipit-source-id: 3c09c7301f41eed76dfcf4d1527e68cf6e0a8bb3
2020-10-24 07:58:05 +02:00
|
|
|
// If false, the iterator holds a copy of the current compaction iterator
|
|
|
|
// output (or current key in the underlying iterator during NextFromInput()).
|
|
|
|
bool at_next_ = false;
|
|
|
|
|
2015-09-17 20:42:56 +02:00
|
|
|
IterKey current_key_;
|
|
|
|
Slice current_user_key_;
|
2020-11-10 03:19:42 +01:00
|
|
|
std::string curr_ts_;
|
2015-09-10 23:35:25 +02:00
|
|
|
SequenceNumber current_user_key_sequence_;
|
|
|
|
SequenceNumber current_user_key_snapshot_;
|
2015-11-05 05:52:22 +01:00
|
|
|
|
|
|
|
// True if the iterator has already returned a record for the current key.
|
|
|
|
bool has_outputted_key_ = false;
|
|
|
|
|
|
|
|
// If true, clear the value of the next key and output it without applying any
|
|
|
|
// compaction rules. This is used for outputting a put after a single delete.
|
|
|
|
bool clear_and_output_next_key_ = false;
|
|
|
|
|
2015-09-10 23:35:25 +02:00
|
|
|
MergeOutputIterator merge_out_iter_;
|
Introduce FullMergeV2 (eliminate memcpy from merge operators)
Summary:
This diff updates the code to pin the merge operator operands while the merge operation is in progress, so that we can eliminate the memcpy cost. To do that we need a new public API for FullMerge that replaces the std::deque<std::string> with std::vector<Slice>.
This diff is stacked on top of D56493 and D56511
In this diff we
- Update FullMergeV2 arguments to be encapsulated in MergeOperationInput and MergeOperationOutput, which will make it easier to add new arguments in the future
- Replace std::deque<std::string> with std::vector<Slice> to pass operands
- Replace MergeContext std::deque with std::vector (based on a simple benchmark I ran https://gist.github.com/IslamAbdelRahman/78fc86c9ab9f52b1df791e58943fb187)
- Allow FullMergeV2 output to be an existing operand (see the sketch below)
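As an illustration of the new API, here is a minimal "max" operator (a sketch, not the exact implementation benchmarked below):
```cpp
#include "rocksdb/merge_operator.h"

class MaxOperator : public rocksdb::MergeOperator {
 public:
  bool FullMergeV2(const MergeOperationInput& merge_in,
                   MergeOperationOutput* merge_out) const override {
    rocksdb::Slice max;
    if (merge_in.existing_value != nullptr) {
      max = *merge_in.existing_value;
    }
    // Operands arrive as a std::vector<Slice>, pinned for the duration of
    // the merge, so comparing them involves no copies.
    for (const rocksdb::Slice& operand : merge_in.operand_list) {
      if (max.compare(operand) < 0) {
        max = operand;
      }
    }
    // Point the output at the winning operand instead of copying it into
    // new_value -- this is the memcpy elimination this diff is about.
    merge_out->existing_operand = max;
    return true;
  }

  const char* Name() const override { return "MaxOperator"; }
};
```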
```
[Everything in Memtable | 10K operands | 10 KB each | 1 operand per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="mergerandom,readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --merge_keys=10000 --num=10000 --disable_auto_compactions --value_size=10240 --write_buffer_size=1000000000
[FullMergeV2]
readseq : 0.607 micros/op 1648235 ops/sec; 16121.2 MB/s
readseq : 0.478 micros/op 2091546 ops/sec; 20457.2 MB/s
readseq : 0.252 micros/op 3972081 ops/sec; 38850.5 MB/s
readseq : 0.237 micros/op 4218328 ops/sec; 41259.0 MB/s
readseq : 0.247 micros/op 4043927 ops/sec; 39553.2 MB/s
[master]
readseq : 3.935 micros/op 254140 ops/sec; 2485.7 MB/s
readseq : 3.722 micros/op 268657 ops/sec; 2627.7 MB/s
readseq : 3.149 micros/op 317605 ops/sec; 3106.5 MB/s
readseq : 3.125 micros/op 320024 ops/sec; 3130.1 MB/s
readseq : 4.075 micros/op 245374 ops/sec; 2400.0 MB/s
```
```
[Everything in Memtable | 10K operands | 10 KB each | 10 operand per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="mergerandom,readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --merge_keys=1000 --num=10000 --disable_auto_compactions --value_size=10240 --write_buffer_size=1000000000
[FullMergeV2]
readseq : 3.472 micros/op 288018 ops/sec; 2817.1 MB/s
readseq : 2.304 micros/op 434027 ops/sec; 4245.2 MB/s
readseq : 1.163 micros/op 859845 ops/sec; 8410.0 MB/s
readseq : 1.192 micros/op 838926 ops/sec; 8205.4 MB/s
readseq : 1.250 micros/op 800000 ops/sec; 7824.7 MB/s
[master]
readseq : 24.025 micros/op 41623 ops/sec; 407.1 MB/s
readseq : 18.489 micros/op 54086 ops/sec; 529.0 MB/s
readseq : 18.693 micros/op 53495 ops/sec; 523.2 MB/s
readseq : 23.621 micros/op 42335 ops/sec; 414.1 MB/s
readseq : 18.775 micros/op 53262 ops/sec; 521.0 MB/s
```
```
[Everything in Block cache | 10K operands | 10 KB each | 1 operand per key]
[FullMergeV2]
$ DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --num=100000 --db="/dev/shm/merge-random-10K-10KB" --cache_size=1000000000 --use_existing_db --disable_auto_compactions
readseq : 14.741 micros/op 67837 ops/sec; 663.5 MB/s
readseq : 1.029 micros/op 971446 ops/sec; 9501.6 MB/s
readseq : 0.974 micros/op 1026229 ops/sec; 10037.4 MB/s
readseq : 0.965 micros/op 1036080 ops/sec; 10133.8 MB/s
readseq : 0.943 micros/op 1060657 ops/sec; 10374.2 MB/s
[master]
readseq : 16.735 micros/op 59755 ops/sec; 584.5 MB/s
readseq : 3.029 micros/op 330151 ops/sec; 3229.2 MB/s
readseq : 3.136 micros/op 318883 ops/sec; 3119.0 MB/s
readseq : 3.065 micros/op 326245 ops/sec; 3191.0 MB/s
readseq : 3.014 micros/op 331813 ops/sec; 3245.4 MB/s
```
```
[Everything in Block cache | 10K operands | 10 KB each | 10 operand per key]
DEBUG_LEVEL=0 make db_bench -j64 && ./db_bench --benchmarks="readseq,readseq,readseq,readseq,readseq" --merge_operator="max" --num=100000 --db="/dev/shm/merge-random-10-operands-10K-10KB" --cache_size=1000000000 --use_existing_db --disable_auto_compactions
[FullMergeV2]
readseq : 24.325 micros/op 41109 ops/sec; 402.1 MB/s
readseq : 1.470 micros/op 680272 ops/sec; 6653.7 MB/s
readseq : 1.231 micros/op 812347 ops/sec; 7945.5 MB/s
readseq : 1.091 micros/op 916590 ops/sec; 8965.1 MB/s
readseq : 1.109 micros/op 901713 ops/sec; 8819.6 MB/s
[master]
readseq : 27.257 micros/op 36687 ops/sec; 358.8 MB/s
readseq : 4.443 micros/op 225073 ops/sec; 2201.4 MB/s
readseq : 5.830 micros/op 171526 ops/sec; 1677.7 MB/s
readseq : 4.173 micros/op 239635 ops/sec; 2343.8 MB/s
readseq : 4.150 micros/op 240963 ops/sec; 2356.8 MB/s
```
Test Plan: COMPILE_WITH_ASAN=1 make check -j64
Reviewers: yhchiang, andrewkr, sdong
Reviewed By: sdong
Subscribers: lovro, andrewkr, dhruba
Differential Revision: https://reviews.facebook.net/D57075
2016-07-20 18:49:03 +02:00
|
|
|
// PinnedIteratorsManager used to pin input_ Iterator blocks while reading
|
|
|
|
// merge operands, and to release them after they are consumed.
|
|
|
|
PinnedIteratorsManager pinned_iters_mgr_;
|
2020-11-24 06:07:01 +01:00
|
|
|
|
|
|
|
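// Blob files with a number below this cutoff are considered oldest, and
// their valid blobs are relocated when encountered during compaction.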
uint64_t blob_garbage_collection_cutoff_file_number_;
|
|
|
|
|
2020-09-15 06:10:09 +02:00
|
|
|
std::string blob_index_;
|
2020-11-24 06:07:01 +01:00
|
|
|
PinnableSlice blob_value_;
|
2015-09-10 23:35:25 +02:00
|
|
|
std::string compaction_filter_value_;
|
2016-12-01 16:00:17 +01:00
|
|
|
InternalKey compaction_filter_skip_until_;
|
2015-09-10 23:35:25 +02:00
|
|
|
// "level_ptrs" holds indices that remember which file of an associated
|
|
|
|
// level we were last checking during the last call to compaction->
|
|
|
|
// KeyNotExistsBeyondOutputLevel(). This allows future calls to the function
|
|
|
|
// to pick up where it left off, since each subcompaction's key range is
|
|
|
|
// increasing so a later call to the function must be looking for a key that
|
|
|
|
// is in or beyond the last file checked during the previous call.
|
|
|
|
std::vector<size_t> level_ptrs_;
|
2016-11-28 20:44:40 +01:00
|
|
|
CompactionIterationStats iter_stats_;
|
2017-01-12 00:01:21 +01:00
|
|
|
|
2017-10-06 19:26:38 +02:00
|
|
|
// Used to avoid purging uncommitted values. The application can specify
|
|
|
|
// uncommitted values by providing a SnapshotChecker object.
|
|
|
|
bool current_key_committed_;
|
2020-10-24 07:58:05 +02:00
|
|
|
|
|
|
|
// Saved result of ucmp->CompareTimestamp(current_ts_, *full_history_ts_low_)
|
|
|
|
int cmp_with_history_ts_low_;
|
|
|
|
|
2021-05-08 01:00:37 +02:00
|
|
|
const int level_;
|
|
|
|
|
2021-11-03 23:53:40 +01:00
|
|
|
// True if the previous internal key (same user key)'s sequence number has
|
|
|
|
// just been zeroed out during bottommost compaction.
|
|
|
|
bool last_key_seq_zeroed_{false};
|
|
|
|
|
2021-05-21 01:06:12 +02:00
|
|
|
void AdvanceInputIter() { input_.Next(); }
|
|
|
|
|
|
|
|
void SkipUntil(const Slice& skip_until) { input_.Seek(skip_until); }
|
|
|
|
|
2017-01-12 00:01:21 +01:00
|
|
|
bool IsShuttingDown() {
|
|
|
|
// This is a best-effort facility, so memory_order_relaxed is sufficient.
|
|
|
|
return shutting_down_ && shutting_down_->load(std::memory_order_relaxed);
|
|
|
|
}
|
2019-09-17 06:00:13 +02:00
|
|
|
|
|
|
|
bool IsPausingManualCompaction() {
|
|
|
|
// This is a best-effort facility, so memory_order_relaxed is sufficient.
|
2021-06-07 20:40:31 +02:00
|
|
|
return (manual_compaction_paused_ &&
|
|
|
|
manual_compaction_paused_->load(std::memory_order_relaxed) > 0) ||
|
|
|
|
(manual_compaction_canceled_ &&
|
|
|
|
manual_compaction_canceled_->load(std::memory_order_relaxed));
|
2019-09-17 06:00:13 +02:00
|
|
|
}
|
2015-09-10 23:35:25 +02:00
|
|
|
};
|
2021-07-28 23:52:35 +02:00
|
|
|
|
|
|
|
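// Returns true only when `seq` is at or below `snapshot` and, if a snapshot
// checker is installed, it confirms that `seq` is in `snapshot`.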
inline bool CompactionIterator::DefinitelyInSnapshot(SequenceNumber seq,
|
|
|
|
SequenceNumber snapshot) {
|
|
|
|
return (seq <= snapshot &&
|
|
|
|
(snapshot_checker_ == nullptr ||
|
|
|
|
LIKELY(snapshot_checker_->CheckInSnapshot(seq, snapshot) ==
|
|
|
|
SnapshotCheckerResult::kInSnapshot)));
|
|
|
|
}
|
|
|
|
|
|
|
|
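// Returns true only when `seq` is above `snapshot`, or the snapshot checker
// explicitly reports `seq` as not being in `snapshot`.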
inline bool CompactionIterator::DefinitelyNotInSnapshot(
|
|
|
|
SequenceNumber seq, SequenceNumber snapshot) {
|
|
|
|
return (seq > snapshot ||
|
|
|
|
(snapshot_checker_ != nullptr &&
|
|
|
|
UNLIKELY(snapshot_checker_->CheckInSnapshot(seq, snapshot) ==
|
|
|
|
SnapshotCheckerResult::kNotInSnapshot)));
|
|
|
|
}
|
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|