Report corrupted keys during compaction (#7124)
Summary: Currently, RocksDB lets compaction go through even when it encounters corrupted keys; their count is reported in CompactionJobStats, but RocksDB never checks that value. We should let compaction run in a stricter mode. This change temporarily disables the two tests that allow corrupted keys in compaction; with this PR they would hit assert(false) and terminate. We still need to investigate the recommended google-test way of exercising that path, since gtest death tests (EXPECT_DEATH) currently come with warnings.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7124

Test Plan: make check

Reviewed By: ajkr

Differential Revision: D22530722

Pulled By: riversand963

fbshipit-source-id: 6a5a6a992028c6d4f92cb74693c92db462ae4ad6
This commit is contained in: parent 3e1eb99ab7, commit 30bfa2a44e
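The summary notes that the corrupt-key count is already reported in CompactionJobStats but never checked. As background, here is a minimal sketch (not part of this PR) of how an application could observe that counter through an EventListener. The CorruptKeyWatcher class, the demo path, and the logging are illustrative; EventListener, CompactionJobInfo, and CompactionJobStats::num_corrupt_keys are existing RocksDB types, but exact usage should be checked against your RocksDB version.

    #include <cstdio>
    #include <memory>

    #include "rocksdb/compaction_job_stats.h"
    #include "rocksdb/db.h"
    #include "rocksdb/listener.h"
    #include "rocksdb/options.h"

    class CorruptKeyWatcher : public rocksdb::EventListener {
     public:
      void OnCompactionCompleted(rocksdb::DB* /*db*/,
                                 const rocksdb::CompactionJobInfo& info) override {
        // num_corrupt_keys counts input keys whose internal-key encoding could
        // not be parsed during this compaction job.
        if (info.stats.num_corrupt_keys > 0) {
          std::fprintf(stderr, "compaction job %d saw %llu corrupt key(s)\n",
                       info.job_id,
                       static_cast<unsigned long long>(info.stats.num_corrupt_keys));
        }
      }
    };

    int main() {
      rocksdb::Options options;
      options.create_if_missing = true;
      options.listeners.push_back(std::make_shared<CorruptKeyWatcher>());

      rocksdb::DB* db = nullptr;
      rocksdb::Status s =
          rocksdb::DB::Open(options, "/tmp/corrupt_key_watch_demo", &db);
      if (!s.ok()) {
        return 1;
      }
      // ... normal writes/reads; compactions triggered here invoke the listener ...
      delete db;
      return 0;
    }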
HISTORY.md
@@ -1,4 +1,8 @@
 # Rocksdb Change Log
+## Unreleased
+### Bug Fixes
+* Make compaction report InternalKey corruption while iterating over the input.
+
 ## 6.11.3 (7/9/2020)
 ### Bug Fixes
 * Fix a bug when index_type == kTwoLevelIndexSearch in PartitionedIndexBuilder to update FlushPolicy to point to internal key partitioner when it changes from user-key mode to internal-key mode in index partition.
db/compaction/compaction_iterator.cc
@@ -246,6 +246,7 @@ void CompactionIterator::NextFromInput() {
     iter_stats_.num_input_records++;

     if (!ParseInternalKey(key_, &ikey_)) {
+      iter_stats_.num_input_corrupt_records++;
       // If `expect_valid_internal_key_` is false, return the corrupted key
       // and let the caller decide what to do with it.
       // TODO(noetzli): We should have a more elegant solution for this.
@@ -258,7 +259,6 @@ void CompactionIterator::NextFromInput() {
       has_current_user_key_ = false;
       current_user_key_sequence_ = kMaxSequenceNumber;
       current_user_key_snapshot_ = 0;
-      iter_stats_.num_input_corrupt_records++;
       valid_ = true;
       break;
     }
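For context on what the counter above measures: a RocksDB internal key is the user key followed by an 8-byte little-endian tail packing (sequence << 8 | type), and ParseInternalKey() rejects keys whose tail is missing or whose type byte is unrecognized. The standalone helper below only mirrors that shape check for illustration; it is not RocksDB code, and the kMaxRecognizedValueType constant is an assumption for the sketch.

    #include <cstdint>
    #include <string>

    // Upper bound on recognized value-type bytes; the exact value is an
    // assumption for this sketch and differs across RocksDB versions.
    constexpr uint8_t kMaxRecognizedValueType = 0x17;

    bool LooksLikeValidInternalKey(const std::string& internal_key) {
      if (internal_key.size() < 8) {
        return false;  // no room for the packed sequence/type tail
      }
      // Decode the little-endian 64-bit tail: (sequence << 8) | type.
      uint64_t packed = 0;
      for (int i = 7; i >= 0; --i) {
        packed = (packed << 8) |
                 static_cast<uint8_t>(internal_key[internal_key.size() - 8 + i]);
      }
      const uint8_t type = static_cast<uint8_t>(packed & 0xff);
      return type <= kMaxRecognizedValueType;  // unknown type byte => corrupt
    }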
db/compaction/compaction_job.cc
@@ -895,9 +895,10 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
   sub_compact->c_iter.reset(new CompactionIterator(
       input.get(), cfd->user_comparator(), &merge, versions_->LastSequence(),
       &existing_snapshots_, earliest_write_conflict_snapshot_,
-      snapshot_checker_, env_, ShouldReportDetailedTime(env_, stats_), false,
-      &range_del_agg, sub_compact->compaction, compaction_filter,
-      shutting_down_, preserve_deletes_seqnum_, manual_compaction_paused_,
+      snapshot_checker_, env_, ShouldReportDetailedTime(env_, stats_),
+      /*expect_valid_internal_key=*/true, &range_del_agg,
+      sub_compact->compaction, compaction_filter, shutting_down_,
+      preserve_deletes_seqnum_, manual_compaction_paused_,
       db_options_.info_log));
   auto c_iter = sub_compact->c_iter.get();
   c_iter->SeekToFirst();
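The only behavioral switch in this hunk is the expect_valid_internal_key argument changing from false to true. A paraphrased sketch of the resulting flow (hypothetical helper names, not the actual CompactionIterator source):

    #include <cassert>
    #include <cstdint>

    struct IterStats {
      uint64_t num_input_corrupt_records = 0;
    };

    // With expect_valid_internal_key == true (what the hunk above now passes),
    // a key that fails parsing is still counted, then trips the assertion in
    // debug builds instead of being quietly forwarded to the output.
    bool HandleParsedKey(bool parse_ok, bool expect_valid_internal_key,
                         IterStats* stats) {
      if (parse_ok) {
        return true;  // normal processing continues
      }
      stats->num_input_corrupt_records++;   // counted in every mode (this PR)
      assert(!expect_valid_internal_key);   // strict mode: fail fast in debug builds
      return false;                         // lenient mode: caller emits the raw key
    }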
db/compaction/compaction_job_test.cc
@@ -395,7 +395,7 @@ TEST_F(CompactionJobTest, Simple) {
   RunCompaction({ files }, expected_results);
 }

-TEST_F(CompactionJobTest, SimpleCorrupted) {
+TEST_F(CompactionJobTest, DISABLED_SimpleCorrupted) {
   NewDB();

   auto expected_results = CreateTwoFiles(true);
@@ -989,7 +989,7 @@ TEST_F(CompactionJobTest, MultiSingleDelete) {
 // single deletion and the (single) deletion gets removed while the corrupt key
 // gets written out. TODO(noetzli): We probably want a better way to treat
 // corrupt keys.
-TEST_F(CompactionJobTest, CorruptionAfterDeletion) {
+TEST_F(CompactionJobTest, DISABLED_CorruptionAfterDeletion) {
   NewDB();

   auto file1 =
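The summary leaves open the recommended google-test way of covering the assert(false) path that these now-disabled tests would hit; one option it alludes to is a death test. A minimal sketch of that idea follows (the helper and test names are hypothetical and not part of this PR):

    #include <cstdlib>

    #include <gtest/gtest.h>

    // Hypothetical stand-in for running a compaction whose input contains a
    // corrupted internal key while expect_valid_internal_key is true; the real
    // code path would hit assert(false) in a debug build.
    void RunCompactionOverCorruptedInput() { std::abort(); }

    TEST(CompactionCorruptionDeathTest, DiesOnCorruptKey) {
      // EXPECT_DEATH runs the statement in a child process and expects it to
      // terminate abnormally; gtest flags death tests as slow and potentially
      // thread-unsafe, which is the "warnings" caveat the summary mentions.
      EXPECT_DEATH(RunCompactionOverCorruptedInput(), "");
    }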