// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once

#include <algorithm>
#include <atomic>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "db/blob_file_addition.h"
#include "db/blob_file_garbage.h"
#include "db/dbformat.h"
#include "memory/arena.h"
#include "rocksdb/cache.h"
#include "table/table_reader.h"
#include "util/autovector.h"

namespace ROCKSDB_NAMESPACE {

class VersionSet;

constexpr uint64_t kFileNumberMask = 0x3FFFFFFFFFFFFFFF;
constexpr uint64_t kUnknownOldestAncesterTime = 0;
constexpr uint64_t kUnknownFileCreationTime = 0;

extern const std::string kUnknownFileChecksum;
extern const std::string kUnknownFileChecksumFuncName;

extern uint64_t PackFileNumberAndPathId(uint64_t number, uint64_t path_id);
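
// A minimal sketch of a packing that is consistent with kFileNumberMask and
// with FileDescriptor::GetPathId() below; the actual definition lives in the
// .cc file, so treat this as illustrative:
//
//   uint64_t PackFileNumberAndPathId(uint64_t number, uint64_t path_id) {
//     assert(number <= kFileNumberMask);  // path id occupies the top bits
//     return number | (path_id * (kFileNumberMask + 1));
//   }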

// A copyable structure containing the information needed to read data from
// an SST file. It can contain a pointer to a table reader opened for the
// file, or the file number and size, which can be used to create a new
// table reader for it. The behavior is undefined when a copy of the
// structure is used after the file is no longer part of any live version.
struct FileDescriptor {
  // Table reader in table_reader_handle
  TableReader* table_reader;
  uint64_t packed_number_and_path_id;
  uint64_t file_size;             // File size in bytes
  SequenceNumber smallest_seqno;  // The smallest seqno in this file
  SequenceNumber largest_seqno;   // The largest seqno in this file

  FileDescriptor() : FileDescriptor(0, 0, 0) {}

  FileDescriptor(uint64_t number, uint32_t path_id, uint64_t _file_size)
      : FileDescriptor(number, path_id, _file_size, kMaxSequenceNumber, 0) {}

  FileDescriptor(uint64_t number, uint32_t path_id, uint64_t _file_size,
                 SequenceNumber _smallest_seqno, SequenceNumber _largest_seqno)
      : table_reader(nullptr),
        packed_number_and_path_id(PackFileNumberAndPathId(number, path_id)),
        file_size(_file_size),
        smallest_seqno(_smallest_seqno),
        largest_seqno(_largest_seqno) {}

  FileDescriptor(const FileDescriptor& fd) { *this = fd; }

  FileDescriptor& operator=(const FileDescriptor& fd) {
    table_reader = fd.table_reader;
    packed_number_and_path_id = fd.packed_number_and_path_id;
    file_size = fd.file_size;
    smallest_seqno = fd.smallest_seqno;
    largest_seqno = fd.largest_seqno;
    return *this;
  }

  uint64_t GetNumber() const {
    return packed_number_and_path_id & kFileNumberMask;
  }
  uint32_t GetPathId() const {
    return static_cast<uint32_t>(packed_number_and_path_id /
                                 (kFileNumberMask + 1));
  }
  uint64_t GetFileSize() const { return file_size; }
};
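
// Usage sketch (hypothetical values, for illustration only): packing and
// unpacking should round-trip through the accessors above.
//
//   FileDescriptor fd(/*number=*/7, /*path_id=*/1, /*_file_size=*/4096);
//   assert(fd.GetNumber() == 7);
//   assert(fd.GetPathId() == 1);
//   assert(fd.GetFileSize() == 4096);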

struct FileSampledStats {
  FileSampledStats() : num_reads_sampled(0) {}
  FileSampledStats(const FileSampledStats& other) { *this = other; }
  FileSampledStats& operator=(const FileSampledStats& other) {
    num_reads_sampled = other.num_reads_sampled.load();
    return *this;
  }

  // number of user reads to this file.
  mutable std::atomic<uint64_t> num_reads_sampled;
};

struct FileMetaData {
  FileDescriptor fd;
  InternalKey smallest;  // Smallest internal key served by table
  InternalKey largest;   // Largest internal key served by table

  // Needs to be disposed when refs becomes 0.
  Cache::Handle* table_reader_handle = nullptr;

  FileSampledStats stats;

  // Stats for compensating deletion entries during compaction

  // File size compensated by deletion entry.
  // This is updated in Version::UpdateAccumulatedStats() first time when the
  // file is created or loaded. After it is updated (!= 0), it is immutable.
  uint64_t compensated_file_size = 0;
  // These values can mutate, but they can only be read or written from
  // single-threaded LogAndApply thread
  uint64_t num_entries = 0;     // the number of entries.
  uint64_t num_deletions = 0;   // the number of deletion entries.
  uint64_t raw_key_size = 0;    // total uncompressed key size.
  uint64_t raw_value_size = 0;  // total uncompressed value size.

  int refs = 0;  // Reference count

  bool being_compacted = false;       // Is this file undergoing compaction?
  bool init_stats_from_file = false;  // true if the data-entry stats of this
                                      // file have been initialized from the
                                      // file.

  bool marked_for_compaction = false;  // True if client asked us nicely to
                                       // compact this file.

  // Used only in BlobDB. The file number of the oldest blob file this SST file
  // refers to. 0 is an invalid value; BlobDB numbers the files starting from 1.
  uint64_t oldest_blob_file_number = kInvalidBlobFileNumber;

  // The file could be the compaction output of other SST files, which could
  // in turn be the outputs of compactions of still older SST files. We track
  // the memtable flush timestamp of the oldest SST file that eventually
  // contributed data to this file. 0 means the information is not available.
  uint64_t oldest_ancester_time = kUnknownOldestAncesterTime;

  // Unix time when the SST file is created.
  uint64_t file_creation_time = kUnknownFileCreationTime;

  // File checksum
  std::string file_checksum = kUnknownFileChecksum;

  // File checksum function name
  std::string file_checksum_func_name = kUnknownFileChecksumFuncName;

  FileMetaData() = default;

  FileMetaData(uint64_t file, uint32_t file_path_id, uint64_t file_size,
               const InternalKey& smallest_key, const InternalKey& largest_key,
               const SequenceNumber& smallest_seq,
               const SequenceNumber& largest_seq, bool marked_for_compact,
               uint64_t oldest_blob_file, uint64_t _oldest_ancester_time,
               uint64_t _file_creation_time, const std::string& _file_checksum,
               const std::string& _file_checksum_func_name)
      : fd(file, file_path_id, file_size, smallest_seq, largest_seq),
        smallest(smallest_key),
        largest(largest_key),
        marked_for_compaction(marked_for_compact),
        oldest_blob_file_number(oldest_blob_file),
        oldest_ancester_time(_oldest_ancester_time),
        file_creation_time(_file_creation_time),
        file_checksum(_file_checksum),
        file_checksum_func_name(_file_checksum_func_name) {
    TEST_SYNC_POINT_CALLBACK("FileMetaData::FileMetaData", this);
  }

  // REQUIRED: Keys must be given to the function in sorted order (it expects
  // the last key to be the largest).
  void UpdateBoundaries(const Slice& key, const Slice& value,
                        SequenceNumber seqno, ValueType value_type);
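
  // A minimal sketch of what UpdateBoundaries plausibly does (the definition
  // lives in the .cc file; the real version also inspects value/value_type to
  // track the oldest referenced blob file). It mirrors the pattern of
  // UpdateBoundariesForRange below:
  //
  //   if (smallest.size() == 0) {
  //     smallest.DecodeFrom(key);
  //   }
  //   largest.DecodeFrom(key);  // keys arrive in sorted order
  //   fd.smallest_seqno = std::min(fd.smallest_seqno, seqno);
  //   fd.largest_seqno = std::max(fd.largest_seqno, seqno);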

  // Unlike UpdateBoundaries, ranges do not need to be presented in any
  // particular order.
  void UpdateBoundariesForRange(const InternalKey& start,
                                const InternalKey& end, SequenceNumber seqno,
                                const InternalKeyComparator& icmp) {
    if (smallest.size() == 0 || icmp.Compare(start, smallest) < 0) {
      smallest = start;
    }
    if (largest.size() == 0 || icmp.Compare(largest, end) < 0) {
      largest = end;
    }
    fd.smallest_seqno = std::min(fd.smallest_seqno, seqno);
    fd.largest_seqno = std::max(fd.largest_seqno, seqno);
  }

  // Try to get the oldest ancester time from this structure itself, or from
  // the table properties if the table reader is already pinned.
  // 0 means the information is not available.
  uint64_t TryGetOldestAncesterTime() {
    if (oldest_ancester_time != kUnknownOldestAncesterTime) {
      return oldest_ancester_time;
    } else if (fd.table_reader != nullptr &&
               fd.table_reader->GetTableProperties() != nullptr) {
      return fd.table_reader->GetTableProperties()->creation_time;
    }
    return kUnknownOldestAncesterTime;
  }

  uint64_t TryGetFileCreationTime() {
    if (file_creation_time != kUnknownFileCreationTime) {
      return file_creation_time;
    } else if (fd.table_reader != nullptr &&
               fd.table_reader->GetTableProperties() != nullptr) {
      return fd.table_reader->GetTableProperties()->file_creation_time;
    }
    return kUnknownFileCreationTime;
  }
};

// A compressed copy of file meta data that just contains the minimum data
// needed to serve read operations, while still keeping the pointer to the
// full metadata of the file in case it is needed.
struct FdWithKeyRange {
  FileDescriptor fd;
  FileMetaData* file_metadata;  // Points to the full metadata
  Slice smallest_key;           // Slice that contains the smallest key
  Slice largest_key;            // Slice that contains the largest key

  FdWithKeyRange()
      : fd(), file_metadata(nullptr), smallest_key(), largest_key() {}

  FdWithKeyRange(FileDescriptor _fd, Slice _smallest_key, Slice _largest_key,
                 FileMetaData* _file_metadata)
      : fd(_fd),
        file_metadata(_file_metadata),
        smallest_key(_smallest_key),
        largest_key(_largest_key) {}
};

// Data structure to store an array of FdWithKeyRange in one level.
// The actual data is guaranteed to be stored contiguously.
struct LevelFilesBrief {
  size_t num_files;
  FdWithKeyRange* files;
  LevelFilesBrief() {
    num_files = 0;
    files = nullptr;
  }
};
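
// A population sketch (FillLevelFilesBrief is a hypothetical helper): the
// files array is carved out of an Arena so all entries sit in one contiguous
// allocation. The real generator additionally copies the key bytes into the
// arena so the Slices stay valid independently of the source metadata.
//
//   void FillLevelFilesBrief(LevelFilesBrief* brief,
//                            const std::vector<FileMetaData*>& metas,
//                            Arena* arena) {
//     brief->num_files = metas.size();
//     char* mem =
//         arena->AllocateAligned(sizeof(FdWithKeyRange) * metas.size());
//     brief->files = new (mem) FdWithKeyRange[metas.size()];
//     for (size_t i = 0; i < metas.size(); i++) {
//       FileMetaData* f = metas[i];
//       brief->files[i] = FdWithKeyRange(f->fd, f->smallest.Encode(),
//                                        f->largest.Encode(), f);
//     }
//   }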

// The state of a DB at any given time is referred to as a Version.
// Any modification to the Version is considered a Version Edit. A Version is
// constructed by joining a sequence of Version Edits. Version Edits are
// written to the MANIFEST file.
class VersionEdit {
 public:
  void Clear();

  void SetDBId(const std::string& db_id) {
    has_db_id_ = true;
    db_id_ = db_id;
  }
  bool HasDbId() const { return has_db_id_; }
  const std::string& GetDbId() const { return db_id_; }

  void SetComparatorName(const Slice& name) {
    has_comparator_ = true;
    comparator_ = name.ToString();
  }
  bool HasComparatorName() const { return has_comparator_; }
  const std::string& GetComparatorName() const { return comparator_; }

  void SetLogNumber(uint64_t num) {
    has_log_number_ = true;
    log_number_ = num;
  }
  bool HasLogNumber() const { return has_log_number_; }
  uint64_t GetLogNumber() const { return log_number_; }

  void SetPrevLogNumber(uint64_t num) {
    has_prev_log_number_ = true;
    prev_log_number_ = num;
  }
  bool HasPrevLogNumber() const { return has_prev_log_number_; }
  uint64_t GetPrevLogNumber() const { return prev_log_number_; }

  void SetNextFile(uint64_t num) {
    has_next_file_number_ = true;
    next_file_number_ = num;
  }
  bool HasNextFile() const { return has_next_file_number_; }
  uint64_t GetNextFile() const { return next_file_number_; }

  void SetMaxColumnFamily(uint32_t max_column_family) {
    has_max_column_family_ = true;
    max_column_family_ = max_column_family;
  }
  bool HasMaxColumnFamily() const { return has_max_column_family_; }
  uint32_t GetMaxColumnFamily() const { return max_column_family_; }

  void SetMinLogNumberToKeep(uint64_t num) {
    has_min_log_number_to_keep_ = true;
    min_log_number_to_keep_ = num;
  }
  bool HasMinLogNumberToKeep() const { return has_min_log_number_to_keep_; }
  uint64_t GetMinLogNumberToKeep() const { return min_log_number_to_keep_; }

  void SetLastSequence(SequenceNumber seq) {
    has_last_sequence_ = true;
    last_sequence_ = seq;
  }
  bool HasLastSequence() const { return has_last_sequence_; }
  SequenceNumber GetLastSequence() const { return last_sequence_; }

  // Delete the specified table file from the specified level.
  void DeleteFile(int level, uint64_t file) {
    deleted_files_.emplace(level, file);
  }

  // Retrieve the table files deleted as well as their associated levels.
  using DeletedFiles = std::set<std::pair<int, uint64_t>>;
  const DeletedFiles& GetDeletedFiles() const { return deleted_files_; }

  // Add the specified table file at the specified level.
  // REQUIRES: This version has not been saved (see VersionSet::SaveTo)
  // REQUIRES: "smallest" and "largest" are smallest and largest keys in file
  // REQUIRES: "oldest_blob_file_number" is the number of the oldest blob file
  // referred to by this file if any, kInvalidBlobFileNumber otherwise.
  void AddFile(int level, uint64_t file, uint32_t file_path_id,
               uint64_t file_size, const InternalKey& smallest,
               const InternalKey& largest, const SequenceNumber& smallest_seqno,
               const SequenceNumber& largest_seqno, bool marked_for_compaction,
               uint64_t oldest_blob_file_number, uint64_t oldest_ancester_time,
               uint64_t file_creation_time, const std::string& file_checksum,
               const std::string& file_checksum_func_name) {
    assert(smallest_seqno <= largest_seqno);
    new_files_.emplace_back(
        level, FileMetaData(file, file_path_id, file_size, smallest, largest,
                            smallest_seqno, largest_seqno,
                            marked_for_compaction, oldest_blob_file_number,
                            oldest_ancester_time, file_creation_time,
                            file_checksum, file_checksum_func_name));
  }

  void AddFile(int level, const FileMetaData& f) {
    assert(f.fd.smallest_seqno <= f.fd.largest_seqno);
    new_files_.emplace_back(level, f);
  }

  // Retrieve the table files added as well as their associated levels.
  using NewFiles = std::vector<std::pair<int, FileMetaData>>;
  const NewFiles& GetNewFiles() const { return new_files_; }

  // Add a new blob file.
  void AddBlobFile(uint64_t blob_file_number, uint64_t total_blob_count,
                   uint64_t total_blob_bytes, std::string checksum_method,
                   std::string checksum_value) {
    blob_file_additions_.emplace_back(
        blob_file_number, total_blob_count, total_blob_bytes,
        std::move(checksum_method), std::move(checksum_value));
  }

  // Retrieve all the blob files added.
  using BlobFileAdditions = std::vector<BlobFileAddition>;
  const BlobFileAdditions& GetBlobFileAdditions() const {
    return blob_file_additions_;
  }

  // Add garbage for an existing blob file.
  void AddBlobFileGarbage(uint64_t blob_file_number,
                          uint64_t garbage_blob_count,
                          uint64_t garbage_blob_bytes) {
    blob_file_garbages_.emplace_back(blob_file_number, garbage_blob_count,
                                     garbage_blob_bytes);
  }

  // Retrieve all the blob file garbage added.
  using BlobFileGarbages = std::vector<BlobFileGarbage>;
  const BlobFileGarbages& GetBlobFileGarbages() const {
    return blob_file_garbages_;
  }
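
  // Usage sketch (illustrative values): a compaction that rewrites table
  // file 5 on level 1 into file 7 on level 2 might be recorded roughly as:
  //
  //   VersionEdit edit;
  //   edit.DeleteFile(/*level=*/1, /*file=*/5);
  //   edit.AddFile(/*level=*/2, /*file=*/7, /*file_path_id=*/0, file_size,
  //                smallest_key, largest_key, smallest_seqno, largest_seqno,
  //                /*marked_for_compaction=*/false, kInvalidBlobFileNumber,
  //                oldest_ancester_time, file_creation_time,
  //                kUnknownFileChecksum, kUnknownFileChecksumFuncName);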

  // Number of edits
  size_t NumEntries() const {
    return new_files_.size() + deleted_files_.size() +
           blob_file_additions_.size() + blob_file_garbages_.size();
  }

  void SetColumnFamily(uint32_t column_family_id) {
    column_family_ = column_family_id;
  }
  uint32_t GetColumnFamily() const { return column_family_; }

  // Set the column family ID by calling SetColumnFamily() first.
  void AddColumnFamily(const std::string& name) {
    assert(!is_column_family_drop_);
    assert(!is_column_family_add_);
    assert(NumEntries() == 0);
    is_column_family_add_ = true;
    column_family_name_ = name;
  }

  // Set the column family ID by calling SetColumnFamily() first.
  void DropColumnFamily() {
    assert(!is_column_family_drop_);
    assert(!is_column_family_add_);
    assert(NumEntries() == 0);
    is_column_family_drop_ = true;
  }

  bool IsColumnFamilyManipulation() const {
    return is_column_family_add_ || is_column_family_drop_;
  }
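
  // Usage sketch (illustrative; new_cf_id is a hypothetical ID chosen by the
  // caller): recording the creation of a column family in its own edit.
  //
  //   VersionEdit cf_add;
  //   cf_add.SetColumnFamily(new_cf_id);
  //   cf_add.AddColumnFamily("my_cf");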

  void MarkAtomicGroup(uint32_t remaining_entries) {
    is_in_atomic_group_ = true;
    remaining_entries_ = remaining_entries;
  }
  bool IsInAtomicGroup() const { return is_in_atomic_group_; }
  uint32_t GetRemainingEntries() const { return remaining_entries_; }

  // return true on success.
  bool EncodeTo(std::string* dst) const;
  Status DecodeFrom(const Slice& src);

  std::string DebugString(bool hex_key = false) const;
  std::string DebugJSON(int edit_num, bool hex_key = false) const;
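
  // Round-trip sketch (EncodeTo/DecodeFrom are defined in the .cc file; this
  // is illustrative): an edit serialized for the MANIFEST should decode back
  // into an equivalent edit.
  //
  //   VersionEdit edit;
  //   edit.SetLogNumber(10);
  //   std::string record;
  //   if (edit.EncodeTo(&record)) {
  //     VersionEdit decoded;
  //     Status s = decoded.DecodeFrom(Slice(record));
  //     assert(s.ok() && decoded.GetLogNumber() == 10);
  //   }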

 private:
  friend class ReactiveVersionSet;
  friend class VersionSet;
  friend class Version;
  friend class AtomicGroupReadBuffer;

  bool GetLevel(Slice* input, int* level, const char** msg);

  const char* DecodeNewFile4From(Slice* input);

  int max_level_ = 0;
  std::string db_id_;
  std::string comparator_;
  uint64_t log_number_ = 0;
  uint64_t prev_log_number_ = 0;
  uint64_t next_file_number_ = 0;
  uint32_t max_column_family_ = 0;
  // The minimum WAL log number that must be kept; logs with smaller numbers
  // can be deleted, as they are no longer needed for recovery.
  uint64_t min_log_number_to_keep_ = 0;
  SequenceNumber last_sequence_ = 0;
  bool has_db_id_ = false;
  bool has_comparator_ = false;
  bool has_log_number_ = false;
  bool has_prev_log_number_ = false;
  bool has_next_file_number_ = false;
  bool has_max_column_family_ = false;
  bool has_min_log_number_to_keep_ = false;
  bool has_last_sequence_ = false;

  DeletedFiles deleted_files_;
  NewFiles new_files_;

  BlobFileAdditions blob_file_additions_;
  BlobFileGarbages blob_file_garbages_;

  // Each version edit record should have column_family_ set.
  // If it's not set, it is default (0).
  uint32_t column_family_ = 0;
  // A version edit can be either a column family add or a column family
  // drop. If it's a column family add, it also includes the column family
  // name.
  bool is_column_family_drop_ = false;
  bool is_column_family_add_ = false;
  std::string column_family_name_;

  bool is_in_atomic_group_ = false;
  uint32_t remaining_entries_ = 0;
};

}  // namespace ROCKSDB_NAMESPACE