// Copyright (c) 2013, Facebook, Inc. All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#pragma once
#include <algorithm>
#include <set>
#include <utility>
#include <vector>
#include <string>
#include "rocksdb/cache.h"
#include "db/dbformat.h"
#include "util/arena.h"
#include "util/autovector.h"

namespace rocksdb {

class VersionSet;

const uint64_t kFileNumberMask = 0x3FFFFFFFFFFFFFFF;

extern uint64_t PackFileNumberAndPathId(uint64_t number, uint64_t path_id);
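
// Illustrative sketch only -- the real PackFileNumberAndPathId() is defined
// in the .cc file. The layout assumed here matches the accessors below:
// the low 62 bits hold the file number and the top 2 bits hold the path id.
inline uint64_t PackFileNumberAndPathIdSketch(uint64_t number,
                                              uint64_t path_id) {
  // Mask the number defensively so a too-large value cannot leak into the
  // path id bits.
  return (number & kFileNumberMask) | (path_id * (kFileNumberMask + 1));
}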

// A copyable structure containing the information needed to read data from
// an SST file. It can contain a pointer to a table reader opened for the
// file, or the file number and size, which can be used to create a new table
// reader for it. The behavior is undefined if a copy of the structure is
// used after the file is no longer part of any live version.
struct FileDescriptor {
  // Table reader in table_reader_handle
  TableReader* table_reader;
  uint64_t packed_number_and_path_id;
  uint64_t file_size;  // File size in bytes

  FileDescriptor() : FileDescriptor(0, 0, 0) {}

  FileDescriptor(uint64_t number, uint32_t path_id, uint64_t _file_size)
      : table_reader(nullptr),
        packed_number_and_path_id(PackFileNumberAndPathId(number, path_id)),
        file_size(_file_size) {}

  FileDescriptor& operator=(const FileDescriptor& fd) {
    table_reader = fd.table_reader;
    packed_number_and_path_id = fd.packed_number_and_path_id;
    file_size = fd.file_size;
    return *this;
  }

  uint64_t GetNumber() const {
    return packed_number_and_path_id & kFileNumberMask;
  }
  uint32_t GetPathId() const {
    return packed_number_and_path_id / (kFileNumberMask + 1);
  }
  uint64_t GetFileSize() const { return file_size; }
};
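
// Hypothetical usage sketch (not part of the original header): pack a file
// number and path id through the constructor, then read both back.
inline void FileDescriptorUsageSketch() {
  FileDescriptor fd(/*number=*/123, /*path_id=*/2, /*_file_size=*/4096);
  uint64_t number = fd.GetNumber();   // 123, recovered from the low 62 bits
  uint32_t path_id = fd.GetPathId();  // 2, recovered from the top 2 bits
  (void)number;
  (void)path_id;
}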

struct FileMetaData {
  int refs;
  FileDescriptor fd;
  InternalKey smallest;           // Smallest internal key served by table
  InternalKey largest;            // Largest internal key served by table
  bool being_compacted;           // Is this file undergoing compaction?
  SequenceNumber smallest_seqno;  // The smallest seqno in this file
  SequenceNumber largest_seqno;   // The largest seqno in this file

  // Needs to be disposed of when refs becomes 0.
  Cache::Handle* table_reader_handle;

  // Stats for compensating deletion entries during compaction.

  // File size compensated by deletion entries.
  // This is updated in Version::UpdateAccumulatedStats() the first time the
  // file is created or loaded. After it is updated (!= 0), it is immutable.
  uint64_t compensated_file_size;
  // These values can mutate, but they can only be read or written from the
  // single-threaded LogAndApply thread.
  uint64_t num_entries;       // the number of entries.
  uint64_t num_deletions;     // the number of deletion entries.
  uint64_t raw_key_size;      // total uncompressed key size.
  uint64_t raw_value_size;    // total uncompressed value size.
  bool init_stats_from_file;  // true if the data-entry stats of this file
                              // have been initialized from the file.

  FileMetaData()
      : refs(0),
        being_compacted(false),
        table_reader_handle(nullptr),
        compensated_file_size(0),
        num_entries(0),
        num_deletions(0),
        raw_key_size(0),
        raw_value_size(0),
        init_stats_from_file(false) {}
};
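
// The compensation above makes files dominated by deletion entries look
// larger to the compaction picker, so they are cleaned up sooner. A hedged
// sketch of the idea -- the real computation lives in
// Version::UpdateAccumulatedStats(); the scaling factor and the
// average_value_size parameter are assumptions for illustration.
inline uint64_t CompensatedFileSizeSketch(const FileMetaData& f,
                                          uint64_t average_value_size) {
  // Charge each deletion entry roughly the space it will reclaim downstream.
  return f.fd.GetFileSize() + f.num_deletions * average_value_size * 2;
}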

// A compressed copy of file metadata that just contains the
// smallest and largest key slices.
struct FdWithKeyRange {
  FileDescriptor fd;
  Slice smallest_key;  // slice that contains the smallest key
  Slice largest_key;   // slice that contains the largest key

  FdWithKeyRange()
      : fd(),
        smallest_key(),
        largest_key() {
  }

  FdWithKeyRange(FileDescriptor _fd, Slice _smallest_key, Slice _largest_key)
      : fd(_fd), smallest_key(_smallest_key), largest_key(_largest_key) {}
};

// Data structure to store an array of FdWithKeyRange in one level.
// The actual data is guaranteed to be stored contiguously to improve
// cache locality during Get() and FindFile().
struct LevelFilesBrief {
  size_t num_files;
  FdWithKeyRange* files;
  LevelFilesBrief() {
    num_files = 0;
    files = nullptr;
  }
};
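
// A hedged sketch of how a level's FileMetaData entries might be flattened
// into a LevelFilesBrief backed by an Arena (in RocksDB proper this is done
// by a helper in version_set.cc; the name and details here are assumptions,
// and placement new assumes <new> is available).
inline void GenerateLevelFilesBriefSketch(
    LevelFilesBrief* brief, const std::vector<FileMetaData*>& files,
    Arena* arena) {
  brief->num_files = files.size();
  char* mem = arena->AllocateAligned(sizeof(FdWithKeyRange) * files.size());
  brief->files = new (mem) FdWithKeyRange[files.size()];
  for (size_t i = 0; i < files.size(); i++) {
    // Copy the encoded keys into the arena so the slices stay valid and the
    // per-level metadata sits in one contiguous block.
    Slice smallest = files[i]->smallest.Encode();
    Slice largest = files[i]->largest.Encode();
    char* smallest_mem = arena->Allocate(smallest.size());
    std::copy(smallest.data(), smallest.data() + smallest.size(),
              smallest_mem);
    char* largest_mem = arena->Allocate(largest.size());
    std::copy(largest.data(), largest.data() + largest.size(), largest_mem);
    brief->files[i].fd = files[i]->fd;
    brief->files[i].smallest_key = Slice(smallest_mem, smallest.size());
    brief->files[i].largest_key = Slice(largest_mem, largest.size());
  }
}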

class VersionEdit {
 public:
  VersionEdit() { Clear(); }
  ~VersionEdit() {}

  void Clear();

  void SetComparatorName(const Slice& name) {
    has_comparator_ = true;
    comparator_ = name.ToString();
  }
  void SetLogNumber(uint64_t num) {
    has_log_number_ = true;
    log_number_ = num;
  }
  void SetPrevLogNumber(uint64_t num) {
    has_prev_log_number_ = true;
    prev_log_number_ = num;
  }
  void SetNextFile(uint64_t num) {
    has_next_file_number_ = true;
    next_file_number_ = num;
  }
  void SetLastSequence(SequenceNumber seq) {
    has_last_sequence_ = true;
    last_sequence_ = seq;
  }
  void SetMaxColumnFamily(uint32_t max_column_family) {
    has_max_column_family_ = true;
    max_column_family_ = max_column_family;
  }

  // Add the specified file at the specified number.
  // REQUIRES: This version has not been saved (see VersionSet::SaveTo)
  // REQUIRES: "smallest" and "largest" are the smallest and largest keys in
  // the file
  void AddFile(int level, uint64_t file, uint32_t file_path_id,
               uint64_t file_size, const InternalKey& smallest,
               const InternalKey& largest, const SequenceNumber& smallest_seqno,
               const SequenceNumber& largest_seqno) {
    assert(smallest_seqno <= largest_seqno);
    FileMetaData f;
    f.fd = FileDescriptor(file, file_path_id, file_size);
    f.smallest = smallest;
    f.largest = largest;
    f.smallest_seqno = smallest_seqno;
    f.largest_seqno = largest_seqno;
    new_files_.push_back(std::make_pair(level, f));
  }

  // Delete the specified "file" from the specified "level".
  void DeleteFile(int level, uint64_t file) {
    deleted_files_.insert({level, file});
  }

  // Number of edits
  size_t NumEntries() { return new_files_.size() + deleted_files_.size(); }

  bool IsColumnFamilyManipulation() {
    return is_column_family_add_ || is_column_family_drop_;
  }

  void SetColumnFamily(uint32_t column_family_id) {
    column_family_ = column_family_id;
  }

  // The column family ID should be set by calling SetColumnFamily() first.
  void AddColumnFamily(const std::string& name) {
    assert(!is_column_family_drop_);
    assert(!is_column_family_add_);
    assert(NumEntries() == 0);
    is_column_family_add_ = true;
    column_family_name_ = name;
  }

  // The column family ID should be set by calling SetColumnFamily() first.
  void DropColumnFamily() {
    assert(!is_column_family_drop_);
    assert(!is_column_family_add_);
    assert(NumEntries() == 0);
    is_column_family_drop_ = true;
  }

  // Return true on success.
  bool EncodeTo(std::string* dst) const;
  Status DecodeFrom(const Slice& src);

  typedef std::set<std::pair<int, uint64_t>> DeletedFileSet;

  const DeletedFileSet& GetDeletedFiles() { return deleted_files_; }
  const std::vector<std::pair<int, FileMetaData>>& GetNewFiles() {
    return new_files_;
  }

  std::string DebugString(bool hex_key = false) const;

 private:
  friend class VersionSet;
  friend class Version;

  bool GetLevel(Slice* input, int* level, const char** msg);

  int max_level_;
  std::string comparator_;
  uint64_t log_number_;
  uint64_t prev_log_number_;
  uint64_t next_file_number_;
  uint32_t max_column_family_;
  SequenceNumber last_sequence_;
  bool has_comparator_;
  bool has_log_number_;
  bool has_prev_log_number_;
  bool has_next_file_number_;
  bool has_last_sequence_;
  bool has_max_column_family_;

  DeletedFileSet deleted_files_;
  std::vector<std::pair<int, FileMetaData>> new_files_;

  // Each version edit record should have its column family ID set.
  // If it is not set, it defaults to 0.
  uint32_t column_family_;

  // A version edit can be either a column family add or a column family
  // drop. If it is a column family add, it also includes the column
  // family name.
  bool is_column_family_drop_;
  bool is_column_family_add_;
  std::string column_family_name_;
};
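
// Hypothetical usage sketch (not part of the original header): record one
// added and one deleted file, then round-trip the edit through its manifest
// encoding. The key contents and numbers are made up for illustration.
inline void VersionEditRoundTripSketch() {
  VersionEdit edit;
  edit.SetLogNumber(11);
  edit.AddFile(/*level=*/1, /*file=*/123, /*file_path_id=*/0,
               /*file_size=*/4096,
               InternalKey("a", /*sequence=*/5, kTypeValue),
               InternalKey("z", /*sequence=*/9, kTypeValue),
               /*smallest_seqno=*/5, /*largest_seqno=*/9);
  edit.DeleteFile(/*level=*/1, /*file=*/99);

  std::string record;
  if (edit.EncodeTo(&record)) {
    VersionEdit decoded;
    Status s = decoded.DecodeFrom(record);  // decoded.NumEntries() == 2
    (void)s;
  }
}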

}  // namespace rocksdb