2016-02-10 00:12:00 +01:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2013-10-16 23:59:46 +02:00
|
|
|
// This source code is licensed under the BSD-style license found in the
|
|
|
|
// LICENSE file in the root directory of this source tree. An additional grant
|
|
|
|
// of patent rights can be found in the PATENTS file in the same directory.
|
|
|
|
//
|
2011-03-18 23:37:00 +01:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
|
|
|
|
|
|
|
#include "db/version_edit.h"
|
|
|
|
|
|
|
|
#include "db/version_set.h"
|
2017-04-06 23:49:13 +02:00
|
|
|
#include "rocksdb/slice.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
#include "util/coding.h"
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage: **
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
2015-07-17 19:07:40 +02:00
|
|
|
#include "util/event_logger.h"
|
2017-04-06 23:49:13 +02:00
|
|
|
#include "util/string_util.h"
|
2015-10-03 02:32:46 +02:00
|
|
|
#include "util/sync_point.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
namespace rocksdb {
|
2011-03-18 23:37:00 +01:00
|
|
|
|
|
|
|
// Tag numbers for serialized VersionEdit. These numbers are written to
// disk and should not be changed.
enum Tag {
  kComparator = 1,      // payload: length-prefixed comparator name
  kLogNumber = 2,       // payload: varint64 log number
  kNextFileNumber = 3,  // payload: varint64 next file number
  kLastSequence = 4,    // payload: varint64 last sequence number
  kCompactPointer = 5,  // no longer written; still accepted when decoding
  kDeletedFile = 6,     // payload: level + file number
  kNewFile = 7,         // original LevelDB file record (no seqno range)
  // 8 was used for large value refs
  kPrevLogNumber = 9,

  // these are new formats divergent from open source leveldb
  kNewFile2 = 100,      // kNewFile plus smallest/largest seqno
  kNewFile3 = 102,      // kNewFile2 plus a path id
  kNewFile4 = 103,      // 4th (the latest) format version of adding files
  kColumnFamily = 200,  // specify column family for version edit
  kColumnFamilyAdd = 201,
  kColumnFamilyDrop = 202,
  kMaxColumnFamily = 203,
};
|
|
|
|
|
2015-10-03 02:32:46 +02:00
|
|
|
// Tags for the customized (self-describing, length-prefixed) fields that
// follow the fixed portion of a kNewFile4 record.
enum CustomTag {
  kTerminate = 1,       // The end of customized fields
  kNeedCompaction = 2,  // 1-byte flag: value 1 means marked for compaction
  kPathId = 65,         // 1-byte path id of the file
};
|
|
|
|
// If this bit for the custom tag is set, opening DB should fail if
// we don't know this field.
// Declared const: this is a fixed protocol constant and must never be
// mutated at runtime (the original was a mutable global).
const uint32_t kCustomTagNonSafeIgnoreMask = 1 << 6;
|
|
|
|
|
2014-07-02 18:54:20 +02:00
|
|
|
// Packs a file number and a path id into a single uint64_t: the file
// number occupies the low bits (up to kFileNumberMask) and the path id
// is stored in the bits above it.
uint64_t PackFileNumberAndPathId(uint64_t number, uint64_t path_id) {
  assert(number <= kFileNumberMask);
  const uint64_t path_bits = path_id * (kFileNumberMask + 1);
  return path_bits | number;
}
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
void VersionEdit::Clear() {
|
|
|
|
comparator_.clear();
|
2014-01-15 00:27:09 +01:00
|
|
|
max_level_ = 0;
|
2011-03-18 23:37:00 +01:00
|
|
|
log_number_ = 0;
|
2011-04-12 21:38:58 +02:00
|
|
|
prev_log_number_ = 0;
|
2011-03-18 23:37:00 +01:00
|
|
|
last_sequence_ = 0;
|
|
|
|
next_file_number_ = 0;
|
2014-03-05 21:13:44 +01:00
|
|
|
max_column_family_ = 0;
|
2011-03-18 23:37:00 +01:00
|
|
|
has_comparator_ = false;
|
|
|
|
has_log_number_ = false;
|
2011-04-12 21:38:58 +02:00
|
|
|
has_prev_log_number_ = false;
|
2011-03-18 23:37:00 +01:00
|
|
|
has_next_file_number_ = false;
|
|
|
|
has_last_sequence_ = false;
|
2014-03-05 21:13:44 +01:00
|
|
|
has_max_column_family_ = false;
|
2011-03-18 23:37:00 +01:00
|
|
|
deleted_files_.clear();
|
|
|
|
new_files_.clear();
|
2013-12-12 02:46:26 +01:00
|
|
|
column_family_ = 0;
|
|
|
|
is_column_family_add_ = 0;
|
|
|
|
is_column_family_drop_ = 0;
|
|
|
|
column_family_name_.clear();
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2014-10-28 22:27:26 +01:00
|
|
|
// Serializes this edit into *dst as a sequence of (tag, payload) records
// understood by DecodeFrom().  Returns false if any added file carries an
// invalid smallest/largest internal key (nothing sensible can be encoded).
bool VersionEdit::EncodeTo(std::string* dst) const {
  if (has_comparator_) {
    PutVarint32(dst, kComparator);
    PutLengthPrefixedSlice(dst, comparator_);
  }
  if (has_log_number_) {
    PutVarint32Varint64(dst, kLogNumber, log_number_);
  }
  if (has_prev_log_number_) {
    PutVarint32Varint64(dst, kPrevLogNumber, prev_log_number_);
  }
  if (has_next_file_number_) {
    PutVarint32Varint64(dst, kNextFileNumber, next_file_number_);
  }
  if (has_last_sequence_) {
    PutVarint32Varint64(dst, kLastSequence, last_sequence_);
  }
  if (has_max_column_family_) {
    PutVarint32Varint32(dst, kMaxColumnFamily, max_column_family_);
  }

  for (const auto& deleted : deleted_files_) {
    PutVarint32Varint32Varint64(dst, kDeletedFile, deleted.first /* level */,
                                deleted.second /* file number */);
  }

  for (size_t i = 0; i < new_files_.size(); i++) {
    const FileMetaData& f = new_files_[i].second;
    // Refuse to encode corrupt keys; see GetInternalKey() on the decode side.
    if (!f.smallest.Valid() || !f.largest.Valid()) {
      return false;
    }
    bool has_customized_fields = false;
    // Pick the oldest record format that can represent this file, so that
    // manifests remain readable by older builds whenever possible.
    if (f.marked_for_compaction) {
      PutVarint32(dst, kNewFile4);
      has_customized_fields = true;
    } else if (f.fd.GetPathId() == 0) {
      // Use older format to make sure user can roll back the build if they
      // don't config multiple DB paths.
      PutVarint32(dst, kNewFile2);
    } else {
      PutVarint32(dst, kNewFile3);
    }
    PutVarint32Varint64(dst, new_files_[i].first /* level */, f.fd.GetNumber());
    if (f.fd.GetPathId() != 0 && !has_customized_fields) {
      // kNewFile3
      PutVarint32(dst, f.fd.GetPathId());
    }
    PutVarint64(dst, f.fd.GetFileSize());
    PutLengthPrefixedSlice(dst, f.smallest.Encode());
    PutLengthPrefixedSlice(dst, f.largest.Encode());
    PutVarint64Varint64(dst, f.smallest_seqno, f.largest_seqno);
    if (has_customized_fields) {
      // Customized fields' format:
      // +-----------------------------+
      // | 1st field's tag (varint32)  |
      // +-----------------------------+
      // | 1st field's size (varint32) |
      // +-----------------------------+
      // | bytes for 1st field         |
      // | (based on size decoded)     |
      // +-----------------------------+
      // |                             |
      // | ......                      |
      // |                             |
      // +-----------------------------+
      // | last field's size (varint32)|
      // +-----------------------------+
      // | bytes for last field        |
      // | (based on size decoded)     |
      // +-----------------------------+
      // | terminating tag (varint32)  |
      // +-----------------------------+
      //
      // Customized encoding for fields:
      //   tag kPathId: 1 byte as path_id
      //   tag kNeedCompaction:
      //        now only can take one char value 1 indicating need-compaction
      //
      if (f.fd.GetPathId() != 0) {
        PutVarint32(dst, CustomTag::kPathId);
        char p = static_cast<char>(f.fd.GetPathId());
        PutLengthPrefixedSlice(dst, Slice(&p, 1));
      }
      if (f.marked_for_compaction) {
        PutVarint32(dst, CustomTag::kNeedCompaction);
        char p = static_cast<char>(1);
        PutLengthPrefixedSlice(dst, Slice(&p, 1));
      }
      // Test hook: lets tests inject extra custom fields before the
      // terminator.
      TEST_SYNC_POINT_CALLBACK("VersionEdit::EncodeTo:NewFile4:CustomizeFields",
                               dst);

      PutVarint32(dst, CustomTag::kTerminate);
    }
  }

  // 0 is default and does not need to be explicitly written
  if (column_family_ != 0) {
    PutVarint32Varint32(dst, kColumnFamily, column_family_);
  }

  if (is_column_family_add_) {
    PutVarint32(dst, kColumnFamilyAdd);
    PutLengthPrefixedSlice(dst, Slice(column_family_name_));
  }

  if (is_column_family_drop_) {
    PutVarint32(dst, kColumnFamilyDrop);
  }
  return true;
}
|
|
|
|
|
|
|
|
static bool GetInternalKey(Slice* input, InternalKey* dst) {
|
|
|
|
Slice str;
|
|
|
|
if (GetLengthPrefixedSlice(input, &str)) {
|
|
|
|
dst->DecodeFrom(str);
|
[fix] SIGSEGV when VersionEdit in MANIFEST is corrupted
Summary:
This was reported by our customers in task #4295529.
Cause:
* MANIFEST file contains a VersionEdit, which contains file entries whose 'smallest' and 'largest' internal keys are empty. String with zero characters. Root cause of corruption was not investigated. We should report corruption when this happens. However, we currently SIGSEGV.
Here's what happens:
* VersionEdit encodes zero-strings happily and stores them in smallest and largest InternalKeys. InternalKey::Encode() does assert when `rep_.empty()`, but we don't assert in production environemnts. Also, we should never assert as a result of DB corruption.
* As part of our ConsistencyCheck, we call GetLiveFilesMetaData()
* GetLiveFilesMetadata() calls `file->largest.user_key().ToString()`
* user_key() function does: 1. assert(size > 8) (ooops, no assert), 2. returns `Slice(internal_key.data(), internal_key.size() - 8)`
* since `internal_key.size()` is unsigned int, this call translates to `Slice(whatever, 1298471928561892576182756)`. Bazinga.
Fix:
* VersionEdit checks if InternalKey is valid in `VersionEdit::GetInternalKey()`. If it's invalid, returns corruption.
Lessons learned:
* Always keep in mind that even if you `assert()`, production code will continue execution even if assert fails.
* Never `assert` based on DB corruption. Assert only if the code should guarantee that assert can't fail.
Test Plan: dumped offending manifest. Before: assert. Now: corruption
Reviewers: dhruba, haobo, sdong
Reviewed By: dhruba
CC: leveldb
Differential Revision: https://reviews.facebook.net/D18507
2014-05-08 01:52:12 +02:00
|
|
|
return dst->Valid();
|
2011-03-18 23:37:00 +01:00
|
|
|
} else {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-01-24 19:54:26 +01:00
|
|
|
// Reads a varint32 level from *input into *level, keeping max_level_ up
// to date with the largest level seen.  Returns false if the varint
// cannot be decoded; *msg is not written here (callers fill it in).
bool VersionEdit::GetLevel(Slice* input, int* level, const char** msg) {
  uint32_t raw_level;
  if (!GetVarint32(input, &raw_level)) {
    return false;
  }
  *level = raw_level;
  if (max_level_ < *level) {
    max_level_ = *level;
  }
  return true;
}
|
|
|
|
|
2015-10-03 02:32:46 +02:00
|
|
|
// Decodes a kNewFile4 record from *input and appends the parsed file to
// new_files_.  The record is the fixed fields (level, number, size,
// smallest/largest key, seqno range) followed by self-describing custom
// fields terminated by kTerminate; see VersionEdit::EncodeTo().
// Returns nullptr on success, or a static error message on corruption.
const char* VersionEdit::DecodeNewFile4From(Slice* input) {
  const char* msg = nullptr;
  int level;
  FileMetaData f;
  uint64_t number;
  uint32_t path_id = 0;
  uint64_t file_size;
  if (GetLevel(input, &level, &msg) && GetVarint64(input, &number) &&
      GetVarint64(input, &file_size) && GetInternalKey(input, &f.smallest) &&
      GetInternalKey(input, &f.largest) &&
      GetVarint64(input, &f.smallest_seqno) &&
      GetVarint64(input, &f.largest_seqno)) {
    // See comments in VersionEdit::EncodeTo() for format of customized fields
    while (true) {
      uint32_t custom_tag;
      Slice field;
      if (!GetVarint32(input, &custom_tag)) {
        return "new-file4 custom field";
      }
      if (custom_tag == kTerminate) {
        break;
      }
      if (!GetLengthPrefixedSlice(input, &field)) {
        // fixed typo: was "lenth"
        return "new-file4 custom field length prefixed slice error";
      }
      switch (custom_tag) {
        case kPathId:
          if (field.size() != 1) {
            return "path_id field wrong size";
          }
          path_id = field[0];
          if (path_id > 3) {
            // fixed typo: was "vaue"
            return "path_id wrong value";
          }
          break;
        case kNeedCompaction:
          if (field.size() != 1) {
            return "need_compaction field wrong size";
          }
          f.marked_for_compaction = (field[0] == 1);
          break;
        default:
          if ((custom_tag & kCustomTagNonSafeIgnoreMask) != 0) {
            // Should not proceed if cannot understand it
            return "new-file4 custom field not supported";
          }
          // Unknown but safe-to-ignore field: skip it.
          break;
      }
    }
  } else {
    return "new-file4 entry";
  }
  f.fd = FileDescriptor(number, path_id, file_size);
  new_files_.push_back(std::make_pair(level, f));
  return nullptr;
}
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
// Parses a serialized edit (the inverse of EncodeTo()) from src into this
// object.  Processes (tag, payload) records until the input is exhausted
// or a record fails to decode; any failure yields Status::Corruption with
// a short description of the offending record.
Status VersionEdit::DecodeFrom(const Slice& src) {
  Clear();
  Slice input = src;
  const char* msg = nullptr;
  uint32_t tag;

  // Temporary storage for parsing
  int level;
  FileMetaData f;
  Slice str;
  InternalKey key;

  while (msg == nullptr && GetVarint32(&input, &tag)) {
    switch (tag) {
      case kComparator:
        if (GetLengthPrefixedSlice(&input, &str)) {
          comparator_ = str.ToString();
          has_comparator_ = true;
        } else {
          msg = "comparator name";
        }
        break;

      case kLogNumber:
        if (GetVarint64(&input, &log_number_)) {
          has_log_number_ = true;
        } else {
          msg = "log number";
        }
        break;

      case kPrevLogNumber:
        if (GetVarint64(&input, &prev_log_number_)) {
          has_prev_log_number_ = true;
        } else {
          msg = "previous log number";
        }
        break;

      case kNextFileNumber:
        if (GetVarint64(&input, &next_file_number_)) {
          has_next_file_number_ = true;
        } else {
          msg = "next file number";
        }
        break;

      case kLastSequence:
        if (GetVarint64(&input, &last_sequence_)) {
          has_last_sequence_ = true;
        } else {
          msg = "last sequence number";
        }
        break;

      case kMaxColumnFamily:
        if (GetVarint32(&input, &max_column_family_)) {
          has_max_column_family_ = true;
        } else {
          msg = "max column family";
        }
        break;

      case kCompactPointer:
        // Payload must still be consumed to keep the stream in sync.
        if (GetLevel(&input, &level, &msg) &&
            GetInternalKey(&input, &key)) {
          // we don't use compact pointers anymore,
          // but we should not fail if they are still
          // in manifest
        } else {
          if (!msg) {
            msg = "compaction pointer";
          }
        }
        break;

      case kDeletedFile: {
        uint64_t number;
        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number)) {
          deleted_files_.insert(std::make_pair(level, number));
        } else {
          if (!msg) {
            msg = "deleted file";
          }
        }
        break;
      }

      // Original LevelDB file record: no seqno range, path id 0.
      case kNewFile: {
        uint64_t number;
        uint64_t file_size;
        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number) &&
            GetVarint64(&input, &file_size) &&
            GetInternalKey(&input, &f.smallest) &&
            GetInternalKey(&input, &f.largest)) {
          f.fd = FileDescriptor(number, 0, file_size);
          new_files_.push_back(std::make_pair(level, f));
        } else {
          if (!msg) {
            msg = "new-file entry";
          }
        }
        break;
      }
      // kNewFile plus smallest/largest seqno; path id is still 0.
      case kNewFile2: {
        uint64_t number;
        uint64_t file_size;
        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number) &&
            GetVarint64(&input, &file_size) &&
            GetInternalKey(&input, &f.smallest) &&
            GetInternalKey(&input, &f.largest) &&
            GetVarint64(&input, &f.smallest_seqno) &&
            GetVarint64(&input, &f.largest_seqno)) {
          f.fd = FileDescriptor(number, 0, file_size);
          new_files_.push_back(std::make_pair(level, f));
        } else {
          if (!msg) {
            msg = "new-file2 entry";
          }
        }
        break;
      }

      // kNewFile2 plus an explicit path id after the file number.
      case kNewFile3: {
        uint64_t number;
        uint32_t path_id;
        uint64_t file_size;
        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number) &&
            GetVarint32(&input, &path_id) && GetVarint64(&input, &file_size) &&
            GetInternalKey(&input, &f.smallest) &&
            GetInternalKey(&input, &f.largest) &&
            GetVarint64(&input, &f.smallest_seqno) &&
            GetVarint64(&input, &f.largest_seqno)) {
          f.fd = FileDescriptor(number, path_id, file_size);
          new_files_.push_back(std::make_pair(level, f));
        } else {
          if (!msg) {
            msg = "new-file3 entry";
          }
        }
        break;
      }

      // Latest format with customized fields; decoded by a helper.
      case kNewFile4: {
        msg = DecodeNewFile4From(&input);
        break;
      }

      case kColumnFamily:
        if (!GetVarint32(&input, &column_family_)) {
          if (!msg) {
            msg = "set column family id";
          }
        }
        break;

      case kColumnFamilyAdd:
        if (GetLengthPrefixedSlice(&input, &str)) {
          is_column_family_add_ = true;
          column_family_name_ = str.ToString();
        } else {
          if (!msg) {
            msg = "column family add";
          }
        }
        break;

      case kColumnFamilyDrop:
        is_column_family_drop_ = true;
        break;

      default:
        msg = "unknown tag";
        break;
    }
  }

  // Leftover bytes with no decodable tag also indicate corruption.
  if (msg == nullptr && !input.empty()) {
    msg = "invalid tag";
  }

  Status result;
  if (msg != nullptr) {
    result = Status::Corruption("VersionEdit", msg);
  }
  return result;
}
|
|
|
|
|
2013-08-09 00:51:16 +02:00
|
|
|
std::string VersionEdit::DebugString(bool hex_key) const {
|
2011-03-18 23:37:00 +01:00
|
|
|
std::string r;
|
|
|
|
r.append("VersionEdit {");
|
|
|
|
if (has_comparator_) {
|
|
|
|
r.append("\n Comparator: ");
|
|
|
|
r.append(comparator_);
|
|
|
|
}
|
|
|
|
if (has_log_number_) {
|
|
|
|
r.append("\n LogNumber: ");
|
|
|
|
AppendNumberTo(&r, log_number_);
|
|
|
|
}
|
2011-04-12 21:38:58 +02:00
|
|
|
if (has_prev_log_number_) {
|
|
|
|
r.append("\n PrevLogNumber: ");
|
|
|
|
AppendNumberTo(&r, prev_log_number_);
|
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
if (has_next_file_number_) {
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage: **
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
2015-07-17 19:07:40 +02:00
|
|
|
r.append("\n NextFileNumber: ");
|
2011-03-18 23:37:00 +01:00
|
|
|
AppendNumberTo(&r, next_file_number_);
|
|
|
|
}
|
|
|
|
if (has_last_sequence_) {
|
|
|
|
r.append("\n LastSeq: ");
|
|
|
|
AppendNumberTo(&r, last_sequence_);
|
|
|
|
}
|
|
|
|
for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
|
|
|
|
iter != deleted_files_.end();
|
|
|
|
++iter) {
|
|
|
|
r.append("\n DeleteFile: ");
|
|
|
|
AppendNumberTo(&r, iter->first);
|
|
|
|
r.append(" ");
|
|
|
|
AppendNumberTo(&r, iter->second);
|
|
|
|
}
|
2011-04-21 00:48:11 +02:00
|
|
|
for (size_t i = 0; i < new_files_.size(); i++) {
|
2011-03-18 23:37:00 +01:00
|
|
|
const FileMetaData& f = new_files_[i].second;
|
|
|
|
r.append("\n AddFile: ");
|
|
|
|
AppendNumberTo(&r, new_files_[i].first);
|
|
|
|
r.append(" ");
|
2014-06-14 00:54:19 +02:00
|
|
|
AppendNumberTo(&r, f.fd.GetNumber());
|
2011-03-18 23:37:00 +01:00
|
|
|
r.append(" ");
|
2014-06-14 00:54:19 +02:00
|
|
|
AppendNumberTo(&r, f.fd.GetFileSize());
|
2011-10-06 01:30:28 +02:00
|
|
|
r.append(" ");
|
2013-08-09 00:51:16 +02:00
|
|
|
r.append(f.smallest.DebugString(hex_key));
|
2011-10-06 01:30:28 +02:00
|
|
|
r.append(" .. ");
|
2013-08-09 00:51:16 +02:00
|
|
|
r.append(f.largest.DebugString(hex_key));
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
2013-12-12 02:46:26 +01:00
|
|
|
r.append("\n ColumnFamily: ");
|
|
|
|
AppendNumberTo(&r, column_family_);
|
|
|
|
if (is_column_family_add_) {
|
|
|
|
r.append("\n ColumnFamilyAdd: ");
|
|
|
|
r.append(column_family_name_);
|
|
|
|
}
|
|
|
|
if (is_column_family_drop_) {
|
|
|
|
r.append("\n ColumnFamilyDrop");
|
|
|
|
}
|
2014-03-11 03:59:44 +01:00
|
|
|
if (has_max_column_family_) {
|
|
|
|
r.append("\n MaxColumnFamily: ");
|
2014-03-11 20:14:09 +01:00
|
|
|
AppendNumberTo(&r, max_column_family_);
|
2014-03-11 03:59:44 +01:00
|
|
|
}
|
2011-03-18 23:37:00 +01:00
|
|
|
r.append("\n}\n");
|
|
|
|
return r;
|
|
|
|
}
|
|
|
|
|
Added JSON manifest dump option to ldb command
Summary:
Added a new flag --json to the ldb manifest_dump command
that prints out the version edits as JSON objects for easier
reading and parsing of information.
Test Plan:
**Sample usage: **
```
./ldb manifest_dump --json --path=path/to/manifest/file
```
**Sample output:**
```
{"EditNumber": 0, "Comparator": "leveldb.BytewiseComparator", "ColumnFamily": 0}
{"EditNumber": 1, "LogNumber": 0, "ColumnFamily": 0}
{"EditNumber": 2, "LogNumber": 4, "PrevLogNumber": 0, "NextFileNumber": 7, "LastSeq": 35356, "AddedFiles": [{"Level": 0, "FileNumber": 5, "FileSize": 1949284, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
{"EditNumber": 13, "PrevLogNumber": 0, "NextFileNumber": 36, "LastSeq": 290994, "DeletedFiles": [{"Level": 0, "FileNumber": 17}, {"Level": 0, "FileNumber": 20}, {"Level": 0, "FileNumber": 22}, {"Level": 0, "FileNumber": 24}, {"Level": 1, "FileNumber": 13}, {"Level": 1, "FileNumber": 14}, {"Level": 1, "FileNumber": 15}, {"Level": 1, "FileNumber": 18}], "AddedFiles": [{"Level": 1, "FileNumber": 25, "FileSize": 2114340, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 26, "FileSize": 2115213, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 27, "FileSize": 2114807, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 30, "FileSize": 2115271, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 31, "FileSize": 2115165, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 32, "FileSize": 2114683, "SmallestIKey": "'", "LargestIKey": "'"}, {"Level": 1, "FileNumber": 35, "FileSize": 1757512, "SmallestIKey": "'", "LargestIKey": "'"}], "ColumnFamily": 0}
...
```
Reviewers: sdong, anthony, yhchiang, igor
Reviewed By: igor
Subscribers: dhruba
Differential Revision: https://reviews.facebook.net/D41727
2015-07-17 19:07:40 +02:00
|
|
|
std::string VersionEdit::DebugJSON(int edit_num, bool hex_key) const {
|
|
|
|
JSONWriter jw;
|
|
|
|
jw << "EditNumber" << edit_num;
|
|
|
|
|
|
|
|
if (has_comparator_) {
|
|
|
|
jw << "Comparator" << comparator_;
|
|
|
|
}
|
|
|
|
if (has_log_number_) {
|
|
|
|
jw << "LogNumber" << log_number_;
|
|
|
|
}
|
|
|
|
if (has_prev_log_number_) {
|
|
|
|
jw << "PrevLogNumber" << prev_log_number_;
|
|
|
|
}
|
|
|
|
if (has_next_file_number_) {
|
|
|
|
jw << "NextFileNumber" << next_file_number_;
|
|
|
|
}
|
|
|
|
if (has_last_sequence_) {
|
|
|
|
jw << "LastSeq" << last_sequence_;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!deleted_files_.empty()) {
|
|
|
|
jw << "DeletedFiles";
|
|
|
|
jw.StartArray();
|
|
|
|
|
|
|
|
for (DeletedFileSet::const_iterator iter = deleted_files_.begin();
|
|
|
|
iter != deleted_files_.end();
|
|
|
|
++iter) {
|
|
|
|
jw.StartArrayedObject();
|
|
|
|
jw << "Level" << iter->first;
|
|
|
|
jw << "FileNumber" << iter->second;
|
|
|
|
jw.EndArrayedObject();
|
|
|
|
}
|
|
|
|
|
|
|
|
jw.EndArray();
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!new_files_.empty()) {
|
|
|
|
jw << "AddedFiles";
|
|
|
|
jw.StartArray();
|
|
|
|
|
|
|
|
for (size_t i = 0; i < new_files_.size(); i++) {
|
|
|
|
jw.StartArrayedObject();
|
|
|
|
jw << "Level" << new_files_[i].first;
|
|
|
|
const FileMetaData& f = new_files_[i].second;
|
|
|
|
jw << "FileNumber" << f.fd.GetNumber();
|
|
|
|
jw << "FileSize" << f.fd.GetFileSize();
|
|
|
|
jw << "SmallestIKey" << f.smallest.DebugString(hex_key);
|
|
|
|
jw << "LargestIKey" << f.largest.DebugString(hex_key);
|
|
|
|
jw.EndArrayedObject();
|
|
|
|
}
|
|
|
|
|
|
|
|
jw.EndArray();
|
|
|
|
}
|
|
|
|
|
|
|
|
jw << "ColumnFamily" << column_family_;
|
|
|
|
|
|
|
|
if (is_column_family_add_) {
|
|
|
|
jw << "ColumnFamilyAdd" << column_family_name_;
|
|
|
|
}
|
|
|
|
if (is_column_family_drop_) {
|
|
|
|
jw << "ColumnFamilyDrop" << column_family_name_;
|
|
|
|
}
|
|
|
|
if (has_max_column_family_) {
|
|
|
|
jw << "MaxColumnFamily" << max_column_family_;
|
|
|
|
}
|
|
|
|
|
|
|
|
jw.EndObject();
|
|
|
|
|
|
|
|
return jw.Get();
|
|
|
|
}
|
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
} // namespace rocksdb
|