2016-02-10 00:12:00 +01:00
|
|
|
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
|
2017-07-16 01:03:42 +02:00
|
|
|
// This source code is licensed under both the GPLv2 (found in the
|
|
|
|
// COPYING file in the root directory) and Apache 2.0 License
|
|
|
|
// (found in the LICENSE.Apache file in the root directory).
|
2013-10-16 23:59:46 +02:00
|
|
|
//
|
2011-03-18 23:37:00 +01:00
|
|
|
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style license that can be
|
|
|
|
// found in the LICENSE file. See the AUTHORS file for names of contributors.
|
2019-05-30 05:44:08 +02:00
|
|
|
#include "file/filename.h"
|
2019-06-06 22:52:39 +02:00
|
|
|
#include <cinttypes>
|
2012-11-30 02:28:37 +01:00
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
#include <ctype.h>
|
|
|
|
#include <stdio.h>
|
2014-07-02 18:54:20 +02:00
|
|
|
#include <vector>
|
2019-09-16 19:31:27 +02:00
|
|
|
#include "file/writable_file_writer.h"
|
2019-06-01 02:19:43 +02:00
|
|
|
#include "logging/logging.h"
|
2013-08-23 17:38:13 +02:00
|
|
|
#include "rocksdb/env.h"
|
2019-05-31 02:39:43 +02:00
|
|
|
#include "test_util/sync_point.h"
|
2015-01-22 20:43:38 +01:00
|
|
|
#include "util/stop_watch.h"
|
2015-11-11 07:58:01 +01:00
|
|
|
#include "util/string_util.h"
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
namespace rocksdb {
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2015-10-07 02:46:22 +02:00
|
|
|
// File extension used for RocksDB-native table (SST) files.
static const std::string kRocksDbTFileExt = "sst";
// Legacy LevelDB table file extension; still recognized when parsing so
// databases migrated from LevelDB keep working.
static const std::string kLevelDbTFileExt = "ldb";
// File extension used for blob files.
static const std::string kRocksDBBlobFileExt = "blob";
|
2015-10-07 02:46:22 +02:00
|
|
|
|
2012-09-06 02:44:13 +02:00
|
|
|
// Given a path, flatten the path name by replacing all chars not in
// {[0-9,a-z,A-Z,-,_,.]} with _. And append '_LOG\0' at the end.
// Return the number of chars stored in dest not including the trailing '\0'.
static size_t GetInfoLogPrefix(const std::string& path, char* dest,
                               size_t len) {
  const char suffix[] = "_LOG";

  size_t write_idx = 0;
  size_t i = 0;
  size_t src_len = path.size();

  // With the previous `int len` parameter, `len - sizeof(suffix)` mixed
  // signed and unsigned arithmetic and could underflow to a huge value for
  // a too-small buffer. Guard explicitly instead.
  assert(len >= sizeof(suffix));
  if (len < sizeof(suffix)) {
    dest[0] = '\0';
    return 0;
  }

  // Copy sanitized path characters, always reserving room for "_LOG\0".
  while (i < src_len && write_idx < len - sizeof(suffix)) {
    if ((path[i] >= 'a' && path[i] <= 'z') ||
        (path[i] >= '0' && path[i] <= '9') ||
        (path[i] >= 'A' && path[i] <= 'Z') ||
        path[i] == '-' ||
        path[i] == '.' ||
        path[i] == '_'){
      dest[write_idx++] = path[i];
    } else {
      // Map any disallowed char to '_', but skip a leading separator so the
      // result does not start with an underscore.
      if (i > 0) {
        dest[write_idx++] = '_';
      }
    }
    i++;
  }

  assert(sizeof(suffix) <= len - write_idx);
  // Use an explicit "%s" format: passing `suffix` itself as the format
  // string trips -Wformat-security. The "\0" is added by snprintf.
  snprintf(dest + write_idx, len - write_idx, "%s", suffix);
  write_idx += sizeof(suffix) - 1;
  return write_idx;
}
|
|
|
|
|
2019-08-02 00:45:19 +02:00
|
|
|
// Builds the bare file name "<number zero-padded to >=6 digits>.<suffix>".
static std::string MakeFileName(uint64_t number, const char* suffix) {
  char name_buf[100];
  const auto num = static_cast<unsigned long long>(number);
  snprintf(name_buf, sizeof(name_buf), "%06llu.%s", num, suffix);
  return std::string(name_buf);
}
|
|
|
|
|
|
|
|
static std::string MakeFileName(const std::string& name, uint64_t number,
|
|
|
|
const char* suffix) {
|
|
|
|
return name + "/" + MakeFileName(number, suffix);
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
std::string LogFileName(const std::string& name, uint64_t number) {
|
|
|
|
assert(number > 0);
|
|
|
|
return MakeFileName(name, number, "log");
|
|
|
|
}
|
|
|
|
|
2019-08-02 00:45:19 +02:00
|
|
|
std::string LogFileName(uint64_t number) {
|
|
|
|
assert(number > 0);
|
|
|
|
return MakeFileName(number, "log");
|
|
|
|
}
|
|
|
|
|
2017-04-18 21:00:36 +02:00
|
|
|
std::string BlobFileName(const std::string& blobdirname, uint64_t number) {
|
|
|
|
assert(number > 0);
|
|
|
|
return MakeFileName(blobdirname, number, kRocksDBBlobFileExt.c_str());
|
|
|
|
}
|
|
|
|
|
2018-08-31 20:59:49 +02:00
|
|
|
std::string BlobFileName(const std::string& dbname, const std::string& blob_dir,
|
|
|
|
uint64_t number) {
|
|
|
|
assert(number > 0);
|
|
|
|
return MakeFileName(dbname + "/" + blob_dir, number,
|
|
|
|
kRocksDBBlobFileExt.c_str());
|
|
|
|
}
|
|
|
|
|
2013-10-01 23:46:52 +02:00
|
|
|
std::string ArchivalDirectory(const std::string& dir) {
|
|
|
|
return dir + "/" + ARCHIVAL_DIR;
|
2012-12-08 01:30:22 +01:00
|
|
|
}
|
2012-11-30 02:28:37 +01:00
|
|
|
std::string ArchivedLogFileName(const std::string& name, uint64_t number) {
|
|
|
|
assert(number > 0);
|
2013-08-29 23:30:52 +02:00
|
|
|
return MakeFileName(name + "/" + ARCHIVAL_DIR, number, "log");
|
2012-11-30 02:28:37 +01:00
|
|
|
}
|
|
|
|
|
2014-07-02 18:54:20 +02:00
|
|
|
std::string MakeTableFileName(const std::string& path, uint64_t number) {
|
2015-10-07 02:46:22 +02:00
|
|
|
return MakeFileName(path, number, kRocksDbTFileExt.c_str());
|
|
|
|
}
|
|
|
|
|
2019-08-02 00:45:19 +02:00
|
|
|
std::string MakeTableFileName(uint64_t number) {
|
|
|
|
return MakeFileName(number, kRocksDbTFileExt.c_str());
|
|
|
|
}
|
|
|
|
|
2015-10-07 02:46:22 +02:00
|
|
|
std::string Rocks2LevelTableFileName(const std::string& fullname) {
|
|
|
|
assert(fullname.size() > kRocksDbTFileExt.size() + 1);
|
|
|
|
if (fullname.size() <= kRocksDbTFileExt.size() + 1) {
|
|
|
|
return "";
|
|
|
|
}
|
|
|
|
return fullname.substr(0, fullname.size() - kRocksDbTFileExt.size()) +
|
|
|
|
kLevelDbTFileExt;
|
2014-07-02 18:54:20 +02:00
|
|
|
}
|
|
|
|
|
CompactFiles, EventListener and GetDatabaseMetaData
Summary:
This diff adds three sets of APIs to RocksDB.
= GetColumnFamilyMetaData =
* This APIs allow users to obtain the current state of a RocksDB instance on one column family.
* See GetColumnFamilyMetaData in include/rocksdb/db.h
= EventListener =
* A virtual class that allows users to implement a set of
call-back functions which will be called when specific
events of a RocksDB instance happens.
* To register EventListener, simply insert an EventListener to ColumnFamilyOptions::listeners
= CompactFiles =
* CompactFiles API inputs a set of file numbers and an output level, and RocksDB
will try to compact those files into the specified level.
= Example =
* Example code can be found in example/compact_files_example.cc, which implements
a simple external compactor using EventListener, GetColumnFamilyMetaData, and
CompactFiles API.
Test Plan:
listener_test
compactor_test
example/compact_files_example
export ROCKSDB_TESTS=CompactFiles
db_test
export ROCKSDB_TESTS=MetaData
db_test
Reviewers: ljin, igor, rven, sdong
Reviewed By: sdong
Subscribers: MarkCallaghan, dhruba, leveldb
Differential Revision: https://reviews.facebook.net/D24705
2014-11-07 23:45:18 +01:00
|
|
|
// Extracts the decimal file number that immediately precedes the last '.'
// in `name` (e.g. "/db/000123.sst" -> 123). Returns 0 when no digits are
// found (including when `name` has no '.' at all).
uint64_t TableFileNameToNumber(const std::string& name) {
  uint64_t result = 0;
  uint64_t place_value = 1;
  // Scan digits right-to-left starting just before the last dot; npos casts
  // to -1 so a dot-less name skips the loop entirely.
  int idx = static_cast<int>(name.find_last_of('.'));
  for (--idx; idx >= 0; --idx) {
    const char c = name[idx];
    if (c < '0' || c > '9') {
      break;
    }
    result += static_cast<uint64_t>(c - '0') * place_value;
    place_value *= 10;
  }
  return result;
}
|
|
|
|
|
2014-07-15 00:34:30 +02:00
|
|
|
std::string TableFileName(const std::vector<DbPath>& db_paths, uint64_t number,
|
|
|
|
uint32_t path_id) {
|
2011-03-18 23:37:00 +01:00
|
|
|
assert(number > 0);
|
2014-07-02 18:54:20 +02:00
|
|
|
std::string path;
|
|
|
|
if (path_id >= db_paths.size()) {
|
2014-07-15 00:34:30 +02:00
|
|
|
path = db_paths.back().path;
|
2014-07-02 18:54:20 +02:00
|
|
|
} else {
|
2014-07-15 00:34:30 +02:00
|
|
|
path = db_paths[path_id].path;
|
2014-07-02 18:54:20 +02:00
|
|
|
}
|
|
|
|
return MakeTableFileName(path, number);
|
|
|
|
}
|
|
|
|
|
2014-08-13 20:57:40 +02:00
|
|
|
// Renders `number` into out_buf, appending "(path <id>)" when the file
// lives outside the primary DB path (path_id != 0).
void FormatFileNumber(uint64_t number, uint32_t path_id, char* out_buf,
                      size_t out_buf_size) {
  if (path_id == 0) {
    snprintf(out_buf, out_buf_size, "%" PRIu64, number);
    return;
  }
  snprintf(out_buf, out_buf_size, "%" PRIu64 "(path %" PRIu32 ")", number,
           path_id);
}
|
|
|
|
|
|
|
|
// MANIFEST file path: "<dbname>/MANIFEST-<number zero-padded to 6 digits>".
std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
  assert(number > 0);
  char suffix[100];
  snprintf(suffix, sizeof(suffix), "/MANIFEST-%06llu",
           static_cast<unsigned long long>(number));
  std::string result = dbname;
  result += suffix;
  return result;
}
|
|
|
|
|
|
|
|
// Path of the CURRENT file, which records the name of the live MANIFEST.
std::string CurrentFileName(const std::string& dbname) {
  std::string result = dbname;
  result.append("/CURRENT");
  return result;
}
|
|
|
|
|
|
|
|
// Path of the LOCK file used to guard against concurrent DB opens.
std::string LockFileName(const std::string& dbname) {
  std::string result = dbname;
  result.append("/LOCK");
  return result;
}
|
|
|
|
|
|
|
|
std::string TempFileName(const std::string& dbname, uint64_t number) {
|
2015-11-11 07:58:01 +01:00
|
|
|
return MakeFileName(dbname, number, kTempFileNameSuffix.c_str());
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2014-08-14 19:05:16 +02:00
|
|
|
// Computes the prefix used for info log file names. Without a separate log
// dir the prefix is plain "LOG"; with one, the db path is flattened into
// the prefix so logs from different databases do not collide.
InfoLogPrefix::InfoLogPrefix(bool has_log_dir,
                             const std::string& db_absolute_path) {
  if (!has_log_dir) {
    const char kInfoLogPrefix[] = "LOG";
    // Use an explicit "%s" format rather than passing the string as the
    // format itself (avoids -Wformat-security). "\0" is automatically
    // added to the end by snprintf.
    snprintf(buf, sizeof(buf), "%s", kInfoLogPrefix);
    prefix = Slice(buf, sizeof(kInfoLogPrefix) - 1);
  } else {
    size_t len = GetInfoLogPrefix(db_absolute_path, buf, sizeof(buf));
    prefix = Slice(buf, len);
  }
}
|
|
|
|
|
2012-09-06 02:44:13 +02:00
|
|
|
std::string InfoLogFileName(const std::string& dbname,
|
|
|
|
const std::string& db_path, const std::string& log_dir) {
|
2015-09-23 21:39:16 +02:00
|
|
|
if (log_dir.empty()) {
|
2012-09-06 02:44:13 +02:00
|
|
|
return dbname + "/LOG";
|
2015-09-23 21:39:16 +02:00
|
|
|
}
|
2012-09-06 02:44:13 +02:00
|
|
|
|
2014-08-14 19:05:16 +02:00
|
|
|
InfoLogPrefix info_log_prefix(true, db_path);
|
|
|
|
return log_dir + "/" + info_log_prefix.buf;
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Return the name of the old info log file for "dbname".
|
2012-09-06 02:44:13 +02:00
|
|
|
std::string OldInfoLogFileName(const std::string& dbname, uint64_t ts,
|
|
|
|
const std::string& db_path, const std::string& log_dir) {
|
2012-08-18 01:06:05 +02:00
|
|
|
char buf[50];
|
|
|
|
snprintf(buf, sizeof(buf), "%llu", static_cast<unsigned long long>(ts));
|
2012-09-06 02:44:13 +02:00
|
|
|
|
2015-09-23 21:39:16 +02:00
|
|
|
if (log_dir.empty()) {
|
2012-09-06 02:44:13 +02:00
|
|
|
return dbname + "/LOG.old." + buf;
|
2015-09-23 21:39:16 +02:00
|
|
|
}
|
2012-09-06 02:44:13 +02:00
|
|
|
|
2014-08-14 19:05:16 +02:00
|
|
|
InfoLogPrefix info_log_prefix(true, db_path);
|
|
|
|
return log_dir + "/" + info_log_prefix.buf + ".old." + buf;
|
2011-03-18 23:37:00 +01:00
|
|
|
}
|
|
|
|
|
2015-11-11 07:58:01 +01:00
|
|
|
// OPTIONS file path "<dbname>/OPTIONS-<file_num zero-padded to 6 digits>".
std::string OptionsFileName(const std::string& dbname, uint64_t file_num) {
  char file_part[256];
  snprintf(file_part, sizeof(file_part), "%s%06" PRIu64,
           kOptionsFileNamePrefix.c_str(), file_num);
  std::string result = dbname;
  result += "/";
  result += file_part;
  return result;
}
|
|
|
|
|
|
|
|
// Temporary OPTIONS file path "<dbname>/OPTIONS-<file_num>.dbtmp".
std::string TempOptionsFileName(const std::string& dbname, uint64_t file_num) {
  char file_part[256];
  snprintf(file_part, sizeof(file_part), "%s%06" PRIu64 ".%s",
           kOptionsFileNamePrefix.c_str(), file_num,
           kTempFileNameSuffix.c_str());
  std::string result = dbname;
  result += "/";
  result += file_part;
  return result;
}
|
|
|
|
|
2012-12-17 20:26:59 +01:00
|
|
|
// Meta database path "<dbname>/METADB-<number>" (no zero padding, unlike
// the MANIFEST naming scheme).
std::string MetaDatabaseName(const std::string& dbname, uint64_t number) {
  char suffix[100];
  snprintf(suffix, sizeof(suffix), "/METADB-%llu",
           static_cast<unsigned long long>(number));
  std::string result = dbname;
  result += suffix;
  return result;
}
|
2011-03-18 23:37:00 +01:00
|
|
|
|
2013-10-18 23:50:54 +02:00
|
|
|
// Path of the IDENTITY file, which stores the database's unique id.
std::string IdentityFileName(const std::string& dbname) {
  std::string result = dbname;
  result.append("/IDENTITY");
  return result;
}
|
|
|
|
|
2011-03-18 23:37:00 +01:00
|
|
|
// Owned filenames have the form:
|
2013-10-18 23:50:54 +02:00
|
|
|
// dbname/IDENTITY
|
2011-03-18 23:37:00 +01:00
|
|
|
// dbname/CURRENT
|
|
|
|
// dbname/LOCK
|
2014-08-14 19:05:16 +02:00
|
|
|
// dbname/<info_log_name_prefix>
|
|
|
|
// dbname/<info_log_name_prefix>.old.[0-9]+
|
2011-03-18 23:37:00 +01:00
|
|
|
// dbname/MANIFEST-[0-9]+
|
2017-04-18 21:00:36 +02:00
|
|
|
// dbname/[0-9]+.(log|sst|blob)
|
2012-12-17 20:26:59 +01:00
|
|
|
// dbname/METADB-[0-9]+
|
2015-11-11 07:58:01 +01:00
|
|
|
// dbname/OPTIONS-[0-9]+
|
|
|
|
// dbname/OPTIONS-[0-9]+.dbtmp
|
2013-09-01 10:52:32 +02:00
|
|
|
// Disregards / at the beginning
|
2011-03-18 23:37:00 +01:00
|
|
|
bool ParseFileName(const std::string& fname,
|
|
|
|
uint64_t* number,
|
2013-10-24 08:39:23 +02:00
|
|
|
FileType* type,
|
|
|
|
WalFileType* log_type) {
|
2014-08-14 19:05:16 +02:00
|
|
|
return ParseFileName(fname, number, "", type, log_type);
|
|
|
|
}
|
|
|
|
|
|
|
|
// Parses a file name (optionally with a leading '/') produced by the
// helpers above back into its file type and number. Returns true on
// success; on failure *number/*type are left in an unspecified state.
// When `log_type` is non-null it is set for WAL files (alive vs archived).
bool ParseFileName(const std::string& fname, uint64_t* number,
                   const Slice& info_log_name_prefix, FileType* type,
                   WalFileType* log_type) {
  Slice rest(fname);
  // Disregard a single leading '/' so both "000001.sst" and "/000001.sst"
  // parse identically.
  if (fname.length() > 1 && fname[0] == '/') {
    rest.remove_prefix(1);
  }
  // Fixed-name files first; they carry no number.
  if (rest == "IDENTITY") {
    *number = 0;
    *type = kIdentityFile;
  } else if (rest == "CURRENT") {
    *number = 0;
    *type = kCurrentFile;
  } else if (rest == "LOCK") {
    *number = 0;
    *type = kDBLockFile;
  } else if (info_log_name_prefix.size() > 0 &&
             rest.starts_with(info_log_name_prefix)) {
    // Info logs: "<prefix>", "<prefix>.old", or "<prefix>.old.<timestamp>".
    rest.remove_prefix(info_log_name_prefix.size());
    if (rest == "" || rest == ".old") {
      *number = 0;
      *type = kInfoLogFile;
    } else if (rest.starts_with(".old.")) {
      uint64_t ts_suffix;
      // sizeof also counts the trailing '\0'.
      rest.remove_prefix(sizeof(".old.") - 1);
      if (!ConsumeDecimalNumber(&rest, &ts_suffix)) {
        return false;
      }
      // For rotated info logs the "number" is the rotation timestamp.
      *number = ts_suffix;
      *type = kInfoLogFile;
    }
  } else if (rest.starts_with("MANIFEST-")) {
    rest.remove_prefix(strlen("MANIFEST-"));
    uint64_t num;
    if (!ConsumeDecimalNumber(&rest, &num)) {
      return false;
    }
    // Reject trailing garbage after the number.
    if (!rest.empty()) {
      return false;
    }
    *type = kDescriptorFile;
    *number = num;
  } else if (rest.starts_with("METADB-")) {
    rest.remove_prefix(strlen("METADB-"));
    uint64_t num;
    if (!ConsumeDecimalNumber(&rest, &num)) {
      return false;
    }
    // Reject trailing garbage after the number.
    if (!rest.empty()) {
      return false;
    }
    *type = kMetaDatabase;
    *number = num;
  } else if (rest.starts_with(kOptionsFileNamePrefix)) {
    // "OPTIONS-<num>" or its temporary variant "OPTIONS-<num>.dbtmp".
    uint64_t ts_suffix;
    bool is_temp_file = false;
    rest.remove_prefix(kOptionsFileNamePrefix.size());
    const std::string kTempFileNameSuffixWithDot =
        std::string(".") + kTempFileNameSuffix;
    if (rest.ends_with(kTempFileNameSuffixWithDot)) {
      rest.remove_suffix(kTempFileNameSuffixWithDot.size());
      is_temp_file = true;
    }
    if (!ConsumeDecimalNumber(&rest, &ts_suffix)) {
      return false;
    }
    *number = ts_suffix;
    *type = is_temp_file ? kTempFile : kOptionsFile;
  } else {
    // Numbered data files: "<num>.log|sst|ldb|blob|dbtmp", possibly inside
    // the WAL archive directory.
    // Avoid strtoull() to keep filename format independent of the
    // current locale
    bool archive_dir_found = false;
    if (rest.starts_with(ARCHIVAL_DIR)) {
      if (rest.size() <= ARCHIVAL_DIR.size()) {
        return false;
      }
      rest.remove_prefix(ARCHIVAL_DIR.size() + 1); // Add 1 to remove / also
      if (log_type) {
        *log_type = kArchivedLogFile;
      }
      archive_dir_found = true;
    }
    uint64_t num;
    if (!ConsumeDecimalNumber(&rest, &num)) {
      return false;
    }
    // A valid name has at least a '.' and a one-char extension left.
    if (rest.size() <= 1 || rest[0] != '.') {
      return false;
    }
    rest.remove_prefix(1);

    Slice suffix = rest;
    if (suffix == Slice("log")) {
      *type = kLogFile;
      if (log_type && !archive_dir_found) {
        *log_type = kAliveLogFile;
      }
    } else if (archive_dir_found) {
      return false; // Archive dir can contain only log files
    } else if (suffix == Slice(kRocksDbTFileExt) ||
               suffix == Slice(kLevelDbTFileExt)) {
      *type = kTableFile;
    } else if (suffix == Slice(kRocksDBBlobFileExt)) {
      *type = kBlobFile;
    } else if (suffix == Slice(kTempFileNameSuffix)) {
      *type = kTempFile;
    } else {
      return false;
    }
    *number = num;
  }
  return true;
}
|
|
|
|
|
|
|
|
// Atomically points CURRENT at MANIFEST-<descriptor_number>: the new
// contents are written to a temp file first and then renamed over
// CURRENT, so readers never observe a partially written CURRENT file.
// On success, optionally fsyncs `directory_to_fsync` to persist the
// rename; on failure the temp file is removed (best effort).
Status SetCurrentFile(Env* env, const std::string& dbname,
                      uint64_t descriptor_number,
                      Directory* directory_to_fsync) {
  // Remove leading "dbname/" and add newline to manifest file name
  std::string manifest = DescriptorFileName(dbname, descriptor_number);
  Slice contents = manifest;
  assert(contents.starts_with(dbname + "/"));
  contents.remove_prefix(dbname.size() + 1);
  std::string tmp = TempFileName(dbname, descriptor_number);
  // `true` requests an fsync of the temp file before it is renamed.
  Status s = WriteStringToFile(env, contents.ToString() + "\n", tmp, true);
  if (s.ok()) {
    // Crash-test kill points straddle the rename to exercise recovery both
    // before and after CURRENT is switched.
    TEST_KILL_RANDOM("SetCurrentFile:0", rocksdb_kill_odds * REDUCE_ODDS2);
    s = env->RenameFile(tmp, CurrentFileName(dbname));
    TEST_KILL_RANDOM("SetCurrentFile:1", rocksdb_kill_odds * REDUCE_ODDS2);
  }
  if (s.ok()) {
    if (directory_to_fsync != nullptr) {
      // Persist the directory entry for the renamed CURRENT file.
      s = directory_to_fsync->Fsync();
    }
  } else {
    // Best-effort cleanup of the orphaned temp file; its status is ignored.
    env->DeleteFile(tmp);
  }
  return s;
}
|
|
|
|
|
2019-09-03 17:50:47 +02:00
|
|
|
// Writes the DB's unique id into the IDENTITY file. Uses `db_id` when
// provided, otherwise generates a fresh unique id via the Env. Like
// SetCurrentFile, it writes to a temp file and renames for atomicity;
// the temp file is removed on failure (best effort).
Status SetIdentityFile(Env* env, const std::string& dbname,
                       const std::string& db_id) {
  std::string id;
  if (db_id.empty()) {
    id = env->GenerateUniqueId();
  } else {
    id = db_id;
  }
  assert(!id.empty());
  // Reserve the filename dbname/000000.dbtmp for the temporary identity file
  std::string tmp = TempFileName(dbname, 0);
  // `true` requests an fsync of the temp file before the rename below.
  Status s = WriteStringToFile(env, id, tmp, true);
  if (s.ok()) {
    s = env->RenameFile(tmp, IdentityFileName(dbname));
  }
  if (!s.ok()) {
    // Best-effort cleanup; the delete's own status is intentionally ignored.
    env->DeleteFile(tmp);
  }
  return s;
}
|
|
|
|
|
2016-09-24 01:34:04 +02:00
|
|
|
// Syncs the MANIFEST file (fsync or fdatasync per db_options->use_fsync),
// timing the operation under MANIFEST_FILE_SYNC_MICROS.
Status SyncManifest(Env* env, const ImmutableDBOptions* db_options,
                    WritableFileWriter* file) {
  // Crash-test kill point: exercise recovery with an unsynced manifest.
  TEST_KILL_RANDOM("SyncManifest:0", rocksdb_kill_odds * REDUCE_ODDS2);
  StopWatch sw(env, db_options->statistics.get(), MANIFEST_FILE_SYNC_MICROS);
  return file->Sync(db_options->use_fsync);
}
|
|
|
|
|
2019-05-31 19:45:20 +02:00
|
|
|
// Collects the info log files (LOG, LOG.old.<ts>, ...) found in the
// directory that holds them: db_log_dir if configured, otherwise dbname.
// The scanned directory is reported through *parent_dir.
Status GetInfoLogFiles(Env* env, const std::string& db_log_dir,
                       const std::string& dbname, std::string* parent_dir,
                       std::vector<std::string>* info_log_list) {
  assert(parent_dir != nullptr);
  assert(info_log_list != nullptr);

  *parent_dir = db_log_dir.empty() ? dbname : db_log_dir;

  // When a separate log dir is used, log names carry a flattened-path
  // prefix derived from the db name.
  InfoLogPrefix info_log_prefix(!db_log_dir.empty(), dbname);

  std::vector<std::string> file_names;
  Status s = env->GetChildren(*parent_dir, &file_names);
  if (!s.ok()) {
    return s;
  }

  uint64_t number = 0;
  FileType type = kLogFile;
  for (const auto& file_name : file_names) {
    const bool parsed =
        ParseFileName(file_name, &number, info_log_prefix.prefix, &type);
    if (parsed && type == kInfoLogFile) {
      info_log_list->push_back(file_name);
    }
  }
  return Status::OK();
}
|
|
|
|
|
2013-10-04 06:49:15 +02:00
|
|
|
} // namespace rocksdb
|