6fbc4f9f3e
Introduce a new DBOptions option, random_access_max_buffer_size, to limit the size of the random-access buffer used for unbuffered access, and implement read-ahead buffering when it is enabled. To that effect, propagate compaction_readahead_size and the new option into EnvOptions so they are available to the implementation. Add a Hint() override so that the SetupForCompaction() call goes through Hint(); read-ahead can now be set up from both Hint() and EnableReadAhead(). Add random_access_max_buffer_size support to db_bench and options_helper (making the option string-parsable), plus a unit test.
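As a minimal illustration of the propagation described above: the sketch below sets the two options on DBOptions and then derives EnvOptions from them, which runs through AssignEnvOptions() in the file that follows. This is a hypothetical standalone usage example, not part of the commit; the buffer sizes are arbitrary example values, and the string-parsing path via options_helper mentioned in the commit message is not shown.

#include "rocksdb/env.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::DBOptions db_options;
  // Read ahead up to 2 MB during compaction (example value).
  db_options.compaction_readahead_size = 2 * 1024 * 1024;
  // Cap the random-access (read-ahead) buffer used for unbuffered I/O
  // at 1 MB (example value).
  db_options.random_access_max_buffer_size = 1024 * 1024;

  // EnvOptions(const DBOptions&) calls AssignEnvOptions(), which copies
  // both fields so the env-level file implementation can see them.
  rocksdb::EnvOptions env_options(db_options);
  return env_options.random_access_max_buffer_size ==
                 db_options.random_access_max_buffer_size
             ? 0
             : 1;
}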
325 lines
8.3 KiB
C++
// Copyright (c) 2013, Facebook, Inc.  All rights reserved.
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree. An additional grant
// of patent rights can be found in the PATENTS file in the same directory.
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#include "rocksdb/env.h"

#include <thread>
#include "port/port.h"
#include "port/sys_time.h"

#include "rocksdb/options.h"
#include "util/arena.h"
#include "util/autovector.h"

namespace rocksdb {

Env::~Env() {
}

uint64_t Env::GetThreadID() const {
  std::hash<std::thread::id> hasher;
  return hasher(std::this_thread::get_id());
}

Status Env::ReuseWritableFile(const std::string& fname,
                              const std::string& old_fname,
                              unique_ptr<WritableFile>* result,
                              const EnvOptions& options) {
  Status s = RenameFile(old_fname, fname);
  if (!s.ok()) {
    return s;
  }
  return NewWritableFile(fname, result, options);
}

SequentialFile::~SequentialFile() {
}

RandomAccessFile::~RandomAccessFile() {
}

WritableFile::~WritableFile() {
}

Logger::~Logger() {
}

FileLock::~FileLock() {
}

void LogFlush(Logger *info_log) {
  if (info_log) {
    info_log->Flush();
  }
}

void Log(Logger* info_log, const char* format, ...) {
  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::INFO_LEVEL) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::INFO_LEVEL, format, ap);
    va_end(ap);
  }
}

void Logger::Logv(const InfoLogLevel log_level, const char* format,
                  va_list ap) {
  static const char* kInfoLogLevelNames[5] = { "DEBUG", "INFO", "WARN",
    "ERROR", "FATAL" };
  if (log_level < log_level_) {
    return;
  }

  if (log_level == InfoLogLevel::INFO_LEVEL) {
    // Doesn't print log level if it is INFO level.
    // This is to avoid unexpected performance regression after we add
    // the feature of log level. All the logs before we add the feature
    // are INFO level. We don't want to add extra costs to those existing
    // logging.
    Logv(format, ap);
  } else {
    char new_format[500];
    snprintf(new_format, sizeof(new_format) - 1, "[%s] %s",
             kInfoLogLevelNames[log_level], format);
    Logv(new_format, ap);
  }
}

void Log(const InfoLogLevel log_level, Logger* info_log, const char* format,
         ...) {
  if (info_log && info_log->GetInfoLogLevel() <= log_level) {
    va_list ap;
    va_start(ap, format);

    if (log_level == InfoLogLevel::HEADER_LEVEL) {
      info_log->LogHeader(format, ap);
    } else {
      info_log->Logv(log_level, format, ap);
    }

    va_end(ap);
  }
}

void Header(Logger* info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->LogHeader(format, ap);
    va_end(ap);
  }
}

void Debug(Logger* info_log, const char* format, ...) {
  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::DEBUG_LEVEL) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::DEBUG_LEVEL, format, ap);
    va_end(ap);
  }
}

void Info(Logger* info_log, const char* format, ...) {
  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::INFO_LEVEL) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::INFO_LEVEL, format, ap);
    va_end(ap);
  }
}

void Warn(Logger* info_log, const char* format, ...) {
  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::WARN_LEVEL) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::WARN_LEVEL, format, ap);
    va_end(ap);
  }
}

void Error(Logger* info_log, const char* format, ...) {
  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::ERROR_LEVEL) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::ERROR_LEVEL, format, ap);
    va_end(ap);
  }
}

void Fatal(Logger* info_log, const char* format, ...) {
  if (info_log && info_log->GetInfoLogLevel() <= InfoLogLevel::FATAL_LEVEL) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::FATAL_LEVEL, format, ap);
    va_end(ap);
  }
}

void LogFlush(const shared_ptr<Logger>& info_log) {
  if (info_log) {
    info_log->Flush();
  }
}

void Log(const InfoLogLevel log_level, const shared_ptr<Logger>& info_log,
         const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(log_level, format, ap);
    va_end(ap);
  }
}

void Header(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->LogHeader(format, ap);
    va_end(ap);
  }
}

void Debug(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::DEBUG_LEVEL, format, ap);
    va_end(ap);
  }
}

void Info(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::INFO_LEVEL, format, ap);
    va_end(ap);
  }
}

void Warn(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::WARN_LEVEL, format, ap);
    va_end(ap);
  }
}

void Error(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::ERROR_LEVEL, format, ap);
    va_end(ap);
  }
}

void Fatal(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::FATAL_LEVEL, format, ap);
    va_end(ap);
  }
}

void Log(const shared_ptr<Logger>& info_log, const char* format, ...) {
  if (info_log) {
    va_list ap;
    va_start(ap, format);
    info_log->Logv(InfoLogLevel::INFO_LEVEL, format, ap);
    va_end(ap);
  }
}

Status WriteStringToFile(Env* env, const Slice& data, const std::string& fname,
                         bool should_sync) {
  unique_ptr<WritableFile> file;
  EnvOptions soptions;
  Status s = env->NewWritableFile(fname, &file, soptions);
  if (!s.ok()) {
    return s;
  }
  s = file->Append(data);
  if (s.ok() && should_sync) {
    s = file->Sync();
  }
  if (!s.ok()) {
    env->DeleteFile(fname);
  }
  return s;
}

Status ReadFileToString(Env* env, const std::string& fname, std::string* data) {
  EnvOptions soptions;
  data->clear();
  unique_ptr<SequentialFile> file;
  Status s = env->NewSequentialFile(fname, &file, soptions);
  if (!s.ok()) {
    return s;
  }
  static const int kBufferSize = 8192;
  char* space = new char[kBufferSize];
  while (true) {
    Slice fragment;
    s = file->Read(kBufferSize, &fragment, space);
    if (!s.ok()) {
      break;
    }
    data->append(fragment.data(), fragment.size());
    if (fragment.empty()) {
      break;
    }
  }
  delete[] space;
  return s;
}

EnvWrapper::~EnvWrapper() {
}

namespace {  // anonymous namespace

void AssignEnvOptions(EnvOptions* env_options, const DBOptions& options) {
  env_options->use_os_buffer = options.allow_os_buffer;
  env_options->use_mmap_reads = options.allow_mmap_reads;
  env_options->use_mmap_writes = options.allow_mmap_writes;
  env_options->set_fd_cloexec = options.is_fd_close_on_exec;
  env_options->bytes_per_sync = options.bytes_per_sync;
  env_options->compaction_readahead_size = options.compaction_readahead_size;
  env_options->random_access_max_buffer_size =
      options.random_access_max_buffer_size;
  env_options->rate_limiter = options.rate_limiter.get();
  env_options->allow_fallocate = options.allow_fallocate;
}

}  // anonymous namespace

EnvOptions Env::OptimizeForLogWrite(const EnvOptions& env_options,
                                    const DBOptions& db_options) const {
  EnvOptions optimized_env_options(env_options);
  optimized_env_options.bytes_per_sync = db_options.wal_bytes_per_sync;
  return optimized_env_options;
}

EnvOptions Env::OptimizeForManifestWrite(const EnvOptions& env_options) const {
  return env_options;
}

EnvOptions::EnvOptions(const DBOptions& options) {
  AssignEnvOptions(this, options);
}

EnvOptions::EnvOptions() {
  DBOptions options;
  AssignEnvOptions(this, options);
}

}  // namespace rocksdb