Add macros to include file name and line number during Logging

Summary:
current logging
```
2017/03/14-14:20:30.393432 7fedde9f5700 (Original Log Time 2017/03/14-14:20:30.393414) [default] Level summary: base level 1 max bytes base 268435456 files[1 0 0 0 0 0 0] max score 0.25
2017/03/14-14:20:30.393438 7fedde9f5700 [JOB 2] Try to delete WAL files size 61417909, prev total WAL file size 73820858, number of live WAL files 2.
2017/03/14-14:20:30.393464 7fedde9f5700 [DEBUG] [JOB 2] Delete /dev/shm/old_logging//MANIFEST-000001 type=3 #1 -- OK
2017/03/14-14:20:30.393472 7fedde9f5700 [DEBUG] [JOB 2] Delete /dev/shm/old_logging//000003.log type=0 #3 -- OK
2017/03/14-14:20:31.427103 7fedd49f1700 [default] New memtable created with log file: #9. Immutable memtables: 0.
2017/03/14-14:20:31.427179 7fedde9f5700 [JOB 3] Syncing log #6
2017/03/14-14:20:31.427190 7fedde9f5700 (Original Log Time 2017/03/14-14:20:31.427170) Calling FlushMemTableToOutputFile with column family [default], flush slots available 1, compaction slots allowed 1, compaction slots scheduled 1
2017/03/14-14:20:31.
```
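
The new `ROCKS_LOG_*` macros (added in `util/logging.h`, which this diff starts including) stamp the call site into each message automatically. A minimal sketch of the underlying trick — simplified names and plain `printf` instead of RocksDB's `Logger`, so this is an illustration, not the exact macro definitions:

```
#include <cstdio>

// Stringify the line number at preprocessing time so it can be glued
// onto the caller's format-string literal.
#define LOG_STRINGIFY_HELPER(x) #x
#define LOG_STRINGIFY(x) LOG_STRINGIFY_HELPER(x)

// Prepend "[file:line] " to the format string. __FILE__ becomes the first
// argument; ##__VA_ARGS__ (a GNU extension, widely supported) swallows the
// trailing comma when there are no extra arguments.
#define SKETCH_LOG_INFO(fmt, ...)                                     \
  std::printf("[%s:" LOG_STRINGIFY(__LINE__) "] " fmt "\n", __FILE__, \
              ##__VA_ARGS__)

int main() {
  // Prints something like: [sketch.cc:18] Syncing log #6
  SKETCH_LOG_INFO("Syncing log #%d", 6);
  return 0;
}
```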
Closes https://github.com/facebook/rocksdb/pull/1990

Differential Revision: D4708695

Pulled By: IslamAbdelRahman

fbshipit-source-id: cb8968f
Islam AbdelRahman 2017-03-15 19:22:52 -07:00 committed by Facebook Github Bot
parent d525718a93
commit e19163688b
37 changed files with 1182 additions and 1045 deletions
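
The bulk of the diff is a mechanical rewrite: every variadic `Log(level, logger, fmt, ...)`, `LogToBuffer(...)`, or level-named helper (`Warn(...)`, `Header(...)`, ...) becomes the matching `ROCKS_LOG_*` macro. The pattern, excerpted from one of the db/column_family.cc hunks below:

```
// Before: severity passed at runtime; no call-site info in the output.
Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
    "[%s] Stopping writes because we have %d level-0 files",
    name_.c_str(), vstorage->l0_delay_trigger_count());

// After: severity encoded in the macro name; __FILE__/__LINE__ are added
// by the macro expansion.
ROCKS_LOG_WARN(ioptions_.info_log,
               "[%s] Stopping writes because we have %d level-0 files",
               name_.c_str(), vstorage->l0_delay_trigger_count());
```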

db/auto_roll_logger_test.cc

@@ -6,21 +6,22 @@
 #ifndef ROCKSDB_LITE
+#include "db/auto_roll_logger.h"
+#include <errno.h>
+#include <sys/stat.h>
+#include <algorithm>
+#include <cmath>
+#include <fstream>
+#include <iostream>
+#include <iterator>
 #include <string>
 #include <thread>
 #include <vector>
-#include <cmath>
-#include <iostream>
-#include <fstream>
-#include <iterator>
-#include <algorithm>
-#include "db/auto_roll_logger.h"
 #include "port/port.h"
+#include "rocksdb/db.h"
+#include "util/logging.h"
 #include "util/sync_point.h"
 #include "util/testharness.h"
-#include "rocksdb/db.h"
-#include <sys/stat.h>
-#include <errno.h>
 namespace rocksdb {
@@ -62,10 +63,10 @@ Env* AutoRollLoggerTest::env = Env::Default();
 // In this test we only want to Log some simple log message with
 // no format. LogMessage() provides such a simple interface and
 // avoids the [format-security] warning which occurs when you
-// call Log(logger, log_message) directly.
+// call ROCKS_LOG_INFO(logger, log_message) directly.
 namespace {
 void LogMessage(Logger* logger, const char* message) {
-  Log(logger, "%s", message);
+  ROCKS_LOG_INFO(logger, "%s", message);
 }
 void LogMessage(const InfoLogLevel log_level, Logger* logger,
@@ -332,12 +333,12 @@ TEST_F(AutoRollLoggerTest, InfoLogLevel) {
       logger.SetInfoLogLevel((InfoLogLevel)log_level);
       // again, messages with level smaller than log_level will not be logged.
-      Log(InfoLogLevel::HEADER_LEVEL, &logger, "%s", kSampleMessage.c_str());
-      Debug(&logger, "%s", kSampleMessage.c_str());
-      Info(&logger, "%s", kSampleMessage.c_str());
-      Warn(&logger, "%s", kSampleMessage.c_str());
-      Error(&logger, "%s", kSampleMessage.c_str());
-      Fatal(&logger, "%s", kSampleMessage.c_str());
+      ROCKS_LOG_HEADER(&logger, "%s", kSampleMessage.c_str());
+      ROCKS_LOG_DEBUG(&logger, "%s", kSampleMessage.c_str());
+      ROCKS_LOG_INFO(&logger, "%s", kSampleMessage.c_str());
+      ROCKS_LOG_WARN(&logger, "%s", kSampleMessage.c_str());
+      ROCKS_LOG_ERROR(&logger, "%s", kSampleMessage.c_str());
+      ROCKS_LOG_FATAL(&logger, "%s", kSampleMessage.c_str());
       log_lines += InfoLogLevel::HEADER_LEVEL - log_level + 1;
     }
   }
@@ -411,8 +412,7 @@ TEST_F(AutoRollLoggerTest, LogHeaderTest) {
     } else if (test_num == 1) {
       // HEADER_LEVEL should make this behave like calling Header()
       for (size_t i = 0; i < MAX_HEADERS; i++) {
-        Log(InfoLogLevel::HEADER_LEVEL, &logger, "%s %d",
-            HEADER_STR.c_str(), i);
+        ROCKS_LOG_HEADER(&logger, "%s %d", HEADER_STR.c_str(), i);
       }
     }

db/column_family.cc

@@ -205,8 +205,8 @@ ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
   }
   if (result.level0_file_num_compaction_trigger == 0) {
-    Warn(db_options.info_log.get(),
+    ROCKS_LOG_WARN(db_options.info_log.get(),
         "level0_file_num_compaction_trigger cannot be 0");
     result.level0_file_num_compaction_trigger = 1;
   }
@@ -214,14 +214,14 @@ ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
           result.level0_slowdown_writes_trigger ||
       result.level0_slowdown_writes_trigger <
          result.level0_file_num_compaction_trigger) {
-    Warn(db_options.info_log.get(),
+    ROCKS_LOG_WARN(db_options.info_log.get(),
         "This condition must be satisfied: "
         "level0_stop_writes_trigger(%d) >= "
         "level0_slowdown_writes_trigger(%d) >= "
         "level0_file_num_compaction_trigger(%d)",
         result.level0_stop_writes_trigger,
         result.level0_slowdown_writes_trigger,
         result.level0_file_num_compaction_trigger);
    if (result.level0_slowdown_writes_trigger <
        result.level0_file_num_compaction_trigger) {
      result.level0_slowdown_writes_trigger =
@@ -231,14 +231,14 @@ ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
        result.level0_slowdown_writes_trigger) {
      result.level0_stop_writes_trigger = result.level0_slowdown_writes_trigger;
    }
-    Warn(db_options.info_log.get(),
+    ROCKS_LOG_WARN(db_options.info_log.get(),
         "Adjust the value to "
         "level0_stop_writes_trigger(%d)"
         "level0_slowdown_writes_trigger(%d)"
         "level0_file_num_compaction_trigger(%d)",
         result.level0_stop_writes_trigger,
         result.level0_slowdown_writes_trigger,
         result.level0_file_num_compaction_trigger);
   }
   if (result.soft_pending_compaction_bytes_limit == 0) {
@@ -386,27 +386,27 @@ ColumnFamilyData::ColumnFamilyData(
   } else if (ioptions_.compaction_style == kCompactionStyleNone) {
     compaction_picker_.reset(new NullCompactionPicker(
         ioptions_, &internal_comparator_));
-    Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+    ROCKS_LOG_WARN(ioptions_.info_log,
         "Column family %s does not use any background compaction. "
         "Compactions can only be done via CompactFiles\n",
         GetName().c_str());
 #endif  // !ROCKSDB_LITE
   } else {
-    Log(InfoLogLevel::ERROR_LEVEL, ioptions_.info_log,
+    ROCKS_LOG_ERROR(ioptions_.info_log,
         "Unable to recognize the specified compaction style %d. "
         "Column family %s will use kCompactionStyleLevel.\n",
         ioptions_.compaction_style, GetName().c_str());
     compaction_picker_.reset(
         new LevelCompactionPicker(ioptions_, &internal_comparator_));
   }
   if (column_family_set_->NumberOfColumnFamilies() < 10) {
-    Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
-        "--------------- Options for column family [%s]:\n", name.c_str());
+    ROCKS_LOG_INFO(ioptions_.info_log,
+                   "--------------- Options for column family [%s]:\n",
+                   name.c_str());
     initial_cf_options_.Dump(ioptions_.info_log);
   } else {
-    Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
-        "\t(skipping printing options)\n");
+    ROCKS_LOG_INFO(ioptions_.info_log, "\t(skipping printing options)\n");
   }
 }
@@ -620,7 +620,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
   if (imm()->NumNotFlushed() >= mutable_cf_options.max_write_buffer_number) {
     write_controller_token_ = write_controller->GetStopToken();
     internal_stats_->AddCFStats(InternalStats::MEMTABLE_COMPACTION, 1);
-    Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+    ROCKS_LOG_WARN(
+        ioptions_.info_log,
         "[%s] Stopping writes because we have %d immutable memtables "
         "(waiting for flush), max_write_buffer_number is set to %d",
         name_.c_str(), imm()->NumNotFlushed(),
@@ -634,9 +635,9 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
       internal_stats_->AddCFStats(
           InternalStats::LEVEL0_NUM_FILES_WITH_COMPACTION, 1);
     }
-    Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+    ROCKS_LOG_WARN(ioptions_.info_log,
         "[%s] Stopping writes because we have %d level-0 files",
         name_.c_str(), vstorage->l0_delay_trigger_count());
   } else if (!mutable_cf_options.disable_auto_compactions &&
              mutable_cf_options.hard_pending_compaction_bytes_limit > 0 &&
             compaction_needed_bytes >=
@@ -644,7 +645,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
     write_controller_token_ = write_controller->GetStopToken();
     internal_stats_->AddCFStats(
         InternalStats::HARD_PENDING_COMPACTION_BYTES_LIMIT, 1);
-    Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+    ROCKS_LOG_WARN(
+        ioptions_.info_log,
         "[%s] Stopping writes because of estimated pending compaction "
         "bytes %" PRIu64,
         name_.c_str(), compaction_needed_bytes);
@@ -656,7 +658,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
         prev_compaction_needed_bytes_, was_stopped,
         mutable_cf_options.disable_auto_compactions);
     internal_stats_->AddCFStats(InternalStats::MEMTABLE_SLOWDOWN, 1);
-    Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+    ROCKS_LOG_WARN(
+        ioptions_.info_log,
         "[%s] Stalling writes because we have %d immutable memtables "
         "(waiting for flush), max_write_buffer_number is set to %d "
         "rate %" PRIu64,
@@ -679,11 +682,11 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
       internal_stats_->AddCFStats(
           InternalStats::LEVEL0_SLOWDOWN_WITH_COMPACTION, 1);
     }
-    Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+    ROCKS_LOG_WARN(ioptions_.info_log,
         "[%s] Stalling writes because we have %d level-0 files "
         "rate %" PRIu64,
         name_.c_str(), vstorage->l0_delay_trigger_count(),
         write_controller->delayed_write_rate());
   } else if (!mutable_cf_options.disable_auto_compactions &&
              mutable_cf_options.soft_pending_compaction_bytes_limit > 0 &&
              vstorage->estimated_compaction_needed_bytes() >=
@@ -705,7 +708,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
         mutable_cf_options.disable_auto_compactions);
     internal_stats_->AddCFStats(
         InternalStats::SOFT_PENDING_COMPACTION_BYTES_LIMIT, 1);
-    Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+    ROCKS_LOG_WARN(
+        ioptions_.info_log,
         "[%s] Stalling writes because of estimated pending compaction "
         "bytes %" PRIu64 " rate %" PRIu64,
         name_.c_str(), vstorage->estimated_compaction_needed_bytes(),
@@ -717,7 +721,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
                  mutable_cf_options.level0_slowdown_writes_trigger)) {
       write_controller_token_ =
           write_controller->GetCompactionPressureToken();
-      Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+      ROCKS_LOG_WARN(
+          ioptions_.info_log,
           "[%s] Increasing compaction threads because we have %d level-0 "
           "files ",
           name_.c_str(), vstorage->l0_delay_trigger_count());
@@ -730,7 +735,8 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
       write_controller_token_ =
           write_controller->GetCompactionPressureToken();
       if (mutable_cf_options.soft_pending_compaction_bytes_limit > 0) {
-        Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+        ROCKS_LOG_WARN(
+            ioptions_.info_log,
             "[%s] Increasing compaction threads because of estimated pending "
             "compaction "
             "bytes %" PRIu64,

db/compacted_db_impl.cc

@@ -154,8 +154,8 @@ Status CompactedDBImpl::Open(const Options& options,
   std::unique_ptr<CompactedDBImpl> db(new CompactedDBImpl(db_options, dbname));
   Status s = db->Init(options);
   if (s.ok()) {
-    Log(INFO_LEVEL, db->immutable_db_options_.info_log,
+    ROCKS_LOG_INFO(db->immutable_db_options_.info_log,
         "Opened the db as fully compacted mode");
     LogFlush(db->immutable_db_options_.info_log);
     *dbptr = db.release();
   }

db/compaction_job.cc

@@ -600,7 +600,7 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) {
     write_amp = stats.bytes_written /
                 static_cast<double>(stats.bytes_read_non_output_levels);
   }
-  LogToBuffer(
+  ROCKS_LOG_BUFFER(
       log_buffer_,
       "[%s] compacted to: %s, MB/sec: %.1f rd, %.1f wr, level %d, "
       "files in(%d, %d) out(%d) "
@@ -1091,12 +1091,12 @@ Status CompactionJob::FinishCompactionOutputFile(
       tp = sub_compact->builder->GetTableProperties();
       sub_compact->current_output()->table_properties =
           std::make_shared<TableProperties>(tp);
-      Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+      ROCKS_LOG_INFO(db_options_.info_log,
          "[%s] [JOB %d] Generated table #%" PRIu64 ": %" PRIu64
          " keys, %" PRIu64 " bytes%s",
-          cfd->GetName().c_str(), job_id_, output_number, current_entries,
-          current_bytes,
+          cfd->GetName().c_str(), job_id_, output_number,
+          current_entries, current_bytes,
          meta->marked_for_compaction ? " (need compaction)" : "");
     }
   }
   std::string fname = TableFileName(db_options_.db_paths, meta->fd.GetNumber(),
@@ -1142,17 +1142,16 @@ Status CompactionJob::InstallCompactionResults(
   if (!versions_->VerifyCompactionFileConsistency(compaction)) {
     Compaction::InputLevelSummaryBuffer inputs_summary;
-    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
-        "[%s] [JOB %d] Compaction %s aborted",
-        compaction->column_family_data()->GetName().c_str(), job_id_,
-        compaction->InputLevelSummary(&inputs_summary));
+    ROCKS_LOG_ERROR(db_options_.info_log, "[%s] [JOB %d] Compaction %s aborted",
+                    compaction->column_family_data()->GetName().c_str(),
+                    job_id_, compaction->InputLevelSummary(&inputs_summary));
     return Status::Corruption("Compaction input files inconsistent");
   }
   {
     Compaction::InputLevelSummaryBuffer inputs_summary;
-    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
-        "[%s] [JOB %d] Compacted %s => %" PRIu64 " bytes",
+    ROCKS_LOG_INFO(
+        db_options_.info_log, "[%s] [JOB %d] Compacted %s => %" PRIu64 " bytes",
         compaction->column_family_data()->GetName().c_str(), job_id_,
         compaction->InputLevelSummary(&inputs_summary), compact_->total_bytes);
   }
@@ -1200,7 +1199,8 @@ Status CompactionJob::OpenCompactionOutputFile(
   unique_ptr<WritableFile> writable_file;
   Status s = NewWritableFile(env_, fname, &writable_file, env_options_);
   if (!s.ok()) {
-    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
+    ROCKS_LOG_ERROR(
+        db_options_.info_log,
         "[%s] [JOB %d] OpenCompactionOutputFiles for table #%" PRIu64
         " fails at NewWritableFile with status %s",
         sub_compact->compaction->column_family_data()->GetName().c_str(),
@@ -1376,14 +1376,14 @@ void CompactionJob::LogCompaction() {
   // we're not logging
   if (db_options_.info_log_level <= InfoLogLevel::INFO_LEVEL) {
     Compaction::InputLevelSummaryBuffer inputs_summary;
-    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
-        "[%s] [JOB %d] Compacting %s, score %.2f", cfd->GetName().c_str(),
-        job_id_, compaction->InputLevelSummary(&inputs_summary),
-        compaction->score());
+    ROCKS_LOG_INFO(
+        db_options_.info_log, "[%s] [JOB %d] Compacting %s, score %.2f",
+        cfd->GetName().c_str(), job_id_,
+        compaction->InputLevelSummary(&inputs_summary), compaction->score());
     char scratch[2345];
     compaction->Summary(scratch, sizeof(scratch));
-    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
-        "[%s] Compaction start summary: %s\n", cfd->GetName().c_str(), scratch);
+    ROCKS_LOG_INFO(db_options_.info_log, "[%s] Compaction start summary: %s\n",
+                   cfd->GetName().c_str(), scratch);
     // build event logger report
     auto stream = event_logger_->Log();
     stream << "job" << job_id_ << "event"

db/compaction_picker.cc

@@ -266,7 +266,8 @@ bool CompactionPicker::ExpandWhileOverlapping(const std::string& cf_name,
   // If, after the expansion, there are files that are already under
   // compaction, then we must drop/cancel this compaction.
   if (FilesInCompaction(inputs->files)) {
-    Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+    ROCKS_LOG_WARN(
+        ioptions_.info_log,
         "[%s] ExpandWhileOverlapping() failure because some of the necessary"
         " compaction input files are currently being compacted.",
         cf_name.c_str());
@@ -514,14 +515,15 @@ bool CompactionPicker::SetupOtherInputs(
       }
     }
     if (expand_inputs) {
-      Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
-          "[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt "(%" PRIu64
-          "+%" PRIu64 " bytes) to %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt
-          " (%" PRIu64 "+%" PRIu64 "bytes)\n",
-          cf_name.c_str(), input_level, inputs->size(),
-          output_level_inputs->size(), inputs_size, output_level_inputs_size,
-          expanded_inputs.size(), output_level_inputs->size(),
-          expanded_inputs_size, output_level_inputs_size);
+      ROCKS_LOG_INFO(ioptions_.info_log,
+                     "[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt
+                     "(%" PRIu64 "+%" PRIu64 " bytes) to %" ROCKSDB_PRIszt
+                     "+%" ROCKSDB_PRIszt " (%" PRIu64 "+%" PRIu64 "bytes)\n",
+                     cf_name.c_str(), input_level, inputs->size(),
+                     output_level_inputs->size(), inputs_size,
+                     output_level_inputs_size, expanded_inputs.size(),
+                     output_level_inputs->size(), expanded_inputs_size,
+                     output_level_inputs_size);
       inputs->files = expanded_inputs.files;
     }
   }
@@ -1442,24 +1444,25 @@ Compaction* UniversalCompactionPicker::PickCompaction(
   if (sorted_runs.size() == 0 ||
       sorted_runs.size() <
          (unsigned int)mutable_cf_options.level0_file_num_compaction_trigger) {
-    LogToBuffer(log_buffer, "[%s] Universal: nothing to do\n", cf_name.c_str());
+    ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: nothing to do\n",
+                     cf_name.c_str());
     TEST_SYNC_POINT_CALLBACK("UniversalCompactionPicker::PickCompaction:Return",
                              nullptr);
     return nullptr;
   }
   VersionStorageInfo::LevelSummaryStorage tmp;
-  LogToBuffer(log_buffer, 3072,
-              "[%s] Universal: sorted runs files(%" ROCKSDB_PRIszt "): %s\n",
-              cf_name.c_str(), sorted_runs.size(),
-              vstorage->LevelSummary(&tmp));
+  ROCKS_LOG_BUFFER_MAX_SZ(
+      log_buffer, 3072,
+      "[%s] Universal: sorted runs files(%" ROCKSDB_PRIszt "): %s\n",
+      cf_name.c_str(), sorted_runs.size(), vstorage->LevelSummary(&tmp));
   // Check for size amplification first.
   Compaction* c;
   if ((c = PickCompactionUniversalSizeAmp(cf_name, mutable_cf_options, vstorage,
                                           score, sorted_runs, log_buffer)) !=
       nullptr) {
-    LogToBuffer(log_buffer, "[%s] Universal: compacting for size amp\n",
+    ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: compacting for size amp\n",
                 cf_name.c_str());
   } else {
     // Size amplification is within limits. Try reducing read
     // amplification while maintaining file size ratios.
@@ -1468,8 +1471,9 @@ Compaction* UniversalCompactionPicker::PickCompaction(
     if ((c = PickCompactionUniversalReadAmp(
              cf_name, mutable_cf_options, vstorage, score, ratio, UINT_MAX,
              sorted_runs, log_buffer)) != nullptr) {
-      LogToBuffer(log_buffer, "[%s] Universal: compacting for size ratio\n",
-                  cf_name.c_str());
+      ROCKS_LOG_BUFFER(log_buffer,
+                       "[%s] Universal: compacting for size ratio\n",
+                       cf_name.c_str());
     } else {
       // Size amplification and file size ratios are within configured limits.
       // If max read amplification is exceeding configured limits, then force
@@ -1497,9 +1501,9 @@ Compaction* UniversalCompactionPicker::PickCompaction(
       if ((c = PickCompactionUniversalReadAmp(
                cf_name, mutable_cf_options, vstorage, score, UINT_MAX,
                num_files, sorted_runs, log_buffer)) != nullptr) {
-        LogToBuffer(log_buffer,
+        ROCKS_LOG_BUFFER(log_buffer,
                     "[%s] Universal: compacting for file num -- %u\n",
                     cf_name.c_str(), num_files);
       }
     }
   }
@@ -1640,10 +1644,10 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
       }
       char file_num_buf[kFormatFileNumberBufSize];
       sr->Dump(file_num_buf, sizeof(file_num_buf));
-      LogToBuffer(log_buffer,
+      ROCKS_LOG_BUFFER(log_buffer,
                   "[%s] Universal: %s"
                   "[%d] being compacted, skipping",
                   cf_name.c_str(), file_num_buf, loop);
       sr = nullptr;
     }
@@ -1654,8 +1658,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
     if (sr != nullptr) {
       char file_num_buf[kFormatFileNumberBufSize];
       sr->Dump(file_num_buf, sizeof(file_num_buf), true);
-      LogToBuffer(log_buffer, "[%s] Universal: Possible candidate %s[%d].",
+      ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: Possible candidate %s[%d].",
                   cf_name.c_str(), file_num_buf, loop);
     }
     // Check if the succeeding files need compaction.
@@ -1706,8 +1710,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
         const SortedRun* skipping_sr = &sorted_runs[i];
         char file_num_buf[256];
         skipping_sr->DumpSizeInfo(file_num_buf, sizeof(file_num_buf), loop);
-        LogToBuffer(log_buffer, "[%s] Universal: Skipping %s", cf_name.c_str(),
-                    file_num_buf);
+        ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: Skipping %s",
+                         cf_name.c_str(), file_num_buf);
       }
     }
   }
@@ -1768,8 +1772,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp(
     }
     char file_num_buf[256];
     picking_sr.DumpSizeInfo(file_num_buf, sizeof(file_num_buf), i);
-    LogToBuffer(log_buffer, "[%s] Universal: Picking %s", cf_name.c_str(),
+    ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: Picking %s", cf_name.c_str(),
                 file_num_buf);
   }
   CompactionReason compaction_reason;
@@ -1815,9 +1819,9 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
     }
     char file_num_buf[kFormatFileNumberBufSize];
     sr->Dump(file_num_buf, sizeof(file_num_buf), true);
-    LogToBuffer(log_buffer, "[%s] Universal: skipping %s[%d] compacted %s",
+    ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: skipping %s[%d] compacted %s",
                 cf_name.c_str(), file_num_buf, loop,
                 " cannot be a candidate to reduce size amp.\n");
     sr = nullptr;
   }
@@ -1827,10 +1831,10 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
   {
     char file_num_buf[kFormatFileNumberBufSize];
     sr->Dump(file_num_buf, sizeof(file_num_buf), true);
-    LogToBuffer(log_buffer,
-                "[%s] Universal: First candidate %s[%" ROCKSDB_PRIszt "] %s",
-                cf_name.c_str(), file_num_buf, start_index,
-                " to reduce size amp.\n");
+    ROCKS_LOG_BUFFER(
+        log_buffer,
+        "[%s] Universal: First candidate %s[%" ROCKSDB_PRIszt "] %s",
+        cf_name.c_str(), file_num_buf, start_index, " to reduce size amp.\n");
   }
   // keep adding up all the remaining files
@@ -1839,7 +1843,7 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
     if (sr->being_compacted) {
       char file_num_buf[kFormatFileNumberBufSize];
       sr->Dump(file_num_buf, sizeof(file_num_buf), true);
-      LogToBuffer(
+      ROCKS_LOG_BUFFER(
           log_buffer, "[%s] Universal: Possible candidate %s[%d] %s",
           cf_name.c_str(), file_num_buf, start_index,
          " is already being compacted. No size amp reduction possible.\n");
@@ -1857,14 +1861,14 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
   // size amplification = percentage of additional size
   if (candidate_size * 100 < ratio * earliest_file_size) {
-    LogToBuffer(
+    ROCKS_LOG_BUFFER(
         log_buffer,
         "[%s] Universal: size amp not needed. newer-files-total-size %" PRIu64
         " earliest-file-size %" PRIu64,
         cf_name.c_str(), candidate_size, earliest_file_size);
     return nullptr;
   } else {
-    LogToBuffer(
+    ROCKS_LOG_BUFFER(
        log_buffer,
        "[%s] Universal: size amp needed. newer-files-total-size %" PRIu64
        " earliest-file-size %" PRIu64,
@@ -1898,8 +1902,8 @@ Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
     }
     char file_num_buf[256];
     picking_sr.DumpSizeInfo(file_num_buf, sizeof(file_num_buf), loop);
-    LogToBuffer(log_buffer, "[%s] Universal: size amp picking %s",
-                cf_name.c_str(), file_num_buf);
+    ROCKS_LOG_BUFFER(log_buffer, "[%s] Universal: size amp picking %s",
+                     cf_name.c_str(), file_num_buf);
   }
   return new Compaction(
@@ -1934,19 +1938,20 @@ Compaction* FIFOCompactionPicker::PickCompaction(
   if (total_size <= ioptions_.compaction_options_fifo.max_table_files_size ||
       level_files.size() == 0) {
     // total size not exceeded
-    LogToBuffer(log_buffer,
+    ROCKS_LOG_BUFFER(log_buffer,
                 "[%s] FIFO compaction: nothing to do. Total size %" PRIu64
                 ", max size %" PRIu64 "\n",
                 cf_name.c_str(), total_size,
                 ioptions_.compaction_options_fifo.max_table_files_size);
     return nullptr;
   }
   if (!level0_compactions_in_progress_.empty()) {
-    LogToBuffer(log_buffer,
-                "[%s] FIFO compaction: Already executing compaction. No need "
-                "to run parallel compactions since compactions are very fast",
-                cf_name.c_str());
+    ROCKS_LOG_BUFFER(
+        log_buffer,
+        "[%s] FIFO compaction: Already executing compaction. No need "
+        "to run parallel compactions since compactions are very fast",
+        cf_name.c_str());
     return nullptr;
   }
@@ -1960,9 +1965,9 @@ Compaction* FIFOCompactionPicker::PickCompaction(
     inputs[0].files.push_back(f);
     char tmp_fsize[16];
     AppendHumanBytes(f->fd.GetFileSize(), tmp_fsize, sizeof(tmp_fsize));
-    LogToBuffer(log_buffer, "[%s] FIFO compaction: picking file %" PRIu64
+    ROCKS_LOG_BUFFER(log_buffer, "[%s] FIFO compaction: picking file %" PRIu64
                 " with size %s for deletion",
                 cf_name.c_str(), f->fd.GetNumber(), tmp_fsize);
     if (total_size <= ioptions_.compaction_options_fifo.max_table_files_size) {
       break;
     }

db/db_filesnapshot.cc

@@ -34,12 +34,11 @@ Status DBImpl::DisableFileDeletions() {
   InstrumentedMutexLock l(&mutex_);
   ++disable_delete_obsolete_files_;
   if (disable_delete_obsolete_files_ == 1) {
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-        "File Deletions Disabled");
+    ROCKS_LOG_INFO(immutable_db_options_.info_log, "File Deletions Disabled");
   } else {
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_WARN(immutable_db_options_.info_log,
         "File Deletions Disabled, but already disabled. Counter: %d",
         disable_delete_obsolete_files_);
   }
   return Status::OK();
 }
@@ -58,12 +57,12 @@ Status DBImpl::EnableFileDeletions(bool force) {
     --disable_delete_obsolete_files_;
   }
   if (disable_delete_obsolete_files_ == 0) {
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-        "File Deletions Enabled");
+    ROCKS_LOG_INFO(immutable_db_options_.info_log, "File Deletions Enabled");
     should_purge_files = true;
     FindObsoleteFiles(&job_context, true);
   } else {
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_WARN(
+        immutable_db_options_.info_log,
         "File Deletions Enable, but not really enabled. Counter: %d",
         disable_delete_obsolete_files_);
   }
@@ -109,8 +108,8 @@ Status DBImpl::GetLiveFiles(std::vector<std::string>& ret,
     if (!status.ok()) {
       mutex_.Unlock();
-      Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-          "Cannot Flush data %s\n", status.ToString().c_str());
+      ROCKS_LOG_ERROR(immutable_db_options_.info_log, "Cannot Flush data %s\n",
+                      status.ToString().c_str());
       return status;
     }
   }

db/db_impl.cc

@@ -299,13 +299,14 @@ CompressionType GetCompressionFlush(
 }
 void DumpSupportInfo(Logger* logger) {
-  Header(logger, "Compression algorithms supported:");
-  Header(logger, "\tSnappy supported: %d", Snappy_Supported());
-  Header(logger, "\tZlib supported: %d", Zlib_Supported());
-  Header(logger, "\tBzip supported: %d", BZip2_Supported());
-  Header(logger, "\tLZ4 supported: %d", LZ4_Supported());
-  Header(logger, "\tZSTD supported: %d", ZSTD_Supported());
-  Header(logger, "Fast CRC32 supported: %d", crc32c::IsFastCrc32Supported());
+  ROCKS_LOG_HEADER(logger, "Compression algorithms supported:");
+  ROCKS_LOG_HEADER(logger, "\tSnappy supported: %d", Snappy_Supported());
+  ROCKS_LOG_HEADER(logger, "\tZlib supported: %d", Zlib_Supported());
+  ROCKS_LOG_HEADER(logger, "\tBzip supported: %d", BZip2_Supported());
+  ROCKS_LOG_HEADER(logger, "\tLZ4 supported: %d", LZ4_Supported());
+  ROCKS_LOG_HEADER(logger, "\tZSTD supported: %d", ZSTD_Supported());
+  ROCKS_LOG_HEADER(logger, "Fast CRC32 supported: %d",
+                   crc32c::IsFastCrc32Supported());
 }
 }  // namespace
@@ -387,8 +388,8 @@ DBImpl::DBImpl(const DBOptions& options, const std::string& dbname)
 void DBImpl::CancelAllBackgroundWork(bool wait) {
   InstrumentedMutexLock l(&mutex_);
-  Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-      "Shutdown: canceling all background work");
+  ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                 "Shutdown: canceling all background work");
   if (!shutting_down_.load(std::memory_order_acquire) &&
       has_unpersisted_data_.load(std::memory_order_relaxed) &&
@@ -514,8 +515,7 @@ DBImpl::~DBImpl() {
     env_->UnlockFile(db_lock_);
   }
-  Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-      "Shutdown complete");
+  ROCKS_LOG_INFO(immutable_db_options_.info_log, "Shutdown complete");
   LogFlush(immutable_db_options_.info_log);
 }
@@ -527,8 +527,7 @@ Status DBImpl::NewDB() {
   Status s;
-  Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-      "Creating manifest 1 \n");
+  ROCKS_LOG_INFO(immutable_db_options_.info_log, "Creating manifest 1 \n");
   const std::string manifest = DescriptorFileName(dbname_, 1);
   {
     unique_ptr<WritableFile> file;
@@ -562,8 +561,8 @@ void DBImpl::MaybeIgnoreError(Status* s) const {
   if (s->ok() || immutable_db_options_.paranoid_checks) {
     // No change needed
   } else {
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
-        "Ignoring error %s", s->ToString().c_str());
+    ROCKS_LOG_WARN(immutable_db_options_.info_log, "Ignoring error %s",
+                   s->ToString().c_str());
     *s = Status::OK();
   }
 }
@@ -580,8 +579,8 @@ const Status DBImpl::CreateArchivalDirectory() {
 void DBImpl::PrintStatistics() {
   auto dbstats = immutable_db_options_.statistics.get();
   if (dbstats) {
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
-        "STATISTICS:\n %s", dbstats->ToString().c_str());
+    ROCKS_LOG_WARN(immutable_db_options_.info_log, "STATISTICS:\n %s",
+                   dbstats->ToString().c_str());
   }
 }
@@ -650,18 +649,16 @@ void DBImpl::MaybeDumpStats() {
       default_cf_internal_stats_->GetStringProperty(
           *db_property_info, DB::Properties::kDBStats, &stats);
     }
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
-        "------- DUMPING STATS -------");
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log, "%s",
-        stats.c_str());
+    ROCKS_LOG_WARN(immutable_db_options_.info_log,
+                   "------- DUMPING STATS -------");
+    ROCKS_LOG_WARN(immutable_db_options_.info_log, "%s", stats.c_str());
     if (immutable_db_options_.dump_malloc_stats) {
       stats.clear();
       DumpMallocStats(&stats);
       if (!stats.empty()) {
-        Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
-            "------- Malloc STATS -------");
-        Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log, "%s",
-            stats.c_str());
+        ROCKS_LOG_WARN(immutable_db_options_.info_log,
+                       "------- Malloc STATS -------");
+        ROCKS_LOG_WARN(immutable_db_options_.info_log, "%s", stats.c_str());
       }
     }
 #endif  // !ROCKSDB_LITE
@@ -899,8 +896,9 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
     auto& earliest = *alive_log_files_.begin();
     if (immutable_db_options_.recycle_log_file_num >
         log_recycle_files.size()) {
-      Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-          "adding log %" PRIu64 " to recycle list\n", earliest.number);
+      ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                     "adding log %" PRIu64 " to recycle list\n",
+                     earliest.number);
       log_recycle_files.push_back(earliest.number);
     } else {
       job_context->log_delete_files.push_back(earliest.number);
@@ -962,19 +960,22 @@ void DBImpl::DeleteObsoleteFileImpl(Status file_deletion_status, int job_id,
     file_deletion_status = env_->DeleteFile(fname);
   }
   if (file_deletion_status.ok()) {
-    Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
-        "[JOB %d] Delete %s type=%d #%" PRIu64 " -- %s\n", job_id,
-        fname.c_str(), type, number, file_deletion_status.ToString().c_str());
+    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
+                    "[JOB %d] Delete %s type=%d #%" PRIu64 " -- %s\n", job_id,
+                    fname.c_str(), type, number,
+                    file_deletion_status.ToString().c_str());
   } else if (env_->FileExists(fname).IsNotFound()) {
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_INFO(
+        immutable_db_options_.info_log,
         "[JOB %d] Tried to delete a non-existing file %s type=%d #%" PRIu64
         " -- %s\n",
         job_id, fname.c_str(), type, number,
         file_deletion_status.ToString().c_str());
   } else {
-    Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-        "[JOB %d] Failed to delete %s type=%d #%" PRIu64 " -- %s\n", job_id,
-        fname.c_str(), type, number, file_deletion_status.ToString().c_str());
+    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                    "[JOB %d] Failed to delete %s type=%d #%" PRIu64 " -- %s\n",
+                    job_id, fname.c_str(), type, number,
+                    file_deletion_status.ToString().c_str());
   }
   if (type == kTableFile) {
     EventHelpers::LogAndNotifyTableFileDeletion(
@@ -1038,12 +1039,12 @@ void DBImpl::PurgeObsoleteFiles(const JobContext& state, bool schedule_only) {
                         candidate_files.end());
   if (state.prev_total_log_size > 0) {
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_INFO(immutable_db_options_.info_log,
         "[JOB %d] Try to delete WAL files size %" PRIu64
         ", prev total WAL file size %" PRIu64
         ", number of live WAL files %" ROCKSDB_PRIszt ".\n",
-        state.job_id, state.size_log_to_delete, state.prev_total_log_size,
-        state.num_alive_log_files);
+        state.job_id, state.size_log_to_delete,
+        state.prev_total_log_size, state.num_alive_log_files);
   }
   std::vector<std::string> old_info_log_files;
@@ -1152,20 +1153,22 @@ void DBImpl::PurgeObsoleteFiles(const JobContext& state, bool schedule_only) {
               ? dbname_
               : immutable_db_options_.db_log_dir) +
           "/" + to_delete;
-      Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-          "[JOB %d] Delete info log file %s\n", state.job_id,
-          full_path_to_delete.c_str());
+      ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                     "[JOB %d] Delete info log file %s\n", state.job_id,
+                     full_path_to_delete.c_str());
       Status s = env_->DeleteFile(full_path_to_delete);
       if (!s.ok()) {
         if (env_->FileExists(full_path_to_delete).IsNotFound()) {
-          Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+          ROCKS_LOG_INFO(
+              immutable_db_options_.info_log,
              "[JOB %d] Tried to delete non-existing info log file %s FAILED "
              "-- %s\n",
              state.job_id, to_delete.c_str(), s.ToString().c_str());
         } else {
-          Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-              "[JOB %d] Delete info log file %s FAILED -- %s\n", state.job_id,
-              to_delete.c_str(), s.ToString().c_str());
+          ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                          "[JOB %d] Delete info log file %s FAILED -- %s\n",
+                          state.job_id, to_delete.c_str(),
+                          s.ToString().c_str());
         }
       }
     }
@@ -1399,10 +1402,9 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
     const char* fname;
     Status* status;  // nullptr if immutable_db_options_.paranoid_checks==false
     virtual void Corruption(size_t bytes, const Status& s) override {
-      Log(InfoLogLevel::WARN_LEVEL,
-          info_log, "%s%s: dropping %d bytes; %s",
-          (this->status == nullptr ? "(ignoring error) " : ""),
-          fname, static_cast<int>(bytes), s.ToString().c_str());
+      ROCKS_LOG_WARN(info_log, "%s%s: dropping %d bytes; %s",
+                     (this->status == nullptr ? "(ignoring error) " : ""),
+                     fname, static_cast<int>(bytes), s.ToString().c_str());
       if (this->status != nullptr && this->status->ok()) {
         *this->status = s;
       }
@@ -1458,15 +1460,15 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
     // Open the log file
     std::string fname = LogFileName(immutable_db_options_.wal_dir, log_number);
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_INFO(immutable_db_options_.info_log,
         "Recovering log #%" PRIu64 " mode %d", log_number,
         immutable_db_options_.wal_recovery_mode);
     auto logFileDropped = [this, &fname]() {
       uint64_t bytes;
       if (env_->GetFileSize(fname, &bytes).ok()) {
         auto info_log = immutable_db_options_.info_log.get();
-        Log(InfoLogLevel::WARN_LEVEL, info_log, "%s: dropping %d bytes",
-            fname.c_str(), static_cast<int>(bytes));
+        ROCKS_LOG_WARN(info_log, "%s: dropping %d bytes", fname.c_str(),
+                       static_cast<int>(bytes));
       }
     };
     if (stop_replay_by_wal_filter) {
@@ -1597,7 +1599,8 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
             int new_count = WriteBatchInternal::Count(&new_batch);
             int original_count = WriteBatchInternal::Count(&batch);
             if (new_count > original_count) {
-              Log(InfoLogLevel::FATAL_LEVEL, immutable_db_options_.info_log,
+              ROCKS_LOG_FATAL(
+                  immutable_db_options_.info_log,
                   "Recovering log #%" PRIu64
                   " mode %d log filter %s returned "
                   "more records (%d) than original (%d) which is not allowed. "
@@ -1675,9 +1678,10 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
         // We should ignore the error but not continue replaying
         status = Status::OK();
         stop_replay_for_corruption = true;
-        Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-            "Point in time recovered to log #%" PRIu64 " seq #%" PRIu64,
-            log_number, *next_sequence);
+        ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                       "Point in time recovered to log #%" PRIu64
+                       " seq #%" PRIu64,
+                       log_number, *next_sequence);
       } else {
         assert(immutable_db_options_.wal_recovery_mode ==
                    WALRecoveryMode::kTolerateCorruptedTailRecords ||
@@ -1788,10 +1792,10 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
   TableProperties table_properties;
   {
     ScopedArenaIterator iter(mem->NewIterator(ro, &arena));
-    Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
         "[%s] [WriteLevel0TableForRecovery]"
         " Level-0 table #%" PRIu64 ": started",
         cfd->GetName().c_str(), meta.fd.GetNumber());
     // Get the latest mutable cf options while the mutex is still locked
     const MutableCFOptions mutable_cf_options =
@@ -1817,11 +1821,11 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
         cfd->internal_stats(), TableFileCreationReason::kRecovery,
         &event_logger_, job_id);
     LogFlush(immutable_db_options_.info_log);
-    Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
         "[%s] [WriteLevel0TableForRecovery]"
         " Level-0 table #%" PRIu64 ": %" PRIu64 " bytes %s",
-        cfd->GetName().c_str(), meta.fd.GetNumber(), meta.fd.GetFileSize(),
-        s.ToString().c_str());
+        cfd->GetName().c_str(), meta.fd.GetNumber(),
+        meta.fd.GetFileSize(), s.ToString().c_str());
     mutex_.Lock();
   }
 }
@@ -1870,9 +1874,9 @@ Status DBImpl::SyncClosedLogs(JobContext* job_context) {
     mutex_.Unlock();
     for (log::Writer* log : logs_to_sync) {
-      Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+      ROCKS_LOG_INFO(immutable_db_options_.info_log,
          "[JOB %d] Syncing log #%" PRIu64, job_context->job_id,
          log->get_log_number());
       s = log->file()->Sync(immutable_db_options_.use_fsync);
     }
     if (s.ok()) {
@@ -1947,8 +1951,9 @@ Status DBImpl::FlushMemTableToOutputFile(
       *made_progress = 1;
     }
     VersionStorageInfo::LevelSummaryStorage tmp;
-    LogToBuffer(log_buffer, "[%s] Level summary: %s\n", cfd->GetName().c_str(),
-                cfd->current()->storage_info()->LevelSummary(&tmp));
+    ROCKS_LOG_BUFFER(log_buffer, "[%s] Level summary: %s\n",
+                     cfd->GetName().c_str(),
+                     cfd->current()->storage_info()->LevelSummary(&tmp));
   }
   if (!s.ok() && !s.IsShutdownInProgress() &&
@@ -2114,8 +2119,8 @@ Status DBImpl::CompactRange(const CompactRangeOptions& options,
   }
   if (options.change_level) {
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_INFO(immutable_db_options_.info_log,
         "[RefitLevel] waiting for background threads to stop");
     s = PauseBackgroundWork();
     if (s.ok()) {
       s = ReFitLevel(cfd, final_output_level, options.target_level);
@@ -2332,10 +2337,10 @@ Status DBImpl::CompactFilesImpl(
   } else if (status.IsShutdownInProgress()) {
     // Ignore compaction errors found during shutting down
   } else {
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
-        "[%s] [JOB %d] Compaction error: %s",
-        c->column_family_data()->GetName().c_str(), job_context->job_id,
-        status.ToString().c_str());
+    ROCKS_LOG_WARN(immutable_db_options_.info_log,
+                   "[%s] [JOB %d] Compaction error: %s",
+                   c->column_family_data()->GetName().c_str(),
+                   job_context->job_id, status.ToString().c_str());
     if (immutable_db_options_.paranoid_checks && bg_error_.ok()) {
       bg_error_ = status;
     }
@@ -2442,9 +2447,9 @@ Status DBImpl::SetOptions(ColumnFamilyHandle* column_family,
 #else
   auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
   if (options_map.empty()) {
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_WARN(immutable_db_options_.info_log,
         "SetOptions() on column family [%s], empty input",
         cfd->GetName().c_str());
     return Status::InvalidArgument("empty input");
   }
@@ -2471,15 +2476,16 @@ Status DBImpl::SetOptions(ColumnFamilyHandle* column_family,
     }
   }
-  Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-      "SetOptions() on column family [%s], inputs:", cfd->GetName().c_str());
+  ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                 "SetOptions() on column family [%s], inputs:",
+                 cfd->GetName().c_str());
   for (const auto& o : options_map) {
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log, "%s: %s\n",
-        o.first.c_str(), o.second.c_str());
+    ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s: %s\n", o.first.c_str(),
+                   o.second.c_str());
   }
   if (s.ok()) {
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-        "[%s] SetOptions() succeeded", cfd->GetName().c_str());
+    ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                   "[%s] SetOptions() succeeded", cfd->GetName().c_str());
     new_options.Dump(immutable_db_options_.info_log.get());
     if (!persist_options_status.ok()) {
       if (immutable_db_options_.fail_if_options_file_error) {
@@ -2487,13 +2493,13 @@ Status DBImpl::SetOptions(ColumnFamilyHandle* column_family,
             "SetOptions() succeeded, but unable to persist options",
             persist_options_status.ToString());
       }
-      Warn(immutable_db_options_.info_log,
+      ROCKS_LOG_WARN(immutable_db_options_.info_log,
           "Unable to persist options in SetOptions() -- %s",
           persist_options_status.ToString().c_str());
     }
   } else {
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
-        "[%s] SetOptions() failed", cfd->GetName().c_str());
+    ROCKS_LOG_WARN(immutable_db_options_.info_log, "[%s] SetOptions() failed",
+                   cfd->GetName().c_str());
   }
   LogFlush(immutable_db_options_.info_log);
   return s;
@@ -2506,8 +2512,8 @@ Status DBImpl::SetDBOptions(
   return Status::NotSupported("Not supported in ROCKSDB LITE");
 #else
   if (options_map.empty()) {
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_WARN(immutable_db_options_.info_log,
         "SetDBOptions(), empty input.");
     return Status::InvalidArgument("empty input");
   }
@ -2537,15 +2543,13 @@ Status DBImpl::SetDBOptions(
persist_options_status = PersistOptions(); persist_options_status = PersistOptions();
} }
} }
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log, ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions(), inputs:");
"SetDBOptions(), inputs:");
for (const auto& o : options_map) { for (const auto& o : options_map) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log, "%s: %s\n", ROCKS_LOG_INFO(immutable_db_options_.info_log, "%s: %s\n", o.first.c_str(),
o.first.c_str(), o.second.c_str()); o.second.c_str());
} }
if (s.ok()) { if (s.ok()) {
Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log, ROCKS_LOG_INFO(immutable_db_options_.info_log, "SetDBOptions() succeeded");
"SetDBOptions() succeeded");
new_options.Dump(immutable_db_options_.info_log.get()); new_options.Dump(immutable_db_options_.info_log.get());
if (!persist_options_status.ok()) { if (!persist_options_status.ok()) {
if (immutable_db_options_.fail_if_options_file_error) { if (immutable_db_options_.fail_if_options_file_error) {
@ -2553,13 +2557,12 @@ Status DBImpl::SetDBOptions(
"SetDBOptions() succeeded, but unable to persist options", "SetDBOptions() succeeded, but unable to persist options",
persist_options_status.ToString()); persist_options_status.ToString());
} }
Warn(immutable_db_options_.info_log, ROCKS_LOG_WARN(immutable_db_options_.info_log,
"Unable to persist options in SetDBOptions() -- %s", "Unable to persist options in SetDBOptions() -- %s",
persist_options_status.ToString().c_str()); persist_options_status.ToString().c_str());
} }
} else { } else {
Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log, ROCKS_LOG_WARN(immutable_db_options_.info_log, "SetDBOptions failed");
"SetDBOptions failed");
} }
LogFlush(immutable_db_options_.info_log); LogFlush(immutable_db_options_.info_log);
return s; return s;
@@ -2611,8 +2614,8 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
   // only allow one thread refitting
   if (refitting_level_) {
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-        "[ReFitLevel] another thread is refitting");
+    ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                   "[ReFitLevel] another thread is refitting");
     return Status::NotSupported("another thread is refitting");
   }
   refitting_level_ = true;
@@ -2639,9 +2642,9 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
     }
   }
   if (to_level != level) {
-    Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
-        "[%s] Before refitting:\n%s", cfd->GetName().c_str(),
-        cfd->current()->DebugString().data());
+    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
+                    "[%s] Before refitting:\n%s", cfd->GetName().c_str(),
+                    cfd->current()->DebugString().data());
     VersionEdit edit;
     edit.SetColumnFamily(cfd->GetID());
@@ -2652,23 +2655,22 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
                    f->smallest_seqno, f->largest_seqno,
                    f->marked_for_compaction);
     }
-    Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
-        "[%s] Apply version edit:\n%s", cfd->GetName().c_str(),
-        edit.DebugString().data());
+    ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
+                    "[%s] Apply version edit:\n%s", cfd->GetName().c_str(),
+                    edit.DebugString().data());
     status = versions_->LogAndApply(cfd, mutable_cf_options, &edit, &mutex_,
                                     directories_.GetDbDir());
     superversion_to_free.reset(InstallSuperVersionAndScheduleWork(
         cfd, new_superversion.release(), mutable_cf_options));
-    Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
-        "[%s] LogAndApply: %s\n", cfd->GetName().c_str(),
-        status.ToString().data());
+    ROCKS_LOG_DEBUG(immutable_db_options_.info_log, "[%s] LogAndApply: %s\n",
+                    cfd->GetName().c_str(), status.ToString().data());
     if (status.ok()) {
-      Log(InfoLogLevel::DEBUG_LEVEL, immutable_db_options_.info_log,
-          "[%s] After refitting:\n%s", cfd->GetName().c_str(),
-          cfd->current()->DebugString().data());
+      ROCKS_LOG_DEBUG(immutable_db_options_.info_log,
+                      "[%s] After refitting:\n%s", cfd->GetName().c_str(),
+                      cfd->current()->DebugString().data());
     }
   }
@@ -2850,7 +2852,8 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level,
   TEST_SYNC_POINT_CALLBACK("DBImpl::RunManualCompaction:NotScheduled", &mutex_);
   if (exclusive) {
     while (bg_compaction_scheduled_ > 0) {
-      Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+      ROCKS_LOG_INFO(
+          immutable_db_options_.info_log,
           "[%s] Manual compaction waiting for all other scheduled background "
           "compactions to finish",
           cfd->GetName().c_str());
@@ -2858,8 +2861,8 @@ Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level,
     }
   }
-  Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-      "[%s] Manual compaction starting", cfd->GetName().c_str());
+  ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                 "[%s] Manual compaction starting", cfd->GetName().c_str());
   // We don't check bg_error_ here, because if we get the error in compaction,
   // the compaction will set manual.status to bg_error_ and set manual.done to
@@ -3240,7 +3243,7 @@ Status DBImpl::BackgroundFlush(bool* made_progress, JobContext* job_context,
   if (cfd != nullptr) {
     const MutableCFOptions mutable_cf_options =
         *cfd->GetLatestMutableCFOptions();
-    LogToBuffer(
+    ROCKS_LOG_BUFFER(
         log_buffer,
         "Calling FlushMemTableToOutputFile with column "
         "family [%s], flush slots available %d, compaction slots allowed %d, "
@@ -3282,10 +3285,10 @@ void DBImpl::BackgroundCallFlush() {
       default_cf_internal_stats_->BumpAndGetBackgroundErrorCount();
       bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
       mutex_.Unlock();
-      Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-          "Waiting after background flush error: %s"
-          "Accumulated background error counts: %" PRIu64,
-          s.ToString().c_str(), error_cnt);
+      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                      "Waiting after background flush error: %s"
+                      "Accumulated background error counts: %" PRIu64,
+                      s.ToString().c_str(), error_cnt);
       log_buffer.FlushBufferToLog();
       LogFlush(immutable_db_options_.info_log);
       env_->SleepForMicroseconds(1000000);
@@ -3360,10 +3363,10 @@ void DBImpl::BackgroundCallCompaction(void* arg) {
      bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
      mutex_.Unlock();
      log_buffer.FlushBufferToLog();
-      Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-          "Waiting after background compaction error: %s, "
-          "Accumulated background error counts: %" PRIu64,
-          s.ToString().c_str(), error_cnt);
+      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                      "Waiting after background compaction error: %s, "
+                      "Accumulated background error counts: %" PRIu64,
+                      s.ToString().c_str(), error_cnt);
      LogFlush(immutable_db_options_.info_log);
      env_->SleepForMicroseconds(1000000);
      mutex_.Lock();
@@ -3464,22 +3467,23 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
     if (!c) {
       m->done = true;
       m->manual_end = nullptr;
-      LogToBuffer(log_buffer,
+      ROCKS_LOG_BUFFER(log_buffer,
                        "[%s] Manual compaction from level-%d from %s .. "
                        "%s; nothing to do\n",
                        m->cfd->GetName().c_str(), m->input_level,
                        (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
                        (m->end ? m->end->DebugString().c_str() : "(end)"));
     } else {
-      LogToBuffer(log_buffer,
-                  "[%s] Manual compaction from level-%d to level-%d from %s .. "
-                  "%s; will stop at %s\n",
-                  m->cfd->GetName().c_str(), m->input_level, c->output_level(),
-                  (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
-                  (m->end ? m->end->DebugString().c_str() : "(end)"),
-                  ((m->done || m->manual_end == nullptr)
-                       ? "(end)"
-                       : m->manual_end->DebugString().c_str()));
+      ROCKS_LOG_BUFFER(
+          log_buffer,
+          "[%s] Manual compaction from level-%d to level-%d from %s .. "
+          "%s; will stop at %s\n",
+          m->cfd->GetName().c_str(), m->input_level, c->output_level(),
+          (m->begin ? m->begin->DebugString().c_str() : "(begin)"),
+          (m->end ? m->end->DebugString().c_str() : "(end)"),
+          ((m->done || m->manual_end == nullptr)
+               ? "(end)"
+               : m->manual_end->DebugString().c_str()));
     }
   } else if (!compaction_queue_.empty()) {
     // cfd is referenced here
@@ -3543,7 +3547,7 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
   if (!c) {
     // Nothing to do
-    LogToBuffer(log_buffer, "Compaction nothing to do");
+    ROCKS_LOG_BUFFER(log_buffer, "Compaction nothing to do");
   } else if (c->deletion_compaction()) {
     // TODO(icanadi) Do we want to honor snapshots here? i.e. not delete old
    // file if there is alive snapshot pointing to it
@@ -3562,9 +3566,9 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
                                 &mutex_, directories_.GetDbDir());
     InstallSuperVersionAndScheduleWorkWrapper(
         c->column_family_data(), job_context, *c->mutable_cf_options());
-    LogToBuffer(log_buffer, "[%s] Deleted %d files\n",
-                c->column_family_data()->GetName().c_str(),
-                c->num_input_files(0));
+    ROCKS_LOG_BUFFER(log_buffer, "[%s] Deleted %d files\n",
+                     c->column_family_data()->GetName().c_str(),
+                     c->num_input_files(0));
     *made_progress = true;
   } else if (!trivial_move_disallowed && c->IsTrivialMove()) {
     TEST_SYNC_POINT("DBImpl::BackgroundCompaction:TrivialMove");
@@ -3592,10 +3596,11 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
                      f->largest, f->smallest_seqno, f->largest_seqno,
                      f->marked_for_compaction);
-      LogToBuffer(log_buffer,
-                  "[%s] Moving #%" PRIu64 " to level-%d %" PRIu64 " bytes\n",
-                  c->column_family_data()->GetName().c_str(),
-                  f->fd.GetNumber(), c->output_level(), f->fd.GetFileSize());
+      ROCKS_LOG_BUFFER(log_buffer, "[%s] Moving #%" PRIu64
+                                   " to level-%d %" PRIu64 " bytes\n",
+                       c->column_family_data()->GetName().c_str(),
+                       f->fd.GetNumber(), c->output_level(),
+                       f->fd.GetFileSize());
       ++moved_files;
       moved_bytes += f->fd.GetFileSize();
     }
@@ -3618,7 +3623,7 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
           << "destination_level" << c->output_level() << "files" << moved_files
           << "total_files_size" << moved_bytes;
     }
-    LogToBuffer(
+    ROCKS_LOG_BUFFER(
         log_buffer,
         "[%s] Moved #%d files to level-%d %" PRIu64 " bytes %s: %s\n",
        c->column_family_data()->GetName().c_str(), moved_files,
@@ -3676,8 +3681,8 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
   } else if (status.IsShutdownInProgress()) {
     // Ignore compaction errors found during shutting down
   } else {
-    Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
-        "Compaction error: %s", status.ToString().c_str());
+    ROCKS_LOG_WARN(immutable_db_options_.info_log, "Compaction error: %s",
+                   status.ToString().c_str());
     if (immutable_db_options_.paranoid_checks && bg_error_.ok()) {
       bg_error_ = status;
     }
@@ -4271,13 +4276,13 @@ Status DBImpl::CreateColumnFamily(const ColumnFamilyOptions& cf_options,
      }
      *handle = new ColumnFamilyHandleImpl(cfd, this, &mutex_);
-      Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-          "Created column family [%s] (ID %u)", column_family_name.c_str(),
-          (unsigned)cfd->GetID());
+      ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                     "Created column family [%s] (ID %u)",
+                     column_family_name.c_str(), (unsigned)cfd->GetID());
    } else {
-      Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-          "Creating column family [%s] FAILED -- %s",
-          column_family_name.c_str(), s.ToString().c_str());
+      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                      "Creating column family [%s] FAILED -- %s",
+                      column_family_name.c_str(), s.ToString().c_str());
    }
   }  // InstrumentedMutexLock l(&mutex_)
@@ -4292,9 +4297,9 @@ Status DBImpl::CreateColumnFamily(const ColumnFamilyOptions& cf_options,
             "options in CreateColumnFamily()",
             persist_options_status.ToString().c_str());
       }
-      Warn(immutable_db_options_.info_log,
-           "Unable to persist options in CreateColumnFamily() -- %s",
-           persist_options_status.ToString().c_str());
+      ROCKS_LOG_WARN(immutable_db_options_.info_log,
+                     "Unable to persist options in CreateColumnFamily() -- %s",
+                     persist_options_status.ToString().c_str());
     }
   }
   return s;
@@ -4357,8 +4362,8 @@ Status DBImpl::DropColumnFamily(ColumnFamilyHandle* column_family) {
     auto* mutable_cf_options = cfd->GetLatestMutableCFOptions();
     max_total_in_memory_state_ -= mutable_cf_options->write_buffer_size *
                                   mutable_cf_options->max_write_buffer_number;
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-        "Dropped column family with id %u\n", cfd->GetID());
+    ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                   "Dropped column family with id %u\n", cfd->GetID());
     if (!options_persist_status.ok()) {
       if (immutable_db_options_.fail_if_options_file_error) {
@@ -4367,14 +4372,14 @@ Status DBImpl::DropColumnFamily(ColumnFamilyHandle* column_family) {
             "options in DropColumnFamily()",
             options_persist_status.ToString().c_str());
       }
-      Warn(immutable_db_options_.info_log,
-           "Unable to persist options in DropColumnFamily() -- %s",
-           options_persist_status.ToString().c_str());
+      ROCKS_LOG_WARN(immutable_db_options_.info_log,
+                     "Unable to persist options in DropColumnFamily() -- %s",
+                     options_persist_status.ToString().c_str());
     }
   } else {
-    Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-        "Dropping column family with id %u FAILED -- %s\n", cfd->GetID(),
-        s.ToString().c_str());
+    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                    "Dropping column family with id %u FAILED -- %s\n",
+                    cfd->GetID(), s.ToString().c_str());
   }
   return s;
@@ -4742,7 +4747,8 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
     // thread is writing to another DB with the same write buffer, they may also
     // be flushed. We may end up with flushing much more DBs than needed. It's
     // suboptimal but still correct.
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_INFO(
+        immutable_db_options_.info_log,
        "Flushing column family with largest mem table size. Write buffer is "
        "using %" PRIu64 " bytes out of a total of %" PRIu64 ".",
        write_buffer_manager_->memory_usage(),
@@ -5074,7 +5080,8 @@ void DBImpl::MaybeFlushColumnFamilies() {
      // is still nothing that we can do.
      return;
    } else {
-      Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
+      ROCKS_LOG_WARN(
+          immutable_db_options_.info_log,
          "Unable to release oldest log due to uncommited transaction");
      unable_to_flush_oldest_log_ = true;
    }
@@ -5088,10 +5095,11 @@ void DBImpl::MaybeFlushColumnFamilies() {
   WriteContext context;
-  Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-      "Flushing all column families with data in WAL number %" PRIu64
-      ". Total log size is %" PRIu64 " while max_total_wal_size is %" PRIu64,
-      oldest_alive_log, total_log_size_, GetMaxTotalWalSize());
+  ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                 "Flushing all column families with data in WAL number %" PRIu64
+                 ". Total log size is %" PRIu64
+                 " while max_total_wal_size is %" PRIu64,
+                 oldest_alive_log, total_log_size_, GetMaxTotalWalSize());
   // no need to refcount because drop is happening in write thread, so can't
   // happen while we're in the write thread
   for (auto cfd : *versions_->GetColumnFamilySet()) {
@@ -5234,8 +5242,9 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
     EnvOptions opt_env_opt =
         env_->OptimizeForLogWrite(env_options_, db_options);
     if (recycle_log_number) {
-      Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-          "reusing log %" PRIu64 " from recycle list\n", recycle_log_number);
+      ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                     "reusing log %" PRIu64 " from recycle list\n",
+                     recycle_log_number);
       s = env_->ReuseWritableFile(
           LogFileName(immutable_db_options_.wal_dir, new_log_number),
           LogFileName(immutable_db_options_.wal_dir, recycle_log_number),
@@ -5273,10 +5282,10 @@ Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) {
     NotifyOnMemTableSealed(cfd, memtable_info);
 #endif //ROCKSDB_LITE
   }
-  Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+  ROCKS_LOG_INFO(immutable_db_options_.info_log,
                  "[%s] New memtable created with log file: #%" PRIu64
                  ". Immutable memtables: %d.\n",
                  cfd->GetName().c_str(), new_log_number, num_imm_unflushed);
   mutex_.Lock();
   if (!s.ok()) {
     // how do we fail if we're not creating new log?
@@ -5630,8 +5639,8 @@ Status DBImpl::DeleteFile(std::string name) {
   WalFileType log_type;
   if (!ParseFileName(name, &number, &type, &log_type) ||
       (type != kTableFile && type != kLogFile)) {
-    Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-        "DeleteFile %s failed.\n", name.c_str());
+    ROCKS_LOG_ERROR(immutable_db_options_.info_log, "DeleteFile %s failed.\n",
+                    name.c_str());
     return Status::InvalidArgument("Invalid file name");
   }
@@ -5639,16 +5648,17 @@ Status DBImpl::DeleteFile(std::string name) {
   if (type == kLogFile) {
     // Only allow deleting archived log files
     if (log_type != kArchivedLogFile) {
-      Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-          "DeleteFile %s failed - not archived log.\n", name.c_str());
+      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                      "DeleteFile %s failed - not archived log.\n",
+                      name.c_str());
       return Status::NotSupported("Delete only supported for archived logs");
     }
     status =
         env_->DeleteFile(immutable_db_options_.wal_dir + "/" + name.c_str());
     if (!status.ok()) {
-      Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
+      ROCKS_LOG_ERROR(immutable_db_options_.info_log,
                       "DeleteFile %s failed -- %s.\n", name.c_str(),
                       status.ToString().c_str());
     }
     return status;
   }
@@ -5662,8 +5672,8 @@ Status DBImpl::DeleteFile(std::string name) {
     InstrumentedMutexLock l(&mutex_);
     status = versions_->GetMetadataForFile(number, &level, &metadata, &cfd);
     if (!status.ok()) {
-      Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
-          "DeleteFile %s failed. File not found\n", name.c_str());
+      ROCKS_LOG_WARN(immutable_db_options_.info_log,
+                     "DeleteFile %s failed. File not found\n", name.c_str());
       job_context.Clean();
       return Status::InvalidArgument("File not found");
     }
@@ -5671,8 +5681,9 @@ Status DBImpl::DeleteFile(std::string name) {
     // If the file is being compacted no need to delete.
     if (metadata->being_compacted) {
-      Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-          "DeleteFile %s Skipped. File about to be compacted\n", name.c_str());
+      ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                     "DeleteFile %s Skipped. File about to be compacted\n",
+                     name.c_str());
       job_context.Clean();
       return Status::OK();
     }
@@ -5683,8 +5694,9 @@ Status DBImpl::DeleteFile(std::string name) {
     auto* vstoreage = cfd->current()->storage_info();
     for (int i = level + 1; i < cfd->NumberLevels(); i++) {
      if (vstoreage->NumLevelFiles(i) != 0) {
-        Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
-            "DeleteFile %s FAILED. File not in last level\n", name.c_str());
+        ROCKS_LOG_WARN(immutable_db_options_.info_log,
+                       "DeleteFile %s FAILED. File not in last level\n",
+                       name.c_str());
        job_context.Clean();
        return Status::InvalidArgument("File not in last level");
      }
@@ -5692,10 +5704,10 @@ Status DBImpl::DeleteFile(std::string name) {
     // if level == 0, it has to be the oldest file
     if (level == 0 &&
         vstoreage->LevelFiles(0).back()->fd.GetNumber() != number) {
-      Log(InfoLogLevel::WARN_LEVEL, immutable_db_options_.info_log,
+      ROCKS_LOG_WARN(immutable_db_options_.info_log,
                      "DeleteFile %s failed ---"
                      " target file in level 0 must be the oldest.",
                      name.c_str());
       job_context.Clean();
       return Status::InvalidArgument("File in level 0, but not oldest");
     }
@@ -6131,8 +6143,7 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
 #endif  // !ROCKSDB_LITE
   if (s.ok()) {
-    Log(InfoLogLevel::INFO_LEVEL, impl->immutable_db_options_.info_log,
-        "DB pointer %p", impl);
+    ROCKS_LOG_INFO(impl->immutable_db_options_.info_log, "DB pointer %p", impl);
     LogFlush(impl->immutable_db_options_.info_log);
     if (!persist_options_status.ok()) {
       if (db_options.fail_if_options_file_error) {
@@ -6140,9 +6151,9 @@ Status DB::Open(const DBOptions& db_options, const std::string& dbname,
             "DB::Open() failed --- Unable to persist Options file",
             persist_options_status.ToString());
       }
-      Warn(impl->immutable_db_options_.info_log,
-           "Unable to persist options in DB::Open() -- %s",
-           persist_options_status.ToString().c_str());
+      ROCKS_LOG_WARN(impl->immutable_db_options_.info_log,
+                     "Unable to persist options in DB::Open() -- %s",
+                     persist_options_status.ToString().c_str());
     }
   }
   if (!s.ok()) {
@@ -6304,7 +6315,8 @@ void DeleteOptionsFilesHelper(const std::map<uint64_t, std::string>& filenames,
   for (auto iter = std::next(filenames.begin(), num_files_to_keep);
        iter != filenames.end(); ++iter) {
     if (!env->DeleteFile(iter->second).ok()) {
-      Warn(info_log, "Unable to delete options file %s", iter->second.c_str());
+      ROCKS_LOG_WARN(info_log, "Unable to delete options file %s",
+                     iter->second.c_str());
     }
   }
 }
@@ -6401,10 +6413,10 @@ void DumpRocksDBBuildVersion(Logger * log) {
 #if !defined(IOS_CROSS_COMPILE)
   // if we compile with Xcode, we don't run build_detect_vesion, so we don't
   // generate util/build_version.cc
-  Header(log, "RocksDB version: %d.%d.%d\n", ROCKSDB_MAJOR, ROCKSDB_MINOR,
-         ROCKSDB_PATCH);
-  Header(log, "Git sha %s", rocksdb_build_git_sha);
-  Header(log, "Compile date %s", rocksdb_build_compile_date);
+  ROCKS_LOG_HEADER(log, "RocksDB version: %d.%d.%d\n", ROCKSDB_MAJOR,
+                   ROCKSDB_MINOR, ROCKSDB_PATCH);
+  ROCKS_LOG_HEADER(log, "Git sha %s", rocksdb_build_git_sha);
+  ROCKS_LOG_HEADER(log, "Compile date %s", rocksdb_build_compile_date);
 #endif
 }
@@ -6446,9 +6458,9 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
   if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
     // unexpected error reading memtable.
-    Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-        "Unexpected status returned from MemTable::Get: %s\n",
-        s.ToString().c_str());
+    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                    "Unexpected status returned from MemTable::Get: %s\n",
+                    s.ToString().c_str());
     return s;
   }
@@ -6465,9 +6477,9 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
   if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
     // unexpected error reading memtable.
-    Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-        "Unexpected status returned from MemTableList::Get: %s\n",
-        s.ToString().c_str());
+    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                    "Unexpected status returned from MemTableList::Get: %s\n",
+                    s.ToString().c_str());
     return s;
   }
@@ -6484,7 +6496,8 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
   if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
     // unexpected error reading memtable.
-    Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
+    ROCKS_LOG_ERROR(
+        immutable_db_options_.info_log,
        "Unexpected status returned from MemTableList::GetFromHistory: %s\n",
        s.ToString().c_str());
@@ -6507,9 +6520,9 @@ Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
   if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
     // unexpected error reading SST files
-    Log(InfoLogLevel::ERROR_LEVEL, immutable_db_options_.info_log,
-        "Unexpected status returned from Version::Get: %s\n",
-        s.ToString().c_str());
+    ROCKS_LOG_ERROR(immutable_db_options_.info_log,
+                    "Unexpected status returned from Version::Get: %s\n",
+                    s.ToString().c_str());
     return s;
   }
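
The conversions above are mechanical, but they only capture useful location data because ROCKS_LOG_* are macros rather than functions: a macro expands at the call site, so __FILE__ and __LINE__ resolve to the caller's file and line instead of the logging code's. A minimal, self-contained sketch of that technique (the names and the exact expansion here are illustrative, not necessarily what util/logging.h defines):

```
#include <cstdarg>
#include <cstdio>

// Stand-in for the real logger: formats the message to stderr.
static void LogImpl(const char* format, ...) {
  va_list ap;
  va_start(ap, format);
  vfprintf(stderr, format, ap);
  fputc('\n', stderr);
  va_end(ap);
}

// Stringify __LINE__ so it can be spliced into the format string at
// compile time; the file name is passed as an ordinary "%s" argument.
#define MY_LOG_STRINGIFY(x) #x
#define MY_LOG_TOSTRING(x) MY_LOG_STRINGIFY(x)
#define MY_LOG_INFO(FMT, ...) \
  LogImpl("[%s:" MY_LOG_TOSTRING(__LINE__) "] " FMT, __FILE__, ##__VA_ARGS__)

int main() {
  void* db = nullptr;
  // Expands with this file's name and this line's number,
  // e.g. "[example.cc:22] DB pointer 0x0".
  MY_LOG_INFO("DB pointer %p", db);
  return 0;
}
```

Splicing the stringified line number into the format string at compile time keeps the runtime cost to one extra "%s" argument for the file name; ##__VA_ARGS__ is a GNU extension, but one that gcc, clang, and MSVC all accept.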


@@ -61,8 +61,8 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
   assert(column_family);
   if (target_level < 1) {
-    Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-        "PromoteL0 FAILED. Invalid target level %d\n", target_level);
+    ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                   "PromoteL0 FAILED. Invalid target level %d\n", target_level);
     return Status::InvalidArgument("Invalid target level");
   }
@@ -75,8 +75,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
     const auto* vstorage = cfd->current()->storage_info();
     if (target_level >= vstorage->num_levels()) {
-      Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-          "PromoteL0 FAILED. Target level %d does not exist\n", target_level);
+      ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                     "PromoteL0 FAILED. Target level %d does not exist\n",
+                     target_level);
       job_context.Clean();
       return Status::InvalidArgument("Target level does not exist");
     }
@@ -94,9 +95,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
     for (size_t i = 0; i < l0_files.size(); ++i) {
       auto f = l0_files[i];
       if (f->being_compacted) {
-        Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+        ROCKS_LOG_INFO(immutable_db_options_.info_log,
                        "PromoteL0 FAILED. File %" PRIu64 " being compacted\n",
                        f->fd.GetNumber());
         job_context.Clean();
         return Status::InvalidArgument("PromoteL0 called during L0 compaction");
       }
@@ -104,10 +105,10 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
       if (i == 0) continue;
       auto prev_f = l0_files[i - 1];
       if (icmp->Compare(prev_f->largest, f->smallest) >= 0) {
-        Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
+        ROCKS_LOG_INFO(immutable_db_options_.info_log,
                        "PromoteL0 FAILED. Files %" PRIu64 " and %" PRIu64
                        " have overlapping ranges\n",
                        prev_f->fd.GetNumber(), f->fd.GetNumber());
         job_context.Clean();
         return Status::InvalidArgument("L0 has overlapping files");
       }
@@ -116,8 +117,8 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
     // Check that all levels up to target_level are empty.
     for (int level = 1; level <= target_level; ++level) {
       if (vstorage->NumLevelFiles(level) > 0) {
-        Log(InfoLogLevel::INFO_LEVEL, immutable_db_options_.info_log,
-            "PromoteL0 FAILED. Level %d not empty\n", level);
+        ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                       "PromoteL0 FAILED. Level %d not empty\n", level);
         job_context.Clean();
         return Status::InvalidArgument(
             "All levels up to target_level "


@@ -20,8 +20,8 @@ namespace rocksdb {
 DBImplReadOnly::DBImplReadOnly(const DBOptions& db_options,
                                const std::string& dbname)
     : DBImpl(db_options, dbname) {
-  Log(INFO_LEVEL, immutable_db_options_.info_log,
-      "Opening the db in read only mode");
+  ROCKS_LOG_INFO(immutable_db_options_.info_log,
+                 "Opening the db in read only mode");
   LogFlush(immutable_db_options_.info_log);
 }
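
This constructor shows the change in its smallest form: the severity argument folds into the macro name, and everything else stays put. Assuming the macros prepend call-site information as described in the commit summary, the observable difference is one extra prefix per emitted line (the exact prefix format and line number below are illustrative):

```
// Before: severity is an argument; the emitted line carries no location.
Log(INFO_LEVEL, immutable_db_options_.info_log,
    "Opening the db in read only mode");

// After: severity is in the macro name; __FILE__/__LINE__ are captured.
ROCKS_LOG_INFO(immutable_db_options_.info_log,
               "Opening the db in read only mode");

// Illustrative output:
// [db_impl_readonly.cc:23] Opening the db in read only mode
```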


@@ -291,9 +291,8 @@ class DBIter: public Iterator {
 inline bool DBIter::ParseKey(ParsedInternalKey* ikey) {
   if (!ParseInternalKey(iter_->key(), ikey)) {
     status_ = Status::Corruption("corrupted internal key in DBIter");
-    Log(InfoLogLevel::ERROR_LEVEL,
-        logger_, "corrupted internal key in DBIter: %s",
-        iter_->key().ToString(true).c_str());
+    ROCKS_LOG_ERROR(logger_, "corrupted internal key in DBIter: %s",
+                    iter_->key().ToString(true).c_str());
     return false;
   } else {
     return true;
@@ -509,8 +508,7 @@ void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) {
 // iter_ points to the next entry (or invalid)
 void DBIter::MergeValuesNewToOld() {
   if (!merge_operator_) {
-    Log(InfoLogLevel::ERROR_LEVEL,
-        logger_, "Options::merge_operator is null.");
+    ROCKS_LOG_ERROR(logger_, "Options::merge_operator is null.");
     status_ = Status::InvalidArgument("merge_operator_ must be set.");
     valid_ = false;
     return;


@@ -120,9 +120,9 @@ Status ExternalSstFileIngestionJob::Prepare(
       }
       Status s = env_->DeleteFile(f.internal_file_path);
       if (!s.ok()) {
-        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+        ROCKS_LOG_WARN(db_options_.info_log,
                        "AddFile() clean up for file %s failed : %s",
                        f.internal_file_path.c_str(), s.ToString().c_str());
       }
     }
   }
@@ -212,7 +212,8 @@ void ExternalSstFileIngestionJob::UpdateStats() {
     if (f.picked_level == 0) {
       total_l0_files += 1;
     }
-    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+    ROCKS_LOG_INFO(
+        db_options_.info_log,
        "[AddFile] External SST file %s was ingested in L%d with path %s "
        "(global_seqno=%" PRIu64 ")\n",
        f.external_file_path.c_str(), f.picked_level,
@@ -233,9 +234,9 @@ void ExternalSstFileIngestionJob::Cleanup(const Status& status) {
     for (IngestedFileInfo& f : files_to_ingest_) {
       Status s = env_->DeleteFile(f.internal_file_path);
       if (!s.ok()) {
-        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+        ROCKS_LOG_WARN(db_options_.info_log,
                        "AddFile() clean up for file %s failed : %s",
                        f.internal_file_path.c_str(), s.ToString().c_str());
       }
     }
   } else if (status.ok() && ingestion_options_.move_files) {
@@ -243,7 +244,8 @@ void ExternalSstFileIngestionJob::Cleanup(const Status& status) {
     for (IngestedFileInfo& f : files_to_ingest_) {
       Status s = env_->DeleteFile(f.external_file_path);
       if (!s.ok()) {
-        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+        ROCKS_LOG_WARN(
+            db_options_.info_log,
            "%s was added to DB successfully but failed to remove original "
            "file link : %s",
            f.external_file_path.c_str(), s.ToString().c_str());


@@ -158,8 +158,8 @@ Status FlushJob::Run(FileMetaData* file_meta) {
   AutoThreadOperationStageUpdater stage_run(
       ThreadStatus::STAGE_FLUSH_RUN);
   if (mems_.empty()) {
-    LogToBuffer(log_buffer_, "[%s] Nothing in memtable to flush",
-                cfd_->GetName().c_str());
+    ROCKS_LOG_BUFFER(log_buffer_, "[%s] Nothing in memtable to flush",
+                     cfd_->GetName().c_str());
     return Status::OK();
   }
@@ -258,7 +258,8 @@ Status FlushJob::WriteLevel0Table() {
   uint64_t total_num_entries = 0, total_num_deletes = 0;
   size_t total_memory_usage = 0;
   for (MemTable* m : mems_) {
-    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+    ROCKS_LOG_INFO(
+        db_options_.info_log,
        "[%s] [JOB %d] Flushing memtable with next log file: %" PRIu64 "\n",
        cfd_->GetName().c_str(), job_context_->job_id, m->GetNextLogNumber());
     memtables.push_back(m->NewIterator(ro, &arena));
@@ -286,9 +287,10 @@ Status FlushJob::WriteLevel0Table() {
         &cfd_->internal_comparator(),
         range_del_iters.empty() ? nullptr : &range_del_iters[0],
         static_cast<int>(range_del_iters.size())));
-    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
-        "[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": started",
-        cfd_->GetName().c_str(), job_context_->job_id, meta_.fd.GetNumber());
+    ROCKS_LOG_INFO(db_options_.info_log,
+                   "[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": started",
+                   cfd_->GetName().c_str(), job_context_->job_id,
+                   meta_.fd.GetNumber());
     TEST_SYNC_POINT_CALLBACK("FlushJob::WriteLevel0Table:output_compression",
                              &output_compression_);
@@ -305,13 +307,14 @@ Status FlushJob::WriteLevel0Table() {
                  Env::IO_HIGH, &table_properties_, 0 /* level */);
     LogFlush(db_options_.info_log);
   }
-  Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
-      "[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": %" PRIu64
-      " bytes %s"
-      "%s",
-      cfd_->GetName().c_str(), job_context_->job_id, meta_.fd.GetNumber(),
-      meta_.fd.GetFileSize(), s.ToString().c_str(),
-      meta_.marked_for_compaction ? " (needs compaction)" : "");
+  ROCKS_LOG_INFO(db_options_.info_log,
+                 "[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": %" PRIu64
+                 " bytes %s"
+                 "%s",
+                 cfd_->GetName().c_str(), job_context_->job_id,
+                 meta_.fd.GetNumber(), meta_.fd.GetFileSize(),
+                 s.ToString().c_str(),
+                 meta_.marked_for_compaction ? " (needs compaction)" : "");
   if (output_file_directory_ != nullptr) {
     output_file_directory_->Fsync();


@@ -355,9 +355,9 @@ Status MemTableList::InstallMemtableFlushResults(
       }
       if (it == memlist.rbegin() || batch_file_number != m->file_number_) {
         batch_file_number = m->file_number_;
-        LogToBuffer(log_buffer,
-                    "[%s] Level-0 commit table #%" PRIu64 " started",
-                    cfd->GetName().c_str(), m->file_number_);
+        ROCKS_LOG_BUFFER(log_buffer,
+                         "[%s] Level-0 commit table #%" PRIu64 " started",
+                         cfd->GetName().c_str(), m->file_number_);
         edit_list.push_back(&m->edit_);
       }
       batch_count++;
@@ -378,9 +378,9 @@ Status MemTableList::InstallMemtableFlushResults(
     if (s.ok()) {  // commit new state
       while (batch_count-- > 0) {
         MemTable* m = current_->memlist_.back();
-        LogToBuffer(log_buffer, "[%s] Level-0 commit table #%" PRIu64
-                                ": memtable #%" PRIu64 " done",
-                    cfd->GetName().c_str(), m->file_number_, mem_id);
+        ROCKS_LOG_BUFFER(log_buffer, "[%s] Level-0 commit table #%" PRIu64
+                                     ": memtable #%" PRIu64 " done",
+                         cfd->GetName().c_str(), m->file_number_, mem_id);
         assert(m->file_number_ > 0);
         current_->Remove(m, to_delete);
         ++mem_id;
@@ -389,9 +389,9 @@ Status MemTableList::InstallMemtableFlushResults(
       for (auto it = current_->memlist_.rbegin(); batch_count-- > 0; it++) {
         MemTable* m = *it;
         // commit failed. setup state so that we can flush again.
-        LogToBuffer(log_buffer, "Level-0 commit table #%" PRIu64
-                                ": memtable #%" PRIu64 " failed",
-                    m->file_number_, mem_id);
+        ROCKS_LOG_BUFFER(log_buffer, "Level-0 commit table #%" PRIu64
+                                     ": memtable #%" PRIu64 " failed",
+                         m->file_number_, mem_id);
         m->flush_completed_ = false;
         m->flush_in_progress_ = false;
         m->edit_.Clear();
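
The LogToBuffer to ROCKS_LOG_BUFFER rewrites above involve a buffered logger: entries are queued while a mutex is held and only written out later, so the caller's file and line have to be captured at enqueue time rather than at flush time. A rough sketch of that shape, with invented names (RocksDB's actual LogBuffer API differs in detail):

```
#include <cstdio>
#include <string>
#include <vector>

// Toy stand-in for a log buffer: stores pre-formatted lines and writes
// them out later, e.g. once a lock has been dropped.
struct ToyLogBuffer {
  std::vector<std::string> entries;

  void AddEntry(const char* file, int line, const char* msg) {
    entries.push_back("[" + std::string(file) + ":" +
                      std::to_string(line) + "] " + msg);
  }
  void FlushToStderr() {
    for (const auto& e : entries) fprintf(stderr, "%s\n", e.c_str());
    entries.clear();
  }
};

// The macro records __FILE__/__LINE__ of the caller, not of the buffer.
#define TOY_LOG_BUFFER(buf, msg) (buf)->AddEntry(__FILE__, __LINE__, (msg))

int main() {
  ToyLogBuffer buf;
  TOY_LOG_BUFFER(&buf, "Compaction nothing to do");  // enqueued, not printed
  buf.FlushToStderr();                               // printed here
  return 0;
}
```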


@@ -199,13 +199,13 @@ class Repairer {
       for (size_t i = 0; i < tables_.size(); i++) {
         bytes += tables_[i].meta.fd.GetFileSize();
       }
-      Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+      ROCKS_LOG_WARN(db_options_.info_log,
                      "**** Repaired rocksdb %s; "
                      "recovered %" ROCKSDB_PRIszt " files; %" PRIu64
                      "bytes. "
                      "Some data may have been lost. "
                      "****",
                      dbname_.c_str(), tables_.size(), bytes);
     }
     return status;
   }
@@ -291,9 +291,9 @@ class Repairer {
       std::string logname = LogFileName(dbname_, logs_[i]);
       Status status = ConvertLogToTable(logs_[i]);
       if (!status.ok()) {
-        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
-            "Log #%" PRIu64 ": ignoring conversion error: %s", logs_[i],
-            status.ToString().c_str());
+        ROCKS_LOG_WARN(db_options_.info_log,
+                       "Log #%" PRIu64 ": ignoring conversion error: %s",
+                       logs_[i], status.ToString().c_str());
       }
       ArchiveFile(logname);
     }
@@ -306,9 +306,8 @@ class Repairer {
     uint64_t lognum;
     virtual void Corruption(size_t bytes, const Status& s) override {
       // We print error messages for corruption, but continue repairing.
-      Log(InfoLogLevel::ERROR_LEVEL, info_log,
-          "Log #%" PRIu64 ": dropping %d bytes; %s", lognum,
-          static_cast<int>(bytes), s.ToString().c_str());
+      ROCKS_LOG_ERROR(info_log, "Log #%" PRIu64 ": dropping %d bytes; %s",
+                      lognum, static_cast<int>(bytes), s.ToString().c_str());
     }
   };
@@ -357,8 +356,8 @@ class Repairer {
       if (status.ok()) {
         counter += WriteBatchInternal::Count(&batch);
       } else {
-        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
-            "Log #%" PRIu64 ": ignoring %s", log, status.ToString().c_str());
+        ROCKS_LOG_WARN(db_options_.info_log, "Log #%" PRIu64 ": ignoring %s",
+                       log, status.ToString().c_str());
         status = Status::OK();  // Keep going with rest of file
       }
     }
@@ -386,9 +385,10 @@ class Repairer {
         cfd->int_tbl_prop_collector_factories(), cfd->GetID(), cfd->GetName(),
         {}, kMaxSequenceNumber, kNoCompression, CompressionOptions(), false,
         nullptr /* internal_stats */, TableFileCreationReason::kRecovery);
-    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
-        "Log #%" PRIu64 ": %d ops saved to Table #%" PRIu64 " %s", log,
-        counter, meta.fd.GetNumber(), status.ToString().c_str());
+    ROCKS_LOG_INFO(db_options_.info_log,
+                   "Log #%" PRIu64 ": %d ops saved to Table #%" PRIu64 " %s",
+                   log, counter, meta.fd.GetNumber(),
+                   status.ToString().c_str());
     if (status.ok()) {
       if (meta.fd.GetFileSize() > 0) {
         table_fds_.push_back(meta.fd);
@@ -412,8 +412,8 @@ class Repairer {
       char file_num_buf[kFormatFileNumberBufSize];
       FormatFileNumber(t.meta.fd.GetNumber(), t.meta.fd.GetPathId(),
                        file_num_buf, sizeof(file_num_buf));
-      Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
-          "Table #%s: ignoring %s", file_num_buf, status.ToString().c_str());
+      ROCKS_LOG_WARN(db_options_.info_log, "Table #%s: ignoring %s",
+                     file_num_buf, status.ToString().c_str());
       ArchiveFile(fname);
     } else {
       tables_.push_back(t);
@@ -438,7 +438,8 @@ class Repairer {
     t->column_family_id = static_cast<uint32_t>(props->column_family_id);
     if (t->column_family_id ==
         TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) {
-      Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+      ROCKS_LOG_WARN(
+          db_options_.info_log,
          "Table #%" PRIu64
          ": column family unknown (probably due to legacy format); "
          "adding to default column family id 0.",
@@ -456,7 +457,8 @@ class Repairer {
     if (status.ok()) {
       cfd = vset_.GetColumnFamilySet()->GetColumnFamily(t->column_family_id);
       if (cfd->GetName() != props->column_family_name) {
-        Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
+        ROCKS_LOG_ERROR(
+            db_options_.info_log,
            "Table #%" PRIu64
            ": inconsistent column family name '%s'; expected '%s' for column "
            "family id %" PRIu32 ".",
@@ -476,9 +478,9 @@ class Repairer {
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       Slice key = iter->key();
       if (!ParseInternalKey(key, &parsed)) {
-        Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
-            "Table #%" PRIu64 ": unparsable key %s", t->meta.fd.GetNumber(),
-            EscapeString(key).c_str());
+        ROCKS_LOG_ERROR(db_options_.info_log,
+                        "Table #%" PRIu64 ": unparsable key %s",
+                        t->meta.fd.GetNumber(), EscapeString(key).c_str());
         continue;
       }
@@ -501,9 +503,9 @@ class Repairer {
     }
     delete iter;
-    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
-        "Table #%" PRIu64 ": %d entries %s", t->meta.fd.GetNumber(), counter,
-        status.ToString().c_str());
+    ROCKS_LOG_INFO(db_options_.info_log, "Table #%" PRIu64 ": %d entries %s",
+                   t->meta.fd.GetNumber(), counter,
+                   status.ToString().c_str());
   }
   return status;
 }
@@ -563,8 +565,8 @@ class Repairer {
     new_file.append("/");
     new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
     Status s = env_->RenameFile(fname, new_file);
-    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, "Archiving %s: %s\n",
-        fname.c_str(), s.ToString().c_str());
+    ROCKS_LOG_INFO(db_options_.info_log, "Archiving %s: %s\n", fname.c_str(),
+                   s.ToString().c_str());
   }
 };
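
A detail that survives the rewrite untouched here and throughout: format strings assembled from pieces such as "Table #%" PRIu64 ": %d entries %s". PRIu64 expands to a string literal, and adjacent string literals are concatenated before the logging macro's format string is ever used, so these splices move safely between Log(...) and ROCKS_LOG_*(...). A compilable illustration:

```
#include <cinttypes>  // PRIu64
#include <cstdint>
#include <cstdio>

int main() {
  uint64_t file_number = 42;
  int counter = 7;
  // "Table #%" PRIu64 ": %d entries %s" concatenates into one literal,
  // e.g. "Table #%llu: %d entries %s" on platforms where PRIu64 is "llu".
  printf("Table #%" PRIu64 ": %d entries %s\n", file_number, counter, "OK");
  return 0;
}
```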


@@ -92,13 +92,10 @@ class TransactionLogIteratorImpl : public TransactionLogIterator {
     Env* env;
     Logger* info_log;
     virtual void Corruption(size_t bytes, const Status& s) override {
-      Log(InfoLogLevel::ERROR_LEVEL, info_log,
-          "dropping %" ROCKSDB_PRIszt " bytes; %s", bytes,
-          s.ToString().c_str());
-    }
-    virtual void Info(const char* s) {
-      Log(InfoLogLevel::INFO_LEVEL, info_log, "%s", s);
-    }
+      ROCKS_LOG_ERROR(info_log, "dropping %" ROCKSDB_PRIszt " bytes; %s", bytes,
+                      s.ToString().c_str());
+    }
+    virtual void Info(const char* s) { ROCKS_LOG_INFO(info_log, "%s", s); }
   } reporter_;
   SequenceNumber currentBatchSeq_; // sequence number at start of current batch


@@ -1056,9 +1056,10 @@ bool Version::MaybeInitializeFileMetaData(FileMetaData* file_meta) {
   Status s = GetTableProperties(&tp, file_meta);
   file_meta->init_stats_from_file = true;
   if (!s.ok()) {
-    Log(InfoLogLevel::ERROR_LEVEL, vset_->db_options_->info_log,
-        "Unable to load table properties for file %" PRIu64 " --- %s\n",
-        file_meta->fd.GetNumber(), s.ToString().c_str());
+    ROCKS_LOG_ERROR(vset_->db_options_->info_log,
+                    "Unable to load table properties for file %" PRIu64
+                    " --- %s\n",
+                    file_meta->fd.GetNumber(), s.ToString().c_str());
     return false;
   }
   if (tp.get() == nullptr) return false;
@@ -2073,9 +2074,9 @@ void VersionStorageInfo::CalculateBaseBytes(const ImmutableCFOptions& ioptions,
       // base_bytes_min. We set it be base_bytes_min.
       base_level_size = base_bytes_min + 1U;
       base_level_ = first_non_empty_level;
-      Warn(ioptions.info_log,
+      ROCKS_LOG_WARN(ioptions.info_log,
                      "More existing levels in DB than needed. "
                      "max_bytes_for_level_multiplier may not be guaranteed.");
     } else {
       // Find base level (where L0 data is compacted to).
       base_level_ = first_non_empty_level;
@@ -2381,8 +2382,8 @@ Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
   // only one thread can be here at the same time
   if (new_descriptor_log) {
     // create manifest file
-    Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
-        "Creating manifest %" PRIu64 "\n", pending_manifest_file_number_);
+    ROCKS_LOG_INFO(db_options_->info_log, "Creating manifest %" PRIu64 "\n",
+                   pending_manifest_file_number_);
     unique_ptr<WritableFile> descriptor_file;
     EnvOptions opt_env_opts = env_->OptimizeForManifestWrite(env_options_);
     s = NewWritableFile(
@@ -2425,8 +2426,8 @@ Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
       s = SyncManifest(env_, db_options_, descriptor_log_->file());
     }
     if (!s.ok()) {
-      Log(InfoLogLevel::ERROR_LEVEL, db_options_->info_log,
-          "MANIFEST write: %s\n", s.ToString().c_str());
+      ROCKS_LOG_ERROR(db_options_->info_log, "MANIFEST write: %s\n",
+                      s.ToString().c_str());
     }
   }
@ -2496,15 +2497,16 @@ Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
for (auto& e : batch_edits) { for (auto& e : batch_edits) {
version_edits = version_edits + "\n" + e->DebugString(true); version_edits = version_edits + "\n" + e->DebugString(true);
} }
Log(InfoLogLevel::ERROR_LEVEL, db_options_->info_log, ROCKS_LOG_ERROR(
db_options_->info_log,
"[%s] Error in committing version edit to MANIFEST: %s", "[%s] Error in committing version edit to MANIFEST: %s",
column_family_data ? column_family_data->GetName().c_str() : "<null>", column_family_data ? column_family_data->GetName().c_str() : "<null>",
version_edits.c_str()); version_edits.c_str());
delete v; delete v;
if (new_descriptor_log) { if (new_descriptor_log) {
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log, ROCKS_LOG_INFO(db_options_->info_log, "Deleting manifest %" PRIu64
"Deleting manifest %" PRIu64 " current manifest %" PRIu64 "\n", " current manifest %" PRIu64 "\n",
manifest_file_number_, pending_manifest_file_number_); manifest_file_number_, pending_manifest_file_number_);
descriptor_log_.reset(); descriptor_log_.reset();
env_->DeleteFile( env_->DeleteFile(
DescriptorFileName(dbname_, pending_manifest_file_number_)); DescriptorFileName(dbname_, pending_manifest_file_number_));
@ -2594,9 +2596,8 @@ Status VersionSet::Recover(
return Status::Corruption("CURRENT file corrupted"); return Status::Corruption("CURRENT file corrupted");
} }
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log, ROCKS_LOG_INFO(db_options_->info_log, "Recovering from manifest file: %s\n",
"Recovering from manifest file: %s\n", manifest_filename.c_str());
manifest_filename.c_str());
manifest_filename = dbname_ + "/" + manifest_filename; manifest_filename = dbname_ + "/" + manifest_filename;
unique_ptr<SequentialFileReader> manifest_file_reader; unique_ptr<SequentialFileReader> manifest_file_reader;
@ -2734,7 +2735,8 @@ Status VersionSet::Recover(
if (cfd != nullptr) { if (cfd != nullptr) {
if (edit.has_log_number_) { if (edit.has_log_number_) {
if (cfd->GetLogNumber() > edit.log_number_) { if (cfd->GetLogNumber() > edit.log_number_) {
Log(InfoLogLevel::WARN_LEVEL, db_options_->info_log, ROCKS_LOG_WARN(
db_options_->info_log,
"MANIFEST corruption detected, but ignored - Log numbers in " "MANIFEST corruption detected, but ignored - Log numbers in "
"records NOT monotonically increasing"); "records NOT monotonically increasing");
} else { } else {
@ -2835,7 +2837,8 @@ Status VersionSet::Recover(
last_sequence_ = last_sequence; last_sequence_ = last_sequence;
prev_log_number_ = previous_log_number; prev_log_number_ = previous_log_number;
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log, ROCKS_LOG_INFO(
db_options_->info_log,
"Recovered from manifest file:%s succeeded," "Recovered from manifest file:%s succeeded,"
"manifest_file_number is %lu, next_file_number is %lu, " "manifest_file_number is %lu, next_file_number is %lu, "
"last_sequence is %lu, log_number is %lu," "last_sequence is %lu, log_number is %lu,"
@ -2850,9 +2853,9 @@ Status VersionSet::Recover(
if (cfd->IsDropped()) { if (cfd->IsDropped()) {
continue; continue;
} }
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log, ROCKS_LOG_INFO(db_options_->info_log,
"Column family [%s] (ID %u), log number is %" PRIu64 "\n", "Column family [%s] (ID %u), log number is %" PRIu64 "\n",
cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber()); cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber());
} }
} }
@ -3492,7 +3495,8 @@ bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
Version* version = c->column_family_data()->current(); Version* version = c->column_family_data()->current();
const VersionStorageInfo* vstorage = version->storage_info(); const VersionStorageInfo* vstorage = version->storage_info();
if (c->input_version() != version) { if (c->input_version() != version) {
Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log, ROCKS_LOG_INFO(
db_options_->info_log,
"[%s] compaction output being applied to a different base version from" "[%s] compaction output being applied to a different base version from"
" input version", " input version",
c->column_family_data()->GetName().c_str()); c->column_family_data()->GetName().c_str());
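Every hunk in this change follows the same mechanical pattern, so it is worth spelling out once: the explicit `InfoLogLevel::X_LEVEL` argument moves into the macro name, while the logger, format string, and arguments stay put. A minimal before/after sketch, taken from the first hunk above:

```cpp
// Before: severity passed as an argument, no call-site information logged.
Log(InfoLogLevel::ERROR_LEVEL, db_options_->info_log,
    "MANIFEST write: %s\n", s.ToString().c_str());

// After: severity is part of the macro name, and the macro (defined in
// util/logging.h later in this diff) prepends "[file:line] " to the format.
ROCKS_LOG_ERROR(db_options_->info_log, "MANIFEST write: %s\n",
                s.ToString().c_str());
```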

View File

@@ -73,9 +73,8 @@ Status WalManager::GetSortedWalFiles(VectorLogPtr& files) {
   uint64_t latest_archived_log_number = 0;
   if (!files.empty()) {
     latest_archived_log_number = files.back()->LogNumber();
-    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
-        "Latest Archived log: %" PRIu64,
-        latest_archived_log_number);
+    ROCKS_LOG_INFO(db_options_.info_log, "Latest Archived log: %" PRIu64,
+                   latest_archived_log_number);
   }
   files.reserve(files.size() + logs.size());
@@ -87,8 +86,8 @@ Status WalManager::GetSortedWalFiles(VectorLogPtr& files) {
       // same log in both db dir and archived dir. Simply
       // ignore the one in db dir. Note that, if we read
       // archived dir first, we would have missed the log file.
-      Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
-          "%s already moved to archive", log->PathName().c_str());
+      ROCKS_LOG_WARN(db_options_.info_log, "%s already moved to archive",
+                     log->PathName().c_str());
     }
   }
@@ -137,8 +136,8 @@ void WalManager::PurgeObsoleteWALFiles() {
   int64_t current_time;
   Status s = env_->GetCurrentTime(&current_time);
   if (!s.ok()) {
-    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
-        "Can't get current time: %s", s.ToString().c_str());
+    ROCKS_LOG_ERROR(db_options_.info_log, "Can't get current time: %s",
+                    s.ToString().c_str());
    assert(false);
    return;
  }
@@ -157,8 +156,8 @@ void WalManager::PurgeObsoleteWALFiles() {
   std::vector<std::string> files;
   s = env_->GetChildren(archival_dir, &files);
   if (!s.ok()) {
-    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
-        "Can't get archive files: %s", s.ToString().c_str());
+    ROCKS_LOG_ERROR(db_options_.info_log, "Can't get archive files: %s",
+                    s.ToString().c_str());
     assert(false);
     return;
   }
@@ -175,17 +174,16 @@ void WalManager::PurgeObsoleteWALFiles() {
       uint64_t file_m_time;
       s = env_->GetFileModificationTime(file_path, &file_m_time);
       if (!s.ok()) {
-        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
-            "Can't get file mod time: %s: %s",
-            file_path.c_str(), s.ToString().c_str());
+        ROCKS_LOG_WARN(db_options_.info_log,
+                       "Can't get file mod time: %s: %s", file_path.c_str(),
+                       s.ToString().c_str());
         continue;
       }
       if (now_seconds - file_m_time > db_options_.wal_ttl_seconds) {
         s = env_->DeleteFile(file_path);
         if (!s.ok()) {
-          Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
-              "Can't delete file: %s: %s", file_path.c_str(),
-              s.ToString().c_str());
+          ROCKS_LOG_WARN(db_options_.info_log, "Can't delete file: %s: %s",
+                         file_path.c_str(), s.ToString().c_str());
           continue;
         } else {
           MutexLock l(&read_first_record_cache_mutex_);
@@ -199,9 +197,9 @@ void WalManager::PurgeObsoleteWALFiles() {
       uint64_t file_size;
       s = env_->GetFileSize(file_path, &file_size);
       if (!s.ok()) {
-        Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
-            "Unable to get file size: %s: %s",
-            file_path.c_str(), s.ToString().c_str());
+        ROCKS_LOG_ERROR(db_options_.info_log,
+                        "Unable to get file size: %s: %s", file_path.c_str(),
+                        s.ToString().c_str());
         return;
       } else {
         if (file_size > 0) {
@@ -210,9 +208,9 @@ void WalManager::PurgeObsoleteWALFiles() {
         } else {
           s = env_->DeleteFile(file_path);
           if (!s.ok()) {
-            Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
-                "Unable to delete file: %s: %s",
-                file_path.c_str(), s.ToString().c_str());
+            ROCKS_LOG_WARN(db_options_.info_log,
+                           "Unable to delete file: %s: %s", file_path.c_str(),
+                           s.ToString().c_str());
             continue;
           } else {
             MutexLock l(&read_first_record_cache_mutex_);
@@ -239,9 +237,9 @@ void WalManager::PurgeObsoleteWALFiles() {
   GetSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile);
   if (files_del_num > archived_logs.size()) {
-    Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
-        "Trying to delete more archived log files than "
-        "exist. Deleting all");
+    ROCKS_LOG_WARN(db_options_.info_log,
+                   "Trying to delete more archived log files than "
+                   "exist. Deleting all");
     files_del_num = archived_logs.size();
   }
@@ -249,9 +247,8 @@ void WalManager::PurgeObsoleteWALFiles() {
     std::string const file_path = archived_logs[i]->PathName();
     s = env_->DeleteFile(db_options_.wal_dir + "/" + file_path);
     if (!s.ok()) {
-      Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
-          "Unable to delete file: %s: %s", file_path.c_str(),
-          s.ToString().c_str());
+      ROCKS_LOG_WARN(db_options_.info_log, "Unable to delete file: %s: %s",
+                     file_path.c_str(), s.ToString().c_str());
       continue;
     } else {
       MutexLock l(&read_first_record_cache_mutex_);
@@ -267,9 +264,9 @@ void WalManager::ArchiveWALFile(const std::string& fname, uint64_t number) {
   Status s = env_->RenameFile(fname, archived_log_name);
   // The sync point below is used in (DBTest,TransactionLogIteratorRace)
   TEST_SYNC_POINT("WalManager::PurgeObsoleteFiles:2");
-  Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
-      "Move log file %s to %s -- %s\n", fname.c_str(),
-      archived_log_name.c_str(), s.ToString().c_str());
+  ROCKS_LOG_INFO(db_options_.info_log, "Move log file %s to %s -- %s\n",
+                 fname.c_str(), archived_log_name.c_str(),
+                 s.ToString().c_str());
 }
 namespace {
@@ -367,8 +364,8 @@ Status WalManager::ReadFirstRecord(const WalFileType type,
                                    SequenceNumber* sequence) {
   *sequence = 0;
   if (type != kAliveLogFile && type != kArchivedLogFile) {
-    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
-        "[WalManger] Unknown file type %s", ToString(type).c_str());
+    ROCKS_LOG_ERROR(db_options_.info_log, "[WalManger] Unknown file type %s",
+                    ToString(type).c_str());
     return Status::NotSupported(
         "File Type Not Known " + ToString(type));
   }
@@ -423,10 +420,9 @@ Status WalManager::ReadFirstLine(const std::string& fname,
     Status* status;
     bool ignore_error;  // true if db_options_.paranoid_checks==false
     virtual void Corruption(size_t bytes, const Status& s) override {
-      Log(InfoLogLevel::WARN_LEVEL, info_log,
-          "[WalManager] %s%s: dropping %d bytes; %s",
-          (this->ignore_error ? "(ignoring error) " : ""), fname,
-          static_cast<int>(bytes), s.ToString().c_str());
+      ROCKS_LOG_WARN(info_log, "[WalManager] %s%s: dropping %d bytes; %s",
+                     (this->ignore_error ? "(ignoring error) " : ""), fname,
+                     static_cast<int>(bytes), s.ToString().c_str());
       if (this->status->ok()) {
         // only keep the first error
         *this->status = s;

View File

@@ -338,7 +338,8 @@ BlockBasedTableBuilder::BlockBasedTableBuilder(
   BlockBasedTableOptions sanitized_table_options(table_options);
   if (sanitized_table_options.format_version == 0 &&
       sanitized_table_options.checksum != kCRC32c) {
-    Log(InfoLogLevel::WARN_LEVEL, ioptions.info_log,
-        "Silently converting format_version to 1 because checksum is "
-        "non-default");
+    ROCKS_LOG_WARN(
+        ioptions.info_log,
+        "Silently converting format_version to 1 because checksum is "
+        "non-default");
     // silently convert format_version to 1 to keep consistent with current
@@ -489,8 +490,8 @@ void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents,
     if (!compressed_ok) {
       // The result of the compression was invalid. abort.
       abort_compression = true;
-      Log(InfoLogLevel::ERROR_LEVEL, r->ioptions.info_log,
-          "Decompressed block did not match raw block");
+      ROCKS_LOG_ERROR(r->ioptions.info_log,
                      "Decompressed block did not match raw block");
      r->status =
          Status::Corruption("Decompressed block did not match raw block");
    }
} }

View File

@@ -430,9 +430,8 @@ bool IsFeatureSupported(const TableProperties& table_properties,
     if (pos->second == kPropFalse) {
       return false;
     } else if (pos->second != kPropTrue) {
-      Log(InfoLogLevel::WARN_LEVEL, info_log,
-          "Property %s has invalidate value %s", user_prop_name.c_str(),
-          pos->second.c_str());
+      ROCKS_LOG_WARN(info_log, "Property %s has invalidate value %s",
+                     user_prop_name.c_str(), pos->second.c_str());
     }
   }
   return true;
@@ -449,7 +448,8 @@ SequenceNumber GetGlobalSequenceNumber(const TableProperties& table_properties,
     if (seqno_pos != props.end()) {
       // This is not an external sst file, global_seqno is not supported.
       assert(false);
-      Log(InfoLogLevel::ERROR_LEVEL, info_log,
-          "A non-external sst file have global seqno property with value %s",
-          seqno_pos->second.c_str());
+      ROCKS_LOG_ERROR(
+          info_log,
+          "A non-external sst file have global seqno property with value %s",
+          seqno_pos->second.c_str());
     }
@@ -461,7 +461,8 @@ SequenceNumber GetGlobalSequenceNumber(const TableProperties& table_properties,
     if (seqno_pos != props.end() || version != 1) {
       // This is a v1 external sst file, global_seqno is not supported.
       assert(false);
-      Log(InfoLogLevel::ERROR_LEVEL, info_log,
-          "An external sst file with version %u have global seqno property "
-          "with value %s",
-          version, seqno_pos->second.c_str());
+      ROCKS_LOG_ERROR(
+          info_log,
+          "An external sst file with version %u have global seqno property "
+          "with value %s",
+          version, seqno_pos->second.c_str());
@@ -473,7 +474,8 @@ SequenceNumber GetGlobalSequenceNumber(const TableProperties& table_properties,
   if (global_seqno > kMaxSequenceNumber) {
     assert(false);
-    Log(InfoLogLevel::ERROR_LEVEL, info_log,
-        "An external sst file with version %u have global seqno property "
-        "with value %llu, which is greater than kMaxSequenceNumber",
-        version, global_seqno);
+    ROCKS_LOG_ERROR(
+        info_log,
+        "An external sst file with version %u have global seqno property "
+        "with value %llu, which is greater than kMaxSequenceNumber",
+        version, global_seqno);
@@ -571,9 +573,9 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
   s = SeekToPropertiesBlock(meta_iter.get(), &found_properties_block);
   if (!s.ok()) {
-    Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
-        "Error when seeking to properties block from file: %s",
-        s.ToString().c_str());
+    ROCKS_LOG_WARN(rep->ioptions.info_log,
+                   "Error when seeking to properties block from file: %s",
+                   s.ToString().c_str());
   } else if (found_properties_block) {
     s = meta_iter->status();
     TableProperties* table_properties = nullptr;
@@ -583,22 +585,24 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
     }
     if (!s.ok()) {
-      Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
-          "Encountered error while reading data from properties "
-          "block %s", s.ToString().c_str());
+      ROCKS_LOG_WARN(rep->ioptions.info_log,
+                     "Encountered error while reading data from properties "
+                     "block %s",
+                     s.ToString().c_str());
     } else {
       rep->table_properties.reset(table_properties);
     }
   } else {
-    Log(InfoLogLevel::ERROR_LEVEL, rep->ioptions.info_log,
-        "Cannot find Properties block from file.");
+    ROCKS_LOG_ERROR(rep->ioptions.info_log,
+                    "Cannot find Properties block from file.");
   }
   // Read the compression dictionary meta block
   bool found_compression_dict;
   s = SeekToCompressionDictBlock(meta_iter.get(), &found_compression_dict);
   if (!s.ok()) {
-    Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
-        "Error when seeking to compression dictionary block from file: %s",
-        s.ToString().c_str());
+    ROCKS_LOG_WARN(
+        rep->ioptions.info_log,
+        "Error when seeking to compression dictionary block from file: %s",
+        s.ToString().c_str());
   } else if (found_compression_dict) {
@@ -613,7 +617,8 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
                             rocksdb::kCompressionDictBlock,
                             compression_dict_block.get());
     if (!s.ok()) {
-      Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
-          "Encountered error while reading data from compression dictionary "
-          "block %s",
-          s.ToString().c_str());
+      ROCKS_LOG_WARN(
+          rep->ioptions.info_log,
+          "Encountered error while reading data from compression dictionary "
+          "block %s",
+          s.ToString().c_str());
@@ -627,7 +632,8 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
   s = SeekToRangeDelBlock(meta_iter.get(), &found_range_del_block,
                           &rep->range_del_handle);
   if (!s.ok()) {
-    Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
-        "Error when seeking to range delete tombstones block from file: %s",
-        s.ToString().c_str());
+    ROCKS_LOG_WARN(
+        rep->ioptions.info_log,
+        "Error when seeking to range delete tombstones block from file: %s",
+        s.ToString().c_str());
   } else {
@@ -637,7 +643,8 @@ Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions,
                         Slice() /* compression_dict */,
                         &rep->range_del_entry);
     if (!s.ok()) {
-      Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log,
-          "Encountered error while reading data from range del block %s",
-          s.ToString().c_str());
+      ROCKS_LOG_WARN(
+          rep->ioptions.info_log,
+          "Encountered error while reading data from range del block %s",
+          s.ToString().c_str());
     }
@@ -773,9 +780,10 @@ Status BlockBasedTable::ReadMetaBlock(Rep* rep,
                        0 /* read_amp_bytes_per_bit */);
   if (!s.ok()) {
-    Log(InfoLogLevel::ERROR_LEVEL, rep->ioptions.info_log,
-        "Encountered error while reading data from properties"
-        " block %s", s.ToString().c_str());
+    ROCKS_LOG_ERROR(rep->ioptions.info_log,
+                    "Encountered error while reading data from properties"
+                    " block %s",
+                    s.ToString().c_str());
     return s;
   }
@@ -1641,10 +1649,10 @@ Status BlockBasedTable::CreateIndexReader(
   const Footer& footer = rep_->footer;
   if (index_type_on_file == BlockBasedTableOptions::kHashSearch &&
       rep_->ioptions.prefix_extractor == nullptr) {
-    Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log,
-        "BlockBasedTableOptions::kHashSearch requires "
-        "options.prefix_extractor to be set."
-        " Fall back to binary search index.");
+    ROCKS_LOG_WARN(rep_->ioptions.info_log,
+                   "BlockBasedTableOptions::kHashSearch requires "
+                   "options.prefix_extractor to be set."
+                   " Fall back to binary search index.");
     index_type_on_file = BlockBasedTableOptions::kBinarySearch;
   }
@@ -1668,9 +1676,9 @@ Status BlockBasedTable::CreateIndexReader(
       if (!s.ok()) {
        // we simply fall back to binary search in case there is any
        // problem with prefix hash index loading.
-        Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log,
-            "Unable to read the metaindex block."
-            " Fall back to binary search index.");
+        ROCKS_LOG_WARN(rep_->ioptions.info_log,
+                       "Unable to read the metaindex block."
+                       " Fall back to binary search index.");
         return BinarySearchIndexReader::Create(
             file, footer, footer.index_handle(), rep_->ioptions, comparator,
             index_reader, rep_->persistent_cache_options);

View File

@@ -20,12 +20,12 @@
 #include "util/compression.h"
 #include "util/crc32c.h"
 #include "util/file_reader_writer.h"
+#include "util/logging.h"
 #include "util/perf_context_imp.h"
-#include "util/string_util.h"
-#include "util/xxhash.h"
 #include "util/statistics.h"
 #include "util/stop_watch.h"
+#include "util/string_util.h"
+#include "util/xxhash.h"
 namespace rocksdb {
@@ -331,9 +331,9 @@ Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer,
         // uncompressed page is not found
         if (ioptions.info_log && !status.IsNotFound()) {
           assert(!status.ok());
-          Log(InfoLogLevel::INFO_LEVEL, ioptions.info_log,
-              "Error reading from persistent cache. %s",
-              status.ToString().c_str());
+          ROCKS_LOG_INFO(ioptions.info_log,
+                         "Error reading from persistent cache. %s",
+                         status.ToString().c_str());
         }
       }
     }
@@ -354,8 +354,9 @@ Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer,
     } else {
       if (ioptions.info_log && !status.IsNotFound()) {
         assert(!status.ok());
-        Log(InfoLogLevel::INFO_LEVEL, ioptions.info_log,
-            "Error reading from persistent cache. %s", status.ToString().c_str());
+        ROCKS_LOG_INFO(ioptions.info_log,
+                       "Error reading from persistent cache. %s",
+                       status.ToString().c_str());
       }
       // cache miss read from device
       if (decompression_requested &&

View File

@@ -114,7 +114,7 @@ void LogPropertiesCollectionError(
   std::string msg =
       "Encountered error when calling TablePropertiesCollector::" +
       method + "() with collector name: " + name;
-  Log(InfoLogLevel::ERROR_LEVEL, info_log, "%s", msg.c_str());
+  ROCKS_LOG_ERROR(info_log, "%s", msg.c_str());
 }
 bool NotifyCollectTableCollectorsOnAdd(
@@ -227,8 +227,7 @@ Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file,
       auto error_msg =
          "Detect malformed value in properties meta-block:"
          "\tkey: " + key + "\tval: " + raw_val.ToString();
-      Log(InfoLogLevel::ERROR_LEVEL, ioptions.info_log, "%s",
-          error_msg.c_str());
+      ROCKS_LOG_ERROR(ioptions.info_log, "%s", error_msg.c_str());
       continue;
     }
     *(pos->second) = val;

View File

@@ -102,9 +102,8 @@ Slice PlainTableIndexBuilder::Finish() {
   BucketizeIndexes(&hash_to_offsets, &entries_per_bucket);
   keys_per_prefix_hist_.Add(num_keys_per_prefix_);
-  Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log,
-      "Number of Keys per prefix Histogram: %s",
-      keys_per_prefix_hist_.ToString().c_str());
+  ROCKS_LOG_INFO(ioptions_.info_log, "Number of Keys per prefix Histogram: %s",
+                 keys_per_prefix_hist_.ToString().c_str());
   // From the temp data structure, populate indexes.
   return FillIndexes(hash_to_offsets, entries_per_bucket);
@@ -158,9 +157,9 @@ void PlainTableIndexBuilder::BucketizeIndexes(
 Slice PlainTableIndexBuilder::FillIndexes(
     const std::vector<IndexRecord*>& hash_to_offsets,
     const std::vector<uint32_t>& entries_per_bucket) {
-  Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log,
-      "Reserving %" PRIu32 " bytes for plain table's sub_index",
-      sub_index_size_);
+  ROCKS_LOG_DEBUG(ioptions_.info_log,
+                  "Reserving %" PRIu32 " bytes for plain table's sub_index",
+                  sub_index_size_);
   auto total_allocate_size = GetTotalSize();
   char* allocated = arena_->AllocateAligned(
       total_allocate_size, huge_page_tlb_size_, ioptions_.info_log);
@@ -203,9 +202,9 @@ Slice PlainTableIndexBuilder::FillIndexes(
   }
   assert(sub_index_offset == sub_index_size_);
-  Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log,
-      "hash table size: %d, suffix_map length %" ROCKSDB_PRIszt, index_size_,
-      sub_index_size_);
+  ROCKS_LOG_DEBUG(ioptions_.info_log,
+                  "hash table size: %d, suffix_map length %" ROCKSDB_PRIszt,
+                  index_size_, sub_index_size_);
   return Slice(allocated, GetTotalSize());
 }

View File

@@ -18,9 +18,10 @@
 #ifndef OS_WIN
 #include <sys/mman.h>
 #endif
-#include "port/port.h"
 #include <algorithm>
+#include "port/port.h"
 #include "rocksdb/env.h"
+#include "util/logging.h"
 namespace rocksdb {
@@ -152,8 +153,9 @@ char* Arena::AllocateAligned(size_t bytes, size_t huge_page_size,
     char* addr = AllocateFromHugePage(reserved_size);
     if (addr == nullptr) {
-      Warn(logger, "AllocateAligned fail to allocate huge TLB pages: %s",
-           strerror(errno));
+      ROCKS_LOG_WARN(logger,
+                     "AllocateAligned fail to allocate huge TLB pages: %s",
+                     strerror(errno));
       // fail back to malloc
     } else {
       return addr;

View File

@@ -109,42 +109,47 @@ uint64_t MutableCFOptions::MaxFileSizeForLevel(int level) const {
 void MutableCFOptions::Dump(Logger* log) const {
   // Memtable related options
-  Log(log, " write_buffer_size: %" ROCKSDB_PRIszt,
-      write_buffer_size);
-  Log(log, " max_write_buffer_number: %d",
-      max_write_buffer_number);
-  Log(log, " arena_block_size: %" ROCKSDB_PRIszt,
-      arena_block_size);
-  Log(log, " memtable_prefix_bloom_ratio: %f",
-      memtable_prefix_bloom_size_ratio);
-  Log(log, " memtable_huge_page_size: %" ROCKSDB_PRIszt,
-      memtable_huge_page_size);
-  Log(log, " max_successive_merges: %" ROCKSDB_PRIszt,
-      max_successive_merges);
-  Log(log, " inplace_update_num_locks: %" ROCKSDB_PRIszt,
-      inplace_update_num_locks);
-  Log(log, " disable_auto_compactions: %d",
-      disable_auto_compactions);
-  Log(log, " soft_pending_compaction_bytes_limit: %" PRIu64,
-      soft_pending_compaction_bytes_limit);
-  Log(log, " hard_pending_compaction_bytes_limit: %" PRIu64,
-      hard_pending_compaction_bytes_limit);
-  Log(log, " level0_file_num_compaction_trigger: %d",
-      level0_file_num_compaction_trigger);
-  Log(log, " level0_slowdown_writes_trigger: %d",
-      level0_slowdown_writes_trigger);
-  Log(log, " level0_stop_writes_trigger: %d",
-      level0_stop_writes_trigger);
-  Log(log, " max_compaction_bytes: %" PRIu64,
-      max_compaction_bytes);
-  Log(log, " target_file_size_base: %" PRIu64,
-      target_file_size_base);
-  Log(log, " target_file_size_multiplier: %d",
-      target_file_size_multiplier);
-  Log(log, " max_bytes_for_level_base: %" PRIu64,
-      max_bytes_for_level_base);
-  Log(log, " max_bytes_for_level_multiplier: %f",
-      max_bytes_for_level_multiplier);
+  ROCKS_LOG_INFO(log, " write_buffer_size: %" ROCKSDB_PRIszt,
+                 write_buffer_size);
+  ROCKS_LOG_INFO(log, " max_write_buffer_number: %d",
+                 max_write_buffer_number);
+  ROCKS_LOG_INFO(log, " arena_block_size: %" ROCKSDB_PRIszt,
+                 arena_block_size);
+  ROCKS_LOG_INFO(log, " memtable_prefix_bloom_ratio: %f",
+                 memtable_prefix_bloom_size_ratio);
+  ROCKS_LOG_INFO(log, " memtable_huge_page_size: %" ROCKSDB_PRIszt,
+                 memtable_huge_page_size);
+  ROCKS_LOG_INFO(log, " max_successive_merges: %" ROCKSDB_PRIszt,
+                 max_successive_merges);
+  ROCKS_LOG_INFO(log, " inplace_update_num_locks: %" ROCKSDB_PRIszt,
+                 inplace_update_num_locks);
+  ROCKS_LOG_INFO(log, " disable_auto_compactions: %d",
+                 disable_auto_compactions);
+  ROCKS_LOG_INFO(log, " soft_pending_compaction_bytes_limit: %" PRIu64,
+                 soft_pending_compaction_bytes_limit);
+  ROCKS_LOG_INFO(log, " hard_pending_compaction_bytes_limit: %" PRIu64,
+                 hard_pending_compaction_bytes_limit);
+  ROCKS_LOG_INFO(log, " level0_file_num_compaction_trigger: %d",
+                 level0_file_num_compaction_trigger);
+  ROCKS_LOG_INFO(log, " level0_slowdown_writes_trigger: %d",
+                 level0_slowdown_writes_trigger);
+  ROCKS_LOG_INFO(log, " level0_stop_writes_trigger: %d",
+                 level0_stop_writes_trigger);
+  ROCKS_LOG_INFO(log, " max_compaction_bytes: %" PRIu64,
+                 max_compaction_bytes);
+  ROCKS_LOG_INFO(log, " target_file_size_base: %" PRIu64,
+                 target_file_size_base);
+  ROCKS_LOG_INFO(log, " target_file_size_multiplier: %d",
+                 target_file_size_multiplier);
+  ROCKS_LOG_INFO(log, " max_bytes_for_level_base: %" PRIu64,
+                 max_bytes_for_level_base);
+  ROCKS_LOG_INFO(log, " max_bytes_for_level_multiplier: %f",
+                 max_bytes_for_level_multiplier);
   std::string result;
   char buf[10];
   for (const auto m : max_bytes_for_level_multiplier_additional) {
@@ -157,14 +162,16 @@ void MutableCFOptions::Dump(Logger* log) const {
     result = "";
   }
-  Log(log, "max_bytes_for_level_multiplier_additional: %s", result.c_str());
-  Log(log, " max_sequential_skip_in_iterations: %" PRIu64,
-      max_sequential_skip_in_iterations);
-  Log(log, " paranoid_file_checks: %d",
-      paranoid_file_checks);
-  Log(log, " report_bg_io_stats: %d", report_bg_io_stats);
-  Log(log, " compression: %d",
-      static_cast<int>(compression));
+  ROCKS_LOG_INFO(log, "max_bytes_for_level_multiplier_additional: %s",
+                 result.c_str());
+  ROCKS_LOG_INFO(log, " max_sequential_skip_in_iterations: %" PRIu64,
+                 max_sequential_skip_in_iterations);
+  ROCKS_LOG_INFO(log, " paranoid_file_checks: %d",
+                 paranoid_file_checks);
+  ROCKS_LOG_INFO(log, " report_bg_io_stats: %d",
+                 report_bg_io_stats);
+  ROCKS_LOG_INFO(log, " compression: %d",
+                 static_cast<int>(compression));
 }
 }  // namespace rocksdb

View File

@@ -16,6 +16,7 @@
 #include "rocksdb/env.h"
 #include "rocksdb/sst_file_manager.h"
 #include "rocksdb/wal_filter.h"
+#include "util/logging.h"
 namespace rocksdb {
@@ -88,120 +89,132 @@ ImmutableDBOptions::ImmutableDBOptions(const DBOptions& options)
 }
 void ImmutableDBOptions::Dump(Logger* log) const {
-  Header(log, " Options.error_if_exists: %d",
-         error_if_exists);
-  Header(log, " Options.create_if_missing: %d",
-         create_if_missing);
-  Header(log, " Options.paranoid_checks: %d",
-         paranoid_checks);
-  Header(log, " Options.env: %p", env);
-  Header(log, " Options.info_log: %p",
-         info_log.get());
-  Header(log, " Options.max_open_files: %d",
-         max_open_files);
-  Header(log, " Options.max_file_opening_threads: %d",
-         max_file_opening_threads);
-  Header(log, " Options.use_fsync: %d", use_fsync);
-  Header(log,
-         " Options.max_log_file_size: %" ROCKSDB_PRIszt,
-         max_log_file_size);
-  Header(log, " Options.max_manifest_file_size: %" PRIu64,
-         max_manifest_file_size);
-  Header(log,
-         " Options.log_file_time_to_roll: %" ROCKSDB_PRIszt,
-         log_file_time_to_roll);
-  Header(log,
-         " Options.keep_log_file_num: %" ROCKSDB_PRIszt,
-         keep_log_file_num);
-  Header(log,
-         " Options.recycle_log_file_num: %" ROCKSDB_PRIszt,
-         recycle_log_file_num);
-  Header(log, " Options.allow_fallocate: %d",
-         allow_fallocate);
-  Header(log, " Options.allow_mmap_reads: %d",
-         allow_mmap_reads);
-  Header(log, " Options.allow_mmap_writes: %d",
-         allow_mmap_writes);
-  Header(log, " Options.use_direct_reads: %d",
-         use_direct_reads);
-  Header(log, " Options.use_direct_writes: %d",
-         use_direct_writes);
-  Header(log, " Options.create_missing_column_families: %d",
-         create_missing_column_families);
-  Header(log, " Options.db_log_dir: %s",
-         db_log_dir.c_str());
-  Header(log, " Options.wal_dir: %s",
-         wal_dir.c_str());
-  Header(log, " Options.table_cache_numshardbits: %d",
-         table_cache_numshardbits);
-  Header(log, " Options.max_subcompactions: %" PRIu32,
-         max_subcompactions);
-  Header(log, " Options.max_background_flushes: %d",
-         max_background_flushes);
-  Header(log, " Options.WAL_ttl_seconds: %" PRIu64,
-         wal_ttl_seconds);
-  Header(log, " Options.WAL_size_limit_MB: %" PRIu64,
-         wal_size_limit_mb);
-  Header(log,
-         " Options.manifest_preallocation_size: %" ROCKSDB_PRIszt,
-         manifest_preallocation_size);
-  Header(log, " Options.is_fd_close_on_exec: %d",
-         is_fd_close_on_exec);
-  Header(log, " Options.stats_dump_period_sec: %u",
-         stats_dump_period_sec);
-  Header(log, " Options.advise_random_on_open: %d",
-         advise_random_on_open);
-  Header(log,
-         " Options.db_write_buffer_size: %" ROCKSDB_PRIszt,
-         db_write_buffer_size);
-  Header(log, " Options.access_hint_on_compaction_start: %d",
-         static_cast<int>(access_hint_on_compaction_start));
-  Header(log, " Options.new_table_reader_for_compaction_inputs: %d",
-         new_table_reader_for_compaction_inputs);
-  Header(log,
-         " Options.compaction_readahead_size: %" ROCKSDB_PRIszt,
-         compaction_readahead_size);
-  Header(log,
-         " Options.random_access_max_buffer_size: %" ROCKSDB_PRIszt,
-         random_access_max_buffer_size);
-  Header(log,
-         " Options.writable_file_max_buffer_size: %" ROCKSDB_PRIszt,
-         writable_file_max_buffer_size);
-  Header(log, " Options.use_adaptive_mutex: %d",
-         use_adaptive_mutex);
-  Header(log, " Options.rate_limiter: %p",
-         rate_limiter.get());
+  ROCKS_LOG_HEADER(log, " Options.error_if_exists: %d",
+                   error_if_exists);
+  ROCKS_LOG_HEADER(log, " Options.create_if_missing: %d",
+                   create_if_missing);
+  ROCKS_LOG_HEADER(log, " Options.paranoid_checks: %d",
+                   paranoid_checks);
+  ROCKS_LOG_HEADER(log, " Options.env: %p",
+                   env);
+  ROCKS_LOG_HEADER(log, " Options.info_log: %p",
+                   info_log.get());
+  ROCKS_LOG_HEADER(log, " Options.max_open_files: %d",
+                   max_open_files);
+  ROCKS_LOG_HEADER(log, " Options.max_file_opening_threads: %d",
+                   max_file_opening_threads);
+  ROCKS_LOG_HEADER(log, " Options.use_fsync: %d",
+                   use_fsync);
+  ROCKS_LOG_HEADER(
+      log, " Options.max_log_file_size: %" ROCKSDB_PRIszt,
+      max_log_file_size);
+  ROCKS_LOG_HEADER(log,
+                   " Options.max_manifest_file_size: %" PRIu64,
+                   max_manifest_file_size);
+  ROCKS_LOG_HEADER(
+      log, " Options.log_file_time_to_roll: %" ROCKSDB_PRIszt,
+      log_file_time_to_roll);
+  ROCKS_LOG_HEADER(
+      log, " Options.keep_log_file_num: %" ROCKSDB_PRIszt,
+      keep_log_file_num);
+  ROCKS_LOG_HEADER(
+      log, " Options.recycle_log_file_num: %" ROCKSDB_PRIszt,
+      recycle_log_file_num);
+  ROCKS_LOG_HEADER(log, " Options.allow_fallocate: %d",
+                   allow_fallocate);
+  ROCKS_LOG_HEADER(log, " Options.allow_mmap_reads: %d",
+                   allow_mmap_reads);
+  ROCKS_LOG_HEADER(log, " Options.allow_mmap_writes: %d",
+                   allow_mmap_writes);
+  ROCKS_LOG_HEADER(log, " Options.use_direct_reads: %d",
+                   use_direct_reads);
+  ROCKS_LOG_HEADER(log, " Options.use_direct_writes: %d",
+                   use_direct_writes);
+  ROCKS_LOG_HEADER(log, " Options.create_missing_column_families: %d",
+                   create_missing_column_families);
+  ROCKS_LOG_HEADER(log, " Options.db_log_dir: %s",
+                   db_log_dir.c_str());
+  ROCKS_LOG_HEADER(log, " Options.wal_dir: %s",
+                   wal_dir.c_str());
+  ROCKS_LOG_HEADER(log, " Options.table_cache_numshardbits: %d",
+                   table_cache_numshardbits);
+  ROCKS_LOG_HEADER(log,
+                   " Options.max_subcompactions: %" PRIu32,
+                   max_subcompactions);
+  ROCKS_LOG_HEADER(log, " Options.max_background_flushes: %d",
+                   max_background_flushes);
+  ROCKS_LOG_HEADER(log,
+                   " Options.WAL_ttl_seconds: %" PRIu64,
+                   wal_ttl_seconds);
+  ROCKS_LOG_HEADER(log,
+                   " Options.WAL_size_limit_MB: %" PRIu64,
+                   wal_size_limit_mb);
+  ROCKS_LOG_HEADER(
+      log, " Options.manifest_preallocation_size: %" ROCKSDB_PRIszt,
+      manifest_preallocation_size);
+  ROCKS_LOG_HEADER(log, " Options.is_fd_close_on_exec: %d",
+                   is_fd_close_on_exec);
+  ROCKS_LOG_HEADER(log, " Options.stats_dump_period_sec: %u",
+                   stats_dump_period_sec);
+  ROCKS_LOG_HEADER(log, " Options.advise_random_on_open: %d",
+                   advise_random_on_open);
+  ROCKS_LOG_HEADER(
+      log, " Options.db_write_buffer_size: %" ROCKSDB_PRIszt,
+      db_write_buffer_size);
+  ROCKS_LOG_HEADER(log, " Options.access_hint_on_compaction_start: %d",
+                   static_cast<int>(access_hint_on_compaction_start));
+  ROCKS_LOG_HEADER(log, " Options.new_table_reader_for_compaction_inputs: %d",
+                   new_table_reader_for_compaction_inputs);
+  ROCKS_LOG_HEADER(
+      log, " Options.compaction_readahead_size: %" ROCKSDB_PRIszt,
+      compaction_readahead_size);
+  ROCKS_LOG_HEADER(
+      log, " Options.random_access_max_buffer_size: %" ROCKSDB_PRIszt,
+      random_access_max_buffer_size);
+  ROCKS_LOG_HEADER(
+      log, " Options.writable_file_max_buffer_size: %" ROCKSDB_PRIszt,
+      writable_file_max_buffer_size);
+  ROCKS_LOG_HEADER(log, " Options.use_adaptive_mutex: %d",
+                   use_adaptive_mutex);
+  ROCKS_LOG_HEADER(log, " Options.rate_limiter: %p",
+                   rate_limiter.get());
   Header(
       log, " Options.sst_file_manager.rate_bytes_per_sec: %" PRIi64,
       sst_file_manager ? sst_file_manager->GetDeleteRateBytesPerSecond() : 0);
-  Header(log, " Options.bytes_per_sync: %" PRIu64,
-         bytes_per_sync);
-  Header(log, " Options.wal_bytes_per_sync: %" PRIu64,
-         wal_bytes_per_sync);
-  Header(log, " Options.wal_recovery_mode: %d",
-         wal_recovery_mode);
-  Header(log, " Options.enable_thread_tracking: %d",
-         enable_thread_tracking);
-  Header(log, " Options.allow_concurrent_memtable_write: %d",
-         allow_concurrent_memtable_write);
-  Header(log, " Options.enable_write_thread_adaptive_yield: %d",
-         enable_write_thread_adaptive_yield);
-  Header(log, " Options.write_thread_max_yield_usec: %" PRIu64,
-         write_thread_max_yield_usec);
-  Header(log, " Options.write_thread_slow_yield_usec: %" PRIu64,
-         write_thread_slow_yield_usec);
+  ROCKS_LOG_HEADER(log,
+                   " Options.bytes_per_sync: %" PRIu64,
+                   bytes_per_sync);
+  ROCKS_LOG_HEADER(log,
+                   " Options.wal_bytes_per_sync: %" PRIu64,
+                   wal_bytes_per_sync);
+  ROCKS_LOG_HEADER(log, " Options.wal_recovery_mode: %d",
+                   wal_recovery_mode);
+  ROCKS_LOG_HEADER(log, " Options.enable_thread_tracking: %d",
+                   enable_thread_tracking);
+  ROCKS_LOG_HEADER(log, " Options.allow_concurrent_memtable_write: %d",
+                   allow_concurrent_memtable_write);
+  ROCKS_LOG_HEADER(log, " Options.enable_write_thread_adaptive_yield: %d",
+                   enable_write_thread_adaptive_yield);
+  ROCKS_LOG_HEADER(log,
+                   " Options.write_thread_max_yield_usec: %" PRIu64,
+                   write_thread_max_yield_usec);
+  ROCKS_LOG_HEADER(log,
+                   " Options.write_thread_slow_yield_usec: %" PRIu64,
+                   write_thread_slow_yield_usec);
   if (row_cache) {
-    Header(log, " Options.row_cache: %" PRIu64,
-           row_cache->GetCapacity());
+    ROCKS_LOG_HEADER(
+        log, " Options.row_cache: %" PRIu64,
+        row_cache->GetCapacity());
   } else {
-    Header(log, " Options.row_cache: None");
+    ROCKS_LOG_HEADER(log,
+                     " Options.row_cache: None");
   }
 #ifndef ROCKSDB_LITE
-  Header(log, " Options.wal_filter: %s",
-         wal_filter ? wal_filter->Name() : "None");
+  ROCKS_LOG_HEADER(log, " Options.wal_filter: %s",
+                   wal_filter ? wal_filter->Name() : "None");
 #endif  // ROCKDB_LITE
-  Header(log, " Options.avoid_flush_during_recovery: %d",
-         avoid_flush_during_recovery);
+  ROCKS_LOG_HEADER(log, " Options.avoid_flush_during_recovery: %d",
+                   avoid_flush_during_recovery);
 }
 MutableDBOptions::MutableDBOptions()
@@ -222,19 +235,19 @@ MutableDBOptions::MutableDBOptions(const DBOptions& options)
       options.delete_obsolete_files_period_micros) {}
 void MutableDBOptions::Dump(Logger* log) const {
-  Header(log, " Options.base_background_compactions: %d",
-         base_background_compactions);
-  Header(log, " Options.max_background_compactions: %d",
-         max_background_compactions);
-  Header(log, " Options.avoid_flush_during_shutdown: %d",
-         avoid_flush_during_shutdown);
-  Header(log, " Options.delayed_write_rate : %" PRIu64,
-         delayed_write_rate);
-  Header(log, " Options.max_total_wal_size: %" PRIu64,
-         max_total_wal_size);
-  Header(log,
-         " Options.delete_obsolete_files_period_micros: %" PRIu64,
-         delete_obsolete_files_period_micros);
+  ROCKS_LOG_HEADER(log, " Options.base_background_compactions: %d",
+                   base_background_compactions);
+  ROCKS_LOG_HEADER(log, " Options.max_background_compactions: %d",
+                   max_background_compactions);
+  ROCKS_LOG_HEADER(log, " Options.avoid_flush_during_shutdown: %d",
+                   avoid_flush_during_shutdown);
+  ROCKS_LOG_HEADER(log, " Options.delayed_write_rate : %" PRIu64,
+                   delayed_write_rate);
+  ROCKS_LOG_HEADER(log, " Options.max_total_wal_size: %" PRIu64,
+                   max_total_wal_size);
+  ROCKS_LOG_HEADER(
      log, " Options.delete_obsolete_files_period_micros: %" PRIu64,
      delete_obsolete_files_period_micros);
 }
 }  // namespace rocksdb

View File

@@ -12,8 +12,9 @@
 #include "port/port.h"
 #include "rocksdb/env.h"
-#include "util/sst_file_manager_impl.h"
+#include "util/logging.h"
 #include "util/mutexlock.h"
+#include "util/sst_file_manager_impl.h"
 #include "util/sync_point.h"
 namespace rocksdb {
@@ -64,9 +65,8 @@ Status DeleteScheduler::DeleteFile(const std::string& file_path) {
   std::string path_in_trash;
   s = MoveToTrash(file_path, &path_in_trash);
   if (!s.ok()) {
-    Log(InfoLogLevel::ERROR_LEVEL, info_log_,
-        "Failed to move %s to trash directory (%s)", file_path.c_str(),
-        trash_dir_.c_str());
+    ROCKS_LOG_ERROR(info_log_, "Failed to move %s to trash directory (%s)",
+                    file_path.c_str(), trash_dir_.c_str());
     s = env_->DeleteFile(file_path);
     if (s.ok() && sst_file_manager_) {
       sst_file_manager_->OnDeleteFile(file_path);
@@ -191,9 +191,8 @@ Status DeleteScheduler::DeleteTrashFile(const std::string& path_in_trash,
   if (!s.ok()) {
     // Error while getting file size or while deleting
-    Log(InfoLogLevel::ERROR_LEVEL, info_log_,
-        "Failed to delete %s from trash -- %s", path_in_trash.c_str(),
-        s.ToString().c_str());
+    ROCKS_LOG_ERROR(info_log_, "Failed to delete %s from trash -- %s",
+                    path_in_trash.c_str(), s.ToString().c_str());
     *deleted_bytes = 0;
   } else {
     *deleted_bytes = file_size;

View File

@@ -57,23 +57,20 @@ class HdfsReadableFile : virtual public SequentialFile,
  public:
   HdfsReadableFile(hdfsFS fileSys, const std::string& fname)
       : fileSys_(fileSys), filename_(fname), hfile_(nullptr) {
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile opening file %s\n",
-        filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile opening file %s\n",
+                    filename_.c_str());
     hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_RDONLY, 0, 0, 0);
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile opened file %s hfile_=0x%p\n",
-        filename_.c_str(), hfile_);
+    ROCKS_LOG_DEBUG(mylog,
+                    "[hdfs] HdfsReadableFile opened file %s hfile_=0x%p\n",
+                    filename_.c_str(), hfile_);
   }
   virtual ~HdfsReadableFile() {
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile closing file %s\n",
-        filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile closing file %s\n",
+                    filename_.c_str());
     hdfsCloseFile(fileSys_, hfile_);
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile closed file %s\n",
-        filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile closed file %s\n",
+                    filename_.c_str());
     hfile_ = nullptr;
   }
@@ -84,9 +81,8 @@ class HdfsReadableFile : virtual public SequentialFile,
   // sequential access, read data at current offset in file
   virtual Status Read(size_t n, Slice* result, char* scratch) {
     Status s;
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile reading %s %ld\n",
-        filename_.c_str(), n);
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile reading %s %ld\n",
+                    filename_.c_str(), n);
     char* buffer = scratch;
     size_t total_bytes_read = 0;
@@ -107,8 +103,8 @@ class HdfsReadableFile : virtual public SequentialFile,
     }
     assert(total_bytes_read <= n);
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile read %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile read %s\n",
+                    filename_.c_str());
     if (bytes_read < 0) {
       s = IOError(filename_, errno);
@@ -123,12 +119,12 @@ class HdfsReadableFile : virtual public SequentialFile,
   virtual Status Read(uint64_t offset, size_t n, Slice* result,
                       char* scratch) const {
     Status s;
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile preading %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile preading %s\n",
+                    filename_.c_str());
     ssize_t bytes_read = hdfsPread(fileSys_, hfile_, offset,
                                    (void*)scratch, (tSize)n);
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile pread %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile pread %s\n",
+                    filename_.c_str());
     *result = Slice(scratch, (bytes_read < 0) ? 0 : bytes_read);
     if (bytes_read < 0) {
       // An error: return a non-ok status
@@ -138,8 +134,8 @@ class HdfsReadableFile : virtual public SequentialFile,
   }
   virtual Status Skip(uint64_t n) {
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile skip %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile skip %s\n",
+                    filename_.c_str());
     // get current offset from file
     tOffset current = hdfsTell(fileSys_, hfile_);
     if (current < 0) {
@@ -158,8 +154,8 @@ class HdfsReadableFile : virtual public SequentialFile,
   // returns true if we are at the end of file, false otherwise
   bool feof() {
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile feof %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile feof %s\n",
+                    filename_.c_str());
     if (hdfsTell(fileSys_, hfile_) == fileSize()) {
       return true;
     }
@@ -168,8 +164,8 @@ class HdfsReadableFile : virtual public SequentialFile,
   // the current size of the file
   tOffset fileSize() {
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsReadableFile fileSize %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsReadableFile fileSize %s\n",
+                    filename_.c_str());
     hdfsFileInfo* pFileInfo = hdfsGetPathInfo(fileSys_, filename_.c_str());
     tOffset size = 0L;
     if (pFileInfo != nullptr) {
@@ -192,20 +188,20 @@ class HdfsWritableFile: public WritableFile {
  public:
   HdfsWritableFile(hdfsFS fileSys, const std::string& fname)
       : fileSys_(fileSys), filename_(fname) , hfile_(nullptr) {
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsWritableFile opening %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile opening %s\n",
+                    filename_.c_str());
     hfile_ = hdfsOpenFile(fileSys_, filename_.c_str(), O_WRONLY, 0, 0, 0);
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsWritableFile opened %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile opened %s\n",
+                    filename_.c_str());
     assert(hfile_ != nullptr);
   }
   virtual ~HdfsWritableFile() {
     if (hfile_ != nullptr) {
-      Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-          "[hdfs] HdfsWritableFile closing %s\n", filename_.c_str());
+      ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closing %s\n",
+                      filename_.c_str());
      hdfsCloseFile(fileSys_, hfile_);
-      Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-          "[hdfs] HdfsWritableFile closed %s\n", filename_.c_str());
+      ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closed %s\n",
+                      filename_.c_str());
      hfile_ = nullptr;
    }
  }
@@ -222,13 +218,13 @@ class HdfsWritableFile: public WritableFile {
   }
   virtual Status Append(const Slice& data) {
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsWritableFile Append %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Append %s\n",
+                    filename_.c_str());
     const char* src = data.data();
     size_t left = data.size();
     size_t ret = hdfsWrite(fileSys_, hfile_, src, left);
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsWritableFile Appended %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Appended %s\n",
+                    filename_.c_str());
     if (ret != left) {
       return IOError(filename_, errno);
     }
@@ -241,16 +237,16 @@ class HdfsWritableFile: public WritableFile {
   virtual Status Sync() {
     Status s;
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsWritableFile Sync %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Sync %s\n",
+                    filename_.c_str());
     if (hdfsFlush(fileSys_, hfile_) == -1) {
       return IOError(filename_, errno);
     }
     if (hdfsHSync(fileSys_, hfile_) == -1) {
       return IOError(filename_, errno);
     }
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsWritableFile Synced %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile Synced %s\n",
+                    filename_.c_str());
     return Status::OK();
   }
@@ -263,13 +259,13 @@ class HdfsWritableFile: public WritableFile {
   }
   virtual Status Close() {
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsWritableFile closing %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closing %s\n",
+                    filename_.c_str());
     if (hdfsCloseFile(fileSys_, hfile_) != 0) {
       return IOError(filename_, errno);
     }
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsWritableFile closed %s\n", filename_.c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsWritableFile closed %s\n",
+                    filename_.c_str());
     hfile_ = nullptr;
     return Status::OK();
   }
@@ -284,15 +280,13 @@ class HdfsLogger : public Logger {
  public:
   HdfsLogger(HdfsWritableFile* f, uint64_t (*gettid)())
       : file_(f), gettid_(gettid) {
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsLogger opened %s\n",
-        file_->getName().c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsLogger opened %s\n",
+                    file_->getName().c_str());
   }
   virtual ~HdfsLogger() {
-    Log(InfoLogLevel::DEBUG_LEVEL, mylog,
-        "[hdfs] HdfsLogger closed %s\n",
-        file_->getName().c_str());
+    ROCKS_LOG_DEBUG(mylog, "[hdfs] HdfsLogger closed %s\n",
+                    file_->getName().c_str());
     delete file_;
     if (mylog != nullptr && mylog == this) {
       mylog = nullptr;
@@ -439,8 +433,7 @@ Status HdfsEnv::NewDirectory(const std::string& name,
       result->reset(new HdfsDirectory(0));
       return Status::OK();
     default:  // fail if the directory doesn't exist
-      Log(InfoLogLevel::FATAL_LEVEL,
-          mylog, "NewDirectory hdfsExists call failed");
+      ROCKS_LOG_FATAL(mylog, "NewDirectory hdfsExists call failed");
      throw HdfsFatalException("hdfsExists call failed with error " +
                               ToString(value) + " on path " + name +
                               ".\n");
@@ -455,8 +448,7 @@ Status HdfsEnv::FileExists(const std::string& fname) {
     case HDFS_DOESNT_EXIST:
      return Status::NotFound();
    default:  // anything else should be an error
-      Log(InfoLogLevel::FATAL_LEVEL,
-          mylog, "FileExists hdfsExists call failed");
+      ROCKS_LOG_FATAL(mylog, "FileExists hdfsExists call failed");
      return Status::IOError("hdfsExists call failed with error " +
                             ToString(value) + " on path " + fname + ".\n");
  }
@@ -483,8 +475,7 @@ Status HdfsEnv::GetChildren(const std::string& path,
    }
  } else {
    // numEntries < 0 indicates error
-    Log(InfoLogLevel::FATAL_LEVEL, mylog,
-        "hdfsListDirectory call failed with error ");
+    ROCKS_LOG_FATAL(mylog, "hdfsListDirectory call failed with error ");
    throw HdfsFatalException(
        "hdfsListDirectory call failed negative error.\n");
  }
@@ -493,8 +484,7 @@ Status HdfsEnv::GetChildren(const std::string& path,
    case HDFS_DOESNT_EXIST:  // directory does not exist, exit
      return Status::NotFound();
    default:  // anything else should be an error
-      Log(InfoLogLevel::FATAL_LEVEL, mylog,
-          "GetChildren hdfsExists call failed");
+      ROCKS_LOG_FATAL(mylog, "GetChildren hdfsExists call failed");
      throw HdfsFatalException("hdfsExists call failed with error " +
                               ToString(value) + ".\n");
  }
@@ -524,8 +514,7 @@ Status HdfsEnv::CreateDirIfMissing(const std::string& name) {
    case HDFS_DOESNT_EXIST:
      return CreateDir(name);
    default:  // anything else should be an error
-      Log(InfoLogLevel::FATAL_LEVEL, mylog,
-          "CreateDirIfMissing hdfsExists call failed");
+      ROCKS_LOG_FATAL(mylog, "CreateDirIfMissing hdfsExists call failed");
      throw HdfsFatalException("hdfsExists call failed with error " +
                               ToString(value) + ".\n");
  }

View File

@@ -1075,15 +1075,16 @@ TEST_P(EnvPosixTestWithParam, LogBufferTest) {
   std::fill_n(bytes9000, sizeof(bytes9000), '1');
   bytes9000[sizeof(bytes9000) - 1] = '\0';
-  LogToBuffer(&log_buffer, "x%sx", bytes200);
-  LogToBuffer(&log_buffer, "x%sx", bytes600);
-  LogToBuffer(&log_buffer, "x%sx%sx%sx", bytes200, bytes200, bytes200);
-  LogToBuffer(&log_buffer, "x%sx%sx", bytes200, bytes600);
-  LogToBuffer(&log_buffer, "x%sx%sx", bytes600, bytes9000);
-  LogToBuffer(&log_buffer_debug, "x%sx", bytes200);
+  ROCKS_LOG_BUFFER(&log_buffer, "x%sx", bytes200);
+  ROCKS_LOG_BUFFER(&log_buffer, "x%sx", bytes600);
+  ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx%sx", bytes200, bytes200, bytes200);
+  ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx", bytes200, bytes600);
+  ROCKS_LOG_BUFFER(&log_buffer, "x%sx%sx", bytes600, bytes9000);
+  ROCKS_LOG_BUFFER(&log_buffer_debug, "x%sx", bytes200);
   test_logger.SetInfoLogLevel(DEBUG_LEVEL);
-  LogToBuffer(&log_buffer_debug, "x%sx%sx%sx", bytes600, bytes9000, bytes200);
+  ROCKS_LOG_BUFFER(&log_buffer_debug, "x%sx%sx%sx", bytes600, bytes9000,
+                   bytes200);
   ASSERT_EQ(0, test_logger.log_count);
   log_buffer.FlushBufferToLog();
@@ -1124,7 +1125,7 @@ TEST_P(EnvPosixTestWithParam, LogBufferMaxSizeTest) {
     TestLogger2 test_logger(max_log_size);
     test_logger.SetInfoLogLevel(InfoLogLevel::INFO_LEVEL);
     LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, &test_logger);
-    LogToBuffer(&log_buffer, max_log_size, "%s", bytes9000);
+    ROCKS_LOG_BUFFER_MAX_SZ(&log_buffer, max_log_size, "%s", bytes9000);
     log_buffer.FlushBufferToLog();
   }
 }
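For readers unfamiliar with the buffered path exercised by this test: `ROCKS_LOG_BUFFER` wraps `LogToBuffer`, which queues messages and only writes them out when the buffer is flushed, with the file/line prefix applied per message. A small usage sketch under assumed names (`info_log` and `num_busy` are illustrative, not taken from the test above):

```cpp
// Buffer INFO-level messages and emit them in one batch later.
LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, info_log.get());
ROCKS_LOG_BUFFER(&log_buffer, "flush delayed, %d compactions running",
                 num_busy);
log_buffer.FlushBufferToLog();  // each buffered line carries its [file:line] tag
```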

View File

@@ -14,6 +14,7 @@
 #include <sstream>
 #include <string>
+#include "util/logging.h"
 #include "util/string_util.h"
 namespace rocksdb {

View File

@@ -16,6 +16,42 @@
 #include <string>

 #include "port/port.h"

+// Helper macros that include information about file name and line number
+#define STRINGIFY(x) #x
+#define TOSTRING(x) STRINGIFY(x)
+#define PREPEND_FILE_LINE(FMT) ("[" __FILE__ ":" TOSTRING(__LINE__) "] " FMT)
+
+// Don't include file/line info in HEADER level
+#define ROCKS_LOG_HEADER(LGR, FMT, ...) \
+  rocksdb::Log(InfoLogLevel::HEADER_LEVEL, LGR, FMT, ##__VA_ARGS__)
+
+#define ROCKS_LOG_DEBUG(LGR, FMT, ...)                                 \
+  rocksdb::Log(InfoLogLevel::DEBUG_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
+               ##__VA_ARGS__)
+
+#define ROCKS_LOG_INFO(LGR, FMT, ...)                                 \
+  rocksdb::Log(InfoLogLevel::INFO_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
+               ##__VA_ARGS__)
+
+#define ROCKS_LOG_WARN(LGR, FMT, ...)                                 \
+  rocksdb::Log(InfoLogLevel::WARN_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
+               ##__VA_ARGS__)
+
+#define ROCKS_LOG_ERROR(LGR, FMT, ...)                                 \
+  rocksdb::Log(InfoLogLevel::ERROR_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
+               ##__VA_ARGS__)
+
+#define ROCKS_LOG_FATAL(LGR, FMT, ...)                                 \
+  rocksdb::Log(InfoLogLevel::FATAL_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
+               ##__VA_ARGS__)
+
+#define ROCKS_LOG_BUFFER(LOG_BUF, FMT, ...) \
+  rocksdb::LogToBuffer(LOG_BUF, PREPEND_FILE_LINE(FMT), ##__VA_ARGS__)
+
+#define ROCKS_LOG_BUFFER_MAX_SZ(LOG_BUF, MAX_LOG_SIZE, FMT, ...)      \
+  rocksdb::LogToBuffer(LOG_BUF, MAX_LOG_SIZE, PREPEND_FILE_LINE(FMT), \
+                       ##__VA_ARGS__)
+
 namespace rocksdb {

 class Slice;
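These have to be macros rather than wrapper functions: `__FILE__` and `__LINE__` expand at the point of use, so every call site converted in this commit gets its own location stamp for free, and the `STRINGIFY`/`TOSTRING` pair turns the integer `__LINE__` into a string literal so the whole `"[file:line] "` prefix is concatenated with the format string at compile time, at zero runtime cost. A self-contained sketch of the same trick, runnable outside RocksDB (the `DEMO_*` names are hypothetical stand-ins and `std::printf` replaces `rocksdb::Log`):

```cpp
#include <cstdio>

#define DEMO_STRINGIFY(x) #x
#define DEMO_TOSTRING(x) DEMO_STRINGIFY(x)
// FMT must be a string literal so adjacent-literal concatenation applies.
#define DEMO_PREPEND_FILE_LINE(FMT) \
  ("[" __FILE__ ":" DEMO_TOSTRING(__LINE__) "] " FMT)

// ##__VA_ARGS__ (used by the macros above as well) is a GNU/MSVC extension
// that swallows the trailing comma when no varargs are passed.
#define DEMO_LOG_INFO(FMT, ...) \
  std::printf(DEMO_PREPEND_FILE_LINE(FMT "\n"), ##__VA_ARGS__)

int main() {
  DEMO_LOG_INFO("opened %d files", 3);  // -> [demo.cc:17] opened 3 files
  DEMO_LOG_INFO("no varargs case");     // -> [demo.cc:18] no varargs case
  return 0;
}
```

Note that `ROCKS_LOG_HEADER` is the one level that deliberately skips the prefix, so banners and option dumps stay clean.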


@@ -196,91 +196,108 @@ void DBOptions::Dump(Logger* log) const {
}  // DBOptions::Dump

void ColumnFamilyOptions::Dump(Logger* log) const {
-  Header(log, " Options.comparator: %s", comparator->Name());
-  Header(log, " Options.merge_operator: %s",
-         merge_operator ? merge_operator->Name() : "None");
-  Header(log, " Options.compaction_filter: %s",
-         compaction_filter ? compaction_filter->Name() : "None");
-  Header(log, " Options.compaction_filter_factory: %s",
-         compaction_filter_factory ? compaction_filter_factory->Name() : "None");
-  Header(log, " Options.memtable_factory: %s", memtable_factory->Name());
-  Header(log, " Options.table_factory: %s", table_factory->Name());
-  Header(log, " table_factory options: %s",
-         table_factory->GetPrintableTableOptions().c_str());
-  Header(log, " Options.write_buffer_size: %" ROCKSDB_PRIszt,
-         write_buffer_size);
-  Header(log, " Options.max_write_buffer_number: %d", max_write_buffer_number);
+  ROCKS_LOG_HEADER(log, " Options.comparator: %s",
+                   comparator->Name());
+  ROCKS_LOG_HEADER(log, " Options.merge_operator: %s",
+                   merge_operator ? merge_operator->Name() : "None");
+  ROCKS_LOG_HEADER(log, " Options.compaction_filter: %s",
+                   compaction_filter ? compaction_filter->Name() : "None");
+  ROCKS_LOG_HEADER(
+      log, " Options.compaction_filter_factory: %s",
+      compaction_filter_factory ? compaction_filter_factory->Name() : "None");
+  ROCKS_LOG_HEADER(log, " Options.memtable_factory: %s",
+                   memtable_factory->Name());
+  ROCKS_LOG_HEADER(log, " Options.table_factory: %s",
+                   table_factory->Name());
+  ROCKS_LOG_HEADER(log, " table_factory options: %s",
+                   table_factory->GetPrintableTableOptions().c_str());
+  ROCKS_LOG_HEADER(log, " Options.write_buffer_size: %" ROCKSDB_PRIszt,
+                   write_buffer_size);
+  ROCKS_LOG_HEADER(log, " Options.max_write_buffer_number: %d",
+                   max_write_buffer_number);
   if (!compression_per_level.empty()) {
     for (unsigned int i = 0; i < compression_per_level.size(); i++) {
-      Header(log, " Options.compression[%d]: %s", i,
-             CompressionTypeToString(compression_per_level[i]).c_str());
+      ROCKS_LOG_HEADER(
+          log, " Options.compression[%d]: %s", i,
+          CompressionTypeToString(compression_per_level[i]).c_str());
     }
   } else {
-    Header(log, " Options.compression: %s",
-           CompressionTypeToString(compression).c_str());
+    ROCKS_LOG_HEADER(log, " Options.compression: %s",
+                     CompressionTypeToString(compression).c_str());
   }
-  Header(log, " Options.bottommost_compression: %s",
-         bottommost_compression == kDisableCompressionOption
-             ? "Disabled"
-             : CompressionTypeToString(bottommost_compression).c_str());
-  Header(log, " Options.prefix_extractor: %s",
-         prefix_extractor == nullptr ? "nullptr" : prefix_extractor->Name());
-  Header(log, " Options.memtable_insert_with_hint_prefix_extractor: %s",
-         memtable_insert_with_hint_prefix_extractor == nullptr
-             ? "nullptr"
-             : memtable_insert_with_hint_prefix_extractor->Name());
-  Header(log, " Options.num_levels: %d", num_levels);
-  Header(log, " Options.min_write_buffer_number_to_merge: %d",
-         min_write_buffer_number_to_merge);
-  Header(log, " Options.max_write_buffer_number_to_maintain: %d",
-         max_write_buffer_number_to_maintain);
-  Header(log, " Options.compression_opts.window_bits: %d",
-         compression_opts.window_bits);
-  Header(log, " Options.compression_opts.level: %d",
-         compression_opts.level);
-  Header(log, " Options.compression_opts.strategy: %d",
-         compression_opts.strategy);
-  Header(log,
-         " Options.compression_opts.max_dict_bytes: %" ROCKSDB_PRIszt,
-         compression_opts.max_dict_bytes);
-  Header(log, " Options.level0_file_num_compaction_trigger: %d",
-         level0_file_num_compaction_trigger);
-  Header(log, " Options.level0_slowdown_writes_trigger: %d",
-         level0_slowdown_writes_trigger);
-  Header(log, " Options.level0_stop_writes_trigger: %d",
-         level0_stop_writes_trigger);
-  Header(log, " Options.target_file_size_base: %" PRIu64,
-         target_file_size_base);
-  Header(log, " Options.target_file_size_multiplier: %d",
-         target_file_size_multiplier);
-  Header(log, " Options.max_bytes_for_level_base: %" PRIu64,
-         max_bytes_for_level_base);
-  Header(log, "Options.level_compaction_dynamic_level_bytes: %d",
-         level_compaction_dynamic_level_bytes);
-  Header(log, " Options.max_bytes_for_level_multiplier: %f",
-         max_bytes_for_level_multiplier);
+  ROCKS_LOG_HEADER(
+      log, " Options.bottommost_compression: %s",
+      bottommost_compression == kDisableCompressionOption
+          ? "Disabled"
+          : CompressionTypeToString(bottommost_compression).c_str());
+  ROCKS_LOG_HEADER(
+      log, " Options.prefix_extractor: %s",
+      prefix_extractor == nullptr ? "nullptr" : prefix_extractor->Name());
+  ROCKS_LOG_HEADER(log,
+                   " Options.memtable_insert_with_hint_prefix_extractor: %s",
+                   memtable_insert_with_hint_prefix_extractor == nullptr
+                       ? "nullptr"
+                       : memtable_insert_with_hint_prefix_extractor->Name());
+  ROCKS_LOG_HEADER(log, " Options.num_levels: %d", num_levels);
+  ROCKS_LOG_HEADER(log, " Options.min_write_buffer_number_to_merge: %d",
+                   min_write_buffer_number_to_merge);
+  ROCKS_LOG_HEADER(log, " Options.max_write_buffer_number_to_maintain: %d",
+                   max_write_buffer_number_to_maintain);
+  ROCKS_LOG_HEADER(log, " Options.compression_opts.window_bits: %d",
+                   compression_opts.window_bits);
+  ROCKS_LOG_HEADER(log, " Options.compression_opts.level: %d",
+                   compression_opts.level);
+  ROCKS_LOG_HEADER(log, " Options.compression_opts.strategy: %d",
+                   compression_opts.strategy);
+  ROCKS_LOG_HEADER(
+      log,
+      " Options.compression_opts.max_dict_bytes: %" ROCKSDB_PRIszt,
+      compression_opts.max_dict_bytes);
+  ROCKS_LOG_HEADER(log, " Options.level0_file_num_compaction_trigger: %d",
+                   level0_file_num_compaction_trigger);
+  ROCKS_LOG_HEADER(log, " Options.level0_slowdown_writes_trigger: %d",
+                   level0_slowdown_writes_trigger);
+  ROCKS_LOG_HEADER(log, " Options.level0_stop_writes_trigger: %d",
+                   level0_stop_writes_trigger);
+  ROCKS_LOG_HEADER(
+      log, " Options.target_file_size_base: %" PRIu64,
+      target_file_size_base);
+  ROCKS_LOG_HEADER(log, " Options.target_file_size_multiplier: %d",
+                   target_file_size_multiplier);
+  ROCKS_LOG_HEADER(
+      log, " Options.max_bytes_for_level_base: %" PRIu64,
+      max_bytes_for_level_base);
+  ROCKS_LOG_HEADER(log, "Options.level_compaction_dynamic_level_bytes: %d",
+                   level_compaction_dynamic_level_bytes);
+  ROCKS_LOG_HEADER(log, " Options.max_bytes_for_level_multiplier: %f",
+                   max_bytes_for_level_multiplier);
   for (size_t i = 0; i < max_bytes_for_level_multiplier_additional.size();
        i++) {
-    Header(log,
-           "Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt
-           "]: %d",
-           i, max_bytes_for_level_multiplier_additional[i]);
+    ROCKS_LOG_HEADER(
+        log, "Options.max_bytes_for_level_multiplier_addtl[%" ROCKSDB_PRIszt
+             "]: %d",
+        i, max_bytes_for_level_multiplier_additional[i]);
   }
-  Header(log, " Options.max_sequential_skip_in_iterations: %" PRIu64,
-         max_sequential_skip_in_iterations);
-  Header(log, " Options.max_compaction_bytes: %" PRIu64,
-         max_compaction_bytes);
-  Header(log,
-         " Options.arena_block_size: %" ROCKSDB_PRIszt,
-         arena_block_size);
-  Header(log, " Options.soft_pending_compaction_bytes_limit: %" PRIu64,
-         soft_pending_compaction_bytes_limit);
-  Header(log, " Options.hard_pending_compaction_bytes_limit: %" PRIu64,
-         hard_pending_compaction_bytes_limit);
-  Header(log, " Options.rate_limit_delay_max_milliseconds: %u",
-         rate_limit_delay_max_milliseconds);
-  Header(log, " Options.disable_auto_compactions: %d",
-         disable_auto_compactions);
+  ROCKS_LOG_HEADER(
+      log, " Options.max_sequential_skip_in_iterations: %" PRIu64,
+      max_sequential_skip_in_iterations);
+  ROCKS_LOG_HEADER(
+      log, " Options.max_compaction_bytes: %" PRIu64,
+      max_compaction_bytes);
+  ROCKS_LOG_HEADER(
+      log,
+      " Options.arena_block_size: %" ROCKSDB_PRIszt,
+      arena_block_size);
+  ROCKS_LOG_HEADER(log,
+                   " Options.soft_pending_compaction_bytes_limit: %" PRIu64,
+                   soft_pending_compaction_bytes_limit);
+  ROCKS_LOG_HEADER(log,
+                   " Options.hard_pending_compaction_bytes_limit: %" PRIu64,
+                   hard_pending_compaction_bytes_limit);
+  ROCKS_LOG_HEADER(log, " Options.rate_limit_delay_max_milliseconds: %u",
+                   rate_limit_delay_max_milliseconds);
+  ROCKS_LOG_HEADER(log, " Options.disable_auto_compactions: %d",
+                   disable_auto_compactions);

   const auto& it_compaction_style =
       compaction_style_to_string.find(compaction_style);

@@ -291,8 +308,9 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
   } else {
     str_compaction_style = it_compaction_style->second;
   }
-  Header(log, " Options.compaction_style: %s",
-         str_compaction_style.c_str());
+  ROCKS_LOG_HEADER(log,
+                   " Options.compaction_style: %s",
+                   str_compaction_style.c_str());

   const auto& it_compaction_pri =
       compaction_pri_to_string.find(compaction_pri);

@@ -303,55 +321,69 @@ void ColumnFamilyOptions::Dump(Logger* log) const {
   } else {
     str_compaction_pri = it_compaction_pri->second;
   }
-  Header(log, " Options.compaction_pri: %s",
-         str_compaction_pri.c_str());
-  Header(log, " Options.compaction_options_universal.size_ratio: %u",
-         compaction_options_universal.size_ratio);
-  Header(log, "Options.compaction_options_universal.min_merge_width: %u",
-         compaction_options_universal.min_merge_width);
-  Header(log, "Options.compaction_options_universal.max_merge_width: %u",
-         compaction_options_universal.max_merge_width);
-  Header(log, "Options.compaction_options_universal."
-         "max_size_amplification_percent: %u",
-         compaction_options_universal.max_size_amplification_percent);
-  Header(log,
-         "Options.compaction_options_universal.compression_size_percent: %d",
-         compaction_options_universal.compression_size_percent);
-  Header(log,
-         "Options.compaction_options_fifo.max_table_files_size: %" PRIu64,
-         compaction_options_fifo.max_table_files_size);
+  ROCKS_LOG_HEADER(log,
+                   " Options.compaction_pri: %s",
+                   str_compaction_pri.c_str());
+  ROCKS_LOG_HEADER(log,
+                   " Options.compaction_options_universal.size_ratio: %u",
+                   compaction_options_universal.size_ratio);
+  ROCKS_LOG_HEADER(log,
+                   "Options.compaction_options_universal.min_merge_width: %u",
+                   compaction_options_universal.min_merge_width);
+  ROCKS_LOG_HEADER(log,
+                   "Options.compaction_options_universal.max_merge_width: %u",
+                   compaction_options_universal.max_merge_width);
+  ROCKS_LOG_HEADER(
+      log,
+      "Options.compaction_options_universal."
+      "max_size_amplification_percent: %u",
+      compaction_options_universal.max_size_amplification_percent);
+  ROCKS_LOG_HEADER(
+      log,
+      "Options.compaction_options_universal.compression_size_percent: %d",
+      compaction_options_universal.compression_size_percent);
+  ROCKS_LOG_HEADER(
+      log, "Options.compaction_options_fifo.max_table_files_size: %" PRIu64,
+      compaction_options_fifo.max_table_files_size);
   std::string collector_names;
   for (const auto& collector_factory : table_properties_collector_factories) {
     collector_names.append(collector_factory->Name());
     collector_names.append("; ");
   }
-  Header(log, " Options.table_properties_collectors: %s",
-         collector_names.c_str());
-  Header(log, " Options.inplace_update_support: %d",
-         inplace_update_support);
-  Header(log,
-         " Options.inplace_update_num_locks: %" ROCKSDB_PRIszt,
-         inplace_update_num_locks);
+  ROCKS_LOG_HEADER(
+      log, " Options.table_properties_collectors: %s",
+      collector_names.c_str());
+  ROCKS_LOG_HEADER(log,
+                   " Options.inplace_update_support: %d",
+                   inplace_update_support);
+  ROCKS_LOG_HEADER(
+      log,
+      " Options.inplace_update_num_locks: %" ROCKSDB_PRIszt,
+      inplace_update_num_locks);
   // TODO: easier config for bloom (maybe based on avg key/value size)
-  Header(log, " Options.memtable_prefix_bloom_size_ratio: %f",
-         memtable_prefix_bloom_size_ratio);
-  Header(log, " Options.memtable_huge_page_size: %" ROCKSDB_PRIszt,
-         memtable_huge_page_size);
-  Header(log, " Options.bloom_locality: %d",
-         bloom_locality);
-  Header(log,
-         " Options.max_successive_merges: %" ROCKSDB_PRIszt,
-         max_successive_merges);
-  Header(log, " Options.optimize_filters_for_hits: %d",
-         optimize_filters_for_hits);
-  Header(log, " Options.paranoid_file_checks: %d",
-         paranoid_file_checks);
-  Header(log, " Options.force_consistency_checks: %d",
-         force_consistency_checks);
-  Header(log, " Options.report_bg_io_stats: %d",
-         report_bg_io_stats);
+  ROCKS_LOG_HEADER(
+      log, " Options.memtable_prefix_bloom_size_ratio: %f",
+      memtable_prefix_bloom_size_ratio);
+  ROCKS_LOG_HEADER(log, " Options.memtable_huge_page_size: %" ROCKSDB_PRIszt,
+                   memtable_huge_page_size);
+  ROCKS_LOG_HEADER(log,
+                   " Options.bloom_locality: %d",
+                   bloom_locality);
+  ROCKS_LOG_HEADER(
+      log,
+      " Options.max_successive_merges: %" ROCKSDB_PRIszt,
+      max_successive_merges);
+  ROCKS_LOG_HEADER(log,
+                   " Options.optimize_filters_for_hits: %d",
+                   optimize_filters_for_hits);
+  ROCKS_LOG_HEADER(log, " Options.paranoid_file_checks: %d",
+                   paranoid_file_checks);
+  ROCKS_LOG_HEADER(log, " Options.force_consistency_checks: %d",
+                   force_consistency_checks);
+  ROCKS_LOG_HEADER(log, " Options.report_bg_io_stats: %d",
+                   report_bg_io_stats);
 }  // ColumnFamilyOptions::Dump

void Options::Dump(Logger* log) const {
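One detail worth noticing in these `Dump()` conversions: they use `ROCKS_LOG_HEADER`, the one macro that skips `PREPEND_FILE_LINE`, so the options banner is not cluttered with a repeated `[util/options.cc:NNN]` prefix. An illustrative contrast, assuming the macros above (timestamps, thread ids, and the line number below are made up, not captured output):

```
# HEADER level (ROCKS_LOG_HEADER): no call-site prefix
2017/03/15-19:22:52.000000 7fedde9f5700  Options.comparator: leveldb.BytewiseComparator
# Other levels (e.g. ROCKS_LOG_INFO): "[file:line] " prepended
2017/03/15-19:22:52.000000 7fedde9f5700 [utilities/backupable/backupable_db.cc:523] Starting read_only backup engine
```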


@@ -66,21 +66,24 @@ std::string BackupStatistics::ToString() const {
}

void BackupableDBOptions::Dump(Logger* logger) const {
- Log(logger, " Options.backup_dir: %s", backup_dir.c_str());
- Log(logger, " Options.backup_env: %p", backup_env);
- Log(logger, " Options.share_table_files: %d",
-     static_cast<int>(share_table_files));
- Log(logger, " Options.info_log: %p", info_log);
- Log(logger, " Options.sync: %d", static_cast<int>(sync));
- Log(logger, " Options.destroy_old_data: %d",
-     static_cast<int>(destroy_old_data));
- Log(logger, " Options.backup_log_files: %d",
-     static_cast<int>(backup_log_files));
- Log(logger, " Options.backup_rate_limit: %" PRIu64, backup_rate_limit);
- Log(logger, " Options.restore_rate_limit: %" PRIu64,
-     restore_rate_limit);
- Log(logger, "Options.max_background_operations: %d",
-     max_background_operations);
+ ROCKS_LOG_INFO(logger, " Options.backup_dir: %s",
+                backup_dir.c_str());
+ ROCKS_LOG_INFO(logger, " Options.backup_env: %p", backup_env);
+ ROCKS_LOG_INFO(logger, " Options.share_table_files: %d",
+                static_cast<int>(share_table_files));
+ ROCKS_LOG_INFO(logger, " Options.info_log: %p", info_log);
+ ROCKS_LOG_INFO(logger, " Options.sync: %d",
+                static_cast<int>(sync));
+ ROCKS_LOG_INFO(logger, " Options.destroy_old_data: %d",
+                static_cast<int>(destroy_old_data));
+ ROCKS_LOG_INFO(logger, " Options.backup_log_files: %d",
+                static_cast<int>(backup_log_files));
+ ROCKS_LOG_INFO(logger, " Options.backup_rate_limit: %" PRIu64,
+                backup_rate_limit);
+ ROCKS_LOG_INFO(logger, " Options.restore_rate_limit: %" PRIu64,
+                restore_rate_limit);
+ ROCKS_LOG_INFO(logger, "Options.max_background_operations: %d",
+                max_background_operations);
}

// -------- BackupEngineImpl class ---------

@@ -520,7 +523,7 @@ Status BackupEngineImpl::Initialize() {
  assert(!initialized_);
  initialized_ = true;
  if (read_only_) {
-   Log(options_.info_log, "Starting read_only backup engine");
+   ROCKS_LOG_INFO(options_.info_log, "Starting read_only backup engine");
  }
  options_.Dump(options_.info_log);

@@ -568,15 +571,16 @@ Status BackupEngineImpl::Initialize() {
      if (file == "." || file == "..") {
        continue;
      }
-     Log(options_.info_log, "Detected backup %s", file.c_str());
+     ROCKS_LOG_INFO(options_.info_log, "Detected backup %s", file.c_str());
      BackupID backup_id = 0;
      sscanf(file.c_str(), "%u", &backup_id);
      if (backup_id == 0 || file != rocksdb::ToString(backup_id)) {
        if (!read_only_) {
          // invalid file name, delete that
          auto s = backup_env_->DeleteFile(GetBackupMetaDir() + "/" + file);
-         Log(options_.info_log, "Unrecognized meta file %s, deleting -- %s",
-             file.c_str(), s.ToString().c_str());
+         ROCKS_LOG_INFO(options_.info_log,
+                        "Unrecognized meta file %s, deleting -- %s",
+                        file.c_str(), s.ToString().c_str());
        }
        continue;
      }

@@ -590,7 +594,8 @@ Status BackupEngineImpl::Initialize() {
    latest_backup_id_ = 0;
    if (options_.destroy_old_data) {  // Destroy old data
      assert(!read_only_);
-     Log(options_.info_log,
-         "Backup Engine started with destroy_old_data == true, deleting all "
-         "backups");
+     ROCKS_LOG_INFO(
+         options_.info_log,
+         "Backup Engine started with destroy_old_data == true, deleting all "
+         "backups");
      auto s = PurgeOldBackups(0);

@@ -615,8 +620,8 @@ Status BackupEngineImpl::Initialize() {
      Status s =
          backup.second->LoadFromFile(options_.backup_dir, abs_path_to_size);
      if (s.IsCorruption()) {
-       Log(options_.info_log, "Backup %u corrupted -- %s", backup.first,
-           s.ToString().c_str());
+       ROCKS_LOG_INFO(options_.info_log, "Backup %u corrupted -- %s",
+                      backup.first, s.ToString().c_str());
        corrupt_backups_.insert(std::make_pair(
            backup.first, std::make_pair(s, std::move(backup.second))));
      } else if (!s.ok()) {

@@ -625,8 +630,8 @@ Status BackupEngineImpl::Initialize() {
        // fail, whereas corruption errors would not cause Open() failures.
        return s;
      } else {
-       Log(options_.info_log, "Loading backup %" PRIu32 " OK:\n%s",
-           backup.first, backup.second->GetInfoString().c_str());
+       ROCKS_LOG_INFO(options_.info_log, "Loading backup %" PRIu32 " OK:\n%s",
+                      backup.first, backup.second->GetInfoString().c_str());
        latest_backup_id_ = std::max(latest_backup_id_, backup.first);
      }
    }

@@ -636,7 +641,7 @@ Status BackupEngineImpl::Initialize() {
    }
  }

- Log(options_.info_log, "Latest backup is %u", latest_backup_id_);
+ ROCKS_LOG_INFO(options_.info_log, "Latest backup is %u", latest_backup_id_);

  // set up threads perform copies from files_to_copy_or_create_ in the
  // background

@@ -655,7 +660,7 @@ Status BackupEngineImpl::Initialize() {
    });
  }

- Log(options_.info_log, "Initialized BackupEngine");
+ ROCKS_LOG_INFO(options_.info_log, "Initialized BackupEngine");
  return Status::OK();
}

@@ -706,8 +711,9 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
  auto start_backup = backup_env_->NowMicros();

- Log(options_.info_log, "Started the backup process -- creating backup %u",
-     new_backup_id);
+ ROCKS_LOG_INFO(options_.info_log,
+                "Started the backup process -- creating backup %u",
+                new_backup_id);

  auto private_tmp_dir = GetAbsolutePath(GetPrivateFileRel(new_backup_id, true));
  s = backup_env_->FileExists(private_tmp_dir);

@@ -790,14 +796,16 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
        manifest_fname.size(), 0 /* size_limit */, false /* shared_checksum */,
        progress_callback, manifest_fname.substr(1) + "\n");
  }
- Log(options_.info_log, "begin add wal files for backup -- %" ROCKSDB_PRIszt,
-     live_wal_files.size());
+ ROCKS_LOG_INFO(options_.info_log,
+                "begin add wal files for backup -- %" ROCKSDB_PRIszt,
+                live_wal_files.size());
  // Add a CopyOrCreateWorkItem to the channel for each WAL file
  for (size_t i = 0; s.ok() && i < live_wal_files.size(); ++i) {
    uint64_t size_bytes = live_wal_files[i]->SizeFileBytes();
    if (live_wal_files[i]->Type() == kAliveLogFile) {
-     Log(options_.info_log, "add wal file for backup %s -- %" PRIu64,
-         live_wal_files[i]->PathName().c_str(), size_bytes);
+     ROCKS_LOG_INFO(options_.info_log,
+                    "add wal file for backup %s -- %" PRIu64,
+                    live_wal_files[i]->PathName().c_str(), size_bytes);
      // we only care about live log files
      // copy the file into backup_dir/files/<new backup>/
      s = AddBackupFileWorkItem(live_dst_paths, backup_items_to_finish,

@@ -807,7 +815,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
                              size_bytes, size_bytes);
    }
  }
- Log(options_.info_log, "add files for backup done, wait finish.");
+ ROCKS_LOG_INFO(options_.info_log, "add files for backup done, wait finish.");
  Status item_status;
  for (auto& item : backup_items_to_finish) {
    item.result.wait();

@@ -833,7 +841,8 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
  if (s.ok()) {
    // move tmp private backup to real backup folder
-   Log(options_.info_log,
-       "Moving tmp backup directory to the real one: %s -> %s\n",
+   ROCKS_LOG_INFO(
+       options_.info_log,
+       "Moving tmp backup directory to the real one: %s -> %s\n",
        GetAbsolutePath(GetPrivateFileRel(new_backup_id, true)).c_str(),
        GetAbsolutePath(GetPrivateFileRel(new_backup_id, false)).c_str());

@@ -876,9 +885,10 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
  if (!s.ok()) {
    backup_statistics_.IncrementNumberFailBackup();
    // clean all the files we might have created
-   Log(options_.info_log, "Backup failed -- %s", s.ToString().c_str());
-   Log(options_.info_log, "Backup Statistics %s\n",
-       backup_statistics_.ToString().c_str());
+   ROCKS_LOG_INFO(options_.info_log, "Backup failed -- %s",
+                  s.ToString().c_str());
+   ROCKS_LOG_INFO(options_.info_log, "Backup Statistics %s\n",
+                  backup_statistics_.ToString().c_str());
    // delete files that we might have already written
    DeleteBackup(new_backup_id);
    GarbageCollect();

@@ -888,27 +898,28 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
  // here we know that we succeeded and installed the new backup
  // in the LATEST_BACKUP file
  latest_backup_id_ = new_backup_id;
- Log(options_.info_log, "Backup DONE. All is good");
+ ROCKS_LOG_INFO(options_.info_log, "Backup DONE. All is good");

  // backup_speed is in byte/second
  double backup_speed = new_backup->GetSize() / (1.048576 * backup_time);
- Log(options_.info_log, "Backup number of files: %u",
-     new_backup->GetNumberFiles());
+ ROCKS_LOG_INFO(options_.info_log, "Backup number of files: %u",
+                new_backup->GetNumberFiles());
  char human_size[16];
  AppendHumanBytes(new_backup->GetSize(), human_size, sizeof(human_size));
- Log(options_.info_log, "Backup size: %s", human_size);
- Log(options_.info_log, "Backup time: %" PRIu64 " microseconds", backup_time);
- Log(options_.info_log, "Backup speed: %.3f MB/s", backup_speed);
- Log(options_.info_log, "Backup Statistics %s",
-     backup_statistics_.ToString().c_str());
+ ROCKS_LOG_INFO(options_.info_log, "Backup size: %s", human_size);
+ ROCKS_LOG_INFO(options_.info_log, "Backup time: %" PRIu64 " microseconds",
+                backup_time);
+ ROCKS_LOG_INFO(options_.info_log, "Backup speed: %.3f MB/s", backup_speed);
+ ROCKS_LOG_INFO(options_.info_log, "Backup Statistics %s",
+                backup_statistics_.ToString().c_str());
  return s;
}

Status BackupEngineImpl::PurgeOldBackups(uint32_t num_backups_to_keep) {
  assert(initialized_);
  assert(!read_only_);
- Log(options_.info_log, "Purging old backups, keeping %u",
-     num_backups_to_keep);
+ ROCKS_LOG_INFO(options_.info_log, "Purging old backups, keeping %u",
+                num_backups_to_keep);
  std::vector<BackupID> to_delete;
  auto itr = backups_.begin();
  while ((backups_.size() - to_delete.size()) > num_backups_to_keep) {

@@ -927,7 +938,7 @@ Status BackupEngineImpl::PurgeOldBackups(uint32_t num_backups_to_keep) {
Status BackupEngineImpl::DeleteBackup(BackupID backup_id) {
  assert(initialized_);
  assert(!read_only_);
- Log(options_.info_log, "Deleting backup %u", backup_id);
+ ROCKS_LOG_INFO(options_.info_log, "Deleting backup %u", backup_id);
  auto backup = backups_.find(backup_id);
  if (backup != backups_.end()) {
    auto s = backup->second->Delete();

@@ -951,8 +962,8 @@ Status BackupEngineImpl::DeleteBackup(BackupID backup_id) {
  for (auto& itr : backuped_file_infos_) {
    if (itr.second->refs == 0) {
      Status s = backup_env_->DeleteFile(GetAbsolutePath(itr.first));
-     Log(options_.info_log, "Deleting %s -- %s", itr.first.c_str(),
-         s.ToString().c_str());
+     ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s", itr.first.c_str(),
+                    s.ToString().c_str());
      to_delete.push_back(itr.first);
    }
  }

@@ -964,8 +975,8 @@ Status BackupEngineImpl::DeleteBackup(BackupID backup_id) {
  // if they are not empty
  std::string private_dir = GetPrivateFileRel(backup_id);
  Status s = backup_env_->DeleteDir(GetAbsolutePath(private_dir));
- Log(options_.info_log, "Deleting private dir %s -- %s",
-     private_dir.c_str(), s.ToString().c_str());
+ ROCKS_LOG_INFO(options_.info_log, "Deleting private dir %s -- %s",
+                private_dir.c_str(), s.ToString().c_str());
  return Status::OK();
}

@@ -1008,9 +1019,9 @@ Status BackupEngineImpl::RestoreDBFromBackup(
    return Status::NotFound("Backup not found");
  }

- Log(options_.info_log, "Restoring backup id %u\n", backup_id);
- Log(options_.info_log, "keep_log_files: %d\n",
-     static_cast<int>(restore_options.keep_log_files));
+ ROCKS_LOG_INFO(options_.info_log, "Restoring backup id %u\n", backup_id);
+ ROCKS_LOG_INFO(options_.info_log, "keep_log_files: %d\n",
+                static_cast<int>(restore_options.keep_log_files));

  // just in case. Ignore errors
  db_env_->CreateDirIfMissing(db_dir);

@@ -1028,8 +1039,9 @@ Status BackupEngineImpl::RestoreDBFromBackup(
      FileType type;
      bool ok = ParseFileName(f, &number, &type);
      if (ok && type == kLogFile) {
-       Log(options_.info_log, "Moving log file from archive/ to wal_dir: %s",
-           f.c_str());
+       ROCKS_LOG_INFO(options_.info_log,
+                      "Moving log file from archive/ to wal_dir: %s",
+                      f.c_str());
        Status s =
            db_env_->RenameFile(archive_dir + "/" + f, wal_dir + "/" + f);
        if (!s.ok()) {

@@ -1079,7 +1091,8 @@ Status BackupEngineImpl::RestoreDBFromBackup(
    dst = ((type == kLogFile) ? wal_dir : db_dir) +
          "/" + dst;

-   Log(options_.info_log, "Restoring %s to %s\n", file.c_str(), dst.c_str());
+   ROCKS_LOG_INFO(options_.info_log, "Restoring %s to %s\n", file.c_str(),
+                  dst.c_str());
    CopyOrCreateWorkItem copy_or_create_work_item(
        GetAbsolutePath(file), dst, "" /* contents */, backup_env_, db_env_,
        false, rate_limiter, 0 /* size_limit */);

@@ -1106,7 +1119,8 @@ Status BackupEngineImpl::RestoreDBFromBackup(
    }
  }

- Log(options_.info_log, "Restoring done -- %s\n", s.ToString().c_str());
+ ROCKS_LOG_INFO(options_.info_log, "Restoring done -- %s\n",
+                s.ToString().c_str());
  return s;
}

@@ -1127,7 +1141,7 @@ Status BackupEngineImpl::VerifyBackup(BackupID backup_id) {
    return Status::NotFound();
  }

- Log(options_.info_log, "Verifying backup id %u\n", backup_id);
+ ROCKS_LOG_INFO(options_.info_log, "Verifying backup id %u\n", backup_id);

  std::unordered_map<std::string, uint64_t> curr_abs_path_to_size;
  for (const auto& rel_dir : {GetPrivateFileRel(backup_id), GetSharedFileRel(),

@@ -1302,14 +1316,15 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
  } else if (shared && (same_path || file_exists)) {
    need_to_copy = false;
    if (shared_checksum) {
-     Log(options_.info_log,
-         "%s already present, with checksum %u and size %" PRIu64,
-         fname.c_str(), checksum_value, size_bytes);
+     ROCKS_LOG_INFO(options_.info_log,
+                    "%s already present, with checksum %u and size %" PRIu64,
+                    fname.c_str(), checksum_value, size_bytes);
    } else if (backuped_file_infos_.find(dst_relative) ==
               backuped_file_infos_.end() && !same_path) {
      // file already exists, but it's not referenced by any backup. overwrite
      // the file
-     Log(options_.info_log,
-         "%s already present, but not referenced by any backup. We will "
-         "overwrite the file.",
-         fname.c_str());
+     ROCKS_LOG_INFO(
+         options_.info_log,
+         "%s already present, but not referenced by any backup. We will "
+         "overwrite the file.",
+         fname.c_str());

@@ -1317,8 +1332,8 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
      backup_env_->DeleteFile(dst_path);
    } else {
      // the file is present and referenced by a backup
-     Log(options_.info_log, "%s already present, calculate checksum",
-         fname.c_str());
+     ROCKS_LOG_INFO(options_.info_log,
+                    "%s already present, calculate checksum", fname.c_str());
      s = CalculateChecksum(src_dir + fname, db_env_, size_limit,
                            &checksum_value);
    }

@@ -1326,8 +1341,8 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
    live_dst_paths.insert(dst_path);

    if (!contents.empty() || need_to_copy) {
-     Log(options_.info_log, "Copying %s to %s", fname.c_str(),
-         dst_path_tmp.c_str());
+     ROCKS_LOG_INFO(options_.info_log, "Copying %s to %s", fname.c_str(),
+                    dst_path_tmp.c_str());
      CopyOrCreateWorkItem copy_or_create_work_item(
          src_dir.empty() ? "" : src_dir + fname, dst_path_tmp, contents, db_env_,
          backup_env_, options_.sync, rate_limiter, size_limit,

@@ -1433,7 +1448,7 @@ Status BackupEngineImpl::InsertPathnameToSizeBytes(
Status BackupEngineImpl::GarbageCollect() {
  assert(!read_only_);
- Log(options_.info_log, "Starting garbage collection");
+ ROCKS_LOG_INFO(options_.info_log, "Starting garbage collection");

  if (options_.share_table_files) {
    // delete obsolete shared files

@@ -1459,8 +1474,8 @@ Status BackupEngineImpl::GarbageCollect() {
        // this might be a directory, but DeleteFile will just fail in that
        // case, so we're good
        Status s = backup_env_->DeleteFile(GetAbsolutePath(rel_fname));
-       Log(options_.info_log, "Deleting %s -- %s", rel_fname.c_str(),
-           s.ToString().c_str());
+       ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
+                      rel_fname.c_str(), s.ToString().c_str());
        backuped_file_infos_.erase(rel_fname);
      }
    }

@@ -1491,13 +1506,14 @@ Status BackupEngineImpl::GarbageCollect() {
    backup_env_->GetChildren(full_private_path, &subchildren);
    for (auto& subchild : subchildren) {
      Status s = backup_env_->DeleteFile(full_private_path + subchild);
-     Log(options_.info_log, "Deleting %s -- %s",
-         (full_private_path + subchild).c_str(), s.ToString().c_str());
+     ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
+                    (full_private_path + subchild).c_str(),
+                    s.ToString().c_str());
    }
    // finally delete the private dir
    Status s = backup_env_->DeleteDir(full_private_path);
-   Log(options_.info_log, "Deleting dir %s -- %s", full_private_path.c_str(),
-       s.ToString().c_str());
+   ROCKS_LOG_INFO(options_.info_log, "Deleting dir %s -- %s",
+                  full_private_path.c_str(), s.ToString().c_str());
  }
  return Status::OK();


@@ -122,7 +122,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
  }
  size_t wal_size = live_wal_files.size();
- Log(db_options.info_log,
-     "Started the snapshot process -- creating snapshot in directory %s",
-     checkpoint_dir.c_str());
+ ROCKS_LOG_INFO(
+     db_options.info_log,
+     "Started the snapshot process -- creating snapshot in directory %s",
+     checkpoint_dir.c_str());

@@ -161,7 +162,7 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
        // * if it's kDescriptorFile, limit the size to manifest_file_size
        // * always copy if cross-device link
        if ((type == kTableFile) && same_fs) {
-         Log(db_options.info_log, "Hard Linking %s", src_fname.c_str());
+         ROCKS_LOG_INFO(db_options.info_log, "Hard Linking %s", src_fname.c_str());
          s = db_->GetEnv()->LinkFile(db_->GetName() + src_fname,
                                      full_private_path + src_fname);
          if (s.IsNotSupported()) {

@@ -170,7 +171,7 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
          }
        }
        if ((type != kTableFile) || (!same_fs)) {
-         Log(db_options.info_log, "Copying %s", src_fname.c_str());
+         ROCKS_LOG_INFO(db_options.info_log, "Copying %s", src_fname.c_str());
          s = CopyFile(db_->GetEnv(), db_->GetName() + src_fname,
                       full_private_path + src_fname,
                       (type == kDescriptorFile) ? manifest_file_size : 0,

@@ -181,8 +182,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
    s = CreateFile(db_->GetEnv(), full_private_path + current_fname,
                   manifest_fname.substr(1) + "\n");
  }
- Log(db_options.info_log, "Number of log files %" ROCKSDB_PRIszt,
-     live_wal_files.size());
+ ROCKS_LOG_INFO(db_options.info_log, "Number of log files %" ROCKSDB_PRIszt,
+                live_wal_files.size());

  // Link WAL files. Copy exact size of last one because it is the only one
  // that has changes after the last flush.

@@ -191,8 +192,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
        (live_wal_files[i]->StartSequence() >= sequence_number ||
         live_wal_files[i]->LogNumber() >= min_log_num)) {
      if (i + 1 == wal_size) {
-       Log(db_options.info_log, "Copying %s",
-           live_wal_files[i]->PathName().c_str());
+       ROCKS_LOG_INFO(db_options.info_log, "Copying %s",
+                      live_wal_files[i]->PathName().c_str());
        s = CopyFile(db_->GetEnv(),
                     db_options.wal_dir + live_wal_files[i]->PathName(),
                     full_private_path + live_wal_files[i]->PathName(),

@@ -201,8 +202,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
      }
      if (same_fs) {
        // we only care about live log files
-       Log(db_options.info_log, "Hard Linking %s",
-           live_wal_files[i]->PathName().c_str());
+       ROCKS_LOG_INFO(db_options.info_log, "Hard Linking %s",
+                      live_wal_files[i]->PathName().c_str());
        s = db_->GetEnv()->LinkFile(
            db_options.wal_dir + live_wal_files[i]->PathName(),
            full_private_path + live_wal_files[i]->PathName());

@@ -212,8 +213,8 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
      }
    }
    if (!same_fs) {
-     Log(db_options.info_log, "Copying %s",
-         live_wal_files[i]->PathName().c_str());
+     ROCKS_LOG_INFO(db_options.info_log, "Copying %s",
+                    live_wal_files[i]->PathName().c_str());
      s = CopyFile(db_->GetEnv(),
                   db_options.wal_dir + live_wal_files[i]->PathName(),
                   full_private_path + live_wal_files[i]->PathName(), 0,

@@ -239,27 +240,28 @@ Status CheckpointImpl::CreateCheckpoint(const std::string& checkpoint_dir) {
  if (!s.ok()) {
    // clean all the files we might have created
-   Log(db_options.info_log, "Snapshot failed -- %s", s.ToString().c_str());
+   ROCKS_LOG_INFO(db_options.info_log, "Snapshot failed -- %s",
+                  s.ToString().c_str());
    // we have to delete the dir and all its children
    std::vector<std::string> subchildren;
    db_->GetEnv()->GetChildren(full_private_path, &subchildren);
    for (auto& subchild : subchildren) {
      std::string subchild_path = full_private_path + "/" + subchild;
      Status s1 = db_->GetEnv()->DeleteFile(subchild_path);
-     Log(db_options.info_log, "Delete file %s -- %s", subchild_path.c_str(),
-         s1.ToString().c_str());
+     ROCKS_LOG_INFO(db_options.info_log, "Delete file %s -- %s",
+                    subchild_path.c_str(), s1.ToString().c_str());
    }
    // finally delete the private dir
    Status s1 = db_->GetEnv()->DeleteDir(full_private_path);
-   Log(db_options.info_log, "Delete dir %s -- %s", full_private_path.c_str(),
-       s1.ToString().c_str());
+   ROCKS_LOG_INFO(db_options.info_log, "Delete dir %s -- %s",
+                  full_private_path.c_str(), s1.ToString().c_str());
    return s;
  }

  // here we know that we succeeded and installed the new snapshot
- Log(db_options.info_log, "Snapshot DONE. All is good");
- Log(db_options.info_log, "Snapshot sequence number: %" PRIu64,
-     sequence_number);
+ ROCKS_LOG_INFO(db_options.info_log, "Snapshot DONE. All is good");
+ ROCKS_LOG_INFO(db_options.info_log, "Snapshot sequence number: %" PRIu64,
+                sequence_number);

  return s;
}


@@ -9,6 +9,7 @@
 #include "rocksdb/merge_operator.h"
 #include "rocksdb/slice.h"
 #include "util/coding.h"
+#include "util/logging.h"
 #include "utilities/merge_operators.h"

 using namespace rocksdb;

@@ -51,10 +52,9 @@ class UInt64AddOperator : public AssociativeMergeOperator {
      result = DecodeFixed64(value.data());
    } else if (logger != nullptr) {
      // If value is corrupted, treat it as 0
-     Log(InfoLogLevel::ERROR_LEVEL, logger,
-         "uint64 value corruption, size: %" ROCKSDB_PRIszt
-         " > %" ROCKSDB_PRIszt,
-         value.size(), sizeof(uint64_t));
+     ROCKS_LOG_ERROR(logger, "uint64 value corruption, size: %" ROCKSDB_PRIszt
+                             " > %" ROCKSDB_PRIszt,
+                     value.size(), sizeof(uint64_t));
    }

    return result;


@@ -11,6 +11,7 @@
 #include <vector>

 #include "port/port.h"
+#include "util/logging.h"
 #include "util/stop_watch.h"
 #include "util/sync_point.h"
 #include "utilities/persistent_cache/block_cache_tier_file.h"

@@ -109,7 +110,7 @@ Status BlockCacheTier::CleanupCacheFolder(const std::string& folder) {
        return status;
      }
    } else {
-     Debug(opt_.log, "Skipping file %s", file.c_str());
+     ROCKS_LOG_DEBUG(opt_.log, "Skipping file %s", file.c_str());
    }
  }
  return Status::OK();

@@ -233,8 +234,8 @@ Status BlockCacheTier::InsertImpl(const Slice& key, const Slice& data) {
  while (!cache_file_->Append(key, data, &lba)) {
    if (!cache_file_->Eof()) {
-     Debug(opt_.log, "Error inserting to cache file %d",
-           cache_file_->cacheid());
+     ROCKS_LOG_DEBUG(opt_.log, "Error inserting to cache file %d",
+                     cache_file_->cacheid());
      stats_.write_latency_.Add(timer.ElapsedNanos() / 1000);
      return Status::TryAgain();
    }


@@ -13,8 +13,9 @@
 #include <memory>
 #include <vector>

-#include "util/crc32c.h"
 #include "port/port.h"
+#include "util/crc32c.h"
+#include "util/logging.h"

 namespace rocksdb {

@@ -201,7 +202,7 @@ bool RandomAccessCacheFile::Open(const bool enable_direct_reads) {
bool RandomAccessCacheFile::OpenImpl(const bool enable_direct_reads) {
  rwlock_.AssertHeld();

- Debug(log_, "Opening cache file %s", Path().c_str());
+ ROCKS_LOG_DEBUG(log_, "Opening cache file %s", Path().c_str());

  std::unique_ptr<RandomAccessFile> file;
  Status status =

@@ -282,19 +283,19 @@ bool WriteableCacheFile::Create(const bool enable_direct_writes,
  enable_direct_reads_ = enable_direct_reads;

- Debug(log_, "Creating new cache %s (max size is %d B)", Path().c_str(),
-       max_size_);
+ ROCKS_LOG_DEBUG(log_, "Creating new cache %s (max size is %d B)",
+                 Path().c_str(), max_size_);

  Status s = env_->FileExists(Path());
  if (s.ok()) {
-   Warn(log_, "File %s already exists. %s", Path().c_str(),
-        s.ToString().c_str());
+   ROCKS_LOG_WARN(log_, "File %s already exists. %s", Path().c_str(),
+                  s.ToString().c_str());
  }

  s = NewWritableCacheFile(env_, Path(), &file_);
  if (!s.ok()) {
-   Warn(log_, "Unable to create file %s. %s", Path().c_str(),
-        s.ToString().c_str());
+   ROCKS_LOG_WARN(log_, "Unable to create file %s. %s", Path().c_str(),
+                  s.ToString().c_str());
    return false;
  }

@@ -317,7 +318,7 @@ bool WriteableCacheFile::Append(const Slice& key, const Slice& val, LBA* lba) {
    if (!ExpandBuffer(rec_size)) {
      // unable to expand the buffer
-     Debug(log_, "Error expanding buffers. size=%d", rec_size);
+     ROCKS_LOG_DEBUG(log_, "Error expanding buffers. size=%d", rec_size);
      return false;
    }

@@ -360,7 +361,7 @@ bool WriteableCacheFile::ExpandBuffer(const size_t size) {
  while (free < size) {
    CacheWriteBuffer* const buf = alloc_->Allocate();
    if (!buf) {
-     Debug(log_, "Unable to allocate buffers");
+     ROCKS_LOG_DEBUG(log_, "Unable to allocate buffers");
      return false;
    }


@@ -233,8 +233,8 @@ class TtlMergeOperator : public MergeOperator {
                           MergeOperationOutput* merge_out) const override {
    const uint32_t ts_len = DBWithTTLImpl::kTSLength;
    if (merge_in.existing_value && merge_in.existing_value->size() < ts_len) {
-     Log(InfoLogLevel::ERROR_LEVEL, merge_in.logger,
-         "Error: Could not remove timestamp from existing value.");
+     ROCKS_LOG_ERROR(merge_in.logger,
+                     "Error: Could not remove timestamp from existing value.");
      return false;
    }

@@ -242,7 +242,8 @@ class TtlMergeOperator : public MergeOperator {
    std::vector<Slice> operands_without_ts;
    for (const auto& operand : merge_in.operand_list) {
      if (operand.size() < ts_len) {
-       Log(InfoLogLevel::ERROR_LEVEL, merge_in.logger,
-           "Error: Could not remove timestamp from operand value.");
+       ROCKS_LOG_ERROR(
+           merge_in.logger,
+           "Error: Could not remove timestamp from operand value.");
        return false;
      }

@@ -282,7 +283,8 @@ class TtlMergeOperator : public MergeOperator {
      // Augment the *new_value with the ttl time-stamp
      int64_t curtime;
      if (!env_->GetCurrentTime(&curtime).ok()) {
-       Log(InfoLogLevel::ERROR_LEVEL, merge_in.logger,
-           "Error: Could not get current time to be attached internally "
-           "to the new value.");
+       ROCKS_LOG_ERROR(
+           merge_in.logger,
+           "Error: Could not get current time to be attached internally "
+           "to the new value.");
        return false;

@@ -303,8 +305,8 @@ class TtlMergeOperator : public MergeOperator {
    for (const auto& operand : operand_list) {
      if (operand.size() < ts_len) {
-       Log(InfoLogLevel::ERROR_LEVEL, logger,
-           "Error: Could not remove timestamp from value.");
+       ROCKS_LOG_ERROR(logger,
+                       "Error: Could not remove timestamp from value.");
        return false;
      }

@@ -322,7 +324,8 @@ class TtlMergeOperator : public MergeOperator {
    // Augment the *new_value with the ttl time-stamp
    int64_t curtime;
    if (!env_->GetCurrentTime(&curtime).ok()) {
-     Log(InfoLogLevel::ERROR_LEVEL, logger,
-         "Error: Could not get current time to be attached internally "
-         "to the new value.");
+     ROCKS_LOG_ERROR(
+         logger,
+         "Error: Could not get current time to be attached internally "
+         "to the new value.");
      return false;
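The merge-operator hunks show the macros working in user-visible extension points: the `Logger*` threaded through `merge_in.logger` is the DB's info log, so a failing operator now reports the exact source location that rejected the data. A hedged sketch of the same pattern in a custom operator (the class, helper, and messages are invented; the `AssociativeMergeOperator::Merge` signature matches `rocksdb/merge_operator.h`):

```cpp
#include "rocksdb/env.h"
#include "rocksdb/merge_operator.h"
#include "util/coding.h"
#include "util/logging.h"

class CountMergeOperator : public rocksdb::AssociativeMergeOperator {
 public:
  bool Merge(const rocksdb::Slice& /*key*/,
             const rocksdb::Slice* existing_value, const rocksdb::Slice& value,
             std::string* new_value, rocksdb::Logger* logger) const override {
    uint64_t base = 0;
    if (existing_value != nullptr &&
        !DecodeU64(*existing_value, &base, logger)) {
      return false;  // corrupt existing value, already logged
    }
    uint64_t delta = 0;
    if (!DecodeU64(value, &delta, logger)) {
      return false;  // corrupt operand, already logged
    }
    new_value->clear();
    rocksdb::PutFixed64(new_value, base + delta);
    return true;
  }

  const char* Name() const override { return "CountMergeOperator"; }

 private:
  static bool DecodeU64(const rocksdb::Slice& s, uint64_t* out,
                        rocksdb::Logger* logger) {
    if (s.size() != sizeof(uint64_t)) {
      // The emitted line carries this file and line, so a corrupt operand
      // can be traced back to the operator that rejected it.
      ROCKS_LOG_ERROR(logger, "count corruption, size: %" ROCKSDB_PRIszt,
                      s.size());
      return false;
    }
    *out = rocksdb::DecodeFixed64(s.data());
    return true;
  }
};
```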