Add counters for L0 stall while L0-L1 compaction is taking place

Summary:
Although there are already counters that track the stalls caused by
having too many L0 files, they do not distinguish whether, at the time
of the stall, (A) an L0-L1 compaction is in progress trying to mitigate
it, or (B) no L0-L1 compaction has been scheduled yet. This diff adds a
counter for (A) so that the nature of L0 stalls can be better understood.
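
In rough terms, the new bookkeeping works like the sketch below. The names
StallStats and RecordLevel0StopStall are illustrative only, not RocksDB
types; the real change lives in
ColumnFamilyData::RecalculateWriteStallConditions, shown in the diff below.

    // Illustrative sketch only: StallStats and RecordLevel0StopStall are
    // hypothetical names, not part of RocksDB.
    #include <cstdint>

    struct StallStats {
      uint64_t l0_numfiles_total = 0;            // every stop caused by too many L0 files
      uint64_t l0_numfiles_with_compaction = 0;  // ...that happened while L0-L1 compaction ran
    };

    void RecordLevel0StopStall(StallStats* stats, bool l0_compaction_in_progress) {
      stats->l0_numfiles_total++;
      if (l0_compaction_in_progress) {
        // Case (A): compaction was already working on L0 but could not keep up.
        stats->l0_numfiles_with_compaction++;
      }
      // Case (B) can be derived as total - with_compaction.
    }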

Test Plan: make all && make check

Reviewers: sdong, igor, anthony, noetzli, yhchiang

Reviewed By: yhchiang

Subscribers: MarkCallaghan, dhruba

Differential Revision: https://reviews.facebook.net/D46749
Author: Ari Ekmekji
Date:   2015-09-14 11:03:37 -07:00
Commit: 03ddce9a01
Parent: a3fc49bfdd

6 changed files with 41 additions and 15 deletions


@@ -446,7 +446,11 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
     } else if (vstorage->l0_delay_trigger_count() >=
                mutable_cf_options.level0_stop_writes_trigger) {
       write_controller_token_ = write_controller->GetStopToken();
-      internal_stats_->AddCFStats(InternalStats::LEVEL0_NUM_FILES, 1);
+      internal_stats_->AddCFStats(InternalStats::LEVEL0_NUM_FILES_TOTAL, 1);
+      if (compaction_picker_->IsLevel0CompactionInProgress()) {
+        internal_stats_->AddCFStats(
+            InternalStats::LEVEL0_NUM_FILES_WITH_COMPACTION, 1);
+      }
       Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
           "[%s] Stopping writes because we have %d level-0 files",
           name_.c_str(), vstorage->l0_delay_trigger_count());
@@ -454,7 +458,11 @@ void ColumnFamilyData::RecalculateWriteStallConditions(
                vstorage->l0_delay_trigger_count() >=
                    mutable_cf_options.level0_slowdown_writes_trigger) {
       write_controller_token_ = write_controller->GetDelayToken();
-      internal_stats_->AddCFStats(InternalStats::LEVEL0_SLOWDOWN, 1);
+      internal_stats_->AddCFStats(InternalStats::LEVEL0_SLOWDOWN_TOTAL, 1);
+      if (compaction_picker_->IsLevel0CompactionInProgress()) {
+        internal_stats_->AddCFStats(
+            InternalStats::LEVEL0_SLOWDOWN_WITH_COMPACTION, 1);
+      }
       Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
           "[%s] Stalling writes because we have %d level-0 files",
           name_.c_str(), vstorage->l0_delay_trigger_count());


@@ -316,6 +316,8 @@ void CompactionJob::Prepare() {
       Slice* end = i == boundaries_.size() ? nullptr : &boundaries_[i];
       compact_->sub_compact_states.emplace_back(c, start, end, sizes_[i]);
     }
+    MeasureTime(stats_, NUM_SUBCOMPACTIONS_SCHEDULED,
+                compact_->sub_compact_states.size());
   } else {
     compact_->sub_compact_states.emplace_back(c, nullptr, nullptr);
   }


@@ -110,6 +110,11 @@ class CompactionPicker {
   // overlapping.
   bool IsInputNonOverlapping(Compaction* c);

+  // Is there currently a compaction involving level 0 taking place
+  bool IsLevel0CompactionInProgress() const {
+    return !level0_compactions_in_progress_.empty();
+  }
+
  protected:
   int NumberLevels() const { return ioptions_.num_levels; }


@@ -669,12 +669,11 @@ void InternalStats::DumpCFStats(std::string* value) {
     total_files += files;
     total_files_being_compacted += files_being_compacted[level];
     if (comp_stats_[level].micros > 0 || files > 0) {
-      uint64_t stalls = level == 0 ?
-          (cf_stats_count_[LEVEL0_SLOWDOWN] +
-           cf_stats_count_[LEVEL0_NUM_FILES] +
-           cf_stats_count_[MEMTABLE_COMPACTION])
-          : (stall_leveln_slowdown_count_soft_[level] +
-             stall_leveln_slowdown_count_hard_[level]);
+      uint64_t stalls = level == 0 ? (cf_stats_count_[LEVEL0_SLOWDOWN_TOTAL] +
+                                      cf_stats_count_[LEVEL0_NUM_FILES_TOTAL] +
+                                      cf_stats_count_[MEMTABLE_COMPACTION])
+                                   : (stall_leveln_slowdown_count_soft_[level] +
+                                      stall_leveln_slowdown_count_hard_[level]);
       stats_sum.Add(comp_stats_[level]);
       total_file_size += vstorage->NumLevelBytes(level);
@@ -718,11 +717,16 @@ void InternalStats::DumpCFStats(std::string* value) {
   snprintf(buf, sizeof(buf),
            "Stalls(count): %" PRIu64 " level0_slowdown, "
-           "%" PRIu64 " level0_numfiles, %" PRIu64 " memtable_compaction, "
+           "%" PRIu64 " level0_slowdown_with_compaction, "
+           "%" PRIu64 " level0_numfiles, "
+           "%" PRIu64 " level0_numfiles_with_compaction, "
+           "%" PRIu64 " memtable_compaction, "
            "%" PRIu64 " leveln_slowdown_soft, "
            "%" PRIu64 " leveln_slowdown_hard\n",
-           cf_stats_count_[LEVEL0_SLOWDOWN],
-           cf_stats_count_[LEVEL0_NUM_FILES],
+           cf_stats_count_[LEVEL0_SLOWDOWN_TOTAL],
+           cf_stats_count_[LEVEL0_SLOWDOWN_WITH_COMPACTION],
+           cf_stats_count_[LEVEL0_NUM_FILES_TOTAL],
+           cf_stats_count_[LEVEL0_NUM_FILES_WITH_COMPACTION],
            cf_stats_count_[MEMTABLE_COMPACTION],
            total_slowdown_count_soft, total_slowdown_count_hard);
   value->append(buf);
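
Usage note (not part of this diff): the expanded "Stalls(count): ..." line is
part of the per-column-family stats dump, which applications can pull through
the existing "rocksdb.cfstats" property. A minimal sketch, with the counter
values in the comment being purely illustrative:

    #include <iostream>
    #include <string>
    #include "rocksdb/db.h"

    // Dump the CF stats, which now include the *_with_compaction stall counters.
    void PrintStallCounters(rocksdb::DB* db) {
      std::string stats;
      if (db->GetProperty("rocksdb.cfstats", &stats)) {
        // The dump contains a line shaped roughly like:
        //   Stalls(count): 12 level0_slowdown, 7 level0_slowdown_with_compaction,
        //   30 level0_numfiles, 5 level0_numfiles_with_compaction, ...
        std::cout << stats << std::endl;
      }
    }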


@@ -80,9 +80,11 @@ extern DBPropertyType GetPropertyType(const Slice& property,
 class InternalStats {
  public:
   enum InternalCFStatsType {
-    LEVEL0_SLOWDOWN,
+    LEVEL0_SLOWDOWN_TOTAL,
+    LEVEL0_SLOWDOWN_WITH_COMPACTION,
     MEMTABLE_COMPACTION,
-    LEVEL0_NUM_FILES,
+    LEVEL0_NUM_FILES_TOTAL,
+    LEVEL0_NUM_FILES_WITH_COMPACTION,
     WRITE_STALLS_ENUM_MAX,
     BYTES_FLUSHED,
     INTERNAL_CF_STATS_ENUM_MAX,
@@ -350,9 +352,11 @@
 class InternalStats {
  public:
   enum InternalCFStatsType {
-    LEVEL0_SLOWDOWN,
+    LEVEL0_SLOWDOWN_TOTAL,
+    LEVEL0_SLOWDOWN_WITH_COMPACTION,
     MEMTABLE_COMPACTION,
-    LEVEL0_NUM_FILES,
+    LEVEL0_NUM_FILES_TOTAL,
+    LEVEL0_NUM_FILES_WITH_COMPACTION,
     WRITE_STALLS_ENUM_MAX,
     BYTES_FLUSHED,
     INTERNAL_CF_STATS_ENUM_MAX,


@@ -271,6 +271,8 @@ enum Histograms : uint32_t {
   DB_SEEK,
   WRITE_STALL,
   SST_READ_MICROS,
+  // The number of subcompactions actually scheduled during a compaction
+  NUM_SUBCOMPACTIONS_SCHEDULED,
   HISTOGRAM_ENUM_MAX,  // TODO(ldemailly): enforce HistogramsNameMap match
 };
@@ -297,6 +299,7 @@ const std::vector<std::pair<Histograms, std::string>> HistogramsNameMap = {
     {DB_SEEK, "rocksdb.db.seek.micros"},
     {WRITE_STALL, "rocksdb.db.write.stall"},
     {SST_READ_MICROS, "rocksdb.sst.read.micros"},
+    {NUM_SUBCOMPACTIONS_SCHEDULED, "rocksdb.num.subcompactions.scheduled"},
 };

 struct HistogramData {
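
Usage note (standard statistics plumbing, not part of this diff): the new
histogram can be read back through the public Statistics interface like any
other histogram. A minimal sketch, assuming a statistics object was installed
via Options::statistics (e.g. rocksdb::CreateDBStatistics()):

    #include <iostream>
    #include <memory>
    #include "rocksdb/statistics.h"

    // Report the distribution of subcompactions scheduled per compaction.
    void ReportSubcompactions(const std::shared_ptr<rocksdb::Statistics>& stats) {
      rocksdb::HistogramData hist;
      stats->histogramData(rocksdb::NUM_SUBCOMPACTIONS_SCHEDULED, &hist);
      std::cout << "subcompactions scheduled: median=" << hist.median
                << " p99=" << hist.percentile99 << std::endl;
    }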