Correct Statistics FLUSH_WRITE_BYTES
Summary:
In https://reviews.facebook.net/D56271, we fixed an issue where flush was considered as compaction. However, that change made us mistakenly count FLUSH_WRITE_BYTES twice (once in flush_job and once in db_impl). This patch removes the one incremented in db_impl.

Test Plan: db_test

Reviewers: yiwu, andrewkr, IslamAbdelRahman, kradhakrishnan, sdong

Reviewed By: sdong

Subscribers: andrewkr, dhruba, leveldb

Differential Revision: https://reviews.facebook.net/D57111
commit 24110ce90c
parent b71c4e613f
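For context, the sketch below is not part of this commit; it shows one way an application observes the FLUSH_WRITE_BYTES ticker that this patch corrects. Before the fix, both FlushJob and DBImpl bumped the ticker, so the value read here was roughly twice the bytes actually flushed. The database path and value size are arbitrary illustrative choices.

```cpp
// Minimal sketch: enable statistics, force a flush, read FLUSH_WRITE_BYTES.
#include <cassert>
#include <cstdio>
#include <string>

#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();  // enable ticker stats

  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(options, "/tmp/flush_write_bytes_demo", &db);
  assert(s.ok());

  // Write ~1MB and force a memtable flush so FLUSH_WRITE_BYTES is incremented.
  s = db->Put(rocksdb::WriteOptions(), "key", std::string(1 << 20, 'x'));
  assert(s.ok());
  s = db->Flush(rocksdb::FlushOptions());
  assert(s.ok());

  // With the double counting described above, this value was ~2x the flushed bytes.
  uint64_t flushed =
      options.statistics->getTickerCount(rocksdb::FLUSH_WRITE_BYTES);
  std::printf("FLUSH_WRITE_BYTES = %llu\n",
              static_cast<unsigned long long>(flushed));

  delete db;
  return 0;
}
```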
db/db_impl.cc
@@ -1586,7 +1586,6 @@ Status DBImpl::FlushMemTableToOutputFile(
     // true, mark DB read-only
     bg_error_ = s;
   }
-  RecordFlushIOStats();
   if (s.ok()) {
 #ifndef ROCKSDB_LITE
     // may temporarily unlock and lock the mutex.
@@ -2655,11 +2654,6 @@ void DBImpl::SchedulePendingCompaction(ColumnFamilyData* cfd) {
   }
 }
 
-void DBImpl::RecordFlushIOStats() {
-  RecordTick(stats_, FLUSH_WRITE_BYTES, IOSTATS(bytes_written));
-  IOSTATS_RESET(bytes_written);
-}
-
 void DBImpl::BGWorkFlush(void* db) {
   IOSTATS_SET_THREAD_POOL_ID(Env::Priority::HIGH);
   TEST_SYNC_POINT("DBImpl::BGWorkFlush");
@@ -2793,7 +2787,6 @@ void DBImpl::BackgroundCallFlush() {
     bg_flush_scheduled_--;
     // See if there's more work to be done
     MaybeScheduleFlushOrCompaction();
-    RecordFlushIOStats();
     bg_cv_.SignalAll();
     // IMPORTANT: there should be no code after calling SignalAll. This call may
     // signal the DB destructor that it's OK to proceed with destruction. In
db/db_impl.h
@@ -544,9 +544,6 @@ class DBImpl : public DB {
   // Wait for memtable flushed
   Status WaitForFlushMemTable(ColumnFamilyData* cfd);
 
-  void RecordFlushIOStats();
-  void RecordCompactionIOStats();
-
 #ifndef ROCKSDB_LITE
   Status CompactFilesImpl(
       const CompactionOptions& compact_options, ColumnFamilyData* cfd,
db/flush_job.cc
@@ -115,8 +115,10 @@ void FlushJob::ReportFlushInputSize(const autovector<MemTable*>& mems) {
 }
 
 void FlushJob::RecordFlushIOStats() {
-  ThreadStatusUtil::SetThreadOperationProperty(
+  RecordTick(stats_, FLUSH_WRITE_BYTES, IOSTATS(bytes_written));
+  ThreadStatusUtil::IncreaseThreadOperationProperty(
       ThreadStatus::FLUSH_BYTES_WRITTEN, IOSTATS(bytes_written));
+  IOSTATS_RESET(bytes_written);
 }
 
 Status FlushJob::Run(FileMetaData* file_meta) {
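The reworked FlushJob::RecordFlushIOStats above follows a record-then-reset pattern: the thread-local IOSTATS(bytes_written) counter is drained into the ticker and then cleared, so a later call cannot report the same bytes again. The standalone sketch below illustrates that pattern only; it is not RocksDB code, and the names (g_bytes_written, flush_write_bytes, record_flush_io_stats) are invented for the example.

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

thread_local uint64_t g_bytes_written = 0;    // stands in for IOSTATS(bytes_written)
std::atomic<uint64_t> flush_write_bytes{0};   // stands in for the FLUSH_WRITE_BYTES ticker

// File writes bump the thread-local counter.
void write_to_file(uint64_t n) { g_bytes_written += n; }

void record_flush_io_stats() {
  // Drain the thread-local counter into the shared ticker, then reset it so a
  // later call does not count the same bytes again (the role of IOSTATS_RESET).
  flush_write_bytes.fetch_add(g_bytes_written, std::memory_order_relaxed);
  g_bytes_written = 0;
}

int main() {
  write_to_file(4096);
  record_flush_io_stats();  // ticker += 4096
  record_flush_io_stats();  // no-op: counter already reset, nothing double counted
  std::printf("ticker = %llu\n",
              static_cast<unsigned long long>(flush_write_bytes.load()));
  return 0;
}
```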
@@ -322,13 +324,14 @@ Status FlushJob::WriteLevel0Table(const autovector<MemTable*>& mems,
                   meta->marked_for_compaction);
   }
 
   // Note that here we treat flush as level 0 compaction in internal stats
   InternalStats::CompactionStats stats(1);
   stats.micros = db_options_.env->NowMicros() - start_micros;
   stats.bytes_written = meta->fd.GetFileSize();
   cfd_->internal_stats()->AddCompactionStats(0 /* level */, stats);
   cfd_->internal_stats()->AddCFStats(InternalStats::BYTES_FLUSHED,
                                      meta->fd.GetFileSize());
+  RecordTick(stats_, FLUSH_WRITE_BYTES, meta->fd.GetFileSize());
   RecordFlushIOStats();
   return s;
 }