Bug fix: table readers created by TableCache::Get() don't have their latency histogram reported

Summary: TableCache::Get() passes its arguments to FindTable() in the wrong positions, so table readers created by Get() are never associated with the per-level read latency histogram and thus never update it.

Test Plan: Will write a unit test for that.

Subscribers: leveldb, dhruba

Differential Revision: https://reviews.facebook.net/D46035
This commit is contained in:
sdong 2015-09-02 10:25:20 -07:00
parent b42cd6bed5
commit 3e0a672c50
5 changed files with 104 additions and 16 deletions

View File

@ -596,6 +596,82 @@ TEST_F(DBTest, AggregatedTableProperties) {
}
}
// Verifies that per-level read latency histograms appear in the
// "rocksdb.dbstats" property once a level's files are actually read —
// via Get(), via iteration, and via table-reader preloading — and that
// levels that were never read report no histogram.
TEST_F(DBTest, ReadLatencyHistogramByLevel) {
  Options options = CurrentOptions();
  options.write_buffer_size = 110 << 10;
  options.level0_file_num_compaction_trigger = 3;
  options.num_levels = 4;
  options.compression = kNoCompression;
  options.max_bytes_for_level_base = 450 << 10;
  options.target_file_size_base = 98 << 10;
  options.max_write_buffer_number = 2;
  options.statistics = rocksdb::CreateDBStatistics();
  options.max_open_files = 100;
  BlockBasedTableOptions table_options;
  table_options.no_block_cache = true;
  // Fix: table_options was previously constructed but never installed,
  // so no_block_cache had no effect. Install it so reads always reach
  // the table reader and get recorded in the latency histogram.
  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
  DestroyAndReopen(options);

  // Create several SST files spread across levels 0 and 1.
  Random rnd(301);
  for (int num = 0; num < 5; num++) {
    Put("foo", "bar");
    GenerateNewRandomFile(&rnd);
  }
  std::string prop;
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));

  // Issue Get() after the flushes; levels 0 and 1 are read and should
  // report latency histograms, while level 2 (never read) should not.
  for (int key = 0; key < 50; key++) {
    Get(Key(key));
  }
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // Reopen and issue Get(). The latency should be tracked again, which
  // exercises table readers created lazily by TableCache::Get().
  Reopen(options);
  for (int key = 0; key < 50; key++) {
    Get(Key(key));
  }
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // Reopen and iterate. Before any read, no histogram should exist;
  // after a full scan, levels 0 and 1 should be tracked.
  Reopen(options);
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
  {
    unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
    for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
    }
  }
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));

  // max_open_files == -1 preloads all table readers at DB open, which
  // should already populate the histograms for the levels with files.
  options.max_open_files = -1;
  Reopen(options);
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
  for (int key = 0; key < 50; key++) {
    Get(Key(key));
  }
  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
}
TEST_F(DBTest, AggregatedTablePropertiesAtLevel) {
const int kTableCount = 100;
const int kKeysPerTable = 10;

View File

@ -176,9 +176,10 @@ Iterator* TableCache::NewIterator(const ReadOptions& options,
} else {
table_reader = fd.table_reader;
if (table_reader == nullptr) {
Status s = FindTable(env_options, icomparator, fd, &handle,
options.read_tier == kBlockCacheTier,
!for_compaction, file_read_hist);
Status s =
FindTable(env_options, icomparator, fd, &handle,
options.read_tier == kBlockCacheTier /* no_io */,
!for_compaction /* record read_stats */, file_read_hist);
if (!s.ok()) {
return NewErrorIterator(s, arena);
}
@ -254,7 +255,8 @@ Status TableCache::Get(const ReadOptions& options,
if (!t) {
s = FindTable(env_options_, internal_comparator, fd, &handle,
options.read_tier == kBlockCacheTier, file_read_hist);
options.read_tier == kBlockCacheTier /* no_io */,
true /* record_read_stats */, file_read_hist);
if (s.ok()) {
t = GetTableReaderFromHandle(handle);
}

View File

@ -20,9 +20,11 @@
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "db/dbformat.h"
#include "db/internal_stats.h"
#include "db/table_cache.h"
#include "db/version_set.h"
#include "table/table_reader.h"
@ -280,14 +282,15 @@ class VersionBuilder::Rep {
CheckConsistency(vstorage);
}
void LoadTableHandlers(int max_threads) {
void LoadTableHandlers(InternalStats* internal_stats, int max_threads) {
assert(table_cache_ != nullptr);
std::vector<FileMetaData*> files_meta;
// <file metadata, level>
std::vector<std::pair<FileMetaData*, int>> files_meta;
for (int level = 0; level < base_vstorage_->num_levels(); level++) {
for (auto& file_meta_pair : levels_[level].added_files) {
auto* file_meta = file_meta_pair.second;
assert(!file_meta->table_reader_handle);
files_meta.push_back(file_meta);
files_meta.emplace_back(file_meta, level);
}
}
@ -299,10 +302,13 @@ class VersionBuilder::Rep {
break;
}
auto* file_meta = files_meta[file_idx];
table_cache_->FindTable(
env_options_, *(base_vstorage_->InternalComparator()),
file_meta->fd, &file_meta->table_reader_handle, false);
auto* file_meta = files_meta[file_idx].first;
int level = files_meta[file_idx].second;
table_cache_->FindTable(env_options_,
*(base_vstorage_->InternalComparator()),
file_meta->fd, &file_meta->table_reader_handle,
false /*no_io */, true /* record_read_stats */,
internal_stats->GetFileReadHist(level));
if (file_meta->table_reader_handle != nullptr) {
// Load table_reader
file_meta->fd.table_reader = table_cache_->GetTableReaderFromHandle(
@ -350,8 +356,9 @@ void VersionBuilder::Apply(VersionEdit* edit) { rep_->Apply(edit); }
// Writes the accumulated edit state into *vstorage (forwards to Rep).
void VersionBuilder::SaveTo(VersionStorageInfo* vstorage) {
rep_->SaveTo(vstorage);
}
void VersionBuilder::LoadTableHandlers(int max_threads) {
rep_->LoadTableHandlers(max_threads);
// Pre-loads table readers for the files added by the applied edits,
// using internal_stats to attribute reads to per-level latency
// histograms (forwards to Rep).
void VersionBuilder::LoadTableHandlers(InternalStats* internal_stats,
int max_threads) {
rep_->LoadTableHandlers(internal_stats, max_threads);
}
void VersionBuilder::MaybeAddFile(VersionStorageInfo* vstorage, int level,
FileMetaData* f) {

View File

@ -16,6 +16,7 @@ class TableCache;
class VersionStorageInfo;
class VersionEdit;
struct FileMetaData;
class InternalStats;
// A helper class so we can efficiently apply a whole sequence
// of edits to a particular state without creating intermediate
@ -30,7 +31,7 @@ class VersionBuilder {
int level);
void Apply(VersionEdit* edit);
void SaveTo(VersionStorageInfo* vstorage);
void LoadTableHandlers(int max_threads = 1);
void LoadTableHandlers(InternalStats* internal_stats, int max_threads = 1);
void MaybeAddFile(VersionStorageInfo* vstorage, int level, FileMetaData* f);
private:

View File

@ -2045,7 +2045,8 @@ Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
db_options_->max_open_files == -1) {
// unlimited table cache. Pre-load table handle now.
// Need to do it out of the mutex.
builder_guard->version_builder()->LoadTableHandlers();
builder_guard->version_builder()->LoadTableHandlers(
column_family_data->internal_stats());
}
// This is fine because everything inside of this block is serialized --
@ -2496,7 +2497,8 @@ Status VersionSet::Recover(
if (db_options_->max_open_files == -1) {
// unlimited table cache. Pre-load table handle now.
// Need to do it out of the mutex.
builder->LoadTableHandlers(db_options_->max_file_opening_threads);
builder->LoadTableHandlers(cfd->internal_stats(),
db_options_->max_file_opening_threads);
}
Version* v = new Version(cfd, this, current_version_number_++);