add Status check enforcement for stats_history_test (#7496)

Summary: Pull Request resolved: https://github.com/facebook/rocksdb/pull/7496

Reviewed By: zhichao-cao

Differential Revision: D24070007

Pulled By: ajkr

fbshipit-source-id: 4320413a4d7707774ee23a7e6232714d7ee7a57f
Author: Andrew Kryczka (2020-10-02 08:22:06 -07:00)
Committer: Facebook GitHub Bot
Parent: 1e00909730
Commit: 29ed766193
4 changed files with 71 additions and 50 deletions
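Background for the change (a sketch, not part of the original commit message): in an ASSERT_STATUS_CHECKED build, a rocksdb::Status asserts at destruction if its value was never inspected, so every call that returns a Status has to be consumed before the object goes out of scope. A minimal illustration using the public GetStatsHistory() API exercised by this test; the helper function and its name are hypothetical:

  #include <cstdint>
  #include <memory>

  #include "rocksdb/db.h"
  #include "rocksdb/stats_history.h"

  // Hypothetical helper, for illustration only.
  void ReadStatsHistory(rocksdb::DB* db, uint64_t now_seconds) {
    std::unique_ptr<rocksdb::StatsHistoryIterator> stats_iter;
    // GetStatsHistory() returns a Status; under ASSERT_STATUS_CHECKED it must
    // be examined (e.g. via ok()) before it is destroyed.
    rocksdb::Status s = db->GetStatsHistory(0, now_seconds + 1, &stats_iter);
    if (!s.ok() || stats_iter == nullptr) {
      return;  // calling ok() is what marks the Status as checked
    }
    for (; stats_iter->Valid(); stats_iter->Next()) {
      // consume stats_iter->GetStatsMap() here
    }
  }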


@@ -625,6 +625,7 @@ ifdef ASSERT_STATUS_CHECKED
   slice_test \
   sst_dump_test \
   statistics_test \
+  stats_history_test \
   thread_local_test \
   trace_analyzer_test \
   env_timed_test \
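Context for this one-line change: the list under ifdef ASSERT_STATUS_CHECKED names the tests expected to pass when RocksDB is built with status-check enforcement (for example, a build invoked roughly as ASSERT_STATUS_CHECKED=1 make stats_history_test); adding stats_history_test here opts the test into that mode.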


@@ -745,29 +745,34 @@ void DBImpl::PersistStats() {
   if (immutable_db_options_.persist_stats_to_disk) {
     WriteBatch batch;
+    Status s = Status::OK();
     if (stats_slice_initialized_) {
       ROCKS_LOG_INFO(immutable_db_options_.info_log,
                      "Reading %" ROCKSDB_PRIszt " stats from statistics\n",
                      stats_slice_.size());
       for (const auto& stat : stats_map) {
-        char key[100];
-        int length =
-            EncodePersistentStatsKey(now_seconds, stat.first, 100, key);
-        // calculate the delta from last time
-        if (stats_slice_.find(stat.first) != stats_slice_.end()) {
-          uint64_t delta = stat.second - stats_slice_[stat.first];
-          batch.Put(persist_stats_cf_handle_, Slice(key, std::min(100, length)),
-                    ToString(delta));
+        if (s.ok()) {
+          char key[100];
+          int length =
+              EncodePersistentStatsKey(now_seconds, stat.first, 100, key);
+          // calculate the delta from last time
+          if (stats_slice_.find(stat.first) != stats_slice_.end()) {
+            uint64_t delta = stat.second - stats_slice_[stat.first];
+            s = batch.Put(persist_stats_cf_handle_,
+                          Slice(key, std::min(100, length)), ToString(delta));
+          }
         }
       }
     }
     stats_slice_initialized_ = true;
     std::swap(stats_slice_, stats_map);
-    WriteOptions wo;
-    wo.low_pri = true;
-    wo.no_slowdown = true;
-    wo.sync = false;
-    Status s = Write(wo, &batch);
+    if (s.ok()) {
+      WriteOptions wo;
+      wo.low_pri = true;
+      wo.no_slowdown = true;
+      wo.sync = false;
+      s = Write(wo, &batch);
+    }
     if (!s.ok()) {
       ROCKS_LOG_INFO(immutable_db_options_.info_log,
                      "Writing to persistent stats CF failed -- %s",

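The hunk above threads a single Status through the whole stats snapshot: WriteBatch::Put() now contributes to s, and the final Write() only runs while s is still OK, so no returned Status is dropped. A condensed, self-contained sketch of the same pattern (illustrative only; this is not the actual DBImpl code, and the function name is made up):

  #include <map>
  #include <string>

  #include "rocksdb/db.h"
  #include "rocksdb/write_batch.h"

  // Hypothetical example: persist a map of values while checking every Status.
  rocksdb::Status PersistValues(rocksdb::DB* db,
                                const std::map<std::string, std::string>& kvs) {
    rocksdb::WriteBatch batch;
    rocksdb::Status s;
    for (const auto& kv : kvs) {
      if (s.ok()) {
        s = batch.Put(kv.first, kv.second);  // Put() itself returns a Status
      }
    }
    if (s.ok()) {
      rocksdb::WriteOptions wo;
      wo.sync = false;
      s = db->Write(wo, &batch);
    }
    return s;  // the caller is responsible for checking this Status
  }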

@@ -685,41 +685,56 @@ Status DBImpl::PersistentStatsProcessFormatVersion() {
         (kStatsCFCurrentFormatVersion < format_version_recovered &&
          kStatsCFCompatibleFormatVersion < compatible_version_recovered)) {
       if (!s_format.ok() || !s_compatible.ok()) {
-        ROCKS_LOG_INFO(
+        ROCKS_LOG_WARN(
             immutable_db_options_.info_log,
-            "Reading persistent stats version key failed. Format key: %s, "
-            "compatible key: %s",
+            "Recreating persistent stats column family since reading "
+            "persistent stats version key failed. Format key: %s, compatible "
+            "key: %s",
             s_format.ToString().c_str(), s_compatible.ToString().c_str());
       } else {
-        ROCKS_LOG_INFO(
+        ROCKS_LOG_WARN(
             immutable_db_options_.info_log,
-            "Disable persistent stats due to corrupted or incompatible format "
-            "version\n");
+            "Recreating persistent stats column family due to corrupted or "
+            "incompatible format version. Recovered format: %" PRIu64
+            "; recovered format compatible since: %" PRIu64 "\n",
+            format_version_recovered, compatible_version_recovered);
       }
-      DropColumnFamily(persist_stats_cf_handle_);
-      DestroyColumnFamilyHandle(persist_stats_cf_handle_);
+      s = DropColumnFamily(persist_stats_cf_handle_);
+      if (s.ok()) {
+        s = DestroyColumnFamilyHandle(persist_stats_cf_handle_);
+      }
       ColumnFamilyHandle* handle = nullptr;
-      ColumnFamilyOptions cfo;
-      OptimizeForPersistentStats(&cfo);
-      s = CreateColumnFamily(cfo, kPersistentStatsColumnFamilyName, &handle);
-      persist_stats_cf_handle_ = static_cast<ColumnFamilyHandleImpl*>(handle);
-      // should also persist version here because old stats CF is discarded
-      should_persist_format_version = true;
+      if (s.ok()) {
+        ColumnFamilyOptions cfo;
+        OptimizeForPersistentStats(&cfo);
+        s = CreateColumnFamily(cfo, kPersistentStatsColumnFamilyName, &handle);
+      }
+      if (s.ok()) {
+        persist_stats_cf_handle_ = static_cast<ColumnFamilyHandleImpl*>(handle);
+        // should also persist version here because old stats CF is discarded
+        should_persist_format_version = true;
+      }
     }
   }
-  if (should_persist_format_version) {
+  if (s.ok() && should_persist_format_version) {
     // Persistent stats CF being created for the first time, need to write
     // format version key
     WriteBatch batch;
-    batch.Put(persist_stats_cf_handle_, kFormatVersionKeyString,
-              ToString(kStatsCFCurrentFormatVersion));
-    batch.Put(persist_stats_cf_handle_, kCompatibleVersionKeyString,
-              ToString(kStatsCFCompatibleFormatVersion));
-    WriteOptions wo;
-    wo.low_pri = true;
-    wo.no_slowdown = true;
-    wo.sync = false;
-    s = Write(wo, &batch);
+    if (s.ok()) {
+      s = batch.Put(persist_stats_cf_handle_, kFormatVersionKeyString,
+                    ToString(kStatsCFCurrentFormatVersion));
+    }
+    if (s.ok()) {
+      s = batch.Put(persist_stats_cf_handle_, kCompatibleVersionKeyString,
+                    ToString(kStatsCFCompatibleFormatVersion));
+    }
+    if (s.ok()) {
+      WriteOptions wo;
+      wo.low_pri = true;
+      wo.no_slowdown = true;
+      wo.sync = false;
+      s = Write(wo, &batch);
+    }
   }
   mutex_.Lock();
   return s;
@@ -1619,7 +1634,7 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
     }
   }
   if (s.ok() && impl->immutable_db_options_.persist_stats_to_disk) {
-    // try to read format version but no need to fail Open() even if it fails
+    // try to read format version
     s = impl->PersistentStatsProcessFormatVersion();
   }
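One behavioral note on the Open() hunk above: the old comment's promise that a format-version read failure need not fail Open() is dropped, which lines up with PersistentStatsProcessFormatVersion() now surfacing errors (from DropColumnFamily(), CreateColumnFamily(), or the batched writes) through its return value. A caller that enables persist_stats_to_disk should therefore check the Status from DB::Open() as usual; a minimal sketch with a placeholder path:

  #include "rocksdb/db.h"

  int main() {
    rocksdb::Options options;
    options.create_if_missing = true;
    options.persist_stats_to_disk = true;  // enables the persistent stats CF
    rocksdb::DB* db = nullptr;
    rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/example_db", &db);
    if (!s.ok()) {
      // handle the error; with persist_stats_to_disk set, a failure while
      // processing the stats format version can surface here
      return 1;
    }
    delete db;
    return 0;
  }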


@@ -155,7 +155,7 @@ TEST_F(StatsHistoryTest, GetStatsHistoryInMemory) {
       [&] { mock_env_->MockSleepForSeconds(kPeriodSec); });
   std::unique_ptr<StatsHistoryIterator> stats_iter;
-  db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter);
+  ASSERT_OK(db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter));
   ASSERT_TRUE(stats_iter != nullptr);
   // disabled stats snapshots
   ASSERT_OK(dbfull()->SetDBOptions({{"stats_persist_period_sec", "0"}}));
@@ -171,7 +171,7 @@ TEST_F(StatsHistoryTest, GetStatsHistoryInMemory) {
     dbfull()->TEST_WaitForStatsDumpRun(
         [&] { mock_env_->MockSleepForSeconds(1); });
   }
-  db_->GetStatsHistory(0, mock_env_->NowSeconds(), &stats_iter);
+  ASSERT_OK(db_->GetStatsHistory(0, mock_env_->NowSeconds(), &stats_iter));
   ASSERT_TRUE(stats_iter != nullptr);
   size_t stats_count_new = 0;
   for (; stats_iter->Valid(); stats_iter->Next()) {
@@ -207,7 +207,7 @@ TEST_F(StatsHistoryTest, InMemoryStatsHistoryPurging) {
   delete iterator;
   ASSERT_OK(Flush());
   ASSERT_OK(Delete("sol"));
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   // second round of ops
   ASSERT_OK(Put("saigon", "saigon"));
@@ -219,7 +219,7 @@ TEST_F(StatsHistoryTest, InMemoryStatsHistoryPurging) {
   }
   delete iterator;
   ASSERT_OK(Flush());
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   const int kIterations = 10;
   for (int i = 0; i < kIterations; ++i) {
@@ -228,7 +228,7 @@ TEST_F(StatsHistoryTest, InMemoryStatsHistoryPurging) {
   }
   std::unique_ptr<StatsHistoryIterator> stats_iter;
-  db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter);
+  ASSERT_OK(db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter));
   ASSERT_TRUE(stats_iter != nullptr);
   size_t stats_count = 0;
   int slice_count = 0;
@@ -250,7 +250,7 @@ TEST_F(StatsHistoryTest, InMemoryStatsHistoryPurging) {
         [&] { mock_env_->MockSleepForSeconds(kPeriodSec); });
   }
-  db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter);
+  ASSERT_OK(db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter));
   ASSERT_TRUE(stats_iter != nullptr);
   size_t stats_count_reopen = 0;
   slice_count = 0;
@@ -323,7 +323,7 @@ TEST_F(StatsHistoryTest, GetStatsHistoryFromDisk) {
   ASSERT_GE(key_count3, key_count2);
   ASSERT_EQ(key_count3 - key_count2, key_count2 - key_count1);
   std::unique_ptr<StatsHistoryIterator> stats_iter;
-  db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter);
+  ASSERT_OK(db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter));
   ASSERT_TRUE(stats_iter != nullptr);
   size_t stats_count = 0;
   int slice_count = 0;
@@ -344,7 +344,7 @@ TEST_F(StatsHistoryTest, GetStatsHistoryFromDisk) {
   ASSERT_EQ(stats_count, key_count3 - 2);
   // verify reopen will not cause data loss
   ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter);
+  ASSERT_OK(db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter));
   ASSERT_TRUE(stats_iter != nullptr);
   size_t stats_count_reopen = 0;
   int slice_count_reopen = 0;
@@ -416,7 +416,7 @@ TEST_F(StatsHistoryTest, PersitentStatsVerifyValue) {
   std::map<std::string, uint64_t> stats_map_after;
   ASSERT_TRUE(options.statistics->getTickerMap(&stats_map_after));
   std::unique_ptr<StatsHistoryIterator> stats_iter;
-  db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter);
+  ASSERT_OK(db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter));
   ASSERT_TRUE(stats_iter != nullptr);
   std::string sample = "rocksdb.num.iterator.deleted";
   uint64_t recovered_value = 0;
@@ -433,7 +433,7 @@ TEST_F(StatsHistoryTest, PersitentStatsVerifyValue) {
   // test stats value retains after recovery
   ReopenWithColumnFamilies({"default", "pikachu"}, options);
-  db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter);
+  ASSERT_OK(db_->GetStatsHistory(0, mock_env_->NowSeconds() + 1, &stats_iter));
   ASSERT_TRUE(stats_iter != nullptr);
   uint64_t new_recovered_value = 0;
   for (int i = 2; stats_iter->Valid(); stats_iter->Next(), i++) {
@@ -485,7 +485,7 @@ TEST_F(StatsHistoryTest, PersistentStatsCreateColumnFamilies) {
   uint64_t num_write_wal = 0;
   std::string sample = "rocksdb.write.wal";
   std::unique_ptr<StatsHistoryIterator> stats_iter;
-  db_->GetStatsHistory(0, mock_env_->NowSeconds(), &stats_iter);
+  ASSERT_OK(db_->GetStatsHistory(0, mock_env_->NowSeconds(), &stats_iter));
   ASSERT_TRUE(stats_iter != nullptr);
   for (; stats_iter->Valid(); stats_iter->Next()) {
     auto stats_map = stats_iter->GetStatsMap();
@@ -521,7 +521,7 @@ TEST_F(StatsHistoryTest, PersistentStatsCreateColumnFamilies) {
   ASSERT_NOK(db_->CreateColumnFamily(cf_opts, kPersistentStatsColumnFamilyName,
                                      &handle));
   // verify stats is not affected by prior failed CF creation
-  db_->GetStatsHistory(0, mock_env_->NowSeconds(), &stats_iter);
+  ASSERT_OK(db_->GetStatsHistory(0, mock_env_->NowSeconds(), &stats_iter));
   ASSERT_TRUE(stats_iter != nullptr);
   num_write_wal = 0;
   for (; stats_iter->Valid(); stats_iter->Next()) {
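Every test edit above follows the same recipe: a call whose returned Status used to be dropped is wrapped in ASSERT_OK() (or ASSERT_NOK() where a failure is expected), which both asserts on the result and counts as checking the Status. Taking the CompactRange lines as the representative example:

  // before: the returned Status is discarded and trips ASSERT_STATUS_CHECKED
  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
  // after: ASSERT_OK() asserts on, and thereby consumes, the Status
  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));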