Add some simulator cache and block tracer tests to ASSERT_STATUS_CHECKED (#7305)
Summary: More tests now pass. When in doubt, I added a TODO comment to check what should happen with an ignored error.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7305

Reviewed By: akankshamahajan15

Differential Revision: D23301262

Pulled By: ajkr

fbshipit-source-id: 5f120edc7393560aefc0633250277bbc7e8de9e6
Parent: e653af7164
Commit: b7e1c5213f
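Background on the pattern applied throughout this diff: when RocksDB is built with ASSERT_STATUS_CHECKED, a Status that is destroyed without ever being examined triggers an assertion, and Status::PermitUncheckedError() marks a status as deliberately ignored so the assertion does not fire. The sketch below illustrates the idea only; it is not RocksDB's actual Status implementation, and the names CheckedStatus and DeleteDirBestEffort are made up for the example.

#include <cassert>
#include <string>

// Illustrative stand-in for a "checked" Status, assuming an
// ASSERT_STATUS_CHECKED-style build mode. Names are hypothetical.
class CheckedStatus {
 public:
  explicit CheckedStatus(int code = 0) : code_(code) {}
  ~CheckedStatus() {
    // Destroying a status that was never examined is treated as a bug.
    assert(checked_ && "Status ignored; call ok() or PermitUncheckedError()");
  }
  bool ok() const {
    checked_ = true;  // examining the value counts as checking it
    return code_ == 0;
  }
  void PermitUncheckedError() const {
    checked_ = true;  // explicitly opt out of checking
  }

 private:
  int code_;
  mutable bool checked_ = false;
};

// Hypothetical helper standing in for calls like env->DeleteDir(path).
CheckedStatus DeleteDirBestEffort(const std::string& /*path*/) {
  return CheckedStatus(0);
}

int main() {
  // Deliberately ignored result, mirroring the hunks below.
  DeleteDirBestEffort("/tmp/example_db").PermitUncheckedError();

  // Checked result: ok() marks the status as examined.
  CheckedStatus s = DeleteDirBestEffort("/tmp/example_db");
  if (!s.ok()) {
    // handle or log the failure here
  }
  return 0;
}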
Makefile (4 additions)

@@ -609,6 +609,10 @@ ifdef ASSERT_STATUS_CHECKED
   timer_queue_test \
   timer_test \
   util_merge_operators_test \
+  block_cache_trace_analyzer_test \
+  block_cache_tracer_test \
+  cache_simulator_test \
+  sim_cache_test \
   version_edit_test \
   work_queue_test \
   write_controller_test \
@@ -3821,7 +3821,8 @@ Status DestroyDB(const std::string& dbname, const Options& options,
           }
         }
       }
-      env->DeleteDir(path);
+      // TODO: Should we return an error if we cannot delete the directory?
+      env->DeleteDir(path).PermitUncheckedError();
     }
   }
 
@@ -3849,7 +3850,7 @@ Status DestroyDB(const std::string& dbname, const Options& options,
           }
         }
       }
-      // TODO: Should we check for errors here?
+      // Ignore error in case dir contains other files
       env->DeleteDir(archivedir).PermitUncheckedError();
     }
 
@@ -3866,7 +3867,8 @@ Status DestroyDB(const std::string& dbname, const Options& options,
        }
      }
    }
-  env->DeleteDir(soptions.wal_dir);
+  // Ignore error in case dir contains other files
+  env->DeleteDir(soptions.wal_dir).PermitUncheckedError();
 }
 
 // Ignore error since state is already gone
@@ -147,13 +147,13 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
     // DeleteScheduler::CleanupDirectory on the same dir later, it will be
     // safe
     std::vector<std::string> filenames;
-    result.env->GetChildren(result.wal_dir, &filenames);
+    result.env->GetChildren(result.wal_dir, &filenames).PermitUncheckedError();
     for (std::string& filename : filenames) {
       if (filename.find(".log.trash", filename.length() -
                             std::string(".log.trash").length()) !=
           std::string::npos) {
         std::string trash_file = result.wal_dir + "/" + filename;
-        result.env->DeleteFile(trash_file);
+        result.env->DeleteFile(trash_file).PermitUncheckedError();
       }
     }
   }
@@ -901,7 +901,11 @@ Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
             Status::Corruption("log record too small"));
         continue;
       }
-      WriteBatchInternal::SetContents(&batch, record);
+
+      status = WriteBatchInternal::SetContents(&batch, record);
+      if (!status.ok()) {
+        return status;
+      }
       SequenceNumber sequence = WriteBatchInternal::Sequence(&batch);
 
       if (immutable_db_options_.wal_recovery_mode ==
@@ -1292,7 +1296,8 @@ Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
       cfd->GetLatestMutableCFOptions()->paranoid_file_checks;
 
   int64_t _current_time = 0;
-  env_->GetCurrentTime(&_current_time);  // ignore error
+  env_->GetCurrentTime(&_current_time)
+      .PermitUncheckedError();  // ignore error
   const uint64_t current_time = static_cast<uint64_t>(_current_time);
   meta.oldest_ancester_time = current_time;
 
@@ -1693,13 +1698,15 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
           std::string file_path = path + "/" + file_name;
           if (ParseFileName(file_name, &file_number, &file_type) &&
               file_type == kTableFile) {
+            // TODO: Check for errors from OnAddFile?
             if (known_file_sizes.count(file_name)) {
               // We're assuming that each sst file name exists in at most one of
               // the paths.
               sfm->OnAddFile(file_path, known_file_sizes.at(file_name),
-                             /* compaction */ false);
+                             /* compaction */ false)
+                  .PermitUncheckedError();
             } else {
-              sfm->OnAddFile(file_path);
+              sfm->OnAddFile(file_path).PermitUncheckedError();
             }
           }
         }
@@ -53,16 +53,24 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
         Header(options.info_log, "IDENTITY file: %s\n", file.c_str());
         break;
       case kDescriptorFile:
-        env->GetFileSize(dbname + "/" + file, &file_size);
-        Header(options.info_log, "MANIFEST file: %s size: %" PRIu64 " Bytes\n",
-               file.c_str(), file_size);
+        if (env->GetFileSize(dbname + "/" + file, &file_size).ok()) {
+          Header(options.info_log,
+                 "MANIFEST file: %s size: %" PRIu64 " Bytes\n", file.c_str(),
+                 file_size);
+        } else {
+          Error(options.info_log, "Error when reading MANIFEST file: %s/%s\n",
+                dbname.c_str(), file.c_str());
+        }
         break;
       case kLogFile:
-        env->GetFileSize(dbname + "/" + file, &file_size);
+        if (env->GetFileSize(dbname + "/" + file, &file_size).ok()) {
          char str[16];
          snprintf(str, sizeof(str), "%" PRIu64, file_size);
-          wal_info.append(file).append(" size: ").
-              append(str).append(" ; ");
+          wal_info.append(file).append(" size: ").append(str).append(" ; ");
+        } else {
+          Error(options.info_log, "Error when reading LOG file: %s/%s\n",
+                dbname.c_str(), file.c_str());
+        }
         break;
       case kTableFile:
         if (++file_num < 10) {
@@ -111,11 +119,14 @@ void DumpDBFileSummary(const ImmutableDBOptions& options,
   for (const std::string& file : files) {
     if (ParseFileName(file, &number, &type)) {
       if (type == kLogFile) {
-        env->GetFileSize(options.wal_dir + "/" + file, &file_size);
+        if (env->GetFileSize(options.wal_dir + "/" + file, &file_size).ok()) {
          char str[16];
          snprintf(str, sizeof(str), "%" PRIu64, file_size);
-          wal_info.append(file).append(" size: ").
-              append(str).append(" ; ");
+          wal_info.append(file).append(" size: ").append(str).append(" ; ");
+        } else {
+          Error(options.info_log, "Error when reading LOG file %s/%s\n",
+                options.wal_dir.c_str(), file.c_str());
+        }
       }
     }
   }
@@ -1209,7 +1209,8 @@ void DBIter::Seek(const Slice& target) {
 
 #ifndef ROCKSDB_LITE
   if (db_impl_ != nullptr && cfd_ != nullptr) {
-    db_impl_->TraceIteratorSeek(cfd_->GetID(), target);
+    // TODO: What do we do if this returns an error?
+    db_impl_->TraceIteratorSeek(cfd_->GetID(), target).PermitUncheckedError();
   }
 #endif  // ROCKSDB_LITE
 
@@ -1270,7 +1271,9 @@ void DBIter::SeekForPrev(const Slice& target) {
 
 #ifndef ROCKSDB_LITE
   if (db_impl_ != nullptr && cfd_ != nullptr) {
-    db_impl_->TraceIteratorSeekForPrev(cfd_->GetID(), target);
+    // TODO: What do we do if this returns an error?
+    db_impl_->TraceIteratorSeekForPrev(cfd_->GetID(), target)
+        .PermitUncheckedError();
   }
 #endif  // ROCKSDB_LITE
 
@@ -19,7 +19,7 @@ namespace ROCKSDB_NAMESPACE {
 namespace {
 int64_t MaybeCurrentTime(Env* env) {
   int64_t time = 1337346000;  // arbitrary fallback default
-  (void)env->GetCurrentTime(&time);
+  env->GetCurrentTime(&time).PermitUncheckedError();
   return time;
 }
 }  // namespace
@@ -3652,6 +3652,7 @@ VersionSet::~VersionSet() {
     file.DeleteMetadata();
   }
   obsolete_files_.clear();
+  io_status_.PermitUncheckedError();
 }
 
 void VersionSet::Reset() {
@@ -1415,8 +1415,8 @@ class MemTableInserter : public WriteBatch::Handler {
       // else insert the values to the memtable right away
     }
 
-    Status seek_status;
-    if (UNLIKELY(!SeekToColumnFamily(column_family_id, &seek_status))) {
+    Status ret_status;
+    if (UNLIKELY(!SeekToColumnFamily(column_family_id, &ret_status))) {
       bool batch_boundry = false;
       if (rebuilding_trx_ != nullptr) {
         assert(!write_after_commit_);
@@ -1426,10 +1426,8 @@ class MemTableInserter : public WriteBatch::Handler {
         batch_boundry = IsDuplicateKeySeq(column_family_id, key);
       }
       MaybeAdvanceSeq(batch_boundry);
-      return seek_status;
+      return ret_status;
     }
-    seek_status.PermitUncheckedError();  // Ignore errors
-    Status ret_status;
 
     MemTable* mem = cf_mems_->GetMemTable();
     auto* moptions = mem->GetImmutableMemTableOptions();
@@ -1544,8 +1542,8 @@ class MemTableInserter : public WriteBatch::Handler {
       // else insert the values to the memtable right away
     }
 
-    Status seek_status;
-    if (UNLIKELY(!SeekToColumnFamily(column_family_id, &seek_status))) {
+    Status ret_status;
+    if (UNLIKELY(!SeekToColumnFamily(column_family_id, &ret_status))) {
       bool batch_boundry = false;
       if (rebuilding_trx_ != nullptr) {
         assert(!write_after_commit_);
@@ -1555,7 +1553,7 @@ class MemTableInserter : public WriteBatch::Handler {
         batch_boundry = IsDuplicateKeySeq(column_family_id, key);
       }
       MaybeAdvanceSeq(batch_boundry);
-      return seek_status;
+      return ret_status;
     }
 
     ColumnFamilyData* cfd = cf_mems_->current();
@@ -1565,7 +1563,7 @@ class MemTableInserter : public WriteBatch::Handler {
             : 0;
     const ValueType delete_type =
         (0 == ts_sz) ? kTypeDeletion : kTypeDeletionWithTimestamp;
-    auto ret_status = DeleteImpl(column_family_id, key, Slice(), delete_type);
+    ret_status = DeleteImpl(column_family_id, key, Slice(), delete_type);
     // optimize for non-recovery mode
     if (UNLIKELY(!ret_status.IsTryAgain() && rebuilding_trx_ != nullptr)) {
       assert(!write_after_commit_);
@@ -1584,8 +1582,8 @@ class MemTableInserter : public WriteBatch::Handler {
       // else insert the values to the memtable right away
     }
 
-    Status seek_status;
-    if (UNLIKELY(!SeekToColumnFamily(column_family_id, &seek_status))) {
+    Status ret_status;
+    if (UNLIKELY(!SeekToColumnFamily(column_family_id, &ret_status))) {
       bool batch_boundry = false;
       if (rebuilding_trx_ != nullptr) {
         assert(!write_after_commit_);
@@ -1596,10 +1594,10 @@ class MemTableInserter : public WriteBatch::Handler {
         batch_boundry = IsDuplicateKeySeq(column_family_id, key);
       }
       MaybeAdvanceSeq(batch_boundry);
-      return seek_status;
+      return ret_status;
     }
 
-    auto ret_status =
+    ret_status =
         DeleteImpl(column_family_id, key, Slice(), kTypeSingleDeletion);
     // optimize for non-recovery mode
     if (UNLIKELY(!ret_status.IsTryAgain() && rebuilding_trx_ != nullptr)) {
@@ -1621,8 +1619,8 @@ class MemTableInserter : public WriteBatch::Handler {
       // else insert the values to the memtable right away
     }
 
-    Status seek_status;
-    if (UNLIKELY(!SeekToColumnFamily(column_family_id, &seek_status))) {
+    Status ret_status;
+    if (UNLIKELY(!SeekToColumnFamily(column_family_id, &ret_status))) {
       bool batch_boundry = false;
       if (rebuilding_trx_ != nullptr) {
         assert(!write_after_commit_);
@@ -1635,7 +1633,7 @@ class MemTableInserter : public WriteBatch::Handler {
         batch_boundry = IsDuplicateKeySeq(column_family_id, begin_key);
       }
       MaybeAdvanceSeq(batch_boundry);
-      return seek_status;
+      return ret_status;
     }
     if (db_ != nullptr) {
       auto cf_handle = cf_mems_->GetColumnFamilyHandle();
@@ -1661,7 +1659,7 @@ class MemTableInserter : public WriteBatch::Handler {
       }
     }
 
-    auto ret_status =
+    ret_status =
         DeleteImpl(column_family_id, begin_key, end_key, kTypeRangeDeletion);
     // optimize for non-recovery mode
     if (UNLIKELY(!ret_status.IsTryAgain() && rebuilding_trx_ != nullptr)) {
@@ -1683,8 +1681,8 @@ class MemTableInserter : public WriteBatch::Handler {
       // else insert the values to the memtable right away
     }
 
-    Status seek_status;
-    if (UNLIKELY(!SeekToColumnFamily(column_family_id, &seek_status))) {
+    Status ret_status;
+    if (UNLIKELY(!SeekToColumnFamily(column_family_id, &ret_status))) {
       bool batch_boundry = false;
       if (rebuilding_trx_ != nullptr) {
         assert(!write_after_commit_);
@@ -1695,10 +1693,9 @@ class MemTableInserter : public WriteBatch::Handler {
         batch_boundry = IsDuplicateKeySeq(column_family_id, key);
       }
       MaybeAdvanceSeq(batch_boundry);
-      return seek_status;
+      return ret_status;
     }
 
-    Status ret_status;
     MemTable* mem = cf_mems_->GetMemTable();
     auto* moptions = mem->GetImmutableMemTableOptions();
     bool perform_merge = false;
@@ -79,6 +79,7 @@ class MergingIterator : public InternalIterator {
     for (auto& child : children_) {
       child.DeleteIter(is_arena_mode_);
     }
+    status_.PermitUncheckedError();
   }
 
   bool Valid() const override { return current_ != nullptr && status_.ok(); }
@@ -306,8 +306,8 @@ Status BlockCacheTraceReader::ReadAccess(BlockCacheTraceRecord* record) {
 
 BlockCacheHumanReadableTraceWriter::~BlockCacheHumanReadableTraceWriter() {
   if (human_readable_trace_file_writer_) {
-    human_readable_trace_file_writer_->Flush();
-    human_readable_trace_file_writer_->Close();
+    human_readable_trace_file_writer_->Flush().PermitUncheckedError();
+    human_readable_trace_file_writer_->Close().PermitUncheckedError();
   }
 }
 
@@ -62,7 +62,8 @@ Tracer::Tracer(Env* env, const TraceOptions& trace_options,
       trace_options_(trace_options),
       trace_writer_(std::move(trace_writer)),
       trace_request_count_ (0) {
-  WriteHeader();
+  // TODO: What if this fails?
+  WriteHeader().PermitUncheckedError();
 }
 
 Tracer::~Tracer() { trace_writer_.reset(); }
@@ -22,8 +22,10 @@ bool GhostCache::Admit(const Slice& lookup_key) {
     sim_cache_->Release(handle);
     return true;
   }
-  sim_cache_->Insert(lookup_key, /*value=*/nullptr, lookup_key.size(),
+  // TODO: Should we check for errors here?
+  auto s = sim_cache_->Insert(lookup_key, /*value=*/nullptr, lookup_key.size(),
                      /*deleter=*/nullptr);
+  s.PermitUncheckedError();
   return false;
 }
 
@@ -45,8 +47,11 @@ void CacheSimulator::Access(const BlockCacheTraceRecord& access) {
     is_cache_miss = false;
   } else {
     if (access.no_insert == Boolean::kFalse && admit && access.block_size > 0) {
-      sim_cache_->Insert(access.block_key, /*value=*/nullptr, access.block_size,
+      // Ignore errors on insert
+      auto s = sim_cache_->Insert(access.block_key, /*value=*/nullptr,
+                                  access.block_size,
                          /*deleter=*/nullptr);
+      s.PermitUncheckedError();
     }
   }
   miss_ratio_stats_.UpdateMetrics(access.access_timestamp, is_user_access,
@@ -100,8 +105,11 @@ void PrioritizedCacheSimulator::AccessKVPair(
     sim_cache_->Release(handle);
     *is_cache_miss = false;
   } else if (!no_insert && *admitted && value_size > 0) {
-    sim_cache_->Insert(key, /*value=*/nullptr, value_size, /*deleter=*/nullptr,
+    // TODO: Should we check for an error here?
+    auto s = sim_cache_->Insert(key, /*value=*/nullptr, value_size,
+                                /*deleter=*/nullptr,
                        /*handle=*/nullptr, priority);
+    s.PermitUncheckedError();
   }
   if (update_metrics) {
     miss_ratio_stats_.UpdateMetrics(access.access_timestamp, is_user_access,
@@ -176,9 +184,12 @@ void HybridRowBlockCacheSimulator::Access(const BlockCacheTraceRecord& access) {
         /*is_user_access=*/true, &is_cache_miss, &admitted,
         /*update_metrics=*/true);
     if (access.referenced_data_size > 0 && inserted == InsertResult::ADMITTED) {
-      sim_cache_->Insert(row_key, /*value=*/nullptr,
-                         access.referenced_data_size, /*deleter=*/nullptr,
+      // TODO: Should we check for an error here?
+      auto s = sim_cache_->Insert(row_key, /*value=*/nullptr,
+                                  access.referenced_data_size,
+                                  /*deleter=*/nullptr,
                          /*handle=*/nullptr, Cache::Priority::HIGH);
+      s.PermitUncheckedError();
       status.row_key_status[row_key] = InsertResult::INSERTED;
     }
     return;
@@ -26,6 +26,7 @@ class CacheActivityLogger {
     MutexLock l(&mutex_);
 
     StopLoggingInternal();
+    bg_status_.PermitUncheckedError();
   }
 
   Status StartLogging(const std::string& activity_log_file, Env* env,
@@ -177,9 +178,11 @@ class SimCacheImpl : public SimCache {
     // *Lambda function without capture can be assgined to a function pointer
     Handle* h = key_only_cache_->Lookup(key);
     if (h == nullptr) {
-      key_only_cache_->Insert(key, nullptr, charge,
-                              [](const Slice& /*k*/, void* /*v*/) {}, nullptr,
+      // TODO: Check for error here?
+      auto s = key_only_cache_->Insert(
+          key, nullptr, charge, [](const Slice& /*k*/, void* /*v*/) {}, nullptr,
           priority);
+      s.PermitUncheckedError();
     } else {
       key_only_cache_->Release(h);
     }
@@ -156,8 +156,8 @@ TEST_F(SimCacheTest, SimCacheLogging) {
 
   int num_block_entries = 20;
   for (int i = 0; i < num_block_entries; i++) {
-    Put(Key(i), "val");
-    Flush();
+    ASSERT_OK(Put(Key(i), "val"));
+    ASSERT_OK(Flush());
   }
 
   std::string log_file = test::PerThreadDBPath(env_, "cache_log.txt");
@@ -172,7 +172,7 @@ TEST_F(SimCacheTest, SimCacheLogging) {
   ASSERT_OK(sim_cache->GetActivityLoggingStatus());
 
   std::string file_contents = "";
-  ReadFileToString(env_, log_file, &file_contents);
+  ASSERT_OK(ReadFileToString(env_, log_file, &file_contents));
 
   int lookup_num = 0;
   int add_num = 0;
@@ -22,7 +22,7 @@ FileTraceReader::FileTraceReader(
       buffer_(new char[kBufferSize]) {}
 
 FileTraceReader::~FileTraceReader() {
-  Close();
+  Close().PermitUncheckedError();
   delete[] buffer_;
 }
 
@@ -76,7 +76,7 @@ Status FileTraceReader::Read(std::string* data) {
   return s;
 }
 
-FileTraceWriter::~FileTraceWriter() { Close(); }
+FileTraceWriter::~FileTraceWriter() { Close().PermitUncheckedError(); }
 
 Status FileTraceWriter::Close() {
   file_writer_.reset();
 