No elide constructors (#7798)

Summary:
Added "no-elide-constructors to the ASSERT_STATUS_CHECK builds.  This flag gives more errors/warnings for some of the Status checks where an inner class checks a Status and later returns it.  In this case,  without the elide check on, the returned status may not have been checked in the caller, thereby bypassing the checked code.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7798

Reviewed By: jay-zhuang

Differential Revision: D25680451

Pulled By: pdillinger

fbshipit-source-id: c3f14ed9e2a13f0a8c54d839d5fb4d1fc1e93917
mrambacher 2020-12-23 16:54:05 -08:00 committed by Facebook GitHub Bot
parent 30a5ed9c53
commit 55e99688cc
33 changed files with 310 additions and 302 deletions


@ -190,6 +190,11 @@ else
endif
ifdef ASSERT_STATUS_CHECKED
# For ASC, turn off constructor elision, preventing the case where a constructor returned
# by a method may pass the ASC check if the status is checked in the inner method. Forcing
# the copy constructor to be invoked disables the optimization and will cause the calling method
# to check the status in order to prevent an error from being raised.
PLATFORM_CXXFLAGS += -fno-elide-constructors
ifeq ($(filter -DROCKSDB_ASSERT_STATUS_CHECKED,$(OPT)),)
OPT += -DROCKSDB_ASSERT_STATUS_CHECKED
endif


@ -335,7 +335,9 @@ ColumnFamilyOptions SanitizeOptions(const ImmutableDBOptions& db_options,
// was not used)
auto sfm = static_cast<SstFileManagerImpl*>(db_options.sst_file_manager.get());
for (size_t i = 0; i < result.cf_paths.size(); i++) {
DeleteScheduler::CleanupDirectory(db_options.env, sfm, result.cf_paths[i].path);
DeleteScheduler::CleanupDirectory(db_options.env, sfm,
result.cf_paths[i].path)
.PermitUncheckedError();
}
#endif
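
Many call sites in this change opt out explicitly instead: appending PermitUncheckedError() (the method name used throughout the diff) marks the status as deliberately ignored so the checked-status assertion stays quiet. A tiny sketch of the idiom, again with a hypothetical status type rather than RocksDB's real one:

#include <cassert>

// Hypothetical status type again; only the explicit opt-out method is new.
class MyStatus {
 public:
  ~MyStatus() { assert(checked_ && "status was never checked"); }
  bool ok() { checked_ = true; return true; }
  // Acknowledge that any error here is intentionally ignored.
  void PermitUncheckedError() { checked_ = true; }

 private:
  bool checked_ = false;
};

MyStatus BestEffortCleanup() { return MyStatus(); }  // failure is tolerable

int main() {
  // Silently dropping the return value would trip the assertion; the explicit
  // call documents the intent and satisfies the check.
  BestEffortCleanup().PermitUncheckedError();
  return 0;
}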


@ -73,7 +73,7 @@ class ColumnFamilyTestBase : public testing::Test {
db_options_.create_if_missing = true;
db_options_.fail_if_options_file_error = true;
db_options_.env = env_;
DestroyDB(dbname_, Options(db_options_, column_family_options_));
EXPECT_OK(DestroyDB(dbname_, Options(db_options_, column_family_options_)));
}
~ColumnFamilyTestBase() override {
@ -653,8 +653,8 @@ TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest) {
// after flushing file B is deleted. At the same time, the min log number of
// default CF is not written to manifest. Log file A still remains.
// Flushed to SST file Y.
Flush(1);
Flush(0);
ASSERT_OK(Flush(1));
ASSERT_OK(Flush(0));
ASSERT_OK(Put(1, "bar", "v3")); // seqID 4
ASSERT_OK(Put(1, "foo", "v4")); // seqID 5
ASSERT_OK(db_->FlushWAL(/*sync=*/false));
@ -708,15 +708,15 @@ TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest2) {
// and is set to current. Both CFs' min log number is set to file C so after
// flushing file B is deleted. Log file A still remains.
// Flushed to SST file Y.
Flush(1);
ASSERT_OK(Flush(1));
ASSERT_OK(Put(0, "bar", "v2")); // seqID 4
ASSERT_OK(Put(2, "bar", "v2")); // seqID 5
ASSERT_OK(Put(1, "bar", "v3")); // seqID 6
// Flushing all column families. This forces all CFs' min log to current. This
// is written to the manifest file. Log file C is cleared.
Flush(0);
Flush(1);
Flush(2);
ASSERT_OK(Flush(0));
ASSERT_OK(Flush(1));
ASSERT_OK(Flush(2));
// Write to log file D
ASSERT_OK(Put(1, "bar", "v4")); // seqID 7
ASSERT_OK(Put(1, "bar", "v5")); // seqID 8
@ -985,7 +985,7 @@ TEST_P(ColumnFamilyTest, FlushTest) {
for (int i = 0; i < 3; ++i) {
uint64_t max_total_in_memory_state =
MaxTotalInMemoryState();
Flush(i);
ASSERT_OK(Flush(i));
AssertMaxTotalInMemoryState(max_total_in_memory_state);
}
ASSERT_OK(Put(1, "foofoo", "bar"));
@ -1093,7 +1093,7 @@ TEST_P(ColumnFamilyTest, CrashAfterFlush) {
ASSERT_OK(batch.Put(handles_[0], Slice("foo"), Slice("bar")));
ASSERT_OK(batch.Put(handles_[1], Slice("foo"), Slice("bar")));
ASSERT_OK(db_->Write(WriteOptions(), &batch));
Flush(0);
ASSERT_OK(Flush(0));
fault_env->SetFilesystemActive(false);
std::vector<std::string> names;
@ -1103,7 +1103,7 @@ TEST_P(ColumnFamilyTest, CrashAfterFlush) {
}
}
Close();
fault_env->DropUnsyncedFileData();
ASSERT_OK(fault_env->DropUnsyncedFileData());
fault_env->ResetState();
Open(names, {});
@ -2236,7 +2236,7 @@ TEST_P(ColumnFamilyTest, FlushStaleColumnFamilies) {
// files for column family [one], because it's empty
AssertCountLiveFiles(4);
Flush(0);
ASSERT_OK(Flush(0));
ASSERT_EQ(0, dbfull()->TEST_total_log_size());
Close();
}
@ -3040,7 +3040,7 @@ TEST_P(ColumnFamilyTest, IteratorCloseWALFile1) {
Iterator* it = db_->NewIterator(ReadOptions(), handles_[1]);
ASSERT_OK(it->status());
// A flush will make `it` hold the last reference of its super version.
Flush(1);
ASSERT_OK(Flush(1));
ASSERT_OK(Put(1, "fodor", "mirko"));
ASSERT_OK(Put(0, "fodor", "mirko"));
@ -3093,7 +3093,7 @@ TEST_P(ColumnFamilyTest, IteratorCloseWALFile2) {
Iterator* it = db_->NewIterator(ro, handles_[1]);
ASSERT_OK(it->status());
// A flush will make `it` hold the last reference of its super version.
Flush(1);
ASSERT_OK(Flush(1));
ASSERT_OK(Put(1, "fodor", "mirko"));
ASSERT_OK(Put(0, "fodor", "mirko"));
@ -3147,7 +3147,7 @@ TEST_P(ColumnFamilyTest, ForwardIteratorCloseWALFile) {
CreateColumnFamilies({"one"});
ASSERT_OK(Put(1, "fodor", "mirko"));
ASSERT_OK(Put(1, "fodar2", "mirko"));
Flush(1);
ASSERT_OK(Flush(1));
// Create an iterator holding the current super version, as well as
// the SST file just flushed.
@ -3159,7 +3159,7 @@ TEST_P(ColumnFamilyTest, ForwardIteratorCloseWALFile) {
ASSERT_OK(Put(1, "fodor", "mirko"));
ASSERT_OK(Put(1, "fodar2", "mirko"));
Flush(1);
ASSERT_OK(Flush(1));
WaitForCompaction();
@ -3232,9 +3232,9 @@ TEST_P(ColumnFamilyTest, LogSyncConflictFlush) {
ROCKSDB_NAMESPACE::port::Thread thread([&] { ASSERT_OK(db_->SyncWAL()); });
TEST_SYNC_POINT("ColumnFamilyTest::LogSyncConflictFlush:1");
Flush(1);
ASSERT_OK(Flush(1));
ASSERT_OK(Put(1, "foo", "bar"));
Flush(1);
ASSERT_OK(Flush(1));
TEST_SYNC_POINT("ColumnFamilyTest::LogSyncConflictFlush:2");
@ -3256,7 +3256,7 @@ TEST_P(ColumnFamilyTest, DISABLED_LogTruncationTest) {
Build(0, 100);
// Flush the 0th column family to force a roll of the wal log
Flush(0);
ASSERT_OK(Flush(0));
// Add some more entries
Build(100, 100);
@ -3332,14 +3332,14 @@ TEST_P(ColumnFamilyTest, DefaultCfPathsTest) {
// Fill Column family 1.
PutRandomData(1, 100, 100);
Flush(1);
ASSERT_OK(Flush(1));
ASSERT_EQ(1, GetSstFileCount(cf_opt1.cf_paths[0].path));
ASSERT_EQ(0, GetSstFileCount(dbname_));
// Fill column family 2
PutRandomData(2, 100, 100);
Flush(2);
ASSERT_OK(Flush(2));
// SST from Column family 2 should be generated in
// db_paths which is dbname_ in this case.
@ -3358,14 +3358,14 @@ TEST_P(ColumnFamilyTest, MultipleCFPathsTest) {
Reopen({ColumnFamilyOptions(), cf_opt1, cf_opt2});
PutRandomData(1, 100, 100, true /* save */);
Flush(1);
ASSERT_OK(Flush(1));
// Check that files are generated in appropriate paths.
ASSERT_EQ(1, GetSstFileCount(cf_opt1.cf_paths[0].path));
ASSERT_EQ(0, GetSstFileCount(dbname_));
PutRandomData(2, 100, 100, true /* save */);
Flush(2);
ASSERT_OK(Flush(2));
ASSERT_EQ(1, GetSstFileCount(cf_opt2.cf_paths[0].path));
ASSERT_EQ(0, GetSstFileCount(dbname_));


@ -269,7 +269,7 @@ class CompactionJobTestBase : public testing::Test {
&write_buffer_manager_, &write_controller_,
/*block_cache_tracer=*/nullptr, /*io_tracer=*/nullptr));
compaction_job_stats_.Reset();
SetIdentityFile(env_, dbname_);
ASSERT_OK(SetIdentityFile(env_, dbname_));
VersionEdit new_db;
new_db.SetLogNumber(0);


@ -579,7 +579,7 @@ TEST_F(CorruptionTest, FileSystemStateCorrupted) {
if (iter == 0) { // corrupt file size
std::unique_ptr<WritableFile> file;
env_->NewWritableFile(filename, &file, EnvOptions());
ASSERT_OK(env_->NewWritableFile(filename, &file, EnvOptions()));
ASSERT_OK(file->Append(Slice("corrupted sst")));
file.reset();
Status x = TryReopen(&options);
@ -616,7 +616,7 @@ TEST_F(CorruptionTest, ParanoidFileChecksOnFlush) {
options.table_factory = mock;
mock->SetCorruptionMode(mode);
ASSERT_OK(DB::Open(options, dbname_, &db_));
assert(db_ != nullptr);
assert(db_ != nullptr); // suppress false clang-analyze report
Build(10);
s = db_->Flush(FlushOptions());
if (mode == mock::MockTableFactory::kCorruptNone) {
@ -642,7 +642,7 @@ TEST_F(CorruptionTest, ParanoidFileChecksOnCompact) {
std::make_shared<mock::MockTableFactory>();
options.table_factory = mock;
ASSERT_OK(DB::Open(options, dbname_, &db_));
assert(db_ != nullptr);
assert(db_ != nullptr); // suppress false clang-analyze report
Build(100, 2);
// ASSERT_OK(db_->Flush(FlushOptions()));
DBImpl* dbi = static_cast_with_check<DBImpl>(db_);
@ -669,7 +669,7 @@ TEST_F(CorruptionTest, ParanoidFileChecksWithDeleteRangeFirst) {
ASSERT_OK(DestroyDB(dbname_, options));
ASSERT_OK(DB::Open(options, dbname_, &db_));
std::string start, end;
assert(db_ != nullptr);
assert(db_ != nullptr); // suppress false clang-analyze report
ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
Key(3, &start), Key(7, &end)));
auto snap = db_->GetSnapshot();
@ -701,7 +701,7 @@ TEST_F(CorruptionTest, ParanoidFileChecksWithDeleteRange) {
db_ = nullptr;
ASSERT_OK(DestroyDB(dbname_, options));
ASSERT_OK(DB::Open(options, dbname_, &db_));
assert(db_ != nullptr);
assert(db_ != nullptr); // suppress false clang-analyze report
Build(10, 0, 0);
std::string start, end;
ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
@ -737,7 +737,7 @@ TEST_F(CorruptionTest, ParanoidFileChecksWithDeleteRangeLast) {
db_ = nullptr;
ASSERT_OK(DestroyDB(dbname_, options));
ASSERT_OK(DB::Open(options, dbname_, &db_));
assert(db_ != nullptr);
assert(db_ != nullptr); // suppress false clang-analyze report
std::string start, end;
Build(10);
ASSERT_OK(db_->DeleteRange(WriteOptions(), db_->DefaultColumnFamily(),
@ -775,7 +775,7 @@ TEST_F(CorruptionTest, LogCorruptionErrorsInCompactionIterator) {
options.table_factory = mock;
ASSERT_OK(DB::Open(options, dbname_, &db_));
assert(db_ != nullptr);
assert(db_ != nullptr); // suppress false clang-analyze report
Build(100, 2);
DBImpl* dbi = static_cast_with_check<DBImpl>(db_);
@ -798,7 +798,7 @@ TEST_F(CorruptionTest, CompactionKeyOrderCheck) {
std::make_shared<mock::MockTableFactory>();
options.table_factory = mock;
ASSERT_OK(DB::Open(options, dbname_, &db_));
assert(db_ != nullptr);
assert(db_ != nullptr); // suppress false clang-analyze report
mock->SetCorruptionMode(mock::MockTableFactory::kCorruptReorderKey);
Build(100, 2);
DBImpl* dbi = static_cast_with_check<DBImpl>(db_);
@ -884,7 +884,7 @@ TEST_F(CorruptionTest, VerifyWholeTableChecksum) {
SyncPoint::GetInstance()->SetCallBack(
"DBImpl::VerifySstFileChecksum:mismatch", [&](void* arg) {
auto* s = reinterpret_cast<Status*>(arg);
assert(s);
ASSERT_NE(s, nullptr);
++count;
ASSERT_NOK(*s);
});


@ -146,7 +146,7 @@ TEST_F(DBBasicTest, ReadOnlyDB) {
// Reopen and flush memtable.
Reopen(options);
Flush();
ASSERT_OK(Flush());
Close();
// Now check keys in read only mode.
ASSERT_OK(ReadOnlyReopen(options));
@ -182,7 +182,7 @@ TEST_F(DBBasicTest, ReadOnlyDBWithWriteDBIdToManifestSet) {
// Reopen and flush memtable.
Reopen(options);
Flush();
ASSERT_OK(Flush());
Close();
// Now check keys in read only mode.
ASSERT_OK(ReadOnlyReopen(options));
@ -205,7 +205,7 @@ TEST_F(DBBasicTest, CompactedDB) {
Reopen(options);
// 1 L0 file, use CompactedDB if max_open_files = -1
ASSERT_OK(Put("aaa", DummyString(kFileSize / 2, '1')));
Flush();
ASSERT_OK(Flush());
Close();
ASSERT_OK(ReadOnlyReopen(options));
Status s = Put("new", "value");
@ -223,12 +223,12 @@ TEST_F(DBBasicTest, CompactedDB) {
Reopen(options);
// Add more L0 files
ASSERT_OK(Put("bbb", DummyString(kFileSize / 2, '2')));
Flush();
ASSERT_OK(Flush());
ASSERT_OK(Put("aaa", DummyString(kFileSize / 2, 'a')));
Flush();
ASSERT_OK(Flush());
ASSERT_OK(Put("bbb", DummyString(kFileSize / 2, 'b')));
ASSERT_OK(Put("eee", DummyString(kFileSize / 2, 'e')));
Flush();
ASSERT_OK(Flush());
Close();
ASSERT_OK(ReadOnlyReopen(options));
@ -1413,10 +1413,10 @@ TEST_F(DBBasicTest, MultiGetBatchedSortedMultiFile) {
// mix with memtable
ASSERT_OK(Put(1, "k1", "v1"));
ASSERT_OK(Put(1, "k2", "v2"));
Flush(1);
ASSERT_OK(Flush(1));
ASSERT_OK(Put(1, "k3", "v3"));
ASSERT_OK(Put(1, "k4", "v4"));
Flush(1);
ASSERT_OK(Flush(1));
ASSERT_OK(Delete(1, "k4"));
ASSERT_OK(Put(1, "k5", "v5"));
ASSERT_OK(Delete(1, "no_key"));
@ -1459,19 +1459,19 @@ TEST_F(DBBasicTest, MultiGetBatchedDuplicateKeys) {
// mix with memtable
ASSERT_OK(Merge(1, "k1", "v1"));
ASSERT_OK(Merge(1, "k2", "v2"));
Flush(1);
ASSERT_OK(Flush(1));
MoveFilesToLevel(2, 1);
ASSERT_OK(Merge(1, "k3", "v3"));
ASSERT_OK(Merge(1, "k4", "v4"));
Flush(1);
ASSERT_OK(Flush(1));
MoveFilesToLevel(2, 1);
ASSERT_OK(Merge(1, "k4", "v4_2"));
ASSERT_OK(Merge(1, "k6", "v6"));
Flush(1);
ASSERT_OK(Flush(1));
MoveFilesToLevel(2, 1);
ASSERT_OK(Merge(1, "k7", "v7"));
ASSERT_OK(Merge(1, "k8", "v8"));
Flush(1);
ASSERT_OK(Flush(1));
MoveFilesToLevel(2, 1);
get_perf_context()->Reset();
@ -1511,12 +1511,12 @@ TEST_F(DBBasicTest, MultiGetBatchedMultiLevel) {
ASSERT_OK(Put("key_" + std::to_string(i), "val_l2_" + std::to_string(i)));
num_keys++;
if (num_keys == 8) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
}
if (num_keys > 0) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
MoveFilesToLevel(2);
@ -1525,12 +1525,12 @@ TEST_F(DBBasicTest, MultiGetBatchedMultiLevel) {
ASSERT_OK(Put("key_" + std::to_string(i), "val_l1_" + std::to_string(i)));
num_keys++;
if (num_keys == 8) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
}
if (num_keys > 0) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
MoveFilesToLevel(1);
@ -1539,12 +1539,12 @@ TEST_F(DBBasicTest, MultiGetBatchedMultiLevel) {
ASSERT_OK(Put("key_" + std::to_string(i), "val_l0_" + std::to_string(i)));
num_keys++;
if (num_keys == 8) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
}
if (num_keys > 0) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
ASSERT_EQ(0, num_keys);
@ -1590,12 +1590,12 @@ TEST_F(DBBasicTest, MultiGetBatchedMultiLevelMerge) {
ASSERT_OK(Put("key_" + std::to_string(i), "val_l2_" + std::to_string(i)));
num_keys++;
if (num_keys == 8) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
}
if (num_keys > 0) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
MoveFilesToLevel(2);
@ -1604,12 +1604,12 @@ TEST_F(DBBasicTest, MultiGetBatchedMultiLevelMerge) {
ASSERT_OK(Merge("key_" + std::to_string(i), "val_l1_" + std::to_string(i)));
num_keys++;
if (num_keys == 8) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
}
if (num_keys > 0) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
MoveFilesToLevel(1);
@ -1618,12 +1618,12 @@ TEST_F(DBBasicTest, MultiGetBatchedMultiLevelMerge) {
ASSERT_OK(Merge("key_" + std::to_string(i), "val_l0_" + std::to_string(i)));
num_keys++;
if (num_keys == 8) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
}
if (num_keys > 0) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
ASSERT_EQ(0, num_keys);
@ -1705,7 +1705,7 @@ TEST_F(DBBasicTest, MultiGetBatchedValueSize) {
ASSERT_OK(Put(1, "k7", "v7_"));
ASSERT_OK(Put(1, "k3", "v3_"));
ASSERT_OK(Put(1, "k4", "v4"));
Flush(1);
ASSERT_OK(Flush(1));
ASSERT_OK(Delete(1, "k4"));
ASSERT_OK(Put(1, "k11", "v11"));
ASSERT_OK(Delete(1, "no_key"));
@ -1715,7 +1715,7 @@ TEST_F(DBBasicTest, MultiGetBatchedValueSize) {
ASSERT_OK(Put(1, "k15", "v15"));
ASSERT_OK(Put(1, "k16", "v16"));
ASSERT_OK(Put(1, "k17", "v17"));
Flush(1);
ASSERT_OK(Flush(1));
ASSERT_OK(Put(1, "k1", "v1_"));
ASSERT_OK(Put(1, "k2", "v2_"));
@ -1785,12 +1785,12 @@ TEST_F(DBBasicTest, MultiGetBatchedValueSizeMultiLevelMerge) {
ASSERT_OK(Put("key_" + std::to_string(i), "val_l2_" + std::to_string(i)));
num_keys++;
if (num_keys == 8) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
}
if (num_keys > 0) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
MoveFilesToLevel(2);
@ -1799,12 +1799,12 @@ TEST_F(DBBasicTest, MultiGetBatchedValueSizeMultiLevelMerge) {
ASSERT_OK(Merge("key_" + std::to_string(i), "val_l1_" + std::to_string(i)));
num_keys++;
if (num_keys == 8) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
}
if (num_keys > 0) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
MoveFilesToLevel(1);
@ -1813,12 +1813,12 @@ TEST_F(DBBasicTest, MultiGetBatchedValueSizeMultiLevelMerge) {
ASSERT_OK(Merge("key_" + std::to_string(i), "val_l0_" + std::to_string(i)));
num_keys++;
if (num_keys == 8) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
}
if (num_keys > 0) {
Flush();
ASSERT_OK(Flush());
num_keys = 0;
}
ASSERT_EQ(0, num_keys);
@ -1908,10 +1908,10 @@ TEST_F(DBBasicTest, MultiGetStats) {
keys[i] = Slice(keys_str[i]);
ASSERT_OK(Put(1, "k" + std::to_string(i), rnd.RandomString(1000)));
if (i % 100 == 0) {
Flush(1);
ASSERT_OK(Flush(1));
}
}
Flush(1);
ASSERT_OK(Flush(1));
MoveFilesToLevel(2, 1);
for (int i = 501; i < 1000; ++i) {
@ -1919,11 +1919,11 @@ TEST_F(DBBasicTest, MultiGetStats) {
keys[i] = Slice(keys_str[i]);
ASSERT_OK(Put(1, "k" + std::to_string(i), rnd.RandomString(1000)));
if (i % 100 == 0) {
Flush(1);
ASSERT_OK(Flush(1));
}
}
Flush(1);
ASSERT_OK(Flush(1));
MoveFilesToLevel(2, 1);
for (int i = 1001; i < total_keys; ++i) {
@ -1931,10 +1931,10 @@ TEST_F(DBBasicTest, MultiGetStats) {
keys[i] = Slice(keys_str[i]);
ASSERT_OK(Put(1, "k" + std::to_string(i), rnd.RandomString(1000)));
if (i % 100 == 0) {
Flush(1);
ASSERT_OK(Flush(1));
}
}
Flush(1);
ASSERT_OK(Flush(1));
Close();
ReopenWithColumnFamilies({"default", "pikachu"}, options);
@ -2042,11 +2042,11 @@ TEST_P(DBMultiGetRowCacheTest, MultiGetBatched) {
ASSERT_OK(Put(1, "k2", "v2"));
ASSERT_OK(Put(1, "k3", "v3"));
ASSERT_OK(Put(1, "k4", "v4"));
Flush(1);
ASSERT_OK(Flush(1));
ASSERT_OK(Put(1, "k5", "v5"));
const Snapshot* snap1 = dbfull()->GetSnapshot();
ASSERT_OK(Delete(1, "k4"));
Flush(1);
ASSERT_OK(Flush(1));
const Snapshot* snap2 = dbfull()->GetSnapshot();
get_perf_context()->Reset();
@ -2185,7 +2185,7 @@ TEST_F(DBBasicTest, MultiGetIOBufferOverrun) {
std::string value(rnd.RandomString(128) + zero_str);
assert(Put(Key(i), value) == Status::OK());
}
Flush();
ASSERT_OK(Flush());
std::vector<std::string> key_data(10);
std::vector<Slice> keys;
@ -2579,7 +2579,7 @@ class DBBasicTestTrackWal : public DBTestBase,
int CountWalFiles() {
VectorLogPtr log_files;
dbfull()->GetSortedWalFiles(log_files);
EXPECT_OK(dbfull()->GetSortedWalFiles(log_files));
return static_cast<int>(log_files.size());
};
};
@ -2714,9 +2714,9 @@ class DBBasicTestMultiGet : public DBTestBase {
: Put(cf, Key(i), values_[i])) == Status::OK());
}
if (num_cfs == 1) {
Flush();
EXPECT_OK(Flush());
} else {
dbfull()->Flush(FlushOptions(), handles_[cf]);
EXPECT_OK(dbfull()->Flush(FlushOptions(), handles_[cf]));
}
for (int i = 0; i < 100; ++i) {
@ -2728,9 +2728,9 @@ class DBBasicTestMultiGet : public DBTestBase {
Status::OK());
}
if (num_cfs == 1) {
Flush();
EXPECT_OK(Flush());
} else {
dbfull()->Flush(FlushOptions(), handles_[cf]);
EXPECT_OK(dbfull()->Flush(FlushOptions(), handles_[cf]));
}
}
}
@ -3643,7 +3643,7 @@ TEST_P(DBBasicTestDeadline, PointLookupDeadline) {
std::string key = "k" + ToString(i);
ASSERT_OK(Put(key, rnd.RandomString(100)));
}
Flush();
ASSERT_OK(Flush());
bool timedout = true;
// A timeout will be forced when the IO counter reaches this value


@ -284,7 +284,7 @@ Status DBImpl::FlushMemTableToOutputFile(
// Notify sst_file_manager that a new file was added
std::string file_path = MakeTableFileName(
cfd->ioptions()->cf_paths[0].path, file_meta.fd.GetNumber());
sfm->OnAddFile(file_path);
s = sfm->OnAddFile(file_path);
if (sfm->IsMaxAllowedSpaceReached()) {
Status new_bg_error =
Status::SpaceLimit("Max allowed space was reached");
@ -618,7 +618,7 @@ Status DBImpl::AtomicFlushMemTablesToOutputFiles(
auto sfm = static_cast<SstFileManagerImpl*>(
immutable_db_options_.sst_file_manager.get());
assert(all_mutable_cf_options.size() == static_cast<size_t>(num_cfs));
for (int i = 0; i != num_cfs; ++i) {
for (int i = 0; s.ok() && i != num_cfs; ++i) {
if (cfds[i]->IsDropped()) {
continue;
}
@ -627,7 +627,7 @@ Status DBImpl::AtomicFlushMemTablesToOutputFiles(
if (sfm) {
std::string file_path = MakeTableFileName(
cfds[i]->ioptions()->cf_paths[0].path, file_meta[i].fd.GetNumber());
sfm->OnAddFile(file_path);
s = sfm->OnAddFile(file_path);
if (sfm->IsMaxAllowedSpaceReached() &&
error_handler_.GetBGError().ok()) {
Status new_bg_error =
@ -809,20 +809,20 @@ Status DBImpl::CompactRangeInternal(const CompactRangeOptions& options,
}
bool flush_needed = true;
Status s;
if (begin != nullptr && end != nullptr) {
// TODO(ajkr): We could also optimize away the flush in certain cases where
// one/both sides of the interval are unbounded. But it requires more
// changes to RangesOverlapWithMemtables.
Range range(*begin, *end);
SuperVersion* super_version = cfd->GetReferencedSuperVersion(this);
cfd->RangesOverlapWithMemtables({range}, super_version,
immutable_db_options_.allow_data_in_errors,
s = cfd->RangesOverlapWithMemtables(
{range}, super_version, immutable_db_options_.allow_data_in_errors,
&flush_needed);
CleanupSuperVersion(super_version);
}
Status s;
if (flush_needed) {
if (s.ok() && flush_needed) {
FlushOptions fo;
fo.allow_write_stall = options.allow_write_stall;
if (immutable_db_options_.atomic_flush) {
@ -1194,7 +1194,8 @@ Status DBImpl::CompactFilesImpl(
mutex_.Unlock();
TEST_SYNC_POINT("CompactFilesImpl:0");
TEST_SYNC_POINT("CompactFilesImpl:1");
compaction_job.Run();
// Ignore the status here, as it will be checked in the Install down below...
compaction_job.Run().PermitUncheckedError();
TEST_SYNC_POINT("CompactFilesImpl:2");
TEST_SYNC_POINT("CompactFilesImpl:3");
mutex_.Lock();


@ -164,7 +164,8 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
// was not used)
auto sfm = static_cast<SstFileManagerImpl*>(result.sst_file_manager.get());
for (size_t i = 0; i < result.db_paths.size(); i++) {
DeleteScheduler::CleanupDirectory(result.env, sfm, result.db_paths[i].path);
DeleteScheduler::CleanupDirectory(result.env, sfm, result.db_paths[i].path)
.PermitUncheckedError();
}
// Create a default SstFileManager for purposes of tracking compaction size


@ -729,7 +729,7 @@ TEST_F(DBOptionsTest, SetFIFOCompactionOptions) {
for (int j = 0; j < 10; j++) {
ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
}
Flush();
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(NumTableFilesAtLevel(0), 10);


@ -236,7 +236,7 @@ bool DBTestBase::ChangeWalOptions() {
auto options = CurrentOptions();
Destroy(options);
options.create_if_missing = true;
TryReopen(options);
Reopen(options);
return true;
} else if (option_config_ == kDBLogDir) {
option_config_ = kWalDirAndMmapReads;
@ -244,14 +244,14 @@ bool DBTestBase::ChangeWalOptions() {
auto options = CurrentOptions();
Destroy(options);
options.create_if_missing = true;
TryReopen(options);
Reopen(options);
return true;
} else if (option_config_ == kWalDirAndMmapReads) {
option_config_ = kRecycleLogFiles;
Destroy(last_options_);
auto options = CurrentOptions();
Destroy(options);
TryReopen(options);
Reopen(options);
return true;
} else {
return false;


@ -996,7 +996,7 @@ TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) {
// First, clean up memtable before inserting new data. This will generate
// a level-0 file, with size around 0.4 (according to previously written
// data amount).
dbfull()->Flush(FlushOptions());
ASSERT_OK(dbfull()->Flush(FlushOptions()));
for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
num++) {
// Write 110KB (11 values, each 10K)
@ -1781,7 +1781,7 @@ TEST_P(DBTestUniversalCompaction, FinalSortedRunCompactFilesConflict) {
dbfull()->TEST_write_controler().GetCompactionPressureToken();
ASSERT_OK(Put("key", "val"));
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
ASSERT_EQ(NumTableFilesAtLevel(num_levels_ - 1), 1);
ColumnFamilyMetaData cf_meta;
@ -1807,7 +1807,7 @@ TEST_P(DBTestUniversalCompaction, FinalSortedRunCompactFilesConflict) {
"DBTestUniversalCompaction:FinalSortedRunCompactFilesConflict:0");
for (int i = 0; i < 2; ++i) {
ASSERT_OK(Put("key", "val"));
Flush();
ASSERT_OK(Flush());
}
ASSERT_OK(dbfull()->TEST_WaitForCompact());
@ -1911,7 +1911,7 @@ TEST_F(DBTestUniversalCompaction2, BasicL0toL1) {
for (i = 0; i < 2000; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
// MoveFilesToLevel(6);
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@ -1923,7 +1923,7 @@ TEST_F(DBTestUniversalCompaction2, BasicL0toL1) {
ASSERT_OK(Put(Key(i), "val"));
}
}
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(0, NumTableFilesAtLevel(0));
@ -1954,7 +1954,7 @@ TEST_F(DBTestUniversalCompaction2, SingleLevel) {
for (i = 0; i < 2000; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
for (i = 1999; i < kNumKeys; ++i) {
if (i >= kNumKeys - kWindowSize &&
@ -1964,7 +1964,7 @@ TEST_F(DBTestUniversalCompaction2, SingleLevel) {
ASSERT_OK(Put(Key(i), "val"));
}
}
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(1, NumTableFilesAtLevel(0));
@ -1992,19 +1992,19 @@ TEST_F(DBTestUniversalCompaction2, MultipleLevels) {
for (i = 0; i < 500; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
for (i = 500; i < 1000; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
for (i = 1000; i < 1500; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
for (i = 1500; i < 2000; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(0, NumTableFilesAtLevel(0));
@ -2013,15 +2013,15 @@ TEST_F(DBTestUniversalCompaction2, MultipleLevels) {
for (i = 1999; i < 2333; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
for (i = 2333; i < 2666; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
for (i = 2666; i < 2999; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(0, NumTableFilesAtLevel(0));
@ -2031,7 +2031,7 @@ TEST_F(DBTestUniversalCompaction2, MultipleLevels) {
for (i = 1900; i < 2100; ++i) {
ASSERT_OK(Delete(Key(i)));
}
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(0, NumTableFilesAtLevel(0));
@ -2064,19 +2064,19 @@ TEST_F(DBTestUniversalCompaction2, OverlappingL0) {
for (i = 0; i < 2000; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
for (i = 2000; i < 3000; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
for (i = 3500; i < 4000; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
for (i = 2900; i < 3100; ++i) {
ASSERT_OK(Delete(Key(i)));
}
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(2, NumTableFilesAtLevel(0));
@ -2106,7 +2106,7 @@ TEST_F(DBTestUniversalCompaction2, IngestBehind) {
for (i = 0; i < 2000; ++i) {
ASSERT_OK(Put(Key(i), "val"));
}
Flush();
ASSERT_OK(Flush());
// MoveFilesToLevel(6);
ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
@ -2118,7 +2118,7 @@ TEST_F(DBTestUniversalCompaction2, IngestBehind) {
ASSERT_OK(Put(Key(i), "val"));
}
}
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(0, NumTableFilesAtLevel(0));
@ -2184,7 +2184,7 @@ TEST_F(DBTestUniversalCompaction2, PeriodicCompaction) {
// Case 1: Oldest flushed file excceeds periodic compaction threshold.
ASSERT_OK(Put("foo", "bar"));
Flush();
ASSERT_OK(Flush());
ASSERT_EQ(0, periodic_compactions);
// Move clock forward so that the flushed file would qualify periodic
// compaction.
@ -2192,7 +2192,7 @@ TEST_F(DBTestUniversalCompaction2, PeriodicCompaction) {
// Another flush would trigger compaction the oldest file.
ASSERT_OK(Put("foo", "bar2"));
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(1, periodic_compactions);
@ -2203,7 +2203,7 @@ TEST_F(DBTestUniversalCompaction2, PeriodicCompaction) {
periodic_compactions = 0;
// A flush doesn't trigger a periodic compaction when threshold not hit
ASSERT_OK(Put("foo", "bar2"));
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(0, periodic_compactions);
@ -2211,7 +2211,7 @@ TEST_F(DBTestUniversalCompaction2, PeriodicCompaction) {
// a compaction
ASSERT_OK(Put("foo", "bar2"));
env_->MockSleepForSeconds(48 * 60 * 60 + 100);
Flush();
ASSERT_OK(Flush());
ASSERT_OK(dbfull()->TEST_WaitForCompact());
ASSERT_EQ(1, periodic_compactions);
ASSERT_EQ(0, start_level);


@ -173,7 +173,7 @@ TEST_F(DBErrorHandlingFSTest, FLushWriteError) {
SyncPoint::GetInstance()->DisableProcessing();
fault_fs_->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
Reopen(options);
ASSERT_EQ("val", Get(Key(0)));
@ -271,7 +271,7 @@ TEST_F(DBErrorHandlingFSTest, FLushWritNoWALRetryableError1) {
SyncPoint::GetInstance()->DisableProcessing();
fault_fs_->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
ASSERT_EQ("val1", Get(Key(1)));
ASSERT_EQ("val2", Get(Key(2)));
ASSERT_OK(Put(Key(3), "val3", wo));
@ -314,7 +314,7 @@ TEST_F(DBErrorHandlingFSTest, FLushWritNoWALRetryableError2) {
SyncPoint::GetInstance()->DisableProcessing();
fault_fs_->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
ASSERT_EQ("val1", Get(Key(1)));
ASSERT_EQ("val2", Get(Key(2)));
ASSERT_OK(Put(Key(3), "val3", wo));
@ -357,7 +357,7 @@ TEST_F(DBErrorHandlingFSTest, FLushWritNoWALRetryableError3) {
SyncPoint::GetInstance()->DisableProcessing();
fault_fs_->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
ASSERT_EQ("val1", Get(Key(1)));
ASSERT_EQ("val2", Get(Key(2)));
ASSERT_OK(Put(Key(3), "val3", wo));
@ -399,7 +399,7 @@ TEST_F(DBErrorHandlingFSTest, ManifestWriteError) {
SyncPoint::GetInstance()->DisableProcessing();
fault_fs_->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
new_manifest = GetManifestNameFromLiveFiles();
ASSERT_NE(new_manifest, old_manifest);
@ -442,7 +442,7 @@ TEST_F(DBErrorHandlingFSTest, ManifestWriteRetryableError) {
SyncPoint::GetInstance()->DisableProcessing();
fault_fs_->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
new_manifest = GetManifestNameFromLiveFiles();
ASSERT_NE(new_manifest, old_manifest);
@ -487,7 +487,7 @@ TEST_F(DBErrorHandlingFSTest, ManifestWriteNoWALRetryableError) {
SyncPoint::GetInstance()->DisableProcessing();
fault_fs_->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
new_manifest = GetManifestNameFromLiveFiles();
ASSERT_NE(new_manifest, old_manifest);
@ -535,7 +535,7 @@ TEST_F(DBErrorHandlingFSTest, DoubleManifestWriteError) {
// A successful Resume() will create a new manifest file
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
new_manifest = GetManifestNameFromLiveFiles();
ASSERT_NE(new_manifest, old_manifest);
@ -568,7 +568,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionManifestWriteError) {
ASSERT_OK(Put(Key(0), "val"));
ASSERT_OK(Put(Key(2), "val"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
// Wait for flush of 2nd L0 file before starting compaction
@ -598,7 +598,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionManifestWriteError) {
// This Flush will trigger a compaction, which will fail when appending to
// the manifest
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
TEST_SYNC_POINT("CompactionManifestWriteError:0");
// Clear all errors so when the compaction is retried, it will succeed
@ -609,7 +609,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionManifestWriteError) {
s = dbfull()->TEST_WaitForCompact();
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
new_manifest = GetManifestNameFromLiveFiles();
ASSERT_NE(new_manifest, old_manifest);
@ -642,7 +642,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionManifestWriteRetryableError) {
ASSERT_OK(Put(Key(0), "val"));
ASSERT_OK(Put(Key(2), "val"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
listener->OverrideBGError(Status(error_msg, Status::Severity::kHardError));
listener->EnableAutoRecovery(false);
@ -668,7 +668,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionManifestWriteRetryableError) {
ASSERT_OK(Put(Key(1), "val"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
TEST_SYNC_POINT("CompactionManifestWriteError:0");
TEST_SYNC_POINT("CompactionManifestWriteError:1");
@ -680,7 +680,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionManifestWriteRetryableError) {
SyncPoint::GetInstance()->ClearAllCallBacks();
SyncPoint::GetInstance()->DisableProcessing();
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
new_manifest = GetManifestNameFromLiveFiles();
ASSERT_NE(new_manifest, old_manifest);
@ -706,7 +706,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionWriteError) {
ASSERT_OK(Put(Key(0), "va;"));
ASSERT_OK(Put(Key(2), "va;"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
listener->OverrideBGError(
Status(Status::NoSpace(), Status::Severity::kHardError));
@ -723,14 +723,14 @@ TEST_F(DBErrorHandlingFSTest, CompactionWriteError) {
ASSERT_OK(Put(Key(1), "val"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
s = dbfull()->TEST_WaitForCompact();
ASSERT_EQ(s.severity(), ROCKSDB_NAMESPACE::Status::Severity::kHardError);
fault_fs_->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
Destroy(options);
}
@ -752,7 +752,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionWriteRetryableError) {
ASSERT_OK(Put(Key(0), "va;"));
ASSERT_OK(Put(Key(2), "va;"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
listener->OverrideBGError(Status(error_msg, Status::Severity::kHardError));
listener->EnableAutoRecovery(false);
@ -766,7 +766,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionWriteRetryableError) {
ASSERT_OK(Put(Key(1), "val"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
s = dbfull()->TEST_WaitForCompact();
ASSERT_EQ(s.severity(), ROCKSDB_NAMESPACE::Status::Severity::kSoftError);
@ -775,7 +775,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionWriteRetryableError) {
SyncPoint::GetInstance()->ClearAllCallBacks();
SyncPoint::GetInstance()->DisableProcessing();
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
Destroy(options);
}
@ -790,7 +790,7 @@ TEST_F(DBErrorHandlingFSTest, CorruptionError) {
ASSERT_OK(Put(Key(0), "va;"));
ASSERT_OK(Put(Key(2), "va;"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
{{"DBImpl::FlushMemTable:FlushMemTableFinished",
@ -804,7 +804,7 @@ TEST_F(DBErrorHandlingFSTest, CorruptionError) {
ASSERT_OK(Put(Key(1), "val"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
s = dbfull()->TEST_WaitForCompact();
ASSERT_EQ(s.severity(),
@ -812,7 +812,7 @@ TEST_F(DBErrorHandlingFSTest, CorruptionError) {
fault_fs_->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_NE(s, Status::OK());
ASSERT_NOK(s);
Destroy(options);
}
@ -844,7 +844,7 @@ TEST_F(DBErrorHandlingFSTest, AutoRecoverFlushError) {
ASSERT_EQ(listener->WaitForRecovery(5000000), true);
s = Put(Key(1), "val");
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
Reopen(options);
ASSERT_EQ("val", Get(Key(0)));
@ -874,7 +874,7 @@ TEST_F(DBErrorHandlingFSTest, FailRecoverFlushError) {
// We should be able to shutdown the database while auto recovery is going
// on in the background
Close();
DestroyDB(dbname_, options);
DestroyDB(dbname_, options).PermitUncheckedError();
}
TEST_F(DBErrorHandlingFSTest, WALWriteError) {
@ -904,7 +904,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteError) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(dbfull()->Write(wopts, &batch), Status::OK());
ASSERT_OK(dbfull()->Write(wopts, &batch));
};
{
@ -960,7 +960,6 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) {
options.listeners.emplace_back(listener);
options.paranoid_checks = true;
options.max_bgerror_resume_count = 0;
Status s;
Random rnd(301);
DestroyAndReopen(options);
@ -978,7 +977,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(dbfull()->Write(wopts, &batch), Status::OK());
ASSERT_OK(dbfull()->Write(wopts, &batch));
};
// For the second batch, the first 2 file Append are successful, then the
@ -1001,8 +1000,8 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) {
SyncPoint::GetInstance()->EnableProcessing();
WriteOptions wopts;
wopts.sync = true;
s = dbfull()->Write(wopts, &batch);
ASSERT_EQ(true, s.IsIOError());
Status s = dbfull()->Write(wopts, &batch);
ASSERT_TRUE(s.IsIOError());
}
fault_fs_->SetFilesystemActive(true);
SyncPoint::GetInstance()->ClearAllCallBacks();
@ -1018,8 +1017,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) {
}
// Resume and write a new batch, should be in the WAL
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(dbfull()->Resume());
{
WriteBatch batch;
@ -1029,7 +1027,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableError) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(dbfull()->Write(wopts, &batch), Status::OK());
ASSERT_OK(dbfull()->Write(wopts, &batch));
};
Reopen(options);
@ -1055,7 +1053,6 @@ TEST_F(DBErrorHandlingFSTest, MultiCFWALWriteError) {
options.create_if_missing = true;
options.writable_file_max_buffer_size = 32768;
options.listeners.emplace_back(listener);
Status s;
Random rnd(301);
listener->EnableAutoRecovery();
@ -1072,7 +1069,7 @@ TEST_F(DBErrorHandlingFSTest, MultiCFWALWriteError) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(dbfull()->Write(wopts, &batch), Status::OK());
ASSERT_OK(dbfull()->Write(wopts, &batch));
};
{
@ -1095,8 +1092,8 @@ TEST_F(DBErrorHandlingFSTest, MultiCFWALWriteError) {
SyncPoint::GetInstance()->EnableProcessing();
WriteOptions wopts;
wopts.sync = true;
s = dbfull()->Write(wopts, &batch);
ASSERT_EQ(s, s.NoSpace());
Status s = dbfull()->Write(wopts, &batch);
ASSERT_TRUE(s.IsNoSpace());
}
SyncPoint::GetInstance()->DisableProcessing();
fault_fs_->SetFilesystemActive(true);
@ -1164,9 +1161,8 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) {
listener[i]->InjectFileCreationError(fault_fs[i], 3,
IOStatus::NoSpace("Out of space"));
snprintf(buf, sizeof(buf), "_%d", i);
DestroyDB(dbname_ + std::string(buf), options[i]);
ASSERT_EQ(DB::Open(options[i], dbname_ + std::string(buf), &dbptr),
Status::OK());
ASSERT_OK(DestroyDB(dbname_ + std::string(buf), options[i]));
ASSERT_OK(DB::Open(options[i], dbname_ + std::string(buf), &dbptr));
db.emplace_back(dbptr);
}
@ -1179,8 +1175,8 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(db[i]->Write(wopts, &batch), Status::OK());
ASSERT_EQ(db[i]->Flush(FlushOptions()), Status::OK());
ASSERT_OK(db[i]->Write(wopts, &batch));
ASSERT_OK(db[i]->Flush(FlushOptions()));
}
def_env->SetFilesystemActive(false, Status::NoSpace("Out of space"));
@ -1194,8 +1190,8 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(db[i]->Write(wopts, &batch), Status::OK());
ASSERT_EQ(db[i]->Flush(FlushOptions()), Status::OK());
ASSERT_OK(db[i]->Write(wopts, &batch));
ASSERT_OK(db[i]->Flush(FlushOptions()));
}
for (auto i = 0; i < kNumDbInstances; ++i) {
@ -1208,8 +1204,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) {
for (auto i = 0; i < kNumDbInstances; ++i) {
std::string prop;
ASSERT_EQ(listener[i]->WaitForRecovery(5000000), true);
ASSERT_EQ(static_cast<DBImpl*>(db[i])->TEST_WaitForCompact(true),
Status::OK());
ASSERT_OK(static_cast<DBImpl*>(db[i])->TEST_WaitForCompact(true));
EXPECT_TRUE(db[i]->GetProperty(
"rocksdb.num-files-at-level" + NumberToString(0), &prop));
EXPECT_EQ(atoi(prop.c_str()), 0);
@ -1226,7 +1221,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBCompactionError) {
if (getenv("KEEP_DB")) {
printf("DB is still at %s%s\n", dbname_.c_str(), buf);
} else {
Status s = DestroyDB(dbname_ + std::string(buf), options[i]);
ASSERT_OK(DestroyDB(dbname_ + std::string(buf), options[i]));
}
}
options.clear();
@ -1281,9 +1276,8 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {
break;
}
snprintf(buf, sizeof(buf), "_%d", i);
DestroyDB(dbname_ + std::string(buf), options[i]);
ASSERT_EQ(DB::Open(options[i], dbname_ + std::string(buf), &dbptr),
Status::OK());
ASSERT_OK(DestroyDB(dbname_ + std::string(buf), options[i]));
ASSERT_OK(DB::Open(options[i], dbname_ + std::string(buf), &dbptr));
db.emplace_back(dbptr);
}
@ -1296,8 +1290,8 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(db[i]->Write(wopts, &batch), Status::OK());
ASSERT_EQ(db[i]->Flush(FlushOptions()), Status::OK());
ASSERT_OK(db[i]->Write(wopts, &batch));
ASSERT_OK(db[i]->Flush(FlushOptions()));
}
def_env->SetFilesystemActive(false, Status::NoSpace("Out of space"));
@ -1311,11 +1305,11 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(db[i]->Write(wopts, &batch), Status::OK());
ASSERT_OK(db[i]->Write(wopts, &batch));
if (i != 1) {
ASSERT_EQ(db[i]->Flush(FlushOptions()), Status::OK());
ASSERT_OK(db[i]->Flush(FlushOptions()));
} else {
ASSERT_EQ(db[i]->Flush(FlushOptions()), Status::NoSpace());
ASSERT_TRUE(db[i]->Flush(FlushOptions()).IsNoSpace());
}
}
@ -1329,7 +1323,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {
ASSERT_EQ(s.severity(), Status::Severity::kHardError);
break;
case 2:
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
break;
}
fault_fs[i]->SetFilesystemActive(true);
@ -1342,8 +1336,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {
ASSERT_EQ(listener[i]->WaitForRecovery(5000000), true);
}
if (i == 1) {
ASSERT_EQ(static_cast<DBImpl*>(db[i])->TEST_WaitForCompact(true),
Status::OK());
ASSERT_OK(static_cast<DBImpl*>(db[i])->TEST_WaitForCompact(true));
}
EXPECT_TRUE(db[i]->GetProperty(
"rocksdb.num-files-at-level" + NumberToString(0), &prop));
@ -1361,7 +1354,7 @@ TEST_F(DBErrorHandlingFSTest, MultiDBVariousErrors) {
if (getenv("KEEP_DB")) {
printf("DB is still at %s%s\n", dbname_.c_str(), buf);
} else {
DestroyDB(dbname_ + std::string(buf), options[i]);
EXPECT_OK(DestroyDB(dbname_ + std::string(buf), options[i]));
}
}
options.clear();
@ -1418,12 +1411,10 @@ TEST_F(DBErrorHandlingFSTest, FLushWritNoWALRetryableeErrorAutoRecover1) {
ASSERT_EQ("val2", Get(Key(2)));
// call auto resume
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(dbfull()->Resume());
ASSERT_OK(Put(Key(3), "val3", wo));
s = Flush();
// After resume is successful, the flush should be ok.
ASSERT_EQ(s, Status::OK());
ASSERT_OK(Flush());
ASSERT_EQ("val3", Get(Key(3)));
Destroy(options);
}
@ -1513,8 +1504,7 @@ TEST_F(DBErrorHandlingFSTest, DISABLED_FLushWritRetryableeErrorAutoRecover1) {
Reopen(options);
ASSERT_EQ("val1", Get(Key(1)));
ASSERT_OK(Put(Key(2), "val2"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(Flush());
ASSERT_EQ("val2", Get(Key(2)));
Destroy(options);
@ -1554,8 +1544,7 @@ TEST_F(DBErrorHandlingFSTest, FLushWritRetryableeErrorAutoRecover2) {
Reopen(options);
ASSERT_EQ("val1", Get(Key(1)));
ASSERT_OK(Put(Key(2), "val2"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(Flush());
ASSERT_EQ("val2", Get(Key(2)));
Destroy(options);
@ -1602,10 +1591,9 @@ TEST_F(DBErrorHandlingFSTest, FLushWritRetryableeErrorAutoRecover3) {
// resume manually here.
s = dbfull()->Resume();
ASSERT_EQ("val1", Get(Key(1)));
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
ASSERT_OK(Put(Key(2), "val2"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(Flush());
ASSERT_EQ("val2", Get(Key(2)));
Destroy(options);
@ -1656,19 +1644,16 @@ TEST_F(DBErrorHandlingFSTest, DISABLED_FLushWritRetryableeErrorAutoRecover4) {
// Even the FS is recoverd, due to the Fatal Error in bg_error_ the resume
// and flush will all fail.
ASSERT_EQ("val1", Get(Key(1)));
s = dbfull()->Resume();
ASSERT_NE(s, Status::OK());
ASSERT_NOK(dbfull()->Resume());
ASSERT_EQ("val1", Get(Key(1)));
ASSERT_OK(Put(Key(2), "val2"));
s = Flush();
ASSERT_NE(s, Status::OK());
ASSERT_NOK(Flush());
ASSERT_EQ("NOT_FOUND", Get(Key(2)));
Reopen(options);
ASSERT_EQ("val1", Get(Key(1)));
ASSERT_OK(Put(Key(2), "val2"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(Flush());
ASSERT_EQ("val2", Get(Key(2)));
Destroy(options);
@ -1708,7 +1693,7 @@ TEST_F(DBErrorHandlingFSTest, DISABLED_FLushWritRetryableeErrorAutoRecover5) {
// The first resume will cause recovery_error and its severity is the
// Fatal error
s = dbfull()->Close();
ASSERT_NE(s, Status::OK());
ASSERT_NOK(s);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
SyncPoint::GetInstance()->DisableProcessing();
fault_fs_->SetFilesystemActive(true);
@ -1717,7 +1702,7 @@ TEST_F(DBErrorHandlingFSTest, DISABLED_FLushWritRetryableeErrorAutoRecover5) {
ASSERT_NE("val1", Get(Key(1)));
ASSERT_OK(Put(Key(2), "val2"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
ASSERT_EQ("val2", Get(Key(2)));
Destroy(options);
@ -1768,14 +1753,14 @@ TEST_F(DBErrorHandlingFSTest, FLushWritRetryableeErrorAutoRecover6) {
// The first resume will cause recovery_error and its severity is the
// Fatal error
s = dbfull()->Close();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
SyncPoint::GetInstance()->DisableProcessing();
Reopen(options);
ASSERT_EQ("val1", Get(Key(1)));
ASSERT_OK(Put(Key(2), "val2"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
ASSERT_EQ("val2", Get(Key(2)));
Destroy(options);
@ -1912,8 +1897,7 @@ TEST_F(DBErrorHandlingFSTest,
ASSERT_OK(Put(Key(0), "val"));
ASSERT_OK(Put(Key(2), "val"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(Flush());
listener->OverrideBGError(Status(error_msg, Status::Severity::kHardError));
listener->EnableAutoRecovery(false);
@ -1950,7 +1934,7 @@ TEST_F(DBErrorHandlingFSTest,
ASSERT_OK(Put(Key(1), "val"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
TEST_SYNC_POINT("CompactionManifestWriteErrorAR:0");
TEST_SYNC_POINT("CompactionManifestWriteErrorAR:1");
@ -2000,7 +1984,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionWriteRetryableErrorAutoRecover) {
ASSERT_OK(Put(Key(0), "va;"));
ASSERT_OK(Put(Key(2), "va;"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
listener->OverrideBGError(Status(error_msg, Status::Severity::kHardError));
listener->EnableAutoRecovery(false);
@ -2025,7 +2009,7 @@ TEST_F(DBErrorHandlingFSTest, CompactionWriteRetryableErrorAutoRecover) {
ASSERT_OK(Put(Key(1), "val"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
s = dbfull()->TEST_WaitForCompact();
ASSERT_EQ(s.severity(), ROCKSDB_NAMESPACE::Status::Severity::kSoftError);
@ -2065,7 +2049,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableErrorAutoRecover1) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(dbfull()->Write(wopts, &batch), Status::OK());
ASSERT_OK(dbfull()->Write(wopts, &batch));
};
// For the second batch, the first 2 file Append are successful, then the
@ -2122,7 +2106,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableErrorAutoRecover1) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(dbfull()->Write(wopts, &batch), Status::OK());
ASSERT_OK(dbfull()->Write(wopts, &batch));
};
Reopen(options);
@ -2166,7 +2150,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableErrorAutoRecover2) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(dbfull()->Write(wopts, &batch), Status::OK());
ASSERT_OK(dbfull()->Write(wopts, &batch));
};
// For the second batch, the first 2 file Append are successful, then the
@ -2223,7 +2207,7 @@ TEST_F(DBErrorHandlingFSTest, WALWriteRetryableErrorAutoRecover2) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(dbfull()->Write(wopts, &batch), Status::OK());
ASSERT_OK(dbfull()->Write(wopts, &batch));
};
Reopen(options);
@ -2285,7 +2269,7 @@ TEST_P(DBErrorHandlingFencingTest, ManifestWriteFenced) {
old_manifest = GetManifestNameFromLiveFiles();
ASSERT_OK(Put(Key(0), "val"));
Flush();
ASSERT_OK(Flush());
ASSERT_OK(Put(Key(1), "val"));
SyncPoint::GetInstance()->SetCallBack(
"VersionSet::LogAndApply:WriteManifest", [&](void*) {
@ -2318,7 +2302,7 @@ TEST_P(DBErrorHandlingFencingTest, CompactionWriteFenced) {
ASSERT_OK(Put(Key(0), "va;"));
ASSERT_OK(Put(Key(2), "va;"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
listener->EnableAutoRecovery(true);
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
@ -2332,7 +2316,7 @@ TEST_P(DBErrorHandlingFencingTest, CompactionWriteFenced) {
ASSERT_OK(Put(Key(1), "val"));
s = Flush();
ASSERT_EQ(s, Status::OK());
ASSERT_OK(s);
s = dbfull()->TEST_WaitForCompact();
ASSERT_EQ(s.severity(), ROCKSDB_NAMESPACE::Status::Severity::kFatalError);
@ -2368,7 +2352,7 @@ TEST_P(DBErrorHandlingFencingTest, WALWriteFenced) {
WriteOptions wopts;
wopts.sync = true;
ASSERT_EQ(dbfull()->Write(wopts, &batch), Status::OK());
ASSERT_OK(dbfull()->Write(wopts, &batch));
};
{


@ -52,7 +52,7 @@ class FlushJobTestBase : public testing::Test {
}
void NewDB() {
SetIdentityFile(env_, dbname_);
ASSERT_OK(SetIdentityFile(env_, dbname_));
VersionEdit new_db;
new_db.SetLogNumber(0);
@ -85,6 +85,7 @@ class FlushJobTestBase : public testing::Test {
std::string record;
new_db.EncodeTo(&record);
s = log.AddRecord(record);
ASSERT_OK(s);
for (const auto& e : new_cfs) {
record.clear();


@ -104,9 +104,9 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFiles) {
ASSERT_NE(import_cfh_, nullptr);
std::string value;
db_->Get(ReadOptions(), import_cfh_, "K1", &value);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, "K1", &value));
ASSERT_EQ(value, "V1");
db_->Get(ReadOptions(), import_cfh_, "K2", &value);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, "K2", &value));
ASSERT_EQ(value, "V2");
ASSERT_OK(db_->DropColumnFamily(import_cfh_));
ASSERT_OK(db_->DestroyColumnFamilyHandle(import_cfh_));
@ -125,9 +125,9 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFiles) {
ASSERT_NE(import_cfh_, nullptr);
std::string value;
db_->Get(ReadOptions(), import_cfh_, "K3", &value);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, "K3", &value));
ASSERT_EQ(value, "V1");
db_->Get(ReadOptions(), import_cfh_, "K4", &value);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, "K4", &value));
ASSERT_EQ(value, "V2");
}
}
@ -214,7 +214,7 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFilesWithOverlap) {
for (int i = 0; i < 100; i++) {
std::string value;
db_->Get(ReadOptions(), import_cfh_, Key(i), &value);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, Key(i), &value));
if (i % 16 == 0) {
ASSERT_EQ(value, Key(i) + "_overwrite4");
} else if (i % 4 == 0) {
@ -235,7 +235,7 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFilesWithOverlap) {
ASSERT_OK(db_->Flush(FlushOptions(), import_cfh_));
for (int i = 0; i < 100; i++) {
std::string value;
db_->Get(ReadOptions(), import_cfh_, Key(i), &value);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, Key(i), &value));
if (i % 5 == 0) {
ASSERT_EQ(value, Key(i) + "_overwrite5");
} else if (i % 16 == 0) {
@ -254,7 +254,7 @@ TEST_F(ImportColumnFamilyTest, ImportSSTFileWriterFilesWithOverlap) {
db_->CompactRange(CompactRangeOptions(), import_cfh_, nullptr, nullptr));
for (int i = 0; i < 100; i++) {
std::string value;
db_->Get(ReadOptions(), import_cfh_, Key(i), &value);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, Key(i), &value));
if (i % 5 == 0) {
ASSERT_EQ(value, Key(i) + "_overwrite5");
} else if (i % 16 == 0) {
@ -318,12 +318,12 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherCF) {
std::string value1, value2;
for (int i = 0; i < 100; ++i) {
db_->Get(ReadOptions(), import_cfh_, Key(i), &value1);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, Key(i), &value1));
ASSERT_EQ(Get(1, Key(i)), value1);
}
for (int i = 0; i < 100; ++i) {
db_->Get(ReadOptions(), import_cfh2_, Key(i), &value2);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh2_, Key(i), &value2));
ASSERT_EQ(Get(1, Key(i)), value2);
}
@ -340,16 +340,16 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherCF) {
db_->Get(ReadOptions(), import_cfh_, Key(i), &value1).IsNotFound());
}
for (int i = 25; i < 50; ++i) {
db_->Get(ReadOptions(), import_cfh_, Key(i), &value1);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, Key(i), &value1));
ASSERT_EQ(Key(i) + "_overwrite3", value1);
}
for (int i = 50; i < 100; ++i) {
db_->Get(ReadOptions(), import_cfh_, Key(i), &value1);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, Key(i), &value1));
ASSERT_EQ(Key(i) + "_overwrite2", value1);
}
for (int i = 0; i < 100; ++i) {
db_->Get(ReadOptions(), import_cfh2_, Key(i), &value2);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh2_, Key(i), &value2));
ASSERT_EQ(Get(1, Key(i)), value2);
}
@ -363,16 +363,16 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherCF) {
db_->Get(ReadOptions(), import_cfh_, Key(i), &value1).IsNotFound());
}
for (int i = 25; i < 50; ++i) {
db_->Get(ReadOptions(), import_cfh_, Key(i), &value1);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, Key(i), &value1));
ASSERT_EQ(Key(i) + "_overwrite3", value1);
}
for (int i = 50; i < 100; ++i) {
db_->Get(ReadOptions(), import_cfh_, Key(i), &value1);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh_, Key(i), &value1));
ASSERT_EQ(Key(i) + "_overwrite2", value1);
}
for (int i = 0; i < 100; ++i) {
db_->Get(ReadOptions(), import_cfh2_, Key(i), &value2);
ASSERT_OK(db_->Get(ReadOptions(), import_cfh2_, Key(i), &value2));
ASSERT_EQ(Get(1, Key(i)), value2);
}
}
@ -424,7 +424,7 @@ TEST_F(ImportColumnFamilyTest, ImportExportedSSTFromAnotherDB) {
for (int i = 0; i < 100; ++i) {
std::string value;
db_copy->Get(ReadOptions(), cfh, Key(i), &value);
ASSERT_OK(db_copy->Get(ReadOptions(), cfh, Key(i), &value));
ASSERT_EQ(Get(1, Key(i)), value);
}
ASSERT_OK(db_copy->DropColumnFamily(cfh));


@ -33,7 +33,7 @@ Writer::Writer(std::unique_ptr<WritableFileWriter>&& dest, uint64_t log_number,
Writer::~Writer() {
if (dest_) {
WriteBuffer();
WriteBuffer().PermitUncheckedError();
}
}


@ -30,14 +30,14 @@ class MemTableListTest : public testing::Test {
MemTableListTest() : db(nullptr), file_number(1) {
dbname = test::PerThreadDBPath("memtable_list_test");
options.create_if_missing = true;
DestroyDB(dbname, options);
EXPECT_OK(DestroyDB(dbname, options));
}
// Create a test db if not yet created
void CreateDB() {
if (db == nullptr) {
options.create_if_missing = true;
DestroyDB(dbname, options);
EXPECT_OK(DestroyDB(dbname, options));
// Open DB only with default column family
ColumnFamilyOptions cf_options;
std::vector<ColumnFamilyDescriptor> cf_descs;
@ -78,7 +78,7 @@ class MemTableListTest : public testing::Test {
handles.clear();
delete db;
db = nullptr;
DestroyDB(dbname, options, cf_descs);
EXPECT_OK(DestroyDB(dbname, options, cf_descs));
}
}


@ -65,7 +65,7 @@ TEST_F(OptionsFileTest, NumberOfOptionsFiles) {
const int kReopenCount = 20;
Options opt;
opt.create_if_missing = true;
DestroyDB(dbname_, opt);
ASSERT_OK(DestroyDB(dbname_, opt));
std::unordered_set<std::string> filename_history;
DB* db;
for (int i = 0; i < kReopenCount; ++i) {


@ -311,7 +311,7 @@ TEST(SamePrefixTest, InDomainTest) {
ASSERT_OK(db->Put(write_options, "HHKB pro2", "Mar 24, 2006"));
ASSERT_OK(db->Put(write_options, "HHKB pro2 Type-S", "June 29, 2011"));
ASSERT_OK(db->Put(write_options, "Realforce 87u", "idk"));
db->Flush(FlushOptions());
ASSERT_OK(db->Flush(FlushOptions()));
std::string result;
auto db_iter = db->NewIterator(ReadOptions());
@ -331,7 +331,7 @@ TEST(SamePrefixTest, InDomainTest) {
ASSERT_OK(db->Put(write_options, "pikachu", "1"));
ASSERT_OK(db->Put(write_options, "Meowth", "1"));
ASSERT_OK(db->Put(write_options, "Mewtwo", "idk"));
db->Flush(FlushOptions());
ASSERT_OK(db->Flush(FlushOptions()));
std::string result;
auto db_iter = db->NewIterator(ReadOptions());
@ -351,7 +351,7 @@ TEST_F(PrefixTest, TestResult) {
std::cout << "*** Mem table: " << options.memtable_factory->Name()
<< " number of buckets: " << num_buckets
<< std::endl;
DestroyDB(kDbName, Options());
ASSERT_OK(DestroyDB(kDbName, Options()));
auto db = OpenDb();
WriteOptions write_options;
ReadOptions read_options;
@ -528,7 +528,7 @@ TEST_F(PrefixTest, PrefixValid) {
while (NextOptions(num_buckets)) {
std::cout << "*** Mem table: " << options.memtable_factory->Name()
<< " number of buckets: " << num_buckets << std::endl;
DestroyDB(kDbName, Options());
ASSERT_OK(DestroyDB(kDbName, Options()));
auto db = OpenDb();
WriteOptions write_options;
ReadOptions read_options;
@ -543,7 +543,7 @@ TEST_F(PrefixTest, PrefixValid) {
PutKey(db.get(), write_options, 12345, 8, v18);
PutKey(db.get(), write_options, 12345, 9, v19);
PutKey(db.get(), write_options, 12346, 8, v16);
db->Flush(FlushOptions());
ASSERT_OK(db->Flush(FlushOptions()));
TestKey test_key(12346, 8);
std::string s;
ASSERT_OK(db->Delete(write_options, TestKeyToSlice(s, test_key)));
@ -581,7 +581,7 @@ TEST_F(PrefixTest, DynamicPrefixIterator) {
while (NextOptions(FLAGS_bucket_count)) {
std::cout << "*** Mem table: " << options.memtable_factory->Name()
<< std::endl;
DestroyDB(kDbName, Options());
ASSERT_OK(DestroyDB(kDbName, Options()));
auto db = OpenDb();
WriteOptions write_options;
ReadOptions read_options;
@ -689,7 +689,7 @@ TEST_F(PrefixTest, PrefixSeekModePrev) {
for (size_t m = 1; m < 100; m++) {
std::cout << "[" + std::to_string(m) + "]" + "*** Mem table: "
<< options.memtable_factory->Name() << std::endl;
DestroyDB(kDbName, Options());
ASSERT_OK(DestroyDB(kDbName, Options()));
auto db = OpenDb();
WriteOptions write_options;
ReadOptions read_options;
@ -714,7 +714,7 @@ TEST_F(PrefixTest, PrefixSeekModePrev) {
}
}
if (i < 2) {
db->Flush(FlushOptions());
ASSERT_OK(db->Flush(FlushOptions()));
}
}

View File

@ -325,7 +325,7 @@ TEST_P(EnvMoreTestWithParam, MakeDir) {
ASSERT_OK(env_->CreateDir(test_dir_ + "/j"));
ASSERT_OK(env_->FileExists(test_dir_ + "/j"));
std::vector<std::string> children;
env_->GetChildren(test_dir_, &children);
ASSERT_OK(env_->GetChildren(test_dir_, &children));
ASSERT_EQ(1U, children.size());
// fail because file already exists
ASSERT_TRUE(!env_->CreateDir(test_dir_ + "/j").ok());

env/env_test.cc
View File

@ -2166,7 +2166,7 @@ TEST_P(EnvFSTestWithParam, OptionsTest) {
ASSERT_OK(db->Close());
delete db;
DestroyDB(dbname, opts);
ASSERT_OK(DestroyDB(dbname, opts));
dbname = dbname2_;
}

View File

@ -39,7 +39,7 @@ class EnvLogger : public Logger {
~EnvLogger() {
if (!closed_) {
closed_ = true;
CloseHelper();
CloseHelper().PermitUncheckedError();
}
}
@ -48,7 +48,7 @@ class EnvLogger : public Logger {
mutex_.AssertHeld();
if (flush_pending_) {
flush_pending_ = false;
file_.Flush();
file_.Flush().PermitUncheckedError();
}
last_flush_micros_ = env_->NowMicros();
}
@ -134,7 +134,7 @@ class EnvLogger : public Logger {
assert(p <= limit);
mutex_.Lock();
// We will ignore any error returned by Append().
file_.Append(Slice(base, p - base));
file_.Append(Slice(base, p - base)).PermitUncheckedError();
flush_pending_ = true;
const uint64_t now_micros = env_->NowMicros();
if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) {
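
EnvLogger is the opposite case from the test changes above: Logger::Logv and Logger::Flush return void, so there is no caller that can act on an I/O failure, and the Flush() and Append() results are explicitly marked as deliberately ignored rather than left for the status-check machinery to flag.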

View File

@ -553,7 +553,7 @@ TEST_F(StatsHistoryTest, PersistentStatsReadOnly) {
// Reopen and flush memtable.
ASSERT_OK(TryReopen(options));
Flush();
ASSERT_OK(Flush());
Close();
// Now check keys in read only mode.
ASSERT_OK(ReadOnlyReopen(options));

View File

@ -235,7 +235,11 @@ Status MockTableFactory::NewTableReader(
std::unique_ptr<RandomAccessFileReader>&& file, uint64_t /*file_size*/,
std::unique_ptr<TableReader>* table_reader,
bool /*prefetch_index_and_filter_in_cache*/) const {
uint32_t id = GetIDFromFile(file.get());
uint32_t id;
Status s = GetIDFromFile(file.get(), &id);
if (!s.ok()) {
return s;
}
MutexLock lock_guard(&file_system_.mutex);
@ -252,7 +256,9 @@ Status MockTableFactory::NewTableReader(
TableBuilder* MockTableFactory::NewTableBuilder(
const TableBuilderOptions& /*table_builder_options*/,
uint32_t /*column_family_id*/, WritableFileWriter* file) const {
uint32_t id = GetAndWriteNextID(file);
uint32_t id;
Status s = GetAndWriteNextID(file, &id);
assert(s.ok());
return new MockTableBuilder(id, &file_system_, corrupt_mode_);
}
@ -268,25 +274,30 @@ Status MockTableFactory::CreateMockTable(Env* env, const std::string& fname,
WritableFileWriter file_writer(NewLegacyWritableFileWrapper(std::move(file)),
fname, EnvOptions());
uint32_t id = GetAndWriteNextID(&file_writer);
uint32_t id;
s = GetAndWriteNextID(&file_writer, &id);
if (s.ok()) {
file_system_.files.insert({id, std::move(file_contents)});
return Status::OK();
}
return s;
}
uint32_t MockTableFactory::GetAndWriteNextID(WritableFileWriter* file) const {
uint32_t next_id = next_id_.fetch_add(1);
Status MockTableFactory::GetAndWriteNextID(WritableFileWriter* file,
uint32_t* next_id) const {
*next_id = next_id_.fetch_add(1);
char buf[4];
EncodeFixed32(buf, next_id);
file->Append(Slice(buf, 4));
return next_id;
EncodeFixed32(buf, *next_id);
return file->Append(Slice(buf, 4));
}
uint32_t MockTableFactory::GetIDFromFile(RandomAccessFileReader* file) const {
Status MockTableFactory::GetIDFromFile(RandomAccessFileReader* file,
uint32_t* id) const {
char buf[4];
Slice result;
file->Read(IOOptions(), 0, 4, &result, buf, nullptr);
Status s = file->Read(IOOptions(), 0, 4, &result, buf, nullptr);
assert(result.size() == 4);
return DecodeFixed32(buf);
*id = DecodeFixed32(buf);
return s;
}
void MockTableFactory::AssertSingleFile(const KVVector& file_contents) {

View File

@ -77,8 +77,8 @@ class MockTableFactory : public TableFactory {
void AssertLatestFile(const KVVector& file_contents);
private:
uint32_t GetAndWriteNextID(WritableFileWriter* file) const;
uint32_t GetIDFromFile(RandomAccessFileReader* file) const;
Status GetAndWriteNextID(WritableFileWriter* file, uint32_t* id) const;
Status GetIDFromFile(RandomAccessFileReader* file, uint32_t* id) const;
mutable MockTableFileSystem file_system_;
mutable std::atomic<uint32_t> next_id_;
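
The mock-table helpers change shape from returning the decoded value to returning a Status with an out-parameter, so a failed read or append in the mock file system can propagate to the caller instead of being swallowed. A small sketch of that out-parameter pattern, assuming only the public Status type; the function and error message below are illustrative, not from the source:

    #include <cstdint>
    #include <cstring>
    #include "rocksdb/status.h"

    using ROCKSDB_NAMESPACE::Status;

    // Before: uint32_t DecodeId(const char* buf)          - errors had nowhere to go.
    // After:  Status DecodeId(buf, len, uint32_t* id)     - the caller sees both.
    Status DecodeId(const char* buf, size_t len, uint32_t* id) {
      if (len < sizeof(uint32_t)) {
        return Status::Corruption("id block truncated");  // hypothetical message
      }
      std::memcpy(id, buf, sizeof(uint32_t));  // stand-in for DecodeFixed32
      return Status::OK();
    }

    // Typical call site under status-checked auditing:
    //   uint32_t id;
    //   Status s = DecodeId(buf, sizeof(buf), &id);
    //   if (!s.ok()) {
    //     return s;  // propagate rather than assert or ignore
    //   }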

View File

@ -177,8 +177,10 @@ Status SstFileDumper::VerifyChecksum() {
Status SstFileDumper::DumpTable(const std::string& out_filename) {
std::unique_ptr<WritableFile> out_file;
Env* env = options_.env;
env->NewWritableFile(out_filename, &out_file, soptions_);
Status s = table_reader_->DumpTable(out_file.get());
Status s = env->NewWritableFile(out_filename, &out_file, soptions_);
if (s.ok()) {
s = table_reader_->DumpTable(out_file.get());
}
if (!s.ok()) {
// close the file before return error, ignore the close error if there's any
out_file->Close().PermitUncheckedError();
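
DumpTable now threads its two failure points together: the dump is only attempted if the output file was created, and whichever step fails first is the Status the caller sees. A minimal sketch of the chaining pattern, with hypothetical stand-in helpers:

    #include "rocksdb/status.h"

    using ROCKSDB_NAMESPACE::Status;

    // Stand-ins for NewWritableFile / TableReader::DumpTable, illustration only.
    Status OpenOutput() { return Status::OK(); }
    Status WriteDump() { return Status::OK(); }

    Status DumpToFile() {
      Status s = OpenOutput();
      if (s.ok()) {
        // Only attempt the dump when the open succeeded; otherwise the
        // open error is the one that should be reported.
        s = WriteDump();
      }
      return s;
    }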

View File

@ -377,10 +377,10 @@ class TableConstructor : public Constructor {
} else {
builder->Add(kv.first, kv.second);
}
EXPECT_TRUE(builder->status().ok());
EXPECT_OK(builder->status());
}
Status s = builder->Finish();
file_writer_->Flush();
EXPECT_OK(file_writer_->Flush());
EXPECT_TRUE(s.ok()) << s.ToString();
EXPECT_EQ(TEST_GetSink()->contents().size(), builder->FileSize());
@ -1270,15 +1270,15 @@ class FileChecksumTestHelper {
EXPECT_TRUE(table_builder_->status().ok());
}
Status s = table_builder_->Finish();
file_writer_->Flush();
EXPECT_TRUE(s.ok());
EXPECT_OK(file_writer_->Flush());
EXPECT_OK(s);
EXPECT_EQ(sink_->contents().size(), table_builder_->FileSize());
return s;
}
std::string GetFileChecksum() {
file_writer_->Close();
EXPECT_OK(file_writer_->Close());
return table_builder_->GetFileChecksum();
}
@ -3323,7 +3323,7 @@ TEST_P(BlockBasedTableTest, NoFileChecksum) {
f.GetFileWriter()));
ASSERT_OK(f.ResetTableBuilder(std::move(builder)));
f.AddKVtoKVMap(1000);
f.WriteKVAndFlushTable();
ASSERT_OK(f.WriteKVAndFlushTable());
ASSERT_STREQ(f.GetFileChecksumFuncName(), kUnknownFileChecksumFuncName);
ASSERT_STREQ(f.GetFileChecksum().c_str(), kUnknownFileChecksum);
}
@ -3362,7 +3362,7 @@ TEST_P(BlockBasedTableTest, Crc32cFileChecksum) {
f.GetFileWriter()));
ASSERT_OK(f.ResetTableBuilder(std::move(builder)));
f.AddKVtoKVMap(1000);
f.WriteKVAndFlushTable();
ASSERT_OK(f.WriteKVAndFlushTable());
ASSERT_STREQ(f.GetFileChecksumFuncName(), "FileChecksumCrc32c");
std::unique_ptr<FileChecksumGenerator> checksum_crc32c_gen2 =
@ -3418,7 +3418,7 @@ TEST_F(PlainTableTest, BasicPlainTableProperties) {
builder->Add(key, value);
}
ASSERT_OK(builder->Finish());
file_writer->Flush();
ASSERT_OK(file_writer->Flush());
test::StringSink* ss =
ROCKSDB_NAMESPACE::test::GetStringSinkFromLegacyWriter(file_writer.get());
@ -3468,7 +3468,7 @@ TEST_F(PlainTableTest, NoFileChecksum) {
f.GetFileWriter()));
ASSERT_OK(f.ResetTableBuilder(std::move(builder)));
f.AddKVtoKVMap(1000);
f.WriteKVAndFlushTable();
ASSERT_OK(f.WriteKVAndFlushTable());
ASSERT_STREQ(f.GetFileChecksumFuncName(), kUnknownFileChecksumFuncName);
EXPECT_EQ(f.GetFileChecksum(), kUnknownFileChecksum);
}
@ -3510,7 +3510,7 @@ TEST_F(PlainTableTest, Crc32cFileChecksum) {
f.GetFileWriter()));
ASSERT_OK(f.ResetTableBuilder(std::move(builder)));
f.AddKVtoKVMap(1000);
f.WriteKVAndFlushTable();
ASSERT_OK(f.WriteKVAndFlushTable());
ASSERT_STREQ(f.GetFileChecksumFuncName(), "FileChecksumCrc32c");
std::unique_ptr<FileChecksumGenerator> checksum_crc32c_gen2 =
@ -4022,7 +4022,7 @@ TEST_F(PrefixTest, PrefixAndWholeKeyTest) {
const std::string kDBPath = test::PerThreadDBPath("table_prefix_test");
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
DestroyDB(kDBPath, options);
ASSERT_OK(DestroyDB(kDBPath, options));
ROCKSDB_NAMESPACE::DB* db;
ASSERT_OK(ROCKSDB_NAMESPACE::DB::Open(options, kDBPath, &db));
@ -4081,7 +4081,7 @@ TEST_P(BlockBasedTableTest, DISABLED_TableWithGlobalSeqno) {
builder->Add(ik.Encode(), value);
}
ASSERT_OK(builder->Finish());
file_writer->Flush();
ASSERT_OK(file_writer->Flush());
test::RandomRWStringSink ss_rw(sink);
uint32_t version;
@ -4265,7 +4265,7 @@ TEST_P(BlockBasedTableTest, BlockAlignTest) {
builder->Add(ik.Encode(), value);
}
ASSERT_OK(builder->Finish());
file_writer->Flush();
ASSERT_OK(file_writer->Flush());
test::RandomRWStringSink ss_rw(sink);
std::unique_ptr<RandomAccessFileReader> file_reader(
@ -4360,7 +4360,7 @@ TEST_P(BlockBasedTableTest, PropertiesBlockRestartPointTest) {
builder->Add(ik.Encode(), value);
}
ASSERT_OK(builder->Finish());
file_writer->Flush();
ASSERT_OK(file_writer->Flush());
test::RandomRWStringSink ss_rw(sink);
std::unique_ptr<RandomAccessFileReader> file_reader(
@ -4511,7 +4511,7 @@ TEST_P(BlockBasedTableTest, BadOptions) {
const std::string kDBPath =
test::PerThreadDBPath("block_based_table_bad_options_test");
options.table_factory.reset(NewBlockBasedTableFactory(bbto));
DestroyDB(kDBPath, options);
ASSERT_OK(DestroyDB(kDBPath, options));
ROCKSDB_NAMESPACE::DB* db;
ASSERT_NOK(ROCKSDB_NAMESPACE::DB::Open(options, kDBPath, &db));

View File

@ -126,7 +126,7 @@ class SSTDumpToolTest : public testing::Test {
tb->Add(MakeKey(i), MakeValue(i));
}
ASSERT_OK(tb->Finish());
file_writer->Close();
ASSERT_OK(file_writer->Close());
}
protected:

View File

@ -85,7 +85,7 @@ class TraceAnalyzerTest : public testing::Test {
delete single_iter;
std::this_thread::sleep_for (std::chrono::seconds(1));
db_->Get(ro, "g", &value);
db_->Get(ro, "g", &value).PermitUncheckedError();
ASSERT_OK(db_->EndTrace());

View File

@ -171,16 +171,16 @@ TEST_F(WritableFileWriterTest, IncrementalBuffer) {
for (int i = 0; i < 20; i++) {
uint32_t num = r.Skewed(16) * 100 + r.Uniform(100);
std::string random_string = r.RandomString(num);
writer->Append(Slice(random_string.c_str(), num));
ASSERT_OK(writer->Append(Slice(random_string.c_str(), num)));
target.append(random_string.c_str(), num);
// In some attempts, flush in a chance of 1/10.
if (!no_flush && r.Uniform(10) == 0) {
writer->Flush();
ASSERT_OK(writer->Flush());
}
}
writer->Flush();
writer->Close();
ASSERT_OK(writer->Flush());
ASSERT_OK(writer->Close());
ASSERT_EQ(target.size(), actual.size());
ASSERT_EQ(target, actual);
}

View File

@ -165,7 +165,7 @@ TestWritableFile::TestWritableFile(const std::string& fname,
TestWritableFile::~TestWritableFile() {
if (writable_file_opened_) {
Close();
Close().PermitUncheckedError();
}
}

View File

@ -103,7 +103,7 @@ TEST_F(MemoryTest, SharedBlockCacheTotal) {
BlockBasedTableOptions bbt_opts;
bbt_opts.block_cache = NewLRUCache(4096 * 1000 * 10);
for (int i = 0; i < kNumDBs; ++i) {
DestroyDB(GetDBName(i), opt);
ASSERT_OK(DestroyDB(GetDBName(i), opt));
DB* db = nullptr;
ASSERT_OK(DB::Open(opt, GetDBName(i), &db));
dbs.push_back(db);
@ -119,13 +119,13 @@ TEST_F(MemoryTest, SharedBlockCacheTotal) {
ASSERT_OK(dbs[i]->Put(WriteOptions(), keys_by_db[i].back(),
rnd_.RandomString(kValueSize)));
}
dbs[i]->Flush(FlushOptions());
ASSERT_OK(dbs[i]->Flush(FlushOptions()));
}
}
for (int i = 0; i < kNumDBs; ++i) {
for (auto& key : keys_by_db[i]) {
std::string value;
dbs[i]->Get(ReadOptions(), key, &value);
ASSERT_OK(dbs[i]->Get(ReadOptions(), key, &value));
}
UpdateUsagesHistory(dbs);
}
@ -162,7 +162,7 @@ TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
};
for (int i = 0; i < kNumDBs; ++i) {
DestroyDB(GetDBName(i), opt);
ASSERT_OK(DestroyDB(GetDBName(i), opt));
std::vector<ColumnFamilyHandle*> handles;
dbs.emplace_back();
vec_handles.emplace_back();
@ -198,11 +198,12 @@ TEST_F(MemoryTest, MemTableAndTableReadersTotal) {
// Create an iterator and flush all memtables for each db
for (int i = 0; i < kNumDBs; ++i) {
iters.push_back(dbs[i]->NewIterator(ReadOptions()));
dbs[i]->Flush(FlushOptions());
ASSERT_OK(dbs[i]->Flush(FlushOptions()));
for (int j = 0; j < 100; ++j) {
std::string value;
dbs[i]->Get(ReadOptions(), rnd_.RandomString(kKeySize), &value);
ASSERT_NOK(
dbs[i]->Get(ReadOptions(), rnd_.RandomString(kKeySize), &value));
}
UpdateUsagesHistory(dbs);
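
The lookups of freshly generated random keys are now wrapped in ASSERT_NOK, since keys that were never written are expected to come back as a non-OK (NotFound) Status. A short sketch of how such a point lookup is usually classified, assuming an open DB handle; the key below is illustrative:

    #include <string>
    #include "rocksdb/db.h"

    // Distinguish "found", "not found", and "error" on a point lookup.
    void ClassifyLookup(ROCKSDB_NAMESPACE::DB* db) {
      std::string value;
      ROCKSDB_NAMESPACE::Status s =
          db->Get(ROCKSDB_NAMESPACE::ReadOptions(), "missing-key", &value);
      if (s.ok()) {
        // Key exists and `value` is populated.
      } else if (s.IsNotFound()) {
        // Expected for keys that were never written; still "not ok".
      } else {
        // Genuine I/O or corruption error.
      }
    }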

View File

@ -120,8 +120,8 @@ class StringAppendOperatorTest : public testing::Test,
public ::testing::WithParamInterface<bool> {
public:
StringAppendOperatorTest() {
DestroyDB(kDbName, Options())
.PermitUncheckedError(); // Start each test with a fresh DB
EXPECT_OK(
DestroyDB(kDbName, Options())); // Start each test with a fresh DB
}
void SetUp() override {

View File

@ -85,7 +85,7 @@ TEST_F(OptionsUtilTest, SaveAndLoad) {
exact, cf_opts[i], loaded_cf_descs[i].options));
}
DestroyDB(dbname_, Options(db_opt, cf_opts[0]));
ASSERT_OK(DestroyDB(dbname_, Options(db_opt, cf_opts[0])));
for (size_t i = 0; i < kCFCount; ++i) {
if (cf_opts[i].compaction_filter) {
delete cf_opts[i].compaction_filter;
@ -155,7 +155,7 @@ TEST_F(OptionsUtilTest, SaveAndLoadWithCacheCheck) {
ASSERT_EQ(loaded_bbt_opt->block_cache.get(), cache.get());
}
}
DestroyDB(dbname_, Options(loaded_db_opt, cf_opts[0]));
ASSERT_OK(DestroyDB(dbname_, Options(loaded_db_opt, cf_opts[0])));
}
namespace {
@ -252,7 +252,7 @@ TEST_F(OptionsUtilTest, SanityCheck) {
db_opt.create_missing_column_families = true;
db_opt.create_if_missing = true;
DestroyDB(dbname_, Options(db_opt, cf_descs[0].options));
ASSERT_OK(DestroyDB(dbname_, Options(db_opt, cf_descs[0].options)));
DB* db;
std::vector<ColumnFamilyHandle*> handles;
// open and persist the options
@ -361,7 +361,7 @@ TEST_F(OptionsUtilTest, SanityCheck) {
ASSERT_OK(
CheckOptionsCompatibility(config_options, dbname_, db_opt, cf_descs));
}
DestroyDB(dbname_, Options(db_opt, cf_descs[0].options));
ASSERT_OK(DestroyDB(dbname_, Options(db_opt, cf_descs[0].options)));
}
TEST_F(OptionsUtilTest, LatestOptionsNotFound) {
@ -379,7 +379,7 @@ TEST_F(OptionsUtilTest, LatestOptionsNotFound) {
std::vector<std::string> children;
std::string options_file_name;
DestroyDB(dbname_, options);
ASSERT_OK(DestroyDB(dbname_, options));
// First, test where the db directory does not exist
ASSERT_NOK(options.env->GetChildren(dbname_, &children));
@ -436,7 +436,7 @@ TEST_F(OptionsUtilTest, LoadLatestOptions) {
DB* db;
options.create_if_missing = true;
DestroyDB(dbname_, options);
ASSERT_OK(DestroyDB(dbname_, options));
cf_descs.emplace_back();
cf_descs.back().name = kDefaultColumnFamilyName;
@ -494,7 +494,7 @@ TEST_F(OptionsUtilTest, LoadLatestOptions) {
delete handle;
}
delete db;
DestroyDB(dbname_, options, cf_descs);
ASSERT_OK(DestroyDB(dbname_, options, cf_descs));
}
static void WriteOptionsFile(Env* env, const std::string& path,