diff --git a/Makefile b/Makefile
index 2c71ab458..64e75d7f2 100644
--- a/Makefile
+++ b/Makefile
@@ -569,7 +569,6 @@ ifdef ASSERT_STATUS_CHECKED
 	# TODO: finish fixing all tests to pass this check
 	TESTS_FAILING_ASC = \
 	  c_test \
-	  db_test \
 	  env_test \
 	  range_locking_test \
 	  testutil_test \
diff --git a/db/db_impl/db_impl_experimental.cc b/db/db_impl/db_impl_experimental.cc
index 9436d7474..06a51f53a 100644
--- a/db/db_impl/db_impl_experimental.cc
+++ b/db/db_impl/db_impl_experimental.cc
@@ -76,7 +76,8 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
                    "PromoteL0 FAILED. Target level %d does not exist\n",
                    target_level);
     job_context.Clean();
-    return Status::InvalidArgument("Target level does not exist");
+    status = Status::InvalidArgument("Target level does not exist");
+    return status;
   }
 
   // Sort L0 files by range.
@@ -96,7 +97,9 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
                       "PromoteL0 FAILED. File %" PRIu64 " being compacted\n",
                       f->fd.GetNumber());
       job_context.Clean();
-      return Status::InvalidArgument("PromoteL0 called during L0 compaction");
+      status =
+          Status::InvalidArgument("PromoteL0 called during L0 compaction");
+      return status;
     }
 
     if (i == 0) continue;
@@ -107,7 +110,8 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
                       " have overlapping ranges\n",
                       prev_f->fd.GetNumber(), f->fd.GetNumber());
       job_context.Clean();
-      return Status::InvalidArgument("L0 has overlapping files");
+      status = Status::InvalidArgument("L0 has overlapping files");
+      return status;
     }
   }
 
@@ -117,9 +121,10 @@ Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
       ROCKS_LOG_INFO(immutable_db_options_.info_log,
                      "PromoteL0 FAILED. Level %d not empty\n", level);
       job_context.Clean();
-      return Status::InvalidArgument(
+      status = Status::InvalidArgument(
           "All levels up to target_level "
           "must be empty");
+      return status;
     }
   }
diff --git a/db/db_test.cc b/db/db_test.cc
index 970725aa7..1367238d7 100644
--- a/db/db_test.cc
+++ b/db/db_test.cc
@@ -317,7 +317,7 @@ TEST_F(DBTest, MixedSlowdownOptions) {
   wo.sync = false;
   wo.disableWAL = false;
   wo.no_slowdown = false;
-  dbfull()->Put(wo, "foo", "bar");
+  ASSERT_OK(dbfull()->Put(wo, "foo", "bar"));
   // We need the 2nd write to trigger delay. This is because delay is
   // estimated based on the last write size which is 0 for the first write.
   ASSERT_OK(dbfull()->Put(wo, "foo2", "bar2"));
@@ -375,7 +375,7 @@ TEST_F(DBTest, MixedSlowdownOptionsInQueue) {
   wo.sync = false;
   wo.disableWAL = false;
   wo.no_slowdown = false;
-  dbfull()->Put(wo, "foo", "bar");
+  ASSERT_OK(dbfull()->Put(wo, "foo", "bar"));
   // We need the 2nd write to trigger delay. This is because delay is
   // estimated based on the last write size which is 0 for the first write.
   ASSERT_OK(dbfull()->Put(wo, "foo2", "bar2"));
@@ -444,7 +444,7 @@ TEST_F(DBTest, MixedSlowdownOptionsStop) {
   wo.sync = false;
   wo.disableWAL = false;
   wo.no_slowdown = false;
-  dbfull()->Put(wo, "foo", "bar");
+  ASSERT_OK(dbfull()->Put(wo, "foo", "bar"));
   // We need the 2nd write to trigger delay. This is because delay is
   // estimated based on the last write size which is 0 for the first write.
   ASSERT_OK(dbfull()->Put(wo, "foo2", "bar2"));
@@ -625,24 +625,24 @@ TEST_F(DBTest, SingleDeleteFlush) {
     // Put values on second level (so that they will not be in the same
     // compaction as the other operations.
- Put(1, "foo", "first"); - Put(1, "bar", "one"); + ASSERT_OK(Put(1, "foo", "first")); + ASSERT_OK(Put(1, "bar", "one")); ASSERT_OK(Flush(1)); MoveFilesToLevel(2, 1); // (Single) delete hidden by a put - SingleDelete(1, "foo"); - Put(1, "foo", "second"); - Delete(1, "bar"); - Put(1, "bar", "two"); + ASSERT_OK(SingleDelete(1, "foo")); + ASSERT_OK(Put(1, "foo", "second")); + ASSERT_OK(Delete(1, "bar")); + ASSERT_OK(Put(1, "bar", "two")); ASSERT_OK(Flush(1)); - SingleDelete(1, "foo"); - Delete(1, "bar"); + ASSERT_OK(SingleDelete(1, "foo")); + ASSERT_OK(Delete(1, "bar")); ASSERT_OK(Flush(1)); - dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, - nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[1], + nullptr, nullptr)); ASSERT_EQ("NOT_FOUND", Get(1, "bar")); ASSERT_EQ("NOT_FOUND", Get(1, "foo")); @@ -663,9 +663,9 @@ TEST_F(DBTest, SingleDeletePutFlush) { options.disable_auto_compactions = true; CreateAndReopenWithCF({"pikachu"}, options); - Put(1, "foo", Slice()); - Put(1, "a", Slice()); - SingleDelete(1, "a"); + ASSERT_OK(Put(1, "foo", Slice())); + ASSERT_OK(Put(1, "a", Slice())); + ASSERT_OK(SingleDelete(1, "a")); ASSERT_OK(Flush(1)); ASSERT_EQ("[ ]", AllEntriesFor("a", 1)); @@ -773,8 +773,8 @@ TEST_F(DBTest, GetFromImmutableLayer) { // Block sync calls env_->delay_sstable_sync_.store(true, std::memory_order_release); - Put(1, "k1", std::string(100000, 'x')); // Fill memtable - Put(1, "k2", std::string(100000, 'y')); // Trigger flush + ASSERT_OK(Put(1, "k1", std::string(100000, 'x'))); // Fill memtable + ASSERT_OK(Put(1, "k2", std::string(100000, 'y'))); // Trigger flush ASSERT_EQ("v1", Get(1, "foo")); ASSERT_EQ("NOT_FOUND", Get(0, "foo")); // Release sync calls @@ -852,19 +852,19 @@ TEST_F(DBTest, GetEncountersEmptyLevel) { // occurring at level 1 (instead of the correct level 0). // Step 1: First place sstables in levels 0 and 2 - Put(1, "a", "begin"); - Put(1, "z", "end"); + ASSERT_OK(Put(1, "a", "begin")); + ASSERT_OK(Put(1, "z", "end")); ASSERT_OK(Flush(1)); - dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); - dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); - Put(1, "a", "begin"); - Put(1, "z", "end"); + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1])); + ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1])); + ASSERT_OK(Put(1, "a", "begin")); + ASSERT_OK(Put(1, "z", "end")); ASSERT_OK(Flush(1)); ASSERT_GT(NumTableFilesAtLevel(0, 1), 0); ASSERT_GT(NumTableFilesAtLevel(2, 1), 0); // Step 2: clear level 1 if necessary. - dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1])); ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1); ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0); ASSERT_EQ(NumTableFilesAtLevel(2, 1), 1); @@ -875,7 +875,7 @@ TEST_F(DBTest, GetEncountersEmptyLevel) { } // Step 4: Wait for compaction to finish - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1); // XXX } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction)); @@ -1203,7 +1203,7 @@ TEST_F(DBTest, MetaDataTest) { // Fill up the rest of the file with random values. 
     GenerateNewFile(&rnd, &key_index, /* nowait */ true);
-    Flush();
+    ASSERT_OK(Flush());
   }
 
   std::vector<std::vector<FileMetaData>> files_by_level;
@@ -1291,7 +1291,7 @@ void MinLevelHelper(DBTest* self, Options& options) {
       values.push_back(rnd.RandomString(10000));
       ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
     }
-    self->dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(self->dbfull()->TEST_WaitForFlushMemTable());
     ASSERT_EQ(self->NumTableFilesAtLevel(0), num + 1);
   }
 
@@ -1301,7 +1301,7 @@ void MinLevelHelper(DBTest* self, Options& options) {
     values.push_back(rnd.RandomString(10000));
     ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
   }
-  self->dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(self->dbfull()->TEST_WaitForCompact());
 
   ASSERT_EQ(self->NumTableFilesAtLevel(0), 0);
   ASSERT_EQ(self->NumTableFilesAtLevel(1), 1);
@@ -1536,7 +1536,7 @@ TEST_F(DBTest, ApproximateSizesMemTable) {
   ASSERT_OK(db_->GetApproximateSizes(&r, 1, &size_without_mt));
   ASSERT_EQ(size_without_mt, 0);
 
-  Flush();
+  ASSERT_OK(Flush());
 
   for (int i = 0; i < N; i++) {
     ASSERT_OK(Put(Key(i + 1000), rnd.RandomString(1024)));
@@ -1593,16 +1593,17 @@ TEST_F(DBTest, ApproximateSizesFilesWithErrorMargin) {
     ASSERT_OK(Put(Key(i), rnd.RandomString(24)));
   }
   // Flush everything to files
-  Flush();
+  ASSERT_OK(Flush());
   // Compact the entire key space into the next level
-  db_->CompactRange(CompactRangeOptions(), default_cf, nullptr, nullptr);
+  ASSERT_OK(
+      db_->CompactRange(CompactRangeOptions(), default_cf, nullptr, nullptr));
 
   // Write more keys
   for (int i = N; i < (N + N / 4); i++) {
     ASSERT_OK(Put(Key(i), rnd.RandomString(24)));
   }
   // Flush everything to files again
-  Flush();
+  ASSERT_OK(Flush());
 
   // Wait for compaction to finish
   ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -1682,7 +1683,7 @@ TEST_F(DBTest, GetApproximateMemTableStats) {
   ASSERT_EQ(count, 0);
   ASSERT_EQ(size, 0);
 
-  Flush();
+  ASSERT_OK(Flush());
 
   start = Key(50);
   end = Key(60);
@@ -1755,7 +1756,7 @@ TEST_F(DBTest, ApproximateSizes) {
       std::string cend_str = Key(compact_start + 9);
       Slice cstart = cstart_str;
       Slice cend = cend_str;
-      dbfull()->TEST_CompactRange(0, &cstart, &cend, handles_[1]);
+      ASSERT_OK(dbfull()->TEST_CompactRange(0, &cstart, &cend, handles_[1]));
     }
 
     ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
@@ -1811,7 +1812,7 @@ TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) {
     ASSERT_OK(Size(Key(3), Key(5), 1, &size));
     ASSERT_TRUE(Between(size, 110100, 111000));
 
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+    ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]));
   }
   // ApproximateOffsetOf() is not yet implemented in plain table format.
 } while (ChangeOptions(kSkipPlainTable));
@@ -1825,16 +1826,16 @@ TEST_F(DBTest, Snapshot) {
   options_override.skip_policy = kSkipNoSnapshot;
   do {
     CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override));
-    Put(0, "foo", "0v1");
-    Put(1, "foo", "1v1");
+    ASSERT_OK(Put(0, "foo", "0v1"));
+    ASSERT_OK(Put(1, "foo", "1v1"));
 
     const Snapshot* s1 = db_->GetSnapshot();
     ASSERT_EQ(1U, GetNumSnapshots());
     uint64_t time_snap1 = GetTimeOldestSnapshots();
     ASSERT_GT(time_snap1, 0U);
     ASSERT_EQ(GetSequenceOldestSnapshots(), s1->GetSequenceNumber());
-    Put(0, "foo", "0v2");
-    Put(1, "foo", "1v2");
+    ASSERT_OK(Put(0, "foo", "0v2"));
+    ASSERT_OK(Put(1, "foo", "1v2"));
 
     env_->MockSleepForSeconds(1);
 
@@ -1842,8 +1843,8 @@ TEST_F(DBTest, Snapshot) {
     ASSERT_EQ(2U, GetNumSnapshots());
     ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
     ASSERT_EQ(GetSequenceOldestSnapshots(), s1->GetSequenceNumber());
-    Put(0, "foo", "0v3");
-    Put(1, "foo", "1v3");
+    ASSERT_OK(Put(0, "foo", "0v3"));
+    ASSERT_OK(Put(1, "foo", "1v3"));
 
     {
       ManagedSnapshot s3(db_);
@@ -1851,8 +1852,8 @@ TEST_F(DBTest, Snapshot) {
       ASSERT_EQ(time_snap1, GetTimeOldestSnapshots());
       ASSERT_EQ(GetSequenceOldestSnapshots(), s1->GetSequenceNumber());
 
-      Put(0, "foo", "0v4");
-      Put(1, "foo", "1v4");
+      ASSERT_OK(Put(0, "foo", "0v4"));
+      ASSERT_OK(Put(1, "foo", "1v4"));
       ASSERT_EQ("0v1", Get(0, "foo", s1));
       ASSERT_EQ("1v1", Get(1, "foo", s1));
       ASSERT_EQ("0v2", Get(0, "foo", s2));
@@ -1901,11 +1902,11 @@ TEST_F(DBTest, HiddenValuesAreRemoved) {
     FillLevels("a", "z", 1);
 
     std::string big = rnd.RandomString(50000);
-    Put(1, "foo", big);
-    Put(1, "pastfoo", "v");
+    ASSERT_OK(Put(1, "foo", big));
+    ASSERT_OK(Put(1, "pastfoo", "v"));
     const Snapshot* snapshot = db_->GetSnapshot();
-    Put(1, "foo", "tiny");
-    Put(1, "pastfoo2", "v2");  // Advance sequence number one more
+    ASSERT_OK(Put(1, "foo", "tiny"));
+    ASSERT_OK(Put(1, "pastfoo2", "v2"));  // Advance sequence number one more
 
     ASSERT_OK(Flush(1));
     ASSERT_GT(NumTableFilesAtLevel(0, 1), 0);
@@ -1916,11 +1917,11 @@ TEST_F(DBTest, HiddenValuesAreRemoved) {
     db_->ReleaseSnapshot(snapshot);
     ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny, " + big + " ]");
     Slice x("x");
-    dbfull()->TEST_CompactRange(0, nullptr, &x, handles_[1]);
+    ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, &x, handles_[1]));
     ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]");
     ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
     ASSERT_GE(NumTableFilesAtLevel(1, 1), 1);
-    dbfull()->TEST_CompactRange(1, nullptr, &x, handles_[1]);
+    ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, &x, handles_[1]));
     ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]");
 
     ASSERT_OK(Size("", "pastfoo", 1, &size));
@@ -1950,26 +1951,26 @@ TEST_F(DBTest, UnremovableSingleDelete) {
   options.disable_auto_compactions = true;
   CreateAndReopenWithCF({"pikachu"}, options);
 
-  Put(1, "foo", "first");
+  ASSERT_OK(Put(1, "foo", "first"));
   const Snapshot* snapshot = db_->GetSnapshot();
-  SingleDelete(1, "foo");
-  Put(1, "foo", "second");
+  ASSERT_OK(SingleDelete(1, "foo"));
+  ASSERT_OK(Put(1, "foo", "second"));
   ASSERT_OK(Flush(1));
 
   ASSERT_EQ("first", Get(1, "foo", snapshot));
   ASSERT_EQ("second", Get(1, "foo"));
 
-  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                         nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[1],
+                                   nullptr, nullptr));
   ASSERT_EQ("[ second, SDEL, first ]", AllEntriesFor("foo", 1));
 
-  SingleDelete(1, "foo");
+  ASSERT_OK(SingleDelete(1, "foo"));
 
   ASSERT_EQ("first", Get(1, "foo", snapshot));
   ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
-  dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
-                         nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[1],
+                                   nullptr, nullptr));
 
   ASSERT_EQ("first", Get(1, "foo", snapshot));
   ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
@@ -1985,7 +1986,7 @@ TEST_F(DBTest, UnremovableSingleDelete) {
 TEST_F(DBTest, DeletionMarkers1) {
   Options options = CurrentOptions();
   CreateAndReopenWithCF({"pikachu"}, options);
-  Put(1, "foo", "v1");
+  ASSERT_OK(Put(1, "foo", "v1"));
   ASSERT_OK(Flush(1));
   const int last = 2;
   MoveFilesToLevel(last, 1);
@@ -1993,24 +1994,25 @@ TEST_F(DBTest, DeletionMarkers1) {
   ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
 
   // Place a table at level last-1 to prevent merging with preceding mutation
-  Put(1, "a", "begin");
-  Put(1, "z", "end");
-  Flush(1);
+  ASSERT_OK(Put(1, "a", "begin"));
+  ASSERT_OK(Put(1, "z", "end"));
+  ASSERT_OK(Flush(1));
   MoveFilesToLevel(last - 1, 1);
   ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
   ASSERT_EQ(NumTableFilesAtLevel(last - 1, 1), 1);
 
-  Delete(1, "foo");
-  Put(1, "foo", "v2");
+  ASSERT_OK(Delete(1, "foo"));
+  ASSERT_OK(Put(1, "foo", "v2"));
   ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, DEL, v1 ]");
   ASSERT_OK(Flush(1));  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
   Slice z("z");
-  dbfull()->TEST_CompactRange(last - 2, nullptr, &z, handles_[1]);
+  ASSERT_OK(dbfull()->TEST_CompactRange(last - 2, nullptr, &z, handles_[1]));
   // DEL eliminated, but v1 remains because we aren't compacting that level
   // (DEL can be eliminated because v2 hides v1).
   ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
-  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(
+      dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]));
   // Merging last-1 w/ last, so we are the base level for "foo", so
   // DEL is removed.  (as is v1).
   ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2 ]");
@@ -2019,7 +2021,7 @@ TEST_F(DBTest, DeletionMarkers1) {
 TEST_F(DBTest, DeletionMarkers2) {
   Options options = CurrentOptions();
   CreateAndReopenWithCF({"pikachu"}, options);
-  Put(1, "foo", "v1");
+  ASSERT_OK(Put(1, "foo", "v1"));
   ASSERT_OK(Flush(1));
   const int last = 2;
   MoveFilesToLevel(last, 1);
@@ -2027,21 +2029,23 @@ TEST_F(DBTest, DeletionMarkers2) {
   ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
 
   // Place a table at level last-1 to prevent merging with preceding mutation
-  Put(1, "a", "begin");
-  Put(1, "z", "end");
-  Flush(1);
+  ASSERT_OK(Put(1, "a", "begin"));
+  ASSERT_OK(Put(1, "z", "end"));
+  ASSERT_OK(Flush(1));
   MoveFilesToLevel(last - 1, 1);
   ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1);
   ASSERT_EQ(NumTableFilesAtLevel(last - 1, 1), 1);
 
-  Delete(1, "foo");
+  ASSERT_OK(Delete(1, "foo"));
   ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
   ASSERT_OK(Flush(1));  // Moves to level last-2
   ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
-  dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(
+      dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr, handles_[1]));
   // DEL kept: "last" file overlaps
   ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]");
-  dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]);
+  ASSERT_OK(
+      dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]));
   // Merging last-1 w/ last, so we are the base level for "foo", so
   // DEL is removed.  (as is v1).
   ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
@@ -2056,11 +2060,11 @@ TEST_F(DBTest, OverlapInLevel0) {
     // 0.
ASSERT_OK(Put(1, "100", "v100")); ASSERT_OK(Put(1, "999", "v999")); - Flush(1); + ASSERT_OK(Flush(1)); MoveFilesToLevel(2, 1); ASSERT_OK(Delete(1, "100")); ASSERT_OK(Delete(1, "999")); - Flush(1); + ASSERT_OK(Flush(1)); MoveFilesToLevel(1, 1); ASSERT_EQ("0,1,1", FilesPerLevel(1)); @@ -2070,11 +2074,11 @@ TEST_F(DBTest, OverlapInLevel0) { // Note that files are sorted by smallest key. ASSERT_OK(Put(1, "300", "v300")); ASSERT_OK(Put(1, "500", "v500")); - Flush(1); + ASSERT_OK(Flush(1)); ASSERT_OK(Put(1, "200", "v200")); ASSERT_OK(Put(1, "600", "v600")); ASSERT_OK(Put(1, "900", "v900")); - Flush(1); + ASSERT_OK(Flush(1)); ASSERT_EQ("2,1,1", FilesPerLevel(1)); // BEGIN addition to existing test @@ -2085,15 +2089,15 @@ TEST_F(DBTest, OverlapInLevel0) { // END addition to existing test // Compact away the placeholder files we created initially - dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); - dbfull()->TEST_CompactRange(2, nullptr, nullptr, handles_[1]); + ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1])); + ASSERT_OK(dbfull()->TEST_CompactRange(2, nullptr, nullptr, handles_[1])); ASSERT_EQ("2", FilesPerLevel(1)); // Do a memtable compaction. Before bug-fix, the compaction would // not detect the overlap with level-0 files and would incorrectly place // the deletion in a deeper level. ASSERT_OK(Delete(1, "600")); - Flush(1); + ASSERT_OK(Flush(1)); ASSERT_EQ("3", FilesPerLevel(1)); ASSERT_EQ("NOT_FOUND", Get(1, "600")); } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction)); @@ -2239,7 +2243,7 @@ TEST_F(DBTest, DBOpen_Change_NumLevels) { ASSERT_OK(Put(1, "a", "123")); ASSERT_OK(Put(1, "b", "234")); - Flush(1); + ASSERT_OK(Flush(1)); MoveFilesToLevel(3, 1); Close(); @@ -2374,7 +2378,7 @@ TEST_F(DBTest, SnapshotFiles) { ReadOptions roptions; std::string val; for (unsigned int i = 0; i < 80; i++) { - stat = snapdb->Get(roptions, cf_handles[i < 40], Key(i), &val); + ASSERT_OK(snapdb->Get(roptions, cf_handles[i < 40], Key(i), &val)); ASSERT_EQ(values[i].compare(val), 0); } for (auto cfh : cf_handles) { @@ -2455,7 +2459,7 @@ TEST_F(DBTest, ReadonlyDBGetLiveManifestSize) { uint64_t manifest_size = 0; std::vector files; - dbfull()->GetLiveFiles(files, &manifest_size); + ASSERT_OK(dbfull()->GetLiveFiles(files, &manifest_size)); for (const std::string& f : files) { uint64_t number = 0; @@ -2463,7 +2467,7 @@ TEST_F(DBTest, ReadonlyDBGetLiveManifestSize) { if (ParseFileName(f.substr(1), &number, &type)) { if (type == kDescriptorFile) { uint64_t size_on_disk; - env_->GetFileSize(dbname_ + "/" + f, &size_on_disk); + ASSERT_OK(env_->GetFileSize(dbname_ + "/" + f, &size_on_disk)); ASSERT_EQ(manifest_size, size_on_disk); break; } @@ -2523,7 +2527,7 @@ TEST_F(DBTest, PurgeInfoLogs) { for (int mode = 0; mode <= 1; mode++) { if (mode == 1) { options.db_log_dir = dbname_ + "_logs"; - env_->CreateDirIfMissing(options.db_log_dir); + ASSERT_OK(env_->CreateDirIfMissing(options.db_log_dir)); } else { options.db_log_dir = ""; } @@ -2532,8 +2536,8 @@ TEST_F(DBTest, PurgeInfoLogs) { } std::vector files; - env_->GetChildren(options.db_log_dir.empty() ? dbname_ : options.db_log_dir, - &files); + ASSERT_OK(env_->GetChildren( + options.db_log_dir.empty() ? dbname_ : options.db_log_dir, &files)); int info_log_count = 0; for (std::string file : files) { if (file.find("LOG") != std::string::npos) { @@ -2545,19 +2549,18 @@ TEST_F(DBTest, PurgeInfoLogs) { Destroy(options); // For mode (1), test DestroyDB() to delete all the logs under DB dir. 
   // For mode (2), no info log file should have been put under DB dir.
+  // Since dbname_ has no children, there is no need to loop over db_files.
   std::vector<std::string> db_files;
-  env_->GetChildren(dbname_, &db_files);
-  for (std::string file : db_files) {
-    ASSERT_TRUE(file.find("LOG") == std::string::npos);
-  }
+  ASSERT_TRUE(env_->GetChildren(dbname_, &db_files).IsNotFound());
+  ASSERT_TRUE(db_files.empty());
 
   if (mode == 1) {
     // Cleaning up
-    env_->GetChildren(options.db_log_dir, &files);
+    ASSERT_OK(env_->GetChildren(options.db_log_dir, &files));
     for (std::string file : files) {
-      env_->DeleteFile(options.db_log_dir + "/" + file);
+      ASSERT_OK(env_->DeleteFile(options.db_log_dir + "/" + file));
     }
-    env_->DeleteDir(options.db_log_dir);
+    ASSERT_OK(env_->DeleteDir(options.db_log_dir));
   }
 }
@@ -2612,7 +2615,8 @@ static void MTThreadBody(void* arg) {
       for (int cf = 0; cf < kColumnFamilies; ++cf) {
         snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id,
                  static_cast<int>(counter), cf, unique_id);
-        batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf));
+        ASSERT_OK(batch.Put(t->state->test->handles_[cf], Slice(keybuf),
+                            Slice(valbuf)));
       }
       ASSERT_OK(db->Write(WriteOptions(), &batch));
     } else {
@@ -2620,7 +2624,8 @@ static void MTThreadBody(void* arg) {
      for (int cf = 0; cf < kColumnFamilies; ++cf) {
         snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id,
                  static_cast<int>(counter), cf, unique_id);
-        batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf));
+        ASSERT_OK(batch.Put(t->state->test->handles_[cf], Slice(keybuf),
+                            Slice(valbuf)));
       }
       ASSERT_OK(db->Write(WriteOptions(), batch.GetWriteBatch()));
     }
@@ -2862,7 +2867,10 @@ class ModelDB : public DB {
   Status Put(const WriteOptions& o, ColumnFamilyHandle* cf, const Slice& k,
              const Slice& v) override {
     WriteBatch batch;
-    batch.Put(cf, k, v);
+    Status s = batch.Put(cf, k, v);
+    if (!s.ok()) {
+      return s;
+    }
     return Write(o, &batch);
   }
   using DB::Close;
@@ -2871,21 +2879,30 @@ class ModelDB : public DB {
   Status Delete(const WriteOptions& o, ColumnFamilyHandle* cf,
                 const Slice& key) override {
     WriteBatch batch;
-    batch.Delete(cf, key);
+    Status s = batch.Delete(cf, key);
+    if (!s.ok()) {
+      return s;
+    }
     return Write(o, &batch);
   }
   using DB::SingleDelete;
   Status SingleDelete(const WriteOptions& o, ColumnFamilyHandle* cf,
                       const Slice& key) override {
     WriteBatch batch;
-    batch.SingleDelete(cf, key);
+    Status s = batch.SingleDelete(cf, key);
+    if (!s.ok()) {
+      return s;
+    }
     return Write(o, &batch);
   }
   using DB::Merge;
   Status Merge(const WriteOptions& o, ColumnFamilyHandle* cf, const Slice& k,
                const Slice& v) override {
     WriteBatch batch;
-    batch.Merge(cf, k, v);
+    Status s = batch.Merge(cf, k, v);
+    if (!s.ok()) {
+      return s;
+    }
     return Write(o, &batch);
   }
   using DB::Get;
@@ -3366,9 +3383,9 @@ TEST_P(DBTestRandomized, Randomized) {
       }
       if (rnd.OneIn(2)) {
         v = rnd.RandomString(rnd.Uniform(10));
-        b.Put(k, v);
+        ASSERT_OK(b.Put(k, v));
       } else {
-        b.Delete(k);
+        ASSERT_OK(b.Delete(k));
      }
     }
     ASSERT_OK(model.Write(WriteOptions(), &b));
@@ -3413,7 +3430,7 @@ TEST_F(DBTest, BlockBasedTablePrefixIndexTest) {
   Reopen(options);
 
   ASSERT_OK(Put("k1", "v1"));
-  Flush();
+  ASSERT_OK(Flush());
   ASSERT_OK(Put("k2", "v2"));
 
   // Reopen it without prefix extractor, make sure everything still works.
@@ -3446,7 +3463,7 @@ TEST_F(DBTest, BlockBasedTablePrefixIndexTotalOrderSeek) {
   Reopen(options);
 
   ASSERT_OK(Put("k1", "v1"));
-  Flush();
+  ASSERT_OK(Flush());
 
   CompactRangeOptions cro;
   cro.change_level = true;
@@ -3575,7 +3592,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
     for (int j = 0; j < 20; j++) {
      ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
     }
-    Flush();
+    ASSERT_OK(Flush());
     ASSERT_OK(dbfull()->TEST_WaitForCompact());
   }
   // It should be compacted to 10 files.
@@ -3586,7 +3603,7 @@ TEST_F(DBTest, FIFOCompactionTestWithCompaction) {
     for (int j = 0; j < 20; j++) {
       ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980)));
     }
-    Flush();
+    ASSERT_OK(Flush());
     ASSERT_OK(dbfull()->TEST_WaitForCompact());
   }
 
@@ -3617,7 +3634,7 @@ TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) {
     ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500)));
     ASSERT_OK(Put("key" + ToString(i), ""));
     ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500)));
-    Flush();
+    ASSERT_OK(Flush());
     ASSERT_OK(dbfull()->TEST_WaitForCompact());
   }
   ASSERT_EQ(NumTableFilesAtLevel(0), 1);
@@ -3629,7 +3646,7 @@ TEST_F(DBTest, FIFOCompactionStyleWithCompactionAndDelete) {
     ASSERT_OK(Put("a" + ToString(i), rnd.RandomString(500)));
     ASSERT_OK(Delete("key" + ToString(i)));
     ASSERT_OK(Put("z" + ToString(i), rnd.RandomString(500)));
-    Flush();
+    ASSERT_OK(Flush());
     ASSERT_OK(dbfull()->TEST_WaitForCompact());
   }
   ASSERT_EQ(NumTableFilesAtLevel(0), 2);
@@ -3705,7 +3722,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
       for (int j = 0; j < 10; j++) {
         ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
       }
-      Flush();
+      ASSERT_OK(Flush());
       ASSERT_OK(dbfull()->TEST_WaitForCompact());
     }
     ASSERT_EQ(NumTableFilesAtLevel(0), 10);
@@ -3718,7 +3735,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
     ASSERT_OK(dbfull()->TEST_WaitForCompact());
     ASSERT_EQ(NumTableFilesAtLevel(0), 10);
 
-    dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+    ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
     ASSERT_EQ(NumTableFilesAtLevel(0), 0);
   }
 
@@ -3737,7 +3754,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
      }
-      Flush();
+      ASSERT_OK(Flush());
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 10);
@@ -3753,7 +3770,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
      }
-      Flush();
+      ASSERT_OK(Flush());
    }
    ASSERT_OK(dbfull()->TEST_WaitForCompact());
@@ -3779,7 +3796,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
      }
-      Flush();
+      ASSERT_OK(Flush());
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 3);
@@ -3794,7 +3811,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
      for (int j = 0; j < 140; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
      }
-      Flush();
+      ASSERT_OK(Flush());
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    }
    // Size limit is still guaranteed.
@@ -3817,7 +3834,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
      }
-      Flush();
+      ASSERT_OK(Flush());
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    }
    // With Intra-L0 compaction, out of 10 files, 6 files will be compacted to 1
@@ -3836,7 +3853,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
      for (int j = 0; j < 10; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
      }
-      Flush();
+      ASSERT_OK(Flush());
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    }
    ASSERT_EQ(NumTableFilesAtLevel(0), 5);
@@ -3861,7 +3878,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
      for (int j = 0; j < 20; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j), rnd.RandomString(980)));
      }
-      Flush();
+      ASSERT_OK(Flush());
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    }
    // It should be compacted to 10 files.
@@ -3872,7 +3889,7 @@ TEST_F(DBTest, FIFOCompactionWithTTLTest) {
      for (int j = 0; j < 20; j++) {
        ASSERT_OK(Put(ToString(i * 20 + j + 2000), rnd.RandomString(980)));
      }
-      Flush();
+      ASSERT_OK(Flush());
      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    }
 
@@ -4113,7 +4130,7 @@ TEST_F(DBTest, SanitizeNumThreads) {
     ASSERT_OK(Put("abc", "def"));
     ASSERT_EQ("def", Get("abc"));
-    Flush();
+    ASSERT_OK(Flush());
     ASSERT_EQ("def", Get("abc"));
   }
 }
@@ -4122,9 +4139,9 @@ TEST_F(DBTest, WriteSingleThreadEntry) {
   std::vector<port::Thread> threads;
   dbfull()->TEST_LockMutex();
   auto w = dbfull()->TEST_BeginWrite();
-  threads.emplace_back([&] { Put("a", "b"); });
+  threads.emplace_back([&] { ASSERT_OK(Put("a", "b")); });
   env_->SleepForMicroseconds(10000);
-  threads.emplace_back([&] { Flush(); });
+  threads.emplace_back([&] { ASSERT_OK(Flush()); });
   env_->SleepForMicroseconds(10000);
   dbfull()->TEST_UnlockMutex();
   dbfull()->TEST_LockMutex();
@@ -4152,7 +4169,8 @@ TEST_F(DBTest, ConcurrentFlushWAL) {
     threads.emplace_back([&] {
       for (size_t i = 0; i < cnt; i++) {
         auto istr = ToString(i);
-        db_->Put(wopt, db_->DefaultColumnFamily(), "a" + istr, "b" + istr);
+        ASSERT_OK(db_->Put(wopt, db_->DefaultColumnFamily(), "a" + istr,
+                           "b" + istr));
       }
     });
     if (two_write_queues) {
@@ -4160,14 +4178,15 @@ TEST_F(DBTest, ConcurrentFlushWAL) {
        for (size_t i = cnt; i < 2 * cnt; i++) {
          auto istr = ToString(i);
          WriteBatch batch;
-          batch.Put("a" + istr, "b" + istr);
-          dbfull()->WriteImpl(wopt, &batch, nullptr, nullptr, 0, true);
+          ASSERT_OK(batch.Put("a" + istr, "b" + istr));
+          ASSERT_OK(
+              dbfull()->WriteImpl(wopt, &batch, nullptr, nullptr, 0, true));
        }
      });
    }
    threads.emplace_back([&] {
      for (size_t i = 0; i < cnt * 100; i++) {  // FlushWAL is faster than Put
-        db_->FlushWAL(false);
+        ASSERT_OK(db_->FlushWAL(false));
      }
    });
    for (auto& t : threads) {
@@ -4202,7 +4221,7 @@ TEST_F(DBTest, ManualFlushWalAndWriteRace) {
   port::Thread writeThread([&]() {
     for (int i = 0; i < 100; i++) {
       auto istr = ToString(i);
-      dbfull()->Put(wopts, "key_" + istr, "value_" + istr);
+      ASSERT_OK(dbfull()->Put(wopts, "key_" + istr, "value_" + istr));
     }
   });
   port::Thread flushThread([&]() {
@@ -4251,10 +4270,10 @@ TEST_F(DBTest, DynamicMemtableOptions) {
     // multiple memtables are flushed into a single L0 file. This race
    // condition affects assertion (A).
      if (i % kNumPutsBeforeWaitForFlush == kNumPutsBeforeWaitForFlush - 1) {
-        dbfull()->TEST_WaitForFlushMemTable();
+        ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
      }
    }
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  };
 
  // Test write_buffer_size
@@ -4264,7 +4283,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
   ASSERT_GT(SizeAtLevel(0), k64KB - k5KB * 2);
 
   // Clean up L0
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
   ASSERT_EQ(NumTableFilesAtLevel(0), 0);
 
   // Increase buffer size
@@ -4330,7 +4349,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
       {"max_write_buffer_number", "8"},
   }));
   // Clean up memtable and L0
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
 
   sleeping_task_low.Reset();
   env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
@@ -4353,7 +4372,7 @@ TEST_F(DBTest, DynamicMemtableOptions) {
       {"max_write_buffer_number", "4"},
   }));
   // Clean up memtable and L0
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
 
   sleeping_task_low.Reset();
   env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
@@ -4453,7 +4472,7 @@ TEST_F(DBTest, GetThreadStatus) {
                                                             true);
     }
   }
-  db_->DropColumnFamily(handles_[2]);
+  ASSERT_OK(db_->DropColumnFamily(handles_[2]));
   delete handles_[2];
   handles_.erase(handles_.begin() + 2);
   env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
@@ -4495,17 +4514,19 @@ TEST_F(DBTest, ThreadStatusFlush) {
   VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 0);
 
   uint64_t num_running_flushes = 0;
-  db_->GetIntProperty(DB::Properties::kNumRunningFlushes, &num_running_flushes);
+  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kNumRunningFlushes,
+                                  &num_running_flushes));
   ASSERT_EQ(num_running_flushes, 0);
 
-  Put(1, "k1", std::string(100000, 'x'));  // Fill memtable
-  Put(1, "k2", std::string(100000, 'y'));  // Trigger flush
+  ASSERT_OK(Put(1, "k1", std::string(100000, 'x')));  // Fill memtable
+  ASSERT_OK(Put(1, "k2", std::string(100000, 'y')));  // Trigger flush
 
   // The first sync point is to make sure there's one flush job
   // running when we perform VerifyOperationCount().
   TEST_SYNC_POINT("DBTest::ThreadStatusFlush:1");
   VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 1);
-  db_->GetIntProperty(DB::Properties::kNumRunningFlushes, &num_running_flushes);
+  ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kNumRunningFlushes,
+                                  &num_running_flushes));
   ASSERT_EQ(num_running_flushes, 1);
   // This second sync point is to ensure the flush job will not
   // be completed until we already perform VerifyOperationCount().
@@ -4550,13 +4571,13 @@ TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) {
         ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer),
                       rnd.RandomString(kTestValueSize)));
      }
-      Flush();
+      ASSERT_OK(Flush());
    }
 
    // This makes sure a compaction won't be scheduled until
    // we have done with the above Put Phase.
    uint64_t num_running_compactions = 0;
-    db_->GetIntProperty(DB::Properties::kNumRunningCompactions,
-                        &num_running_compactions);
+    ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kNumRunningCompactions,
+                                    &num_running_compactions));
    ASSERT_EQ(num_running_compactions, 0);
    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:0");
    ASSERT_GE(NumTableFilesAtLevel(0),
@@ -4572,8 +4593,8 @@ TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) {
      // If thread tracking is not enabled, compaction count should be 0.
      VerifyOperationCount(env_, ThreadStatus::OP_COMPACTION, 0);
    }
-    db_->GetIntProperty(DB::Properties::kNumRunningCompactions,
-                        &num_running_compactions);
+    ASSERT_TRUE(db_->GetIntProperty(DB::Properties::kNumRunningCompactions,
+                                    &num_running_compactions));
    ASSERT_EQ(num_running_compactions, 1);
    // TODO(yhchiang): adding assert to verify each compaction stage.
    TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:2");
@@ -4619,7 +4640,9 @@ TEST_P(DBTestWithParam, PreShutdownManualCompaction) {
      MakeTables(1, "a", "z", 1);
      ASSERT_EQ("1,0,2", FilesPerLevel(1));
      CancelAllBackgroundWork(db_);
-      db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr);
+      ASSERT_TRUE(
+          db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr)
+              .IsShutdownInProgress());
      ASSERT_EQ("1,0,2", FilesPerLevel(1));
 
      if (iter == 0) {
@@ -4699,7 +4722,7 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
        ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize)));
      }
 
-      Status s = env_->GetThreadList(&thread_list);
+      ASSERT_OK(env_->GetThreadList(&thread_list));
      for (auto thread : thread_list) {
        operation_count[thread.operation_type]++;
      }
@@ -4719,12 +4742,12 @@ TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) {
    ASSERT_GE(operation_count[ThreadStatus::OP_COMPACTION], 1);
    CancelAllBackgroundWork(db_);
    TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:VerifyPreshutdown");
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    // Record the number of compactions at a time.
    for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) {
      operation_count[i] = 0;
    }
-    Status s = env_->GetThreadList(&thread_list);
+    ASSERT_OK(env_->GetThreadList(&thread_list));
    for (auto thread : thread_list) {
      operation_count[thread.operation_type]++;
    }
@@ -4786,7 +4809,7 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
        ASSERT_OK(Put(ToString(key++), rnd.RandomString(kTestValueSize)));
      }
 
-      Status s = env_->GetThreadList(&thread_list);
+      ASSERT_OK(env_->GetThreadList(&thread_list));
      for (auto thread : thread_list) {
        operation_count[thread.operation_type]++;
      }
@@ -4806,12 +4829,12 @@ TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) {
    CancelAllBackgroundWork(db_);
    TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:Preshutdown");
    TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:VerifyPreshutdown");
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
    // Record the number of compactions at a time.
    for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) {
      operation_count[i] = 0;
    }
-    Status s = env_->GetThreadList(&thread_list);
+    ASSERT_OK(env_->GetThreadList(&thread_list));
    for (auto thread : thread_list) {
      operation_count[thread.operation_type]++;
    }
@@ -4871,8 +4894,8 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
  for (int i = 0; i < 20; i++) {
    ASSERT_OK(Put(Key(keys[i]), CompressibleString(&rnd, 4000)));
  }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
 
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
@@ -4884,8 +4907,8 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel) {
  for (int i = 21; i < 120; i++) {
    ASSERT_OK(Put(Key(keys[i]), CompressibleString(&rnd, 4000)));
  }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
  ASSERT_EQ(NumTableFilesAtLevel(2), 0);
  ASSERT_LT(SizeAtLevel(0) + SizeAtLevel(3) + SizeAtLevel(4),
@@ -4973,14 +4996,14 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel2) {
    std::string value = rnd.RandomString(200);
    ASSERT_OK(Put(Key(keys[i]), value));
    if (i % 25 == 24) {
-      Flush();
-      dbfull()->TEST_WaitForCompact();
+      ASSERT_OK(Flush());
+      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    }
  }
 
-  Flush();
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
 
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
@@ -5018,8 +5041,8 @@ TEST_F(DBTest, DynamicLevelCompressionPerLevel2) {
    std::string value = rnd.RandomString(200);
    ASSERT_OK(Put(Key(keys[i]), value));
    if (i % 100 == 99) {
-      Flush();
-      dbfull()->TEST_WaitForCompact();
+      ASSERT_OK(Flush());
+      ASSERT_OK(dbfull()->TEST_WaitForCompact());
    }
  }
 
@@ -5069,7 +5092,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
    for (int i = 0; i < size; i++) {
      ASSERT_OK(Put(Key(start + stride * i), rnd.RandomString(1024)));
    }
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  };
 
  // Write 3 files that have the same key range.
@@ -5080,7 +5103,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
  gen_l0_kb(0, 64, 1);
  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
  gen_l0_kb(0, 64, 1);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ("0,1", FilesPerLevel());
  std::vector<LiveFileMetaData> metadata;
  db_->GetLiveFilesMetaData(&metadata);
@@ -5099,7 +5122,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
  gen_l0_kb(0, 64, 1);
  ASSERT_EQ("1,1", FilesPerLevel());
  gen_l0_kb(0, 64, 1);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_EQ("0,2", FilesPerLevel());
  metadata.clear();
  db_->GetLiveFilesMetaData(&metadata);
@@ -5121,7 +5144,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
  for (int i = 0; i < 96; ++i) {
    gen_l0_kb(i, 64, 96);
  }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_GT(SizeAtLevel(1), k1MB / 2);
  ASSERT_LT(SizeAtLevel(1), k1MB + k1MB / 2);
 
@@ -5142,7 +5165,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
  for (int i = 0; i < 20; ++i) {
    gen_l0_kb(i, 64, 32);
  }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  uint64_t total_size = SizeAtLevel(1) + SizeAtLevel(2) + SizeAtLevel(3);
  ASSERT_TRUE(total_size < k128KB * 7 * 1.5);
 
@@ -5150,8 +5173,8 @@ TEST_F(DBTest, DynamicCompactionOptions) {
  // Clean up memtable and L0. Block compaction threads. If continue to write
  // and flush memtables. We should see put stop after 8 memtable flushes
  // since level0_stop_writes_trigger = 8
-  dbfull()->TEST_FlushMemTable(true, true);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->TEST_FlushMemTable(true, true));
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  // Block compaction
  test::SleepingBackgroundTask sleeping_task_low;
  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
@@ -5163,7 +5186,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
  WriteOptions wo;
  while (count < 64) {
    ASSERT_OK(Put(Key(count), rnd.RandomString(1024), wo));
-    dbfull()->TEST_FlushMemTable(true, true);
+    ASSERT_OK(dbfull()->TEST_FlushMemTable(true, true));
    count++;
    if (dbfull()->TEST_write_controler().IsStopped()) {
      sleeping_task_low.WakeUp();
@@ -5179,8 +5202,8 @@ TEST_F(DBTest, DynamicCompactionOptions) {
  // Block compaction thread again. Perform the put and memtable flushes
  // until we see the stop after 6 memtable flushes.
  ASSERT_OK(dbfull()->SetOptions({{"level0_stop_writes_trigger", "6"}}));
-  dbfull()->TEST_FlushMemTable(true);
-  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(dbfull()->TEST_FlushMemTable(true));
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
 
  // Block compaction again
@@ -5191,7 +5214,7 @@ TEST_F(DBTest, DynamicCompactionOptions) {
  count = 0;
  while (count < 64) {
    ASSERT_OK(Put(Key(count), rnd.RandomString(1024), wo));
-    dbfull()->TEST_FlushMemTable(true, true);
+    ASSERT_OK(dbfull()->TEST_FlushMemTable(true, true));
    count++;
    if (dbfull()->TEST_write_controler().IsStopped()) {
      sleeping_task_low.WakeUp();
@@ -5208,29 +5231,29 @@ TEST_F(DBTest, DynamicCompactionOptions) {
  // disabled, then TEST_WaitForCompact will be waiting for nothing. Number of
  // L0 files do not change after the call.
ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "true"}})); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_EQ(NumTableFilesAtLevel(0), 0); for (int i = 0; i < 4; ++i) { ASSERT_OK(Put(Key(i), rnd.RandomString(1024))); // Wait for compaction so that put won't stop - dbfull()->TEST_FlushMemTable(true); + ASSERT_OK(dbfull()->TEST_FlushMemTable(true)); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_EQ(NumTableFilesAtLevel(0), 4); // Enable auto compaction and perform the same test, # of L0 files should be // reduced after compaction. ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "false"}})); - dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_EQ(NumTableFilesAtLevel(0), 0); for (int i = 0; i < 4; ++i) { ASSERT_OK(Put(Key(i), rnd.RandomString(1024))); // Wait for compaction so that put won't stop - dbfull()->TEST_FlushMemTable(true); + ASSERT_OK(dbfull()->TEST_FlushMemTable(true)); } - dbfull()->TEST_WaitForCompact(); + ASSERT_OK(dbfull()->TEST_WaitForCompact()); ASSERT_LT(NumTableFilesAtLevel(0), 4); } @@ -5416,8 +5439,8 @@ TEST_F(DBTest, FileCreationRandomFailure) { } // If rocksdb does not do the correct job, internal assert will fail here. - dbfull()->TEST_WaitForFlushMemTable(); - dbfull()->TEST_WaitForCompact(); + ASSERT_TRUE(dbfull()->TEST_WaitForFlushMemTable().IsIOError()); + ASSERT_TRUE(dbfull()->TEST_WaitForCompact().IsIOError()); // verify we have the latest successful update for (int k = 0; k < kTestSize; ++k) { @@ -5471,14 +5494,14 @@ TEST_F(DBTest, DynamicMiscOptions) { ASSERT_OK(dbfull()->SetOptions({{"max_sequential_skip_in_iterations", "4"}})); // Clear memtable and make new option effective - dbfull()->TEST_FlushMemTable(true); + ASSERT_OK(dbfull()->TEST_FlushMemTable(true)); // Trigger reseek assert_reseek_count(200, 1); ASSERT_OK( dbfull()->SetOptions({{"max_sequential_skip_in_iterations", "16"}})); // Clear memtable and make new option effective - dbfull()->TEST_FlushMemTable(true); + ASSERT_OK(dbfull()->TEST_FlushMemTable(true)); // No reseek assert_reseek_count(300, 1); @@ -5641,12 +5664,20 @@ TEST_F(DBTest, CloseSpeedup) { &sleeping_task_high, Env::Priority::HIGH); std::vector filenames; - env_->GetChildren(dbname_, &filenames); + ASSERT_OK(env_->GetChildren(dbname_, &filenames)); + // In Windows, LOCK file cannot be deleted because it is locked by db_test + // After closing db_test, the LOCK file is unlocked and can be deleted // Delete archival files. 
+  bool deleteDir = true;
  for (size_t i = 0; i < filenames.size(); ++i) {
-    env_->DeleteFile(dbname_ + "/" + filenames[i]);
+    Status s = env_->DeleteFile(dbname_ + "/" + filenames[i]);
+    if (!s.ok()) {
+      deleteDir = false;
+    }
+  }
+
+  if (deleteDir) {
+    ASSERT_OK(env_->DeleteDir(dbname_));
  }
-  env_->DeleteDir(dbname_);
 
  DestroyAndReopen(options);
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
@@ -5710,7 +5741,7 @@ TEST_F(DBTest, MergeTestTime) {
  // NOTE: Presumed unnecessary and removed: resetting mock time in env
  ASSERT_EQ(TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME), 0);
-  db_->Put(WriteOptions(), "foo", one);
+  ASSERT_OK(db_->Put(WriteOptions(), "foo", one));
  ASSERT_OK(Flush());
  ASSERT_OK(db_->Merge(WriteOptions(), "foo", two));
  ASSERT_OK(Flush());
@@ -5721,7 +5752,7 @@ TEST_F(DBTest, MergeTestTime) {
  opt.verify_checksums = true;
  opt.snapshot = nullptr;
  std::string result;
-  db_->Get(opt, "foo", &result);
+  ASSERT_OK(db_->Get(opt, "foo", &result));
 
  ASSERT_EQ(2000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME));
 
@@ -5757,7 +5788,7 @@ TEST_P(DBTestWithParam, MergeCompactionTimeTest) {
    ASSERT_OK(db_->Merge(WriteOptions(), "foo", "TEST"));
    ASSERT_OK(Flush());
  }
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
 
  CompactRangeOptions cro;
  cro.exclusive_manual_compaction = exclusive_manual_compaction_;
@@ -5783,10 +5814,10 @@ TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
  // put some data
  for (int table = 0; table < 4; ++table) {
    for (int i = 0; i < 10 + table; ++i) {
-      Put(ToString(table * 100 + i), "val");
+      ASSERT_OK(Put(ToString(table * 100 + i), "val"));
      ++n;
    }
-    Flush();
+    ASSERT_OK(Flush());
  }
 
  CompactRangeOptions cro;
@@ -5798,6 +5829,7 @@ TEST_P(DBTestWithParam, FilterCompactionTimeTest) {
 
  Iterator* itr = db_->NewIterator(ReadOptions());
  itr->SeekToFirst();
+  ASSERT_OK(itr->status());
  ASSERT_EQ(uint64_t{n} * 1000000U,
            TestGetTickerCount(options, FILTER_OPERATION_TOTAL_TIME));
  delete itr;
@@ -5812,7 +5844,7 @@ TEST_F(DBTest, TestLogCleanup) {
  Reopen(options);
 
  for (int i = 0; i < 100000; ++i) {
-    Put(Key(i), "val");
+    ASSERT_OK(Put(Key(i), "val"));
    // only 2 memtables will be alive, so logs_to_free needs to always be below
    // 2
    ASSERT_LT(dbfull()->TEST_LogsToFreeSize(), static_cast<size_t>(3));
@@ -5910,7 +5942,7 @@ TEST_F(DBTest, DISABLED_SuggestCompactRangeTest) {
  // compact it three times
  for (int i = 0; i < 3; ++i) {
    ASSERT_OK(experimental::SuggestCompactRange(db_, nullptr, nullptr));
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
  }
 
  // All files are compacted
@@ -5923,7 +5955,7 @@ TEST_F(DBTest, DISABLED_SuggestCompactRangeTest) {
  // nonoverlapping with the file on level 0
  Slice start("a"), end("b");
  ASSERT_OK(experimental::SuggestCompactRange(db_, &start, &end));
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
 
  // should not compact the level 0 file
  ASSERT_EQ(1, NumTableFilesAtLevel(0));
@@ -5931,7 +5963,7 @@ TEST_F(DBTest, DISABLED_SuggestCompactRangeTest) {
  start = Slice("j");
  end = Slice("m");
  ASSERT_OK(experimental::SuggestCompactRange(db_, &start, &end));
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  ASSERT_TRUE(CompactionFilterFactoryGetContext::IsManual(
      options.compaction_filter_factory.get()));
 
@@ -6019,7 +6051,7 @@ TEST_F(DBTest, CompactRangeWithEmptyBottomLevel) {
  Random rnd(301);
  for (int i = 0; i < kNumL0Files; ++i) {
    ASSERT_OK(Put(Key(0), rnd.RandomString(1024)));
-    Flush();
+    ASSERT_OK(Flush());
  }
  ASSERT_EQ(NumTableFilesAtLevel(0), kNumL0Files);
  ASSERT_EQ(NumTableFilesAtLevel(1), 0);
@@ -6083,7 +6115,7 @@ TEST_F(DBTest, AutomaticConflictsWithManualCompaction) {
  }
  ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
  manual_compaction_thread.join();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
 }
 
 #ifndef ROCKSDB_LITE
@@ -6123,6 +6155,7 @@ TEST_F(DBTest, CompactFilesShouldTriggerAutoCompaction) {
  port::Thread manual_compaction_thread([&]() {
    auto s = db_->CompactFiles(CompactionOptions(), db_->DefaultColumnFamily(),
                               input_files, 0);
+    ASSERT_OK(s);
  });
 
  TEST_SYNC_POINT(
@@ -6141,7 +6174,7 @@ TEST_F(DBTest, CompactFilesShouldTriggerAutoCompaction) {
      "DBTest::CompactFilesShouldTriggerAutoCompaction:End");
 
  manual_compaction_thread.join();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
 
  db_->GetColumnFamilyMetaData(db_->DefaultColumnFamily(), &cf_meta_data);
  ASSERT_LE(cf_meta_data.levels[0].files.size(),
@@ -6166,7 +6199,7 @@ TEST_F(DBTest, LargeBatchWithColumnFamilies) {
      for (;;) {
        std::string data(3000, j++ % 127 + 20);
        data += ToString(j);
-        batch.Put(handles_[0], Slice(data), Slice(data));
+        ASSERT_OK(batch.Put(handles_[0], Slice(data), Slice(data)));
        if (batch.GetDataSize() > write_size) {
          break;
        }
@@ -6270,14 +6303,14 @@ TEST_F(DBTest, DelayedWriteRate) {
                 Env::Priority::LOW);
 
  for (int i = 0; i < 3; i++) {
-    Put(Key(i), std::string(10000, 'x'));
-    Flush();
+    ASSERT_OK(Put(Key(i), std::string(10000, 'x')));
+    ASSERT_OK(Flush());
  }
 
  // These writes will be slowed down to 1KB/s
  uint64_t estimated_sleep_time = 0;
  Random rnd(301);
-  Put("", "");
+  ASSERT_OK(Put("", ""));
  uint64_t cur_rate = options.delayed_write_rate;
  for (int i = 0; i < kTotalFlushes; i++) {
    uint64_t size_memtable = 0;
@@ -6286,14 +6319,14 @@ TEST_F(DBTest, DelayedWriteRate) {
      // Spread the size range to more.
      size_t entry_size = rand_num * rand_num * rand_num;
      WriteOptions wo;
-      Put(Key(i), std::string(entry_size, 'x'), wo);
+      ASSERT_OK(Put(Key(i), std::string(entry_size, 'x'), wo));
      size_memtable += entry_size + 18;
      // Occasionally sleep a while
      if (rnd.Uniform(20) == 6) {
        env_->SleepForMicroseconds(2666);
      }
    }
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    estimated_sleep_time += size_memtable * 1000000u / cur_rate;
    // Slow down twice. One for memtable switch and one for flush finishes.
    cur_rate = static_cast<uint64_t>(static_cast<double>(cur_rate) *
@@ -6343,14 +6376,14 @@ TEST_F(DBTest, HardLimit) {
  int key_idx = 0;
  for (int num = 0; num < 5; num++) {
    GenerateNewFile(&rnd, &key_idx, true);
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  }
 
  ASSERT_EQ(0, callback_count.load());
 
  for (int num = 0; num < 5; num++) {
    GenerateNewFile(&rnd, &key_idx, true);
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
  }
  ASSERT_GE(callback_count.load(), 1);
 
@@ -6432,25 +6465,25 @@ TEST_F(DBTest, SoftLimit) {
  // Generating 360KB in Level 3
  for (int i = 0; i < 72; i++) {
-    Put(Key(i), std::string(5000, 'x'));
+    ASSERT_OK(Put(Key(i), std::string(5000, 'x')));
    if (i % 10 == 0) {
-      dbfull()->TEST_FlushMemTable(true, true);
+      ASSERT_OK(dbfull()->TEST_FlushMemTable(true, true));
    }
  }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  MoveFilesToLevel(3);
 
  // Generating 360KB in Level 2
  for (int i = 0; i < 72; i++) {
-    Put(Key(i), std::string(5000, 'x'));
+    ASSERT_OK(Put(Key(i), std::string(5000, 'x')));
    if (i % 10 == 0) {
-      dbfull()->TEST_FlushMemTable(true, true);
+      ASSERT_OK(dbfull()->TEST_FlushMemTable(true, true));
    }
  }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
  MoveFilesToLevel(2);
 
-  Put(Key(0), "");
+  ASSERT_OK(Put(Key(0), ""));
 
  test::SleepingBackgroundTask sleeping_task_low;
  // Block compactions
@@ -6460,11 +6493,11 @@ TEST_F(DBTest, SoftLimit) {
 
  // Create 3 L0 files, making score of L0 to be 3.
  for (int i = 0; i < 3; i++) {
-    Put(Key(i), std::string(5000, 'x'));
-    Put(Key(100 - i), std::string(5000, 'x'));
+    ASSERT_OK(Put(Key(i), std::string(5000, 'x')));
+    ASSERT_OK(Put(Key(100 - i), std::string(5000, 'x')));
    // Flush the file. File size is around 30KB.
    InstallFlushCallback();
-    dbfull()->TEST_FlushMemTable(true, true);
+    ASSERT_OK(dbfull()->TEST_FlushMemTable(true, true));
    WaitForFlush();
  }
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
@@ -6473,7 +6506,7 @@ TEST_F(DBTest, SoftLimit) {
  sleeping_task_low.WakeUp();
  sleeping_task_low.WaitUntilDone();
  sleeping_task_low.Reset();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
 
  // Now there is one L1 file but doesn't trigger soft_rate_limit
  // The L1 file size is around 30KB.
@@ -6495,11 +6528,11 @@ TEST_F(DBTest, SoftLimit) {
  sleeping_task_low.WaitUntilSleeping();
  // Create 3 L0 files, making score of L0 to be 3
  for (int i = 0; i < 3; i++) {
-    Put(Key(10 + i), std::string(5000, 'x'));
-    Put(Key(90 - i), std::string(5000, 'x'));
+    ASSERT_OK(Put(Key(10 + i), std::string(5000, 'x')));
+    ASSERT_OK(Put(Key(90 - i), std::string(5000, 'x')));
    // Flush the file. File size is around 30KB.
    InstallFlushCallback();
-    dbfull()->TEST_FlushMemTable(true, true);
+    ASSERT_OK(dbfull()->TEST_FlushMemTable(true, true));
    WaitForFlush();
  }
 
@@ -6518,11 +6551,11 @@ TEST_F(DBTest, SoftLimit) {
 
  // Create 3 L0 files, making score of L0 to be 3, higher than L0.
  for (int i = 0; i < 3; i++) {
-    Put(Key(20 + i), std::string(5000, 'x'));
-    Put(Key(80 - i), std::string(5000, 'x'));
+    ASSERT_OK(Put(Key(20 + i), std::string(5000, 'x')));
+    ASSERT_OK(Put(Key(80 - i), std::string(5000, 'x')));
    // Flush the file. File size is around 30KB.
    InstallFlushCallback();
-    dbfull()->TEST_FlushMemTable(true, true);
+    ASSERT_OK(dbfull()->TEST_FlushMemTable(true, true));
    WaitForFlush();
  }
  // Wake up sleep task to enable compaction to run and waits
@@ -6550,8 +6583,8 @@ TEST_F(DBTest, SoftLimit) {
      {"max_bytes_for_level_base", "5000"},
  }));
 
-  Put("", "");
-  Flush();
+  ASSERT_OK(Put("", ""));
+  ASSERT_OK(Flush());
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
  ASSERT_TRUE(listener->CheckCondition(WriteStallCondition::kDelayed));
 
@@ -6584,12 +6617,12 @@ TEST_F(DBTest, LastWriteBufferDelay) {
  for (int i = 0; i < 3; i++) {
    // Fill one mem table
    for (int j = 0; j < kNumKeysPerMemtable; j++) {
-      Put(Key(j), "");
+      ASSERT_OK(Put(Key(j), ""));
    }
    ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
  }
  // Inserting a new entry would create a new mem table, triggering slow down.
-  Put(Key(0), "");
+  ASSERT_OK(Put(Key(0), ""));
  ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
 
  sleeping_task.WakeUp();
@@ -6734,18 +6767,18 @@ TEST_F(DBTest, PauseBackgroundWorkTest) {
  std::vector<port::Thread> threads;
  std::atomic<bool> done(false);
-  db_->PauseBackgroundWork();
+  ASSERT_OK(db_->PauseBackgroundWork());
  threads.emplace_back([&]() {
    Random rnd(301);
    for (int i = 0; i < 10000; ++i) {
-      Put(rnd.RandomString(10), rnd.RandomString(10));
+      ASSERT_OK(Put(rnd.RandomString(10), rnd.RandomString(10)));
    }
    done.store(true);
  });
  env_->SleepForMicroseconds(200000);  // make sure the thread is not done
  ASSERT_FALSE(done.load());
-  db_->ContinueBackgroundWork();
+  ASSERT_OK(db_->ContinueBackgroundWork());
  for (auto& t : threads) {
    t.join();
  }
@@ -6780,6 +6813,7 @@ TEST_F(DBTest, ThreadLocalPtrDeadlock) {
    {
      port::Thread tmp_thread([&] {
        auto it = db_->NewIterator(ReadOptions());
+        ASSERT_OK(it->status());
        delete it;
      });
      tmp_thread.join();
@@ -6871,7 +6905,7 @@ TEST_F(DBTest, CreationTimeOfOldestFile) {
      ASSERT_OK(
          Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
    }
-    Flush();
+    ASSERT_OK(Flush());
  }
 
  // At this point there should be 2 files, one with file_creation_time = 0 and
@@ -6896,7 +6930,7 @@ TEST_F(DBTest, CreationTimeOfOldestFile) {
      ASSERT_OK(
         Put(Key(i * kNumKeysPerFile + j), rnd.RandomString(kValueSize)));
    }
-    Flush();
+    ASSERT_OK(Flush());
  }
 
  // At this point there should be 2 files with non-zero file creation time.
@@ -6931,7 +6965,7 @@ TEST_F(DBTest, MemoryUsageWithMaxWriteBufferSizeToMaintain) {
    std::string value = rnd.RandomString(1000);
    ASSERT_OK(Put("keykey_" + std::to_string(i), value));
 
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
 
    const uint64_t cur_active_mem = cfd->mem()->ApproximateMemoryUsage();
    const uint64_t size_all_mem_table =
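The recurring shape of this diff follows from the ASSERT_STATUS_CHECKED build mode toggled in the Makefile hunk: in such builds, a RocksDB Status asserts at destruction if it was never examined. That is why every status-returning call above is wrapped in ASSERT_OK (or ASSERT_TRUE on a specific error check), and presumably why PromoteL0 assigns each error into its function-scope status and returns that variable, so no Status object dies unchecked. Below is a minimal sketch of the underlying idiom using a simplified stand-in Status class; it is illustrative only, not the RocksDB implementation, and DoWork is a hypothetical example function.

    #include <cassert>

    // Simplified stand-in for a "must check before destruction" status type
    // (illustrative only; RocksDB's real Status differs in detail).
    class Status {
     public:
      Status() = default;  // OK status
      explicit Status(int code) : code_(code) {}
      Status(Status&& other) noexcept : code_(other.code_) {
        other.checked_ = true;  // moving from a status counts as checking it
      }
      Status& operator=(Status&& other) noexcept {
        code_ = other.code_;
        checked_ = false;
        other.checked_ = true;
        return *this;
      }
      ~Status() { assert(checked_); }  // fires if nobody ever looked at it
      bool ok() const {
        checked_ = true;
        return code_ == 0;
      }

     private:
      int code_ = 0;
      mutable bool checked_ = false;
    };

    // Mirrors the PromoteL0 pattern: assign into the function-scope status
    // and return the local, rather than returning a fresh temporary while
    // the local would otherwise be destroyed unchecked.
    Status DoWork(bool fail) {
      Status status;  // function-scope status
      if (fail) {
        status = Status(1);
        return status;
      }
      return status;
    }

    int main() {
      Status s = DoWork(false);
      assert(s.ok());  // the caller must check too, as ASSERT_OK does in tests
      return 0;
    }

Under this model, a bare call like Flush() in a test leaves a temporary Status that is destroyed unchecked and trips the assertion, which is exactly what wrapping each call in ASSERT_OK prevents.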