Add more tests for assert status checked (#7524)
Summary: Added 10 more tests that pass the ASSERT_STATUS_CHECKED test.

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7524
Reviewed By: akankshamahajan15
Differential Revision: D24323093
Pulled By: ajkr
fbshipit-source-id: 28d4106d0ca1740c3b896c755edf82d504b74801
This commit is contained in: parent daab7603f6, commit 02418194d7
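Background (not part of the commit itself): ASSERT_STATUS_CHECKED is a RocksDB build mode under which a rocksdb::Status must be examined before it is destroyed; an ignored Status aborts the program. That is what forces every bare call in the diffs below to be wrapped in ASSERT_OK or explicitly waived with PermitUncheckedError. A minimal sketch of the mechanism, hypothetical and far simpler than the real class:

#include <cassert>

// Toy Status illustrating the enforcement: the object remembers whether
// anyone looked at it and asserts on destruction if nobody did.
class Status {
 public:
  Status() = default;
  ~Status() { assert(checked_); }  // an ignored status trips this assert
  bool ok() const {
    checked_ = true;  // examining the status marks it as checked
    return ok_;
  }
  void PermitUncheckedError() const { checked_ = true; }  // explicit opt-out
 private:
  bool ok_ = true;
  mutable bool checked_ = false;
};

int main() {
  Status s;
  s.PermitUncheckedError();  // without this (or s.ok()), ~Status would assert
  return 0;
}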
Makefile: 33 changed lines (all additions)
@@ -587,6 +587,7 @@ ifdef ASSERT_STATUS_CHECKED
 	blob_file_reader_test \
 	bloom_test \
 	cassandra_format_test \
+	cassandra_functional_test \
 	cassandra_row_merge_test \
 	cassandra_serialize_test \
 	cleanable_test \
@@ -595,6 +596,14 @@ ifdef ASSERT_STATUS_CHECKED
 	crc32c_test \
 	dbformat_test \
 	db_basic_test \
+	compact_files_test \
+	compaction_picker_test \
+	comparator_db_test \
+	db_encryption_test \
+	db_iter_test \
+	db_iter_stress_test \
+	db_log_iter_test \
+	db_bloom_filter_test \
 	db_blob_basic_test \
 	db_blob_index_test \
 	db_block_cache_test \
@@ -615,6 +624,19 @@ ifdef ASSERT_STATUS_CHECKED
 	deletefile_test \
 	external_sst_file_test \
 	options_file_test \
+	db_statistics_test \
+	db_table_properties_test \
+	db_tailing_iter_test \
+	fault_injection_test \
+	listener_test \
+	log_test \
+	manual_compaction_test \
+	obsolete_files_test \
+	perf_context_test \
+	periodic_work_scheduler_test \
+	perf_context_test \
+	version_set_test \
+	wal_manager_test \
 	defer_test \
 	filename_test \
 	dynamic_bloom_test \
@@ -658,6 +680,7 @@ ifdef ASSERT_STATUS_CHECKED
 	ribbon_test \
 	skiplist_test \
 	slice_test \
+	slice_transform_test \
 	sst_dump_test \
 	statistics_test \
 	stats_history_test \
@@ -694,13 +717,23 @@ ifdef ASSERT_STATUS_CHECKED
 	flush_job_test \
 	block_based_filter_block_test \
 	block_fetcher_test \
+	block_test \
+	data_block_hash_index_test \
 	full_filter_block_test \
 	partitioned_filter_block_test \
 	column_family_test \
 	file_reader_writer_test \
+	rate_limiter_test \
 	corruption_test \
+	reduce_levels_test \
+	thread_list_test \
+	compact_on_deletion_collector_test \
 	db_universal_compaction_test \
 	import_column_family_test \
+	option_change_migration_test \
+	cuckoo_table_builder_test \
+	cuckoo_table_db_test \
+	cuckoo_table_reader_test \
 	memory_test \
 	table_test \
 	write_batch_test \
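The Makefile entries above opt these tests into the checked-status build. Assuming the repository's usual Makefile workflow (the list sits behind ifdef ASSERT_STATUS_CHECKED), a single test would be built along the lines of:

  ASSERT_STATUS_CHECKED=1 make db_bloom_filter_test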
@@ -91,9 +91,9 @@ TEST_F(CompactFilesTest, L0ConflictsFiles) {
   // create couple files
   // Background compaction starts and waits in BackgroundCallCompaction:0
   for (int i = 0; i < kLevel0Trigger * 4; ++i) {
-    db->Put(WriteOptions(), ToString(i), "");
-    db->Put(WriteOptions(), ToString(100 - i), "");
-    db->Flush(FlushOptions());
+    ASSERT_OK(db->Put(WriteOptions(), ToString(i), ""));
+    ASSERT_OK(db->Put(WriteOptions(), ToString(100 - i), ""));
+    ASSERT_OK(db->Flush(FlushOptions()));
   }

   ROCKSDB_NAMESPACE::ColumnFamilyMetaData meta;
@@ -138,18 +138,18 @@ TEST_F(CompactFilesTest, ObsoleteFiles) {
   DB* db = nullptr;
   DestroyDB(db_name_, options);
   Status s = DB::Open(options, db_name_, &db);
-  assert(s.ok());
-  assert(db);
+  ASSERT_OK(s);
+  ASSERT_NE(db, nullptr);

   // create couple files
   for (int i = 1000; i < 2000; ++i) {
-    db->Put(WriteOptions(), ToString(i),
-            std::string(kWriteBufferSize / 10, 'a' + (i % 26)));
+    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+                      std::string(kWriteBufferSize / 10, 'a' + (i % 26))));
   }

   auto l0_files = collector->GetFlushedFiles();
   ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files, 1));
-  static_cast_with_check<DBImpl>(db)->TEST_WaitForCompact();
+  ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForCompact());

   // verify all compaction input files are deleted
   for (auto fname : l0_files) {
@@ -182,15 +182,17 @@ TEST_F(CompactFilesTest, NotCutOutputOnLevel0) {

   // create couple files
   for (int i = 0; i < 500; ++i) {
-    db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
+    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+                      std::string(1000, 'a' + (i % 26))));
   }
-  static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable();
+  ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
   auto l0_files_1 = collector->GetFlushedFiles();
   collector->ClearFlushedFiles();
   for (int i = 0; i < 500; ++i) {
-    db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
+    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+                      std::string(1000, 'a' + (i % 26))));
   }
-  static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable();
+  ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
   auto l0_files_2 = collector->GetFlushedFiles();
   ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files_1, 0));
   ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files_2, 0));
@@ -213,13 +215,13 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {
   DB* db = nullptr;
   DestroyDB(db_name_, options);
   Status s = DB::Open(options, db_name_, &db);
-  assert(s.ok());
+  ASSERT_OK(s);
   assert(db);

   // Create 5 files.
   for (int i = 0; i < 5; ++i) {
-    db->Put(WriteOptions(), "key" + ToString(i), "value");
-    db->Flush(FlushOptions());
+    ASSERT_OK(db->Put(WriteOptions(), "key" + ToString(i), "value"));
+    ASSERT_OK(db->Flush(FlushOptions()));
   }

   auto l0_files = collector->GetFlushedFiles();
@@ -237,8 +239,8 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {

   // In the meantime flush another file.
   TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:0");
-  db->Put(WriteOptions(), "key5", "value");
-  db->Flush(FlushOptions());
+  ASSERT_OK(db->Put(WriteOptions(), "key5", "value"));
+  ASSERT_OK(db->Flush(FlushOptions()));
   TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:1");

   compaction_thread.join();
@@ -249,7 +251,7 @@ TEST_F(CompactFilesTest, CapturingPendingFiles) {

   // Make sure we can reopen the DB.
   s = DB::Open(options, db_name_, &db);
-  ASSERT_TRUE(s.ok());
+  ASSERT_OK(s);
   assert(db);
   delete db;
 }
@@ -293,8 +295,8 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
   cf->SetDB(db);

   // Write one L0 file
-  db->Put(WriteOptions(), "K1", "V1");
-  db->Flush(FlushOptions());
+  ASSERT_OK(db->Put(WriteOptions(), "K1", "V1"));
+  ASSERT_OK(db->Flush(FlushOptions()));

   // Compact all L0 files using CompactFiles
   ROCKSDB_NAMESPACE::ColumnFamilyMetaData meta;
@@ -337,8 +339,8 @@ TEST_F(CompactFilesTest, SentinelCompressionType) {
   DB* db = nullptr;
   ASSERT_OK(DB::Open(options, db_name_, &db));

-  db->Put(WriteOptions(), "key", "val");
-  db->Flush(FlushOptions());
+  ASSERT_OK(db->Put(WriteOptions(), "key", "val"));
+  ASSERT_OK(db->Flush(FlushOptions()));

   auto l0_files = collector->GetFlushedFiles();
   ASSERT_EQ(1, l0_files.size());
@@ -377,14 +379,15 @@ TEST_F(CompactFilesTest, GetCompactionJobInfo) {
   DB* db = nullptr;
   DestroyDB(db_name_, options);
   Status s = DB::Open(options, db_name_, &db);
-  assert(s.ok());
+  ASSERT_OK(s);
   assert(db);

   // create couple files
   for (int i = 0; i < 500; ++i) {
-    db->Put(WriteOptions(), ToString(i), std::string(1000, 'a' + (i % 26)));
+    ASSERT_OK(db->Put(WriteOptions(), ToString(i),
+                      std::string(1000, 'a' + (i % 26))));
   }
-  static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable();
+  ASSERT_OK(static_cast_with_check<DBImpl>(db)->TEST_WaitForFlushMemTable());
   auto l0_files_1 = collector->GetFlushedFiles();
   CompactionOptions co;
   co.compression = CompressionType::kLZ4Compression;
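The test-side pattern above repeats throughout the commit: a bare call or a plain assert(s.ok()) becomes ASSERT_OK(...), which both consumes the Status (satisfying the checked-status build) and reports a gtest failure instead of aborting the process. Hypothetical minimal versions of these macros; RocksDB's real ones live in its test utilities and also print the status message on failure:

#include <gtest/gtest.h>

// Minimal stand-ins for the macros used in these tests (sketch only; the
// real definitions additionally stream the Status's ToString() output).
#define ASSERT_OK(s) ASSERT_TRUE((s).ok())
#define EXPECT_OK(s) EXPECT_TRUE((s).ok())
#define ASSERT_NOK(s) ASSERT_FALSE((s).ok())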
@@ -141,7 +141,7 @@ class CompactionPickerTest : public testing::Test {
     if (temp_vstorage_) {
       VersionBuilder builder(FileOptions(), &ioptions_, nullptr,
                              vstorage_.get(), nullptr);
-      builder.SaveTo(temp_vstorage_.get());
+      ASSERT_OK(builder.SaveTo(temp_vstorage_.get()));
       vstorage_ = std::move(temp_vstorage_);
     }
     vstorage_->CalculateBaseBytes(ioptions_, mutable_cf_options_);
@@ -129,10 +129,10 @@ TEST_F(CuckooTableDBTest, Flush) {
   ASSERT_OK(Put("key1", "v1"));
   ASSERT_OK(Put("key2", "v2"));
   ASSERT_OK(Put("key3", "v3"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   TablePropertiesCollection ptc;
-  reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
+  ASSERT_OK(reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
   ASSERT_EQ(1U, ptc.size());
   ASSERT_EQ(3U, ptc.begin()->second->num_entries);
   ASSERT_EQ("1", FilesPerLevel());
@@ -146,9 +146,9 @@ TEST_F(CuckooTableDBTest, Flush) {
   ASSERT_OK(Put("key4", "v4"));
   ASSERT_OK(Put("key5", "v5"));
   ASSERT_OK(Put("key6", "v6"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

-  reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
+  ASSERT_OK(reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
   ASSERT_EQ(2U, ptc.size());
   auto row = ptc.begin();
   ASSERT_EQ(3U, row->second->num_entries);
@@ -164,8 +164,8 @@ TEST_F(CuckooTableDBTest, Flush) {
   ASSERT_OK(Delete("key6"));
   ASSERT_OK(Delete("key5"));
   ASSERT_OK(Delete("key4"));
-  dbfull()->TEST_FlushMemTable();
-  reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());
+  ASSERT_OK(reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
   ASSERT_EQ(3U, ptc.size());
   row = ptc.begin();
   ASSERT_EQ(3U, row->second->num_entries);
@@ -186,10 +186,10 @@ TEST_F(CuckooTableDBTest, FlushWithDuplicateKeys) {
   ASSERT_OK(Put("key1", "v1"));
   ASSERT_OK(Put("key2", "v2"));
   ASSERT_OK(Put("key1", "v3"));  // Duplicate
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   TablePropertiesCollection ptc;
-  reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc);
+  ASSERT_OK(reinterpret_cast<DB*>(dbfull())->GetPropertiesOfAllTables(&ptc));
   ASSERT_EQ(1U, ptc.size());
   ASSERT_EQ(2U, ptc.begin()->second->num_entries);
   ASSERT_EQ("1", FilesPerLevel());
@@ -219,7 +219,7 @@ TEST_F(CuckooTableDBTest, Uint64Comparator) {
   ASSERT_OK(Put(Uint64Key(1), "v1"));
   ASSERT_OK(Put(Uint64Key(2), "v2"));
   ASSERT_OK(Put(Uint64Key(3), "v3"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   ASSERT_EQ("v1", Get(Uint64Key(1)));
   ASSERT_EQ("v2", Get(Uint64Key(2)));
@@ -228,10 +228,10 @@ TEST_F(CuckooTableDBTest, Uint64Comparator) {

   // Add more keys.
   ASSERT_OK(Delete(Uint64Key(2)));  // Delete.
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());
   ASSERT_OK(Put(Uint64Key(3), "v0"));  // Update.
   ASSERT_OK(Put(Uint64Key(4), "v4"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());
   ASSERT_EQ("v1", Get(Uint64Key(1)));
   ASSERT_EQ("NOT_FOUND", Get(Uint64Key(2)));
   ASSERT_EQ("v0", Get(Uint64Key(3)));
@@ -251,11 +251,11 @@ TEST_F(CuckooTableDBTest, CompactionIntoMultipleFiles) {
   for (int idx = 0; idx < 28; ++idx) {
     ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + char(idx))));
   }
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   ASSERT_EQ("1", FilesPerLevel());

-  dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
-                              true /* disallow trivial move */);
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
+                                        true /* disallow trivial move */));
   ASSERT_EQ("0,2", FilesPerLevel());
   for (int idx = 0; idx < 28; ++idx) {
     ASSERT_EQ(std::string(10000, 'a' + char(idx)), Get(Key(idx)));
@@ -274,15 +274,15 @@ TEST_F(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) {
   for (int idx = 0; idx < 11; ++idx) {
     ASSERT_OK(Put(Key(idx), std::string(10000, 'a')));
   }
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   ASSERT_EQ("1", FilesPerLevel());

   // Generate one more file in level-0, and should trigger level-0 compaction
   for (int idx = 0; idx < 11; ++idx) {
     ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + char(idx))));
   }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr));

   ASSERT_EQ("0,1", FilesPerLevel());
   for (int idx = 0; idx < 11; ++idx) {
@@ -303,7 +303,7 @@ TEST_F(CuckooTableDBTest, AdaptiveTable) {
   ASSERT_OK(Put("key1", "v1"));
   ASSERT_OK(Put("key2", "v2"));
   ASSERT_OK(Put("key3", "v3"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   // Write some keys using plain table.
   std::shared_ptr<TableFactory> block_based_factory(
@@ -319,7 +319,7 @@ TEST_F(CuckooTableDBTest, AdaptiveTable) {
   Reopen(&options);
   ASSERT_OK(Put("key4", "v4"));
   ASSERT_OK(Put("key1", "v5"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   // Write some keys using block based table.
   options.table_factory.reset(NewAdaptiveTableFactory(
@@ -328,7 +328,7 @@ TEST_F(CuckooTableDBTest, AdaptiveTable) {
   Reopen(&options);
   ASSERT_OK(Put("key5", "v6"));
   ASSERT_OK(Put("key2", "v7"));
-  dbfull()->TEST_FlushMemTable();
+  ASSERT_OK(dbfull()->TEST_FlushMemTable());

   ASSERT_EQ("v5", Get("key1"));
   ASSERT_EQ("v7", Get("key2"));
@@ -128,8 +128,8 @@ TEST_P(DBBloomFilterTestDefFormatVersion, KeyMayExist) {
     ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));

     ASSERT_OK(Flush(1));
-    dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1],
-                                true /* disallow trivial move */);
+    ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1],
+                                          true /* disallow trivial move */));

     numopen = TestGetTickerCount(options, NO_FILE_OPENS);
     cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
@@ -178,7 +178,7 @@ TEST_F(DBBloomFilterTest, GetFilterByPrefixBloomCustomPrefixExtractor) {
   ASSERT_OK(dbfull()->Put(wo, "barbarbar2", "foo2"));
   ASSERT_OK(dbfull()->Put(wo, "foofoofoo", "bar"));

-  dbfull()->Flush(fo);
+  ASSERT_OK(dbfull()->Flush(fo));

   ASSERT_EQ("foo", Get("barbarbar"));
   ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
@@ -244,7 +244,7 @@ TEST_F(DBBloomFilterTest, GetFilterByPrefixBloom) {
   ASSERT_OK(dbfull()->Put(wo, "barbarbar2", "foo2"));
   ASSERT_OK(dbfull()->Put(wo, "foofoofoo", "bar"));

-  dbfull()->Flush(fo);
+  ASSERT_OK(dbfull()->Flush(fo));

   ASSERT_EQ("foo", Get("barbarbar"));
   ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
@@ -297,7 +297,7 @@ TEST_F(DBBloomFilterTest, WholeKeyFilterProp) {
   // ranges.
   ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
   ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-  dbfull()->Flush(fo);
+  ASSERT_OK(dbfull()->Flush(fo));

   Reopen(options);
   ASSERT_EQ("NOT_FOUND", Get("foo"));
@@ -328,7 +328,7 @@ TEST_F(DBBloomFilterTest, WholeKeyFilterProp) {
   // ranges.
   ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
   ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

   // Reopen with both of whole key off and prefix extractor enabled.
   // Still no bloom filter should be used.
@@ -351,7 +351,7 @@ TEST_F(DBBloomFilterTest, WholeKeyFilterProp) {
   // ranges.
   ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
   ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

   options.prefix_extractor.reset();
   bbto.whole_key_filtering = true;
@@ -364,7 +364,7 @@ TEST_F(DBBloomFilterTest, WholeKeyFilterProp) {
   // not filtered out by key ranges.
   ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
   ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
-  Flush();
+  ASSERT_OK(Flush());

   // Now we have two files:
   // File 1: An older file with prefix bloom.
@@ -467,7 +467,7 @@ TEST_P(DBBloomFilterTestWithParam, BloomFilter) {
     for (int i = 0; i < N; i += 100) {
       ASSERT_OK(Put(1, Key(i), Key(i)));
     }
-    Flush(1);
+    ASSERT_OK(Flush(1));

     // Prevent auto compactions triggered by seeks
     env_->delay_sstable_sync_.store(true, std::memory_order_release);
@@ -880,7 +880,7 @@ TEST_F(DBBloomFilterTest, ContextCustomFilterPolicy) {

     // Destroy
     ASSERT_OK(dbfull()->DropColumnFamily(handles_[1]));
-    dbfull()->DestroyColumnFamilyHandle(handles_[1]);
+    ASSERT_OK(dbfull()->DestroyColumnFamilyHandle(handles_[1]));
     handles_[1] = nullptr;
   }
 }
@@ -1444,9 +1444,9 @@ void PrefixScanInit(DBBloomFilterTest* dbtest) {
   snprintf(buf, sizeof(buf), "%02d______:end", 10);
   keystr = std::string(buf);
   ASSERT_OK(dbtest->Put(keystr, keystr));
-  dbtest->Flush();
-  dbtest->dbfull()->CompactRange(CompactRangeOptions(), nullptr,
-                                 nullptr);  // move to level 1
+  ASSERT_OK(dbtest->Flush());
+  ASSERT_OK(dbtest->dbfull()->CompactRange(CompactRangeOptions(), nullptr,
+                                           nullptr));  // move to level 1

   // GROUP 1
   for (int i = 1; i <= small_range_sstfiles; i++) {
@@ -1563,21 +1563,21 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) {
   for (int key : keys) {
     ASSERT_OK(Put(1, Key(key), "val"));
     if (++num_inserted % 1000 == 0) {
-      dbfull()->TEST_WaitForFlushMemTable();
-      dbfull()->TEST_WaitForCompact();
+      ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+      ASSERT_OK(dbfull()->TEST_WaitForCompact());
     }
   }
   ASSERT_OK(Put(1, Key(0), "val"));
   ASSERT_OK(Put(1, Key(numkeys), "val"));
   ASSERT_OK(Flush(1));
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   if (NumTableFilesAtLevel(0, 1) == 0) {
     // No Level 0 file. Create one.
     ASSERT_OK(Put(1, Key(0), "val"));
     ASSERT_OK(Put(1, Key(numkeys), "val"));
     ASSERT_OK(Flush(1));
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
   }

   for (int i = 1; i < numkeys; i += 2) {
@@ -1682,7 +1682,8 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) {
       BottommostLevelCompaction::kSkip;
   compact_options.change_level = true;
   compact_options.target_level = 7;
-  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
+  ASSERT_TRUE(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)
+                  .IsNotSupported());

   ASSERT_EQ(trivial_move, 1);
   ASSERT_EQ(non_trivial_move, 0);
@@ -1714,10 +1715,10 @@ TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) {

 int CountIter(std::unique_ptr<Iterator>& iter, const Slice& key) {
   int count = 0;
-  for (iter->Seek(key); iter->Valid() && iter->status() == Status::OK();
-       iter->Next()) {
+  for (iter->Seek(key); iter->Valid(); iter->Next()) {
     count++;
   }
+  EXPECT_OK(iter->status());
   return count;
 }

@@ -1747,7 +1748,7 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterUpperBound) {
   ASSERT_OK(Put("abcdxxx1", "val2"));
   ASSERT_OK(Put("abcdxxx2", "val3"));
   ASSERT_OK(Put("abcdxxx3", "val4"));
-  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));
   {
     // prefix_extractor has not changed, BF will always be read
     Slice upper_bound("abce");
@@ -1905,7 +1906,7 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterMultipleSST) {
   ASSERT_OK(Put("foo4", "bar4"));
   ASSERT_OK(Put("foq5", "bar5"));
   ASSERT_OK(Put("fpb", "1"));
-  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));
   {
     // BF is cappped:3 now
     std::unique_ptr<Iterator> iter_tmp(db_->NewIterator(read_options));
@@ -1929,7 +1930,7 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterMultipleSST) {
   ASSERT_OK(Put("foo7", "bar7"));
   ASSERT_OK(Put("foq8", "bar8"));
   ASSERT_OK(Put("fpc", "2"));
-  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));
   {
     // BF is fixed:2 now
     std::unique_ptr<Iterator> iter_tmp(db_->NewIterator(read_options));
@@ -2040,10 +2041,10 @@ TEST_F(DBBloomFilterTest, DynamicBloomFilterNewColumnFamily) {
       ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_PREFIX_USEFUL), 0);
     }
     ASSERT_OK(dbfull()->DropColumnFamily(handles_[2]));
-    dbfull()->DestroyColumnFamilyHandle(handles_[2]);
+    ASSERT_OK(dbfull()->DestroyColumnFamilyHandle(handles_[2]));
     handles_[2] = nullptr;
     ASSERT_OK(dbfull()->DropColumnFamily(handles_[1]));
-    dbfull()->DestroyColumnFamilyHandle(handles_[1]);
+    ASSERT_OK(dbfull()->DestroyColumnFamilyHandle(handles_[1]));
     handles_[1] = nullptr;
     iteration++;
   }
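The CountIter change above shows the idiomatic iteration pattern under checked statuses: loop on Valid() alone, then check status() once after the loop, since Valid() returning false can mean either end-of-data or an error. A sketch of the same pattern against the public API (assuming the standard RocksDB headers; CountFrom is a hypothetical helper):

#include <cassert>
#include <memory>

#include "rocksdb/db.h"

// Count the keys at or after `key`, checking the iterator's final status
// exactly once; this both distinguishes errors from end-of-data and marks
// the Status as checked.
int CountFrom(rocksdb::DB* db, const rocksdb::Slice& key) {
  std::unique_ptr<rocksdb::Iterator> iter(
      db->NewIterator(rocksdb::ReadOptions()));
  int count = 0;
  for (iter->Seek(key); iter->Valid(); iter->Next()) {
    ++count;
  }
  assert(iter->status().ok());  // or ASSERT_OK/EXPECT_OK inside a test
  return count;
}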
@@ -1391,8 +1391,6 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {

   SuperVersionContext sv_context(/* create_superversion */ true);

-  Status status;
-
   InstrumentedMutexLock guard_lock(&mutex_);

   // only allow one thread refitting
@@ -1456,8 +1454,9 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
                     "[%s] Apply version edit:\n%s", cfd->GetName().c_str(),
                     edit.DebugString().data());

-    status = versions_->LogAndApply(cfd, mutable_cf_options, &edit, &mutex_,
-                                    directories_.GetDbDir());
+    Status status = versions_->LogAndApply(cfd, mutable_cf_options, &edit,
+                                           &mutex_, directories_.GetDbDir());

     InstallSuperVersionAndScheduleWork(cfd, &sv_context, mutable_cf_options);

     ROCKS_LOG_DEBUG(immutable_db_options_.info_log, "[%s] LogAndApply: %s\n",
@@ -1468,12 +1467,14 @@ Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
                      "[%s] After refitting:\n%s", cfd->GetName().c_str(),
                      cfd->current()->DebugString().data());
     }
+    sv_context.Clean();
+    refitting_level_ = false;
+
+    return status;
   }

-  sv_context.Clean();
-
   refitting_level_ = false;
-  return status;
+  return Status::OK();
 }

 int DBImpl::NumberLevels(ColumnFamilyHandle* column_family) {
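The ReFitLevel rewrite above scopes the Status to the branch that produces it and gives each exit an explicit return value, so no Status object can outlive the point where it is checked. A simplified sketch of the resulting shape (hypothetical helper names, not the real function):

#include "rocksdb/status.h"

using rocksdb::Status;

Status ApplyEdit();   // stand-in for versions_->LogAndApply(...)
void CleanupRefit();  // stand-in for sv_context.Clean() / refitting_level_

Status RefitLevelSketch(bool level_changed) {
  if (level_changed) {
    Status status = ApplyEdit();  // declared where it is produced
    CleanupRefit();
    return status;                // returned, hence checked by the caller
  }
  CleanupRefit();
  return Status::OK();
}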
@@ -191,7 +191,8 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
       // set of all files in the directory. We'll exclude files that are still
       // alive in the subsequent processings.
       std::vector<std::string> files;
-      env_->GetChildren(path, &files).PermitUncheckedError();  // Ignore errors
+      Status s = env_->GetChildren(path, &files);
+      s.PermitUncheckedError();  // TODO: What should we do on error?
       for (const std::string& file : files) {
         uint64_t number;
         FileType type;
@@ -207,7 +208,8 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
           continue;
         }

-        // TODO(icanadi) clean up this mess to avoid having one-off "/" prefixes
+        // TODO(icanadi) clean up this mess to avoid having one-off "/"
+        // prefixes
         job_context->full_scan_candidate_files.emplace_back("/" + file, path);
       }
     }
@@ -215,9 +217,8 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
     // Add log files in wal_dir
     if (immutable_db_options_.wal_dir != dbname_) {
       std::vector<std::string> log_files;
-      env_->GetChildren(immutable_db_options_.wal_dir,
-                        &log_files)
-          .PermitUncheckedError();  // Ignore errors
+      Status s = env_->GetChildren(immutable_db_options_.wal_dir, &log_files);
+      s.PermitUncheckedError();  // TODO: What should we do on error?
       for (const std::string& log_file : log_files) {
         job_context->full_scan_candidate_files.emplace_back(
             log_file, immutable_db_options_.wal_dir);
@@ -227,9 +228,9 @@ void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
     if (!immutable_db_options_.db_log_dir.empty() &&
         immutable_db_options_.db_log_dir != dbname_) {
       std::vector<std::string> info_log_files;
-      // Ignore errors
-      env_->GetChildren(immutable_db_options_.db_log_dir, &info_log_files)
-          .PermitUncheckedError();
+      Status s =
+          env_->GetChildren(immutable_db_options_.db_log_dir, &info_log_files);
+      s.PermitUncheckedError();  // TODO: What should we do on error?
       for (std::string& log_file : info_log_files) {
         job_context->full_scan_candidate_files.emplace_back(
             log_file, immutable_db_options_.db_log_dir);
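In non-test code the commit uses the other escape hatch: FindObsoleteFiles deliberately tolerates a failed directory listing, so the Status is bound to a name and explicitly waived with PermitUncheckedError(), while the TODO comments record that real error handling is still open. A small sketch of that pattern (ScanDirBestEffort is a hypothetical helper; Env::GetChildren is the real API):

#include <string>
#include <vector>

#include "rocksdb/env.h"

// Best-effort directory scan in the style of FindObsoleteFiles: failure is
// tolerated (*files simply stays empty), but the Status must still be
// marked as checked or an ASSERT_STATUS_CHECKED build aborts.
void ScanDirBestEffort(rocksdb::Env* env, const std::string& path,
                       std::vector<std::string>* files) {
  rocksdb::Status s = env->GetChildren(path, files);
  s.PermitUncheckedError();  // TODO: decide what to do on error
}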
@@ -147,7 +147,8 @@ DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) {
     // DeleteScheduler::CleanupDirectory on the same dir later, it will be
     // safe
     std::vector<std::string> filenames;
-    result.env->GetChildren(result.wal_dir, &filenames).PermitUncheckedError();
+    Status s = result.env->GetChildren(result.wal_dir, &filenames);
+    s.PermitUncheckedError();  //**TODO: What to do on error?
     for (std::string& filename : filenames) {
       if (filename.find(".log.trash", filename.length() -
                                           std::string(".log.trash").length()) !=
@@ -1739,9 +1740,8 @@ Status DBImpl::Open(const DBOptions& db_options, const std::string& dbname,
     paths.erase(std::unique(paths.begin(), paths.end()), paths.end());
     for (auto& path : paths) {
       std::vector<std::string> existing_files;
-      // TODO: Check for errors here?
       impl->immutable_db_options_.env->GetChildren(path, &existing_files)
-          .PermitUncheckedError();
+          .PermitUncheckedError();  //**TODO: What do to on error?
       for (auto& file_name : existing_files) {
         uint64_t file_number;
         FileType file_type;
@@ -163,7 +163,6 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
   StopWatch write_sw(env_, immutable_db_options_.statistics.get(), DB_WRITE);

   write_thread_.JoinBatchGroup(&w);
-  Status status;
   if (w.state == WriteThread::STATE_PARALLEL_MEMTABLE_WRITER) {
     // we are a non-leader in a parallel group

@@ -193,8 +192,6 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
     }
     assert(w.state == WriteThread::STATE_COMPLETED);
     // STATE_COMPLETED conditional below handles exit
-
-    status = w.FinalStatus();
   }
   if (w.state == WriteThread::STATE_COMPLETED) {
     if (log_used != nullptr) {
@@ -204,13 +201,11 @@ Status DBImpl::WriteImpl(const WriteOptions& write_options,
       *seq_used = w.sequence;
     }
     // write is complete and leader has updated sequence
-    // Should we handle it?
-    status.PermitUncheckedError();
     return w.FinalStatus();
   }
   // else we are the leader of the write batch group
   assert(w.state == WriteThread::STATE_GROUP_LEADER);
+  Status status;
   // Once reaches this point, the current writer "w" will try to do its write
   // job. It may also pick up some of the remaining writers in the "writers_"
   // when it finds suitable, and finish them in the same write batch.
@@ -531,6 +526,8 @@ Status DBImpl::PipelinedWriteImpl(const WriteOptions& write_options,
   PERF_TIMER_STOP(write_pre_and_post_process_time);

   IOStatus io_s;
+  io_s.PermitUncheckedError();  // Allow io_s to be uninitialized
+
   if (w.status.ok() && !write_options.disableWAL) {
     PERF_TIMER_GUARD(write_wal_time);
     stats->AddDBStats(InternalStats::kIntStatsWriteDoneBySelf, 1);
@@ -776,6 +773,7 @@ Status DBImpl::WriteImplWALOnly(
   }
   Status status;
   IOStatus io_s;
+  io_s.PermitUncheckedError();  // Allow io_s to be uninitialized
   if (!write_options.disableWAL) {
     io_s = ConcurrentWriteToWAL(write_group, log_used, &last_sequence, seq_inc);
     status = io_s;
@@ -392,6 +392,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) {

     db_iter->SeekToLast();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }
   // Test case to check SeekToLast with iterate_upper_bound set
   // (same key put may times - SeekToLast should start with the
@@ -489,6 +490,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) {

     db_iter->SeekToLast();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }
   // Test to check the SeekToLast() with the iterate_upper_bound set
   // (Deletion cases)
@@ -596,6 +598,7 @@ TEST_F(DBIteratorTest, DBIteratorPrevNext) {

     db_iter->Prev();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }

   {
@@ -689,6 +692,7 @@ TEST_F(DBIteratorTest, DBIteratorEmpty) {
         nullptr /* read_callback */));
     db_iter->SeekToLast();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }

   {
@@ -702,6 +706,7 @@ TEST_F(DBIteratorTest, DBIteratorEmpty) {
         nullptr /* read_callback */));
     db_iter->SeekToFirst();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }
 }

@@ -744,6 +749,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkipCountSkips) {

   db_iter->Prev();
   ASSERT_TRUE(!db_iter->Valid());
+  ASSERT_OK(db_iter->status());
   ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 3u);
 }

@@ -788,6 +794,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
       db_iter->Prev();

     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }
 }

@@ -820,6 +827,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
     db_iter->Prev();

     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }

   {
@@ -855,6 +863,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
       db_iter->Prev();

     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }
 }

@@ -873,9 +882,11 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
         nullptr /* read_callback */));
     db_iter->SeekToLast();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());

     db_iter->SeekToFirst();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }

   TestIterator* internal_iter = new TestIterator(BytewiseComparator());
@@ -896,6 +907,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {

     db_iter->Prev();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());

     db_iter->SeekToFirst();
     ASSERT_TRUE(db_iter->Valid());
@@ -904,6 +916,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {

     db_iter->Next();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }

   {
@@ -943,6 +956,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {
       db_iter->Prev();

     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }
 }

@@ -983,6 +997,7 @@ TEST_F(DBIteratorTest, DBIteratorUseSkip) {

     db_iter->Prev();
     ASSERT_TRUE(!db_iter->Valid());
+    ASSERT_OK(db_iter->status());
   }
 }
 }
@@ -1039,7 +1054,7 @@ TEST_F(DBIteratorTest, DBIteratorSkipInternalKeys) {

     db_iter->Prev();
     ASSERT_TRUE(!db_iter->Valid());
-    ASSERT_TRUE(db_iter->status().ok());
+    ASSERT_OK(db_iter->status());
   }

 // Test to make sure that the request will *not* fail as incomplete if
@@ -3136,6 +3151,7 @@ TEST_F(DBIteratorTest, SeekToFirstLowerBound) {
     if (i == kNumKeys + 1) {
       // lower bound was beyond the last key
       ASSERT_FALSE(db_iter->Valid());
+      ASSERT_OK(db_iter->status());
     } else {
       ASSERT_TRUE(db_iter->Valid());
       int expected;
@@ -33,9 +33,8 @@ class DBTestXactLogIterator : public DBTestBase {
 };

 namespace {
-SequenceNumber ReadRecords(
-    std::unique_ptr<TransactionLogIterator>& iter,
-    int& count) {
+SequenceNumber ReadRecords(std::unique_ptr<TransactionLogIterator>& iter,
+                           int& count, bool expect_ok = true) {
   count = 0;
   SequenceNumber lastSequence = 0;
   BatchResult res;
@@ -47,6 +46,11 @@ SequenceNumber ReadRecords(
     EXPECT_OK(iter->status());
     iter->Next();
   }
+  if (expect_ok) {
+    EXPECT_OK(iter->status());
+  } else {
+    EXPECT_NOK(iter->status());
+  }
   return res.sequence;
 }

@@ -64,9 +68,9 @@ TEST_F(DBTestXactLogIterator, TransactionLogIterator) {
   Options options = OptionsForLogIterTest();
   DestroyAndReopen(options);
   CreateAndReopenWithCF({"pikachu"}, options);
-  Put(0, "key1", DummyString(1024));
-  Put(1, "key2", DummyString(1024));
-  Put(1, "key2", DummyString(1024));
+  ASSERT_OK(Put(0, "key1", DummyString(1024)));
+  ASSERT_OK(Put(1, "key2", DummyString(1024)));
+  ASSERT_OK(Put(1, "key2", DummyString(1024)));
   ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3U);
   {
     auto iter = OpenTransactionLogIter(0);
@@ -75,9 +79,9 @@ TEST_F(DBTestXactLogIterator, TransactionLogIterator) {
   ReopenWithColumnFamilies({"default", "pikachu"}, options);
   env_->SleepForMicroseconds(2 * 1000 * 1000);
   {
-    Put(0, "key4", DummyString(1024));
-    Put(1, "key5", DummyString(1024));
-    Put(0, "key6", DummyString(1024));
+    ASSERT_OK(Put(0, "key4", DummyString(1024)));
+    ASSERT_OK(Put(1, "key5", DummyString(1024)));
+    ASSERT_OK(Put(0, "key6", DummyString(1024)));
   }
   {
     auto iter = OpenTransactionLogIter(0);
@@ -109,15 +113,15 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorRace) {
     ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
     Options options = OptionsForLogIterTest();
     DestroyAndReopen(options);
-    Put("key1", DummyString(1024));
-    dbfull()->Flush(FlushOptions());
-    Put("key2", DummyString(1024));
-    dbfull()->Flush(FlushOptions());
-    Put("key3", DummyString(1024));
-    dbfull()->Flush(FlushOptions());
-    Put("key4", DummyString(1024));
+    ASSERT_OK(Put("key1", DummyString(1024)));
+    ASSERT_OK(dbfull()->Flush(FlushOptions()));
+    ASSERT_OK(Put("key2", DummyString(1024)));
+    ASSERT_OK(dbfull()->Flush(FlushOptions()));
+    ASSERT_OK(Put("key3", DummyString(1024)));
+    ASSERT_OK(dbfull()->Flush(FlushOptions()));
+    ASSERT_OK(Put("key4", DummyString(1024)));
     ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 4U);
-    dbfull()->FlushWAL(false);
+    ASSERT_OK(dbfull()->FlushWAL(false));

     {
       auto iter = OpenTransactionLogIter(0);
@@ -130,11 +134,11 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorRace) {
      // condition
      FlushOptions flush_options;
      flush_options.wait = false;
-     dbfull()->Flush(flush_options);
+     ASSERT_OK(dbfull()->Flush(flush_options));

      // "key5" would be written in a new memtable and log
-     Put("key5", DummyString(1024));
-     dbfull()->FlushWAL(false);
+     ASSERT_OK(Put("key5", DummyString(1024)));
+     ASSERT_OK(dbfull()->FlushWAL(false));
      {
        // this iter would miss "key4" if not fixed
        auto iter = OpenTransactionLogIter(0);
@@ -149,14 +153,14 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorStallAtLastRecord) {
   do {
     Options options = OptionsForLogIterTest();
     DestroyAndReopen(options);
-    Put("key1", DummyString(1024));
+    ASSERT_OK(Put("key1", DummyString(1024)));
     auto iter = OpenTransactionLogIter(0);
     ASSERT_OK(iter->status());
     ASSERT_TRUE(iter->Valid());
     iter->Next();
     ASSERT_TRUE(!iter->Valid());
     ASSERT_OK(iter->status());
-    Put("key2", DummyString(1024));
+    ASSERT_OK(Put("key2", DummyString(1024)));
     iter->Next();
     ASSERT_OK(iter->status());
     ASSERT_TRUE(iter->Valid());
@@ -167,9 +171,9 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorCheckAfterRestart) {
   do {
     Options options = OptionsForLogIterTest();
     DestroyAndReopen(options);
-    Put("key1", DummyString(1024));
-    Put("key2", DummyString(1023));
-    dbfull()->Flush(FlushOptions());
+    ASSERT_OK(Put("key1", DummyString(1024)));
+    ASSERT_OK(Put("key2", DummyString(1023)));
+    ASSERT_OK(dbfull()->Flush(FlushOptions()));
     Reopen(options);
     auto iter = OpenTransactionLogIter(0);
     ExpectRecords(2, iter);
@@ -181,10 +185,10 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) {
   Options options = OptionsForLogIterTest();
   DestroyAndReopen(options);
   for (int i = 0; i < 1024; i++) {
-    Put("key"+ToString(i), DummyString(10));
+    ASSERT_OK(Put("key" + ToString(i), DummyString(10)));
   }
-  dbfull()->Flush(FlushOptions());
-  dbfull()->FlushWAL(false);
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));
+  ASSERT_OK(dbfull()->FlushWAL(false));
   // Corrupt this log to create a gap
   ROCKSDB_NAMESPACE::VectorLogPtr wal_files;
   ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files));
@@ -197,13 +201,13 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) {
   }

   // Insert a new entry to a new log file
-  Put("key1025", DummyString(10));
-  dbfull()->FlushWAL(false);
+  ASSERT_OK(Put("key1025", DummyString(10)));
+  ASSERT_OK(dbfull()->FlushWAL(false));
   // Try to read from the beginning. Should stop before the gap and read less
   // than 1025 entries
   auto iter = OpenTransactionLogIter(0);
   int count;
-  SequenceNumber last_sequence_read = ReadRecords(iter, count);
+  SequenceNumber last_sequence_read = ReadRecords(iter, count, false);
   ASSERT_LT(last_sequence_read, 1025U);
   // Try to read past the gap, should be able to seek to key1025
   auto iter2 = OpenTransactionLogIter(last_sequence_read + 1);
@@ -217,15 +221,15 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorBatchOperations) {
     DestroyAndReopen(options);
     CreateAndReopenWithCF({"pikachu"}, options);
     WriteBatch batch;
-    batch.Put(handles_[1], "key1", DummyString(1024));
-    batch.Put(handles_[0], "key2", DummyString(1024));
-    batch.Put(handles_[1], "key3", DummyString(1024));
-    batch.Delete(handles_[0], "key2");
-    dbfull()->Write(WriteOptions(), &batch);
-    Flush(1);
-    Flush(0);
+    ASSERT_OK(batch.Put(handles_[1], "key1", DummyString(1024)));
+    ASSERT_OK(batch.Put(handles_[0], "key2", DummyString(1024)));
+    ASSERT_OK(batch.Put(handles_[1], "key3", DummyString(1024)));
+    ASSERT_OK(batch.Delete(handles_[0], "key2"));
+    ASSERT_OK(dbfull()->Write(WriteOptions(), &batch));
+    ASSERT_OK(Flush(1));
+    ASSERT_OK(Flush(0));
     ReopenWithColumnFamilies({"default", "pikachu"}, options);
-    Put(1, "key4", DummyString(1024));
+    ASSERT_OK(Put(1, "key4", DummyString(1024)));
     auto iter = OpenTransactionLogIter(3);
     ExpectRecords(2, iter);
   } while (ChangeCompactOptions());
@@ -237,13 +241,13 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorBlobs) {
   CreateAndReopenWithCF({"pikachu"}, options);
   {
     WriteBatch batch;
-    batch.Put(handles_[1], "key1", DummyString(1024));
-    batch.Put(handles_[0], "key2", DummyString(1024));
-    batch.PutLogData(Slice("blob1"));
-    batch.Put(handles_[1], "key3", DummyString(1024));
-    batch.PutLogData(Slice("blob2"));
-    batch.Delete(handles_[0], "key2");
-    dbfull()->Write(WriteOptions(), &batch);
+    ASSERT_OK(batch.Put(handles_[1], "key1", DummyString(1024)));
+    ASSERT_OK(batch.Put(handles_[0], "key2", DummyString(1024)));
+    ASSERT_OK(batch.PutLogData(Slice("blob1")));
+    ASSERT_OK(batch.Put(handles_[1], "key3", DummyString(1024)));
+    ASSERT_OK(batch.PutLogData(Slice("blob2")));
+    ASSERT_OK(batch.Delete(handles_[0], "key2"));
+    ASSERT_OK(dbfull()->Write(WriteOptions(), &batch));
     ReopenWithColumnFamilies({"default", "pikachu"}, options);
|
ReopenWithColumnFamilies({"default", "pikachu"}, options);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -268,7 +272,7 @@ TEST_F(DBTestXactLogIterator, TransactionLogIteratorBlobs) {
|
|||||||
return Status::OK();
|
return Status::OK();
|
||||||
}
|
}
|
||||||
} handler;
|
} handler;
|
||||||
res.writeBatchPtr->Iterate(&handler);
|
ASSERT_OK(res.writeBatchPtr->Iterate(&handler));
|
||||||
ASSERT_EQ(
|
ASSERT_EQ(
|
||||||
"Put(1, key1, 1024)"
|
"Put(1, key1, 1024)"
|
||||||
"Put(0, key2, 1024)"
|
"Put(0, key2, 1024)"
|
||||||
|
@@ -170,7 +170,7 @@ TEST_F(DBSSTTest, DontDeleteMovedFile) {
     ASSERT_OK(Flush());
   }
   // this should execute both L0->L1 and L1->(move)->L2 compactions
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ("0,0,1", FilesPerLevel(0));

   // If the moved file is actually deleted (the move-safeguard in
@@ -218,7 +218,7 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
     ASSERT_OK(Flush());
   }
   // this should execute both L0->L1 and L1->(move)->L2 compactions
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ("0,0,1", FilesPerLevel(0));

   test::SleepingBackgroundTask blocking_thread;
@@ -264,9 +264,9 @@ TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
   // finish the flush!
   blocking_thread.WakeUp();
   blocking_thread.WaitUntilDone();
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   // File just flushed is too big for L0 and L1 so gets moved to L2.
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ("0,0,1,0,1", FilesPerLevel(0));

   metadata.clear();
@@ -302,8 +302,8 @@ TEST_F(DBSSTTest, DBWithSstFileManager) {
   for (int i = 0; i < 25; i++) {
     GenerateNewRandomFile(&rnd);
     ASSERT_OK(Flush());
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
     // Verify that we are tracking all sst files in dbname_
     std::unordered_map<std::string, uint64_t> files_in_db;
     ASSERT_OK(GetAllSSTFiles(&files_in_db));
@@ -608,7 +608,7 @@ TEST_F(DBSSTTest, OpenDBWithExistingTrash) {
   Destroy(last_options_);

   // Add some trash files to the db directory so the DB can clean them up
-  env_->CreateDirIfMissing(dbname_);
+  ASSERT_OK(env_->CreateDirIfMissing(dbname_));
   ASSERT_OK(WriteStringToFile(env_, "abc", dbname_ + "/" + "001.sst.trash"));
   ASSERT_OK(WriteStringToFile(env_, "abc", dbname_ + "/" + "002.sst.trash"));
   ASSERT_OK(WriteStringToFile(env_, "abc", dbname_ + "/" + "003.sst.trash"));
@@ -733,7 +733,7 @@ TEST_F(DBSSTTest, DestroyDBWithRateLimitedDelete) {
   int num_sst_files = 0;
   int num_wal_files = 0;
   std::vector<std::string> db_files;
-  env_->GetChildren(dbname_, &db_files);
+  ASSERT_OK(env_->GetChildren(dbname_, &db_files));
   for (std::string f : db_files) {
     if (f.substr(f.find_last_of(".") + 1) == "sst") {
       num_sst_files++;
@@ -822,7 +822,7 @@ TEST_F(DBSSTTest, CancellingCompactionsWorks) {
     ASSERT_OK(Put(Key(i), rnd.RandomString(50)));
   }
   ASSERT_OK(Flush());
-  dbfull()->TEST_WaitForCompact(true);
+  ASSERT_OK(dbfull()->TEST_WaitForCompact(true));

   // Because we set a callback in CancelledCompaction, we actually
   // let the compaction run
@@ -137,7 +137,7 @@ TEST_F(DBStatisticsTest, ResetStats) {
     ASSERT_EQ(1, TestGetTickerCount(options, NUMBER_KEYS_WRITTEN));
     options.statistics->histogramData(DB_WRITE, &histogram_data);
     ASSERT_GT(histogram_data.max, 0.0);
-    options.statistics->Reset();
+    ASSERT_OK(options.statistics->Reset());
   }
 }
 }
@@ -65,9 +65,9 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) {
   // Create 4 tables
   for (int table = 0; table < 4; ++table) {
     for (int i = 0; i < 10 + table; ++i) {
-      db_->Put(WriteOptions(), ToString(table * 100 + i), "val");
+      ASSERT_OK(db_->Put(WriteOptions(), ToString(table * 100 + i), "val"));
     }
-    db_->Flush(FlushOptions());
+    ASSERT_OK(db_->Flush(FlushOptions()));
   }

   // 1. Read table properties directly from file
@@ -161,14 +161,14 @@ TEST_F(DBTablePropertiesTest, GetPropertiesOfTablesInRange) {
   for (int i = 0; i < 10000; i++) {
     ASSERT_OK(Put(test::RandomKey(&rnd, 5), rnd.RandomString(102)));
   }
-  Flush();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(Flush());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   if (NumTableFilesAtLevel(0) == 0) {
     ASSERT_OK(Put(test::RandomKey(&rnd, 5), rnd.RandomString(102)));
-    Flush();
+    ASSERT_OK(Flush());
   }

-  db_->PauseBackgroundWork();
+  ASSERT_OK(db_->PauseBackgroundWork());

   // Ensure that we have at least L0, L1 and L2
   ASSERT_GT(NumTableFilesAtLevel(0), 0);
@@ -236,8 +236,8 @@ TEST_F(DBTablePropertiesTest, GetColumnFamilyNameProperty) {
   // Create one table per CF, then verify it was created with the column family
   // name property.
   for (uint32_t cf = 0; cf < 2; ++cf) {
-    Put(cf, "key", "val");
-    Flush(cf);
+    ASSERT_OK(Put(cf, "key", "val"));
+    ASSERT_OK(Flush(cf));

     TablePropertiesCollection fname_to_props;
     ASSERT_OK(db_->GetPropertiesOfAllTables(handles_[cf], &fname_to_props));
@@ -260,17 +260,17 @@ TEST_F(DBTablePropertiesTest, GetDbIdentifiersProperty) {
   CreateAndReopenWithCF({"goku"}, CurrentOptions());

   for (uint32_t cf = 0; cf < 2; ++cf) {
-    Put(cf, "key", "val");
-    Put(cf, "foo", "bar");
-    Flush(cf);
+    ASSERT_OK(Put(cf, "key", "val"));
+    ASSERT_OK(Put(cf, "foo", "bar"));
+    ASSERT_OK(Flush(cf));

     TablePropertiesCollection fname_to_props;
     ASSERT_OK(db_->GetPropertiesOfAllTables(handles_[cf], &fname_to_props));
     ASSERT_EQ(1U, fname_to_props.size());

     std::string id, sid;
-    db_->GetDbIdentity(id);
-    db_->GetDbSessionId(sid);
+    ASSERT_OK(db_->GetDbIdentity(id));
+    ASSERT_OK(db_->GetDbSessionId(sid));
     ASSERT_EQ(id, fname_to_props.begin()->second->db_id);
     ASSERT_EQ(sid, fname_to_props.begin()->second->db_session_id);
   }
@@ -298,9 +298,9 @@ TEST_P(DBTableHostnamePropertyTest, DbHostLocationProperty) {
   CreateAndReopenWithCF({"goku"}, opts);

   for (uint32_t cf = 0; cf < 2; ++cf) {
-    Put(cf, "key", "val");
-    Put(cf, "foo", "bar");
-    Flush(cf);
+    ASSERT_OK(Put(cf, "key", "val"));
+    ASSERT_OK(Put(cf, "foo", "bar"));
+    ASSERT_OK(Flush(cf));

     TablePropertiesCollection fname_to_props;
     ASSERT_OK(db_->GetPropertiesOfAllTables(handles_[cf], &fname_to_props));
@@ -356,8 +356,8 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) {

   // add an L1 file to prevent tombstones from dropping due to obsolescence
   // during flush
-  Put(Key(0), "val");
-  Flush();
+  ASSERT_OK(Put(Key(0), "val"));
+  ASSERT_OK(Flush());
   MoveFilesToLevel(1);

   DeletionTriggeredCompactionTestListener *listener =
@@ -368,14 +368,14 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) {
   for (int i = 0; i < kNumKeys; ++i) {
     if (i >= kNumKeys - kWindowSize &&
         i < kNumKeys - kWindowSize + kNumDelsTrigger) {
-      Delete(Key(i));
+      ASSERT_OK(Delete(Key(i)));
     } else {
-      Put(Key(i), "val");
+      ASSERT_OK(Put(Key(i), "val"));
     }
   }
-  Flush();
+  ASSERT_OK(Flush());

-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(0, NumTableFilesAtLevel(0));

   // Change the window size and deletion trigger and ensure new values take
@@ -389,14 +389,14 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) {
   for (int i = 0; i < kNumKeys; ++i) {
     if (i >= kNumKeys - kWindowSize &&
         i < kNumKeys - kWindowSize + kNumDelsTrigger) {
-      Delete(Key(i));
+      ASSERT_OK(Delete(Key(i)));
     } else {
-      Put(Key(i), "val");
+      ASSERT_OK(Put(Key(i), "val"));
     }
   }
-  Flush();
+  ASSERT_OK(Flush());

-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(0, NumTableFilesAtLevel(0));

   // Change the window size to disable delete triggered compaction
@@ -408,14 +408,14 @@ TEST_P(DBTablePropertiesTest, DeletionTriggeredCompactionMarking) {
   for (int i = 0; i < kNumKeys; ++i) {
     if (i >= kNumKeys - kWindowSize &&
         i < kNumKeys - kWindowSize + kNumDelsTrigger) {
-      Delete(Key(i));
+      ASSERT_OK(Delete(Key(i)));
     } else {
-      Put(Key(i), "val");
+      ASSERT_OK(Put(Key(i), "val"));
     }
   }
-  Flush();
+  ASSERT_OK(Flush());

-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_EQ(1, NumTableFilesAtLevel(0));
   ASSERT_LT(0, opts.statistics->getTickerCount(COMPACT_WRITE_BYTES_MARKED));
   ASSERT_LT(0, opts.statistics->getTickerCount(COMPACT_READ_BYTES_MARKED));
@@ -438,8 +438,8 @@ TEST_P(DBTablePropertiesTest, RatioBasedDeletionTriggeredCompactionMarking) {

   // Add an L2 file to prevent tombstones from dropping due to obsolescence
   // during flush
-  Put(Key(0), "val");
-  Flush();
+  ASSERT_OK(Put(Key(0), "val"));
+  ASSERT_OK(Flush());
   MoveFilesToLevel(2);

   auto* listener = new DeletionTriggeredCompactionTestListener();
@@ -179,7 +179,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) {

     if (i % 100 == 99) {
       ASSERT_OK(Flush(1));
-      dbfull()->TEST_WaitForCompact();
+      ASSERT_OK(dbfull()->TEST_WaitForCompact());
       if (i == 299) {
         file_iters_deleted = true;
       }
@@ -411,7 +411,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) {
   it->Next();
   // Not valid since "21" is over the upper bound.
   ASSERT_FALSE(it->Valid());
+  ASSERT_OK(it->status());
   // This keeps track of the number of times NeedToSeekImmutable() was true.
   int immutable_seeks = 0;
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
@@ -424,6 +424,7 @@ TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) {
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();

   ASSERT_FALSE(it->Valid());
+  ASSERT_OK(it->status());
   ASSERT_EQ(0, immutable_seeks);
 }

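The two `ASSERT_OK(it->status())` additions above are not a formality: when `Iterator::Valid()` returns false, the iterator may have reached the end of the data or hit an error, and only `status()` distinguishes the two. A minimal sketch of the idiom, assuming a default-column-family scan (the `CountEntries` helper is hypothetical, not part of this patch):

#include <cassert>
#include <memory>
#include "rocksdb/db.h"

int CountEntries(rocksdb::DB* db) {
  std::unique_ptr<rocksdb::Iterator> it(
      db->NewIterator(rocksdb::ReadOptions()));
  int n = 0;
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    ++n;  // visit each live key
  }
  // A false Valid() must mean end-of-data, not an I/O error.
  assert(it->status().ok());
  return n;
}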
@@ -1129,11 +1129,12 @@ std::string DBTestBase::FilesPerLevel(int cf) {

 size_t DBTestBase::CountFiles() {
   std::vector<std::string> files;
-  env_->GetChildren(dbname_, &files);
+  EXPECT_OK(env_->GetChildren(dbname_, &files));

   std::vector<std::string> logfiles;
   if (dbname_ != last_options_.wal_dir) {
-    env_->GetChildren(last_options_.wal_dir, &logfiles);
+    Status s = env_->GetChildren(last_options_.wal_dir, &logfiles);
+    EXPECT_TRUE(s.ok() || s.IsNotFound());
   }

   return files.size() + logfiles.size();
@@ -1266,8 +1267,8 @@ void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) {
   }
   ASSERT_OK(Put("key" + rnd->RandomString(7), rnd->RandomString(200)));
   if (!nowait) {
-    dbfull()->TEST_WaitForFlushMemTable();
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
   }
 }

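Note why `CountFiles()` uses `EXPECT_OK` rather than `ASSERT_OK`: gtest's fatal `ASSERT_*` macros expand to a bare `return;` on failure, so they only compile in functions returning `void`, while `CountFiles()` returns a `size_t`. The non-fatal `EXPECT_*` family records the failure and keeps executing, which is the only option in a value-returning helper. A short sketch of the constraint (`CountSomething` is a hypothetical helper, not from this patch):

// ASSERT_OK would not compile here: the enclosing function is non-void.
size_t CountSomething(rocksdb::Env* env, const std::string& dir) {
  std::vector<std::string> names;
  EXPECT_OK(env->GetChildren(dir, &names));  // records failure, continues
  return names.size();
}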
@@ -192,7 +192,7 @@ class FaultInjectionTest
     for (int i = start_idx; i < start_idx + num_vals; i++) {
       Slice key = Key(i, &key_space);
       batch.Clear();
-      batch.Put(key, Value(i, &value_space));
+      ASSERT_OK(batch.Put(key, Value(i, &value_space)));
       ASSERT_OK(db_->Write(write_options, &batch));
     }
   }
@@ -272,12 +272,12 @@ class FaultInjectionTest
     for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
       ASSERT_OK(db_->Delete(WriteOptions(), iter->key()));
     }
+    ASSERT_OK(iter->status());
     delete iter;

     FlushOptions flush_options;
     flush_options.wait = true;
-    db_->Flush(flush_options);
+    ASSERT_OK(db_->Flush(flush_options));
   }

   // rnd cannot be null for kResetDropRandomUnsyncedData
@@ -310,7 +310,7 @@ class FaultInjectionTest

     Build(write_options, 0, num_pre_sync);
     if (sync_use_compact_) {
-      db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
     }
     write_options.sync = false;
     Build(write_options, num_pre_sync, num_post_sync);
@@ -342,7 +342,7 @@ class FaultInjectionTest
   }

   void WaitCompactionFinish() {
-    static_cast<DBImpl*>(db_->GetRootDB())->TEST_WaitForCompact();
+    ASSERT_OK(static_cast<DBImpl*>(db_->GetRootDB())->TEST_WaitForCompact());
     ASSERT_OK(db_->Put(WriteOptions(), "", ""));
   }
 };
@@ -409,7 +409,7 @@ TEST_P(FaultInjectionTest, WriteOptionSyncTest) {
   write_options.sync = true;
   ASSERT_OK(
       db_->Put(write_options, Key(2, &key_space), Value(2, &value_space)));
-  db_->FlushWAL(false);
+  ASSERT_OK(db_->FlushWAL(false));

   env_->SetFilesystemActive(false);
   NoWriteTestReopenWithFault(kResetDropAndDeleteUnsynced);
@@ -450,7 +450,7 @@ TEST_P(FaultInjectionTest, UninstalledCompaction) {
   Build(WriteOptions(), 0, kNumKeys);
   FlushOptions flush_options;
   flush_options.wait = true;
-  db_->Flush(flush_options);
+  ASSERT_OK(db_->Flush(flush_options));
   ASSERT_OK(db_->Put(WriteOptions(), "", ""));
   TEST_SYNC_POINT("FaultInjectionTest::FaultTest:0");
   TEST_SYNC_POINT("FaultInjectionTest::FaultTest:1");
@@ -521,9 +521,9 @@ TEST_P(FaultInjectionTest, WriteBatchWalTerminationTest) {
   wo.sync = true;
   wo.disableWAL = false;
   WriteBatch batch;
-  batch.Put("cats", "dogs");
+  ASSERT_OK(batch.Put("cats", "dogs"));
   batch.MarkWalTerminationPoint();
-  batch.Put("boys", "girls");
+  ASSERT_OK(batch.Put("boys", "girls"));
   ASSERT_OK(db_->Write(wo, &batch));

   env_->SetFilesystemActive(false);
@@ -47,11 +47,7 @@ class ForwardLevelIterator : public InternalIterator {
         pinned_iters_mgr_(nullptr),
         prefix_extractor_(prefix_extractor),
         allow_unprepared_value_(allow_unprepared_value) {
-    /*
-    NOTE needed for ASSERT_STATUS_CHECKED
-    in MergeOperatorPinningTest/MergeOperatorPinningTest.TailingIterator
-    */
-    status_.PermitUncheckedError();
+    status_.PermitUncheckedError();  // Allow uninitialized status through
   }

   ~ForwardLevelIterator() override {
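For context on the `PermitUncheckedError()` call kept above: in ASSERT_STATUS_CHECKED builds, a `rocksdb::Status` remembers whether anyone inspected it and aborts in its destructor if not, and `PermitUncheckedError()` marks a status as deliberately ignored so a member like `status_` can be overwritten without tripping the check. An illustrative sketch of that mechanism, assuming a simplified status type (this is not RocksDB's actual implementation; names here are for the example only):

#include <cassert>

class CheckedStatus {
 public:
  CheckedStatus() = default;
  ~CheckedStatus() { assert(checked_); }  // abort if nobody inspected it
  bool ok() const {
    checked_ = true;  // any inspection counts as a check
    return code_ == 0;
  }
  void PermitUncheckedError() const { checked_ = true; }  // explicit opt-out
 private:
  int code_ = 0;
  mutable bool checked_ = false;
};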
@@ -192,10 +192,10 @@ TEST_F(EventListenerTest, OnSingleDBCompactionTest) {
   ASSERT_OK(Put(7, "popovich", std::string(90000, 'p')));
   for (int i = 1; i < 8; ++i) {
     ASSERT_OK(Flush(i));
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
     ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[i],
                                      nullptr, nullptr));
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
   }

   ASSERT_EQ(listener->compacted_dbs_.size(), cf_names.size());
@@ -211,6 +211,10 @@ class TestFlushListener : public EventListener {
       : slowdown_count(0), stop_count(0), db_closed(), env_(env), test_(test) {
     db_closed = false;
   }

+  virtual ~TestFlushListener() {
+    prev_fc_info_.status.PermitUncheckedError();  // Ignore the status
+  }
   void OnTableFileCreated(
       const TableFileCreationInfo& info) override {
     // remember the info for later checking the FlushJobInfo.
@@ -333,7 +337,7 @@ TEST_F(EventListenerTest, OnSingleDBFlushTest) {
   ASSERT_OK(Put(7, "popovich", std::string(90000, 'p')));
   for (int i = 1; i < 8; ++i) {
     ASSERT_OK(Flush(i));
-    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
     ASSERT_EQ(listener->flushed_dbs_.size(), i);
     ASSERT_EQ(listener->flushed_column_family_names_.size(), i);
   }
@@ -417,7 +421,7 @@ TEST_F(EventListenerTest, MultiDBMultiListeners) {
     ASSERT_OK(DB::Open(options, dbname_ + ToString(d), &db));
     for (size_t c = 0; c < cf_names.size(); ++c) {
       ColumnFamilyHandle* handle;
-      db->CreateColumnFamily(cf_opts, cf_names[c], &handle);
+      ASSERT_OK(db->CreateColumnFamily(cf_opts, cf_names[c], &handle));
       handles.push_back(handle);
     }

@@ -435,7 +439,8 @@ TEST_F(EventListenerTest, MultiDBMultiListeners) {
   for (size_t c = 0; c < cf_names.size(); ++c) {
     for (int d = 0; d < kNumDBs; ++d) {
       ASSERT_OK(dbs[d]->Flush(FlushOptions(), vec_handles[d][c]));
-      static_cast_with_check<DBImpl>(dbs[d])->TEST_WaitForFlushMemTable();
+      ASSERT_OK(
+          static_cast_with_check<DBImpl>(dbs[d])->TEST_WaitForFlushMemTable());
     }
   }

@@ -494,10 +499,10 @@ TEST_F(EventListenerTest, DisableBGCompaction) {
   // keep writing until writes are forced to stop.
   for (int i = 0; static_cast<int>(cf_meta.file_count) < kSlowdownTrigger * 10;
        ++i) {
-    Put(1, ToString(i), std::string(10000, 'x'), WriteOptions());
+    ASSERT_OK(Put(1, ToString(i), std::string(10000, 'x'), WriteOptions()));
     FlushOptions fo;
     fo.allow_write_stall = true;
-    db_->Flush(fo, handles_[1]);
+    ASSERT_OK(db_->Flush(fo, handles_[1]));
     db_->GetColumnFamilyMetaData(handles_[1], &cf_meta);
   }
   ASSERT_GE(listener->slowdown_count, kSlowdownTrigger * 9);
@@ -534,7 +539,7 @@ TEST_F(EventListenerTest, CompactionReasonLevel) {
   for (int i = 0; i < 4; i++) {
     GenerateNewRandomFile(&rnd);
   }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   ASSERT_EQ(listener->compaction_reasons_.size(), 1);
   ASSERT_EQ(listener->compaction_reasons_[0],
@@ -551,14 +556,14 @@ TEST_F(EventListenerTest, CompactionReasonLevel) {
   }

   // Do a trivial move from L0 -> L1
-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

   options.max_bytes_for_level_base = 1;
   Close();
   listener->compaction_reasons_.clear();
   Reopen(options);

-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   ASSERT_GT(listener->compaction_reasons_.size(), 1);

   for (auto compaction_reason : listener->compaction_reasons_) {
@@ -570,7 +575,7 @@ TEST_F(EventListenerTest, CompactionReasonLevel) {
   listener->compaction_reasons_.clear();
   Reopen(options);

-  Put("key", "value");
+  ASSERT_OK(Put("key", "value"));
   CompactRangeOptions cro;
   cro.bottommost_level_compaction = BottommostLevelCompaction::kForceOptimized;
   ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
@@ -604,7 +609,7 @@ TEST_F(EventListenerTest, CompactionReasonUniversal) {
   for (int i = 0; i < 8; i++) {
     GenerateNewRandomFile(&rnd);
   }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   ASSERT_GT(listener->compaction_reasons_.size(), 0);
   for (auto compaction_reason : listener->compaction_reasons_) {
@@ -622,7 +627,7 @@ TEST_F(EventListenerTest, CompactionReasonUniversal) {
   for (int i = 0; i < 8; i++) {
     GenerateNewRandomFile(&rnd);
   }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   ASSERT_GT(listener->compaction_reasons_.size(), 0);
   for (auto compaction_reason : listener->compaction_reasons_) {
@@ -634,7 +639,7 @@ TEST_F(EventListenerTest, CompactionReasonUniversal) {
   listener->compaction_reasons_.clear();
   Reopen(options);

-  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));

   ASSERT_GT(listener->compaction_reasons_.size(), 0);
   for (auto compaction_reason : listener->compaction_reasons_) {
@@ -663,7 +668,7 @@ TEST_F(EventListenerTest, CompactionReasonFIFO) {
   for (int i = 0; i < 4; i++) {
     GenerateNewRandomFile(&rnd);
   }
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   ASSERT_GT(listener->compaction_reasons_.size(), 0);
   for (auto compaction_reason : listener->compaction_reasons_) {
@@ -783,7 +788,7 @@ TEST_F(EventListenerTest, TableFileCreationListenersTest) {
   ASSERT_OK(Put("foo", "aaa"));
   ASSERT_OK(Put("bar", "bbb"));
   ASSERT_OK(Flush());
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   listener->CheckAndResetCounters(1, 1, 0, 0, 0, 0);
   ASSERT_OK(Put("foo", "aaa1"));
   ASSERT_OK(Put("bar", "bbb1"));
@@ -796,21 +801,23 @@ TEST_F(EventListenerTest, TableFileCreationListenersTest) {
   ASSERT_OK(Put("foo", "aaa2"));
   ASSERT_OK(Put("bar", "bbb2"));
   ASSERT_OK(Flush());
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   listener->CheckAndResetCounters(1, 1, 0, 0, 0, 0);

   const Slice kRangeStart = "a";
   const Slice kRangeEnd = "z";
-  dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(
+      dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd));
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   listener->CheckAndResetCounters(0, 0, 0, 1, 1, 0);

   ASSERT_OK(Put("foo", "aaa3"));
   ASSERT_OK(Put("bar", "bbb3"));
   ASSERT_OK(Flush());
   test_env->SetStatus(Status::NotSupported("not supported"));
-  dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd);
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_NOK(
+      dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd));
+  ASSERT_NOK(dbfull()->TEST_WaitForCompact());
   listener->CheckAndResetCounters(1, 1, 0, 1, 1, 1);
   Close();
 }
@@ -1076,8 +1083,8 @@ TEST_F(EventListenerTest, OnFileOperationTest) {
   }
   DestroyAndReopen(options);
   ASSERT_OK(Put("foo", "aaa"));
-  dbfull()->Flush(FlushOptions());
-  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_OK(dbfull()->Flush(FlushOptions()));
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
   ASSERT_GE(listener->file_writes_.load(),
             listener->file_writes_success_.load());
   ASSERT_GT(listener->file_writes_.load(), 0);
@@ -191,7 +191,7 @@ class LogTest : public ::testing::TestWithParam<std::tuple<int, bool>> {
   Slice* get_reader_contents() { return &reader_contents_; }

   void Write(const std::string& msg) {
-    writer_.AddRecord(Slice(msg));
+    ASSERT_OK(writer_.AddRecord(Slice(msg)));
   }

   size_t WrittenBytes() const {
@@ -689,8 +689,8 @@ TEST_P(LogTest, Recycle) {
       new test::OverwritingStringSink(get_reader_contents()),
       "" /* don't care */));
   Writer recycle_writer(std::move(dest_holder), 123, true);
-  recycle_writer.AddRecord(Slice("foooo"));
-  recycle_writer.AddRecord(Slice("bar"));
+  ASSERT_OK(recycle_writer.AddRecord(Slice("foooo")));
+  ASSERT_OK(recycle_writer.AddRecord(Slice("bar")));
   ASSERT_GE(get_reader_contents()->size(), log::kBlockSize * 2);
   ASSERT_EQ("foooo", Read());
   ASSERT_EQ("bar", Read());
@@ -782,11 +782,13 @@ class RetriableLogTest : public ::testing::TestWithParam<int> {
     return file->contents_;
   }

-  void Encode(const std::string& msg) { log_writer_->AddRecord(Slice(msg)); }
+  void Encode(const std::string& msg) {
+    ASSERT_OK(log_writer_->AddRecord(Slice(msg)));
+  }

   void Write(const Slice& data) {
-    writer_->Append(data);
-    writer_->Sync(true);
+    ASSERT_OK(writer_->Append(data));
+    ASSERT_OK(writer_->Sync(true));
   }

   bool TryRead(std::string* result) {
@@ -100,13 +100,13 @@ TEST_F(ManualCompactionTest, CompactTouchesAllKeys) {
   options.compaction_filter = new DestroyAllCompactionFilter();
   ASSERT_OK(DB::Open(options, dbname_, &db));

-  db->Put(WriteOptions(), Slice("key1"), Slice("destroy"));
-  db->Put(WriteOptions(), Slice("key2"), Slice("destroy"));
-  db->Put(WriteOptions(), Slice("key3"), Slice("value3"));
-  db->Put(WriteOptions(), Slice("key4"), Slice("destroy"));
+  ASSERT_OK(db->Put(WriteOptions(), Slice("key1"), Slice("destroy")));
+  ASSERT_OK(db->Put(WriteOptions(), Slice("key2"), Slice("destroy")));
+  ASSERT_OK(db->Put(WriteOptions(), Slice("key3"), Slice("value3")));
+  ASSERT_OK(db->Put(WriteOptions(), Slice("key4"), Slice("destroy")));

   Slice key4("key4");
-  db->CompactRange(CompactRangeOptions(), nullptr, &key4);
+  ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, &key4));
   Iterator* itr = db->NewIterator(ReadOptions());
   itr->SeekToFirst();
   ASSERT_TRUE(itr->Valid());
@@ -135,21 +135,21 @@ TEST_F(ManualCompactionTest, Test) {
   // create first key range
   WriteBatch batch;
   for (int i = 0; i < kNumKeys; i++) {
-    batch.Put(Key1(i), "value for range 1 key");
+    ASSERT_OK(batch.Put(Key1(i), "value for range 1 key"));
   }
   ASSERT_OK(db->Write(WriteOptions(), &batch));

   // create second key range
   batch.Clear();
   for (int i = 0; i < kNumKeys; i++) {
-    batch.Put(Key2(i), "value for range 2 key");
+    ASSERT_OK(batch.Put(Key2(i), "value for range 2 key"));
   }
   ASSERT_OK(db->Write(WriteOptions(), &batch));

   // delete second key range
   batch.Clear();
   for (int i = 0; i < kNumKeys; i++) {
-    batch.Delete(Key2(i));
+    ASSERT_OK(batch.Delete(Key2(i)));
   }
   ASSERT_OK(db->Write(WriteOptions(), &batch));

@@ -160,7 +160,7 @@ TEST_F(ManualCompactionTest, Test) {
   Slice greatest(end_key.data(), end_key.size());

   // commenting out the line below causes the example to work correctly
-  db->CompactRange(CompactRangeOptions(), &least, &greatest);
+  ASSERT_OK(db->CompactRange(CompactRangeOptions(), &least, &greatest));

   // count the keys
   Iterator* iter = db->NewIterator(ReadOptions());
@@ -205,7 +205,7 @@ TEST_F(ManualCompactionTest, SkipLevel) {
     Slice start("5");
     Slice end("7");
    filter->Reset();
-    db->CompactRange(CompactRangeOptions(), &start, &end);
+    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end));
     ASSERT_EQ(0, filter->NumKeys());
   }

@@ -215,7 +215,7 @@ TEST_F(ManualCompactionTest, SkipLevel) {
     Slice start("3");
     Slice end("7");
     filter->Reset();
-    db->CompactRange(CompactRangeOptions(), &start, &end);
+    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end));
     ASSERT_EQ(2, filter->NumKeys());
     ASSERT_EQ(0, filter->KeyLevel("4"));
     ASSERT_EQ(0, filter->KeyLevel("8"));
@@ -227,7 +227,7 @@ TEST_F(ManualCompactionTest, SkipLevel) {
     // no file has keys in range (-inf, 0]
     Slice end("0");
     filter->Reset();
-    db->CompactRange(CompactRangeOptions(), nullptr, &end);
+    ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, &end));
     ASSERT_EQ(0, filter->NumKeys());
   }

@@ -237,7 +237,7 @@ TEST_F(ManualCompactionTest, SkipLevel) {
     // no file has keys in range [9, inf)
     Slice start("9");
     filter->Reset();
-    db->CompactRange(CompactRangeOptions(), &start, nullptr);
+    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, nullptr));
     ASSERT_EQ(0, filter->NumKeys());
   }

@@ -248,7 +248,7 @@ TEST_F(ManualCompactionTest, SkipLevel) {
     Slice start("2");
     Slice end("2");
     filter->Reset();
-    db->CompactRange(CompactRangeOptions(), &start, &end);
+    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end));
     ASSERT_EQ(1, filter->NumKeys());
     ASSERT_EQ(0, filter->KeyLevel("2"));
   }
@@ -260,7 +260,7 @@ TEST_F(ManualCompactionTest, SkipLevel) {
     Slice start("2");
     Slice end("5");
     filter->Reset();
-    db->CompactRange(CompactRangeOptions(), &start, &end);
+    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, &end));
     ASSERT_EQ(3, filter->NumKeys());
     ASSERT_EQ(1, filter->KeyLevel("2"));
     ASSERT_EQ(1, filter->KeyLevel("4"));
@@ -273,7 +273,7 @@ TEST_F(ManualCompactionTest, SkipLevel) {
     // [0, inf) overlaps all files
     Slice start("0");
     filter->Reset();
-    db->CompactRange(CompactRangeOptions(), &start, nullptr);
+    ASSERT_OK(db->CompactRange(CompactRangeOptions(), &start, nullptr));
     ASSERT_EQ(4, filter->NumKeys());
     // 1 is first compacted to L1 and then further compacted into [2, 4, 8],
     // so finally the logged level for 1 is L1.
@@ -61,7 +61,7 @@ class ObsoleteFilesTest : public DBTestBase {
   void CheckFileTypeCounts(const std::string& dir, int required_log,
                            int required_sst, int required_manifest) {
     std::vector<std::string> filenames;
-    env_->GetChildren(dir, &filenames);
+    ASSERT_OK(env_->GetChildren(dir, &filenames));

     int log_cnt = 0;
     int sst_cnt = 0;
@@ -76,12 +76,12 @@ TEST_F(PerfContextTest, SeekIntoDeletion) {
     std::string key = "k" + ToString(i);
     std::string value = "v" + ToString(i);

-    db->Put(write_options, key, value);
+    ASSERT_OK(db->Put(write_options, key, value));
   }

   for (int i = 0; i < FLAGS_total_keys -1 ; ++i) {
     std::string key = "k" + ToString(i);
-    db->Delete(write_options, key);
+    ASSERT_OK(db->Delete(write_options, key));
   }

   HistogramImpl hist_get;
@@ -116,10 +116,9 @@ TEST_F(PerfContextTest, SeekIntoDeletion) {
     auto elapsed_nanos = timer.ElapsedNanos();

     if (FLAGS_verbose) {
-      std::cout << "SeekToFirst uesr key comparison: \n"
-                << hist_seek_to_first.ToString()
-                << "ikey skipped: " << get_perf_context()->internal_key_skipped_count
-                << "\n"
+      std::cout << "SeekToFirst user key comparison: \n"
+                << hist_seek_to_first.ToString() << "ikey skipped: "
+                << get_perf_context()->internal_key_skipped_count << "\n"
                 << "idelete skipped: "
                 << get_perf_context()->internal_delete_skipped_count << "\n"
                 << "elapsed: " << elapsed_nanos << "\n";
@@ -156,7 +155,7 @@ TEST_F(PerfContextTest, SeekIntoDeletion) {
   }

   if (FLAGS_verbose) {
-    std::cout << "Seek uesr key comparison: \n" << hist_seek.ToString();
+    std::cout << "Seek user key comparison: \n" << hist_seek.ToString();
   }
 }

@@ -270,7 +269,7 @@ void ProfileQueries(bool enabled_time = false) {
     std::vector<std::string> values;

     get_perf_context()->Reset();
-    db->Put(write_options, key, value);
+    ASSERT_OK(db->Put(write_options, key, value));
     if (++num_mutex_waited > 3) {
 #ifndef NDEBUG
       ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0U);
@@ -314,7 +313,10 @@ void ProfileQueries(bool enabled_time = false) {
     hist_get.Add(get_perf_context()->user_key_comparison_count);

     get_perf_context()->Reset();
-    db->MultiGet(read_options, multiget_keys, &values);
+    auto statuses = db->MultiGet(read_options, multiget_keys, &values);
+    for (const auto& s : statuses) {
+      ASSERT_OK(s);
+    }
     hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time);
     hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time);
     hist_mget_files.Add(get_perf_context()->get_from_output_files_time);
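The MultiGet changes above follow from its signature: `DB::MultiGet` returns a `std::vector<Status>`, one per key, so under ASSERT_STATUS_CHECKED every element has to be inspected individually. A minimal sketch of the calling pattern outside a test (keys and values here are placeholders):

#include <cassert>
#include <string>
#include <vector>
#include "rocksdb/db.h"

void ReadTwoKeys(rocksdb::DB* db) {
  std::vector<rocksdb::Slice> keys = {"k1", "k2"};
  std::vector<std::string> values;
  std::vector<rocksdb::Status> statuses =
      db->MultiGet(rocksdb::ReadOptions(), keys, &values);
  for (size_t i = 0; i < statuses.size(); ++i) {
    if (statuses[i].IsNotFound()) continue;  // a missing key is not an error
    assert(statuses[i].ok());
  }
}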
@@ -324,9 +326,10 @@ void ProfileQueries(bool enabled_time = false) {
   }

   if (FLAGS_verbose) {
-    std::cout << "Put uesr key comparison: \n" << hist_put.ToString()
-              << "Get uesr key comparison: \n" << hist_get.ToString()
-              << "MultiGet uesr key comparison: \n" << hist_get.ToString();
+    std::cout << "Put user key comparison: \n"
+              << hist_put.ToString() << "Get user key comparison: \n"
+              << hist_get.ToString() << "MultiGet user key comparison: \n"
+              << hist_get.ToString();
     std::cout << "Put(): Pre and Post Process Time: \n"
               << hist_write_pre_post.ToString() << " Writing WAL time: \n"
               << hist_write_wal_time.ToString() << "\n"
@@ -428,7 +431,10 @@ void ProfileQueries(bool enabled_time = false) {
     hist_get.Add(get_perf_context()->user_key_comparison_count);

     get_perf_context()->Reset();
-    db->MultiGet(read_options, multiget_keys, &values);
+    auto statuses = db->MultiGet(read_options, multiget_keys, &values);
+    for (const auto& s : statuses) {
+      ASSERT_OK(s);
+    }
     hist_mget_snapshot.Add(get_perf_context()->get_snapshot_time);
     hist_mget_memtable.Add(get_perf_context()->get_from_memtable_time);
     hist_mget_files.Add(get_perf_context()->get_from_output_files_time);
@@ -438,8 +444,9 @@ void ProfileQueries(bool enabled_time = false) {
   }

   if (FLAGS_verbose) {
-    std::cout << "ReadOnly Get uesr key comparison: \n" << hist_get.ToString()
-              << "ReadOnly MultiGet uesr key comparison: \n"
+    std::cout << "ReadOnly Get user key comparison: \n"
+              << hist_get.ToString()
+              << "ReadOnly MultiGet user key comparison: \n"
               << hist_mget.ToString();

     std::cout << "ReadOnly Get(): Time to get snapshot: \n"
@@ -539,7 +546,7 @@ TEST_F(PerfContextTest, SeekKeyComparison) {

     get_perf_context()->Reset();
     timer.Start();
-    db->Put(write_options, key, value);
+    ASSERT_OK(db->Put(write_options, key, value));
     auto put_time = timer.ElapsedNanos();
     hist_put_time.Add(put_time);
     hist_wal_time.Add(get_perf_context()->write_wal_time);
@@ -573,7 +580,7 @@ TEST_F(PerfContextTest, SeekKeyComparison) {
     iter->Next();
     hist_next.Add(get_perf_context()->user_key_comparison_count);
   }
+  ASSERT_OK(iter->status());
   if (FLAGS_verbose) {
     std::cout << "Seek:\n" << hist_seek.ToString() << "Next:\n"
               << hist_next.ToString();
@@ -835,7 +842,7 @@ TEST_F(PerfContextTest, CPUTimer) {
     std::string value = "v" + i_str;
     max_str = max_str > i_str ? max_str : i_str;

-    db->Put(write_options, key, value);
+    ASSERT_OK(db->Put(write_options, key, value));
   }
   std::string last_key = "k" + max_str;
   std::string last_value = "v" + max_str;
@@ -185,7 +185,7 @@ TEST_F(PeriodicWorkSchedulerTest, MultiInstances) {
   ASSERT_EQ(expected_run, pst_st_counter);

   for (int i = half; i < kInstanceNum; i++) {
-    dbs[i]->Close();
+    ASSERT_OK(dbs[i]->Close());
     delete dbs[i];
   }
 }
@@ -217,7 +217,7 @@ TEST_F(PeriodicWorkSchedulerTest, MultiEnv) {
   ASSERT_EQ(dbi->TEST_GetPeriodicWorkScheduler(),
             dbfull()->TEST_GetPeriodicWorkScheduler());

-  db->Close();
+  ASSERT_OK(db->Close());
   delete db;
   Close();
 }
@ -34,7 +34,7 @@ TransactionLogIteratorImpl::TransactionLogIteratorImpl(
|
|||||||
io_tracer_(io_tracer) {
|
io_tracer_(io_tracer) {
|
||||||
assert(files_ != nullptr);
|
assert(files_ != nullptr);
|
||||||
assert(versions_ != nullptr);
|
assert(versions_ != nullptr);
|
||||||
|
current_status_.PermitUncheckedError(); // Clear on start
|
||||||
reporter_.env = options_->env;
|
reporter_.env = options_->env;
|
||||||
reporter_.info_log = options_->info_log.get();
|
reporter_.info_log = options_->info_log.get();
|
||||||
SeekToStartSequence(); // Seek till starting sequence
|
SeekToStartSequence(); // Seek till starting sequence
|
||||||
@ -225,7 +225,8 @@ bool TransactionLogIteratorImpl::IsBatchExpected(
|
|||||||
|
|
||||||
void TransactionLogIteratorImpl::UpdateCurrentWriteBatch(const Slice& record) {
|
void TransactionLogIteratorImpl::UpdateCurrentWriteBatch(const Slice& record) {
|
||||||
std::unique_ptr<WriteBatch> batch(new WriteBatch());
|
std::unique_ptr<WriteBatch> batch(new WriteBatch());
|
||||||
WriteBatchInternal::SetContents(batch.get(), record);
|
Status s = WriteBatchInternal::SetContents(batch.get(), record);
|
||||||
|
s.PermitUncheckedError(); // TODO: What should we do with this error?
|
||||||
|
|
||||||
SequenceNumber expected_seq = current_last_seq_ + 1;
|
SequenceNumber expected_seq = current_last_seq_ + 1;
|
||||||
// If the iterator has started, then confirm that we get continuous batches
|
// If the iterator has started, then confirm that we get continuous batches
|
||||||
|
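[Editor's note] PermitUncheckedError() appears throughout this diff. Under ASSERT_STATUS_CHECKED, a Status that is destroyed without ever being inspected is treated as a bug in debug builds; the call opts a deliberately ignored status out of that check. A self-contained model of the mechanism (simplified, not RocksDB's actual implementation):

    #include <cstdio>
    #include <cstdlib>

    // A "must check" status: the destructor aborts if the value was never
    // looked at and was never explicitly permitted to go unchecked.
    class Status {
     public:
      static Status OK() { return Status(0); }
      static Status IOError() { return Status(1); }
      Status(Status&& other) noexcept : code_(other.code_) {
        other.checked_ = true;  // moved-from object no longer owns the check
      }
      ~Status() {
        if (!checked_) {
          std::fprintf(stderr, "Status never checked\n");
          std::abort();
        }
      }
      bool ok() const {
        checked_ = true;  // inspecting the status satisfies the check
        return code_ == 0;
      }
      void PermitUncheckedError() const { checked_ = true; }

     private:
      explicit Status(int code) : code_(code) {}
      int code_;
      mutable bool checked_ = false;
    };

    int main() {
      Status ignored = Status::IOError();
      ignored.PermitUncheckedError();  // deliberate: destructor stays quiet
      Status checked = Status::OK();
      return checked.ok() ? 0 : 1;     // inspected, so no abort either
    }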
@@ -1231,7 +1231,7 @@ TEST_F(VersionSetTest, WalEditsNotAppliedToVersion) {
       [&](void* arg) { versions.push_back(reinterpret_cast<Version*>(arg)); });
   SyncPoint::GetInstance()->EnableProcessing();

-  LogAndApplyToDefaultCF(edits);
+  ASSERT_OK(LogAndApplyToDefaultCF(edits));

   SyncPoint::GetInstance()->DisableProcessing();
   SyncPoint::GetInstance()->ClearAllCallBacks();

@@ -1267,7 +1267,7 @@ TEST_F(VersionSetTest, NonWalEditsAppliedToVersion) {
       [&](void* arg) { versions.push_back(reinterpret_cast<Version*>(arg)); });
   SyncPoint::GetInstance()->EnableProcessing();

-  LogAndApplyToDefaultCF(edits);
+  ASSERT_OK(LogAndApplyToDefaultCF(edits));

   SyncPoint::GetInstance()->DisableProcessing();
   SyncPoint::GetInstance()->ClearAllCallBacks();

@@ -1674,7 +1674,7 @@ TEST_F(VersionSetTest, AtomicGroupWithWalEdits) {
   edits.back()->MarkAtomicGroup(--remaining);
   ASSERT_EQ(remaining, 0);

-  Status s = LogAndApplyToDefaultCF(edits);
+  ASSERT_OK(LogAndApplyToDefaultCF(edits));

   // Recover a new VersionSet, the min log number and the last WAL should be
   // kept.

@@ -243,9 +243,13 @@ void WalManager::PurgeObsoleteWALFiles() {

     size_t files_del_num = log_files_num - files_keep_num;
     VectorLogPtr archived_logs;
-    GetSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile);
-    if (files_del_num > archived_logs.size()) {
+    s = GetSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile);
+    if (!s.ok()) {
+      ROCKS_LOG_WARN(db_options_.info_log,
+                     "Unable to get archived WALs from: %s: %s",
+                     archival_dir.c_str(), s.ToString().c_str());
+      files_del_num = 0;
+    } else if (files_del_num > archived_logs.size()) {
       ROCKS_LOG_WARN(db_options_.info_log,
                      "Trying to delete more archived log files than "
                      "exist. Deleting all");
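[Editor's note] The PurgeObsoleteWALFiles hunk is one of the few places where the newly captured status changes behavior instead of just being asserted: if listing the archive directory fails, the code now logs a warning and sets files_del_num to 0, so the deletion loop below becomes a no-op rather than acting on an incomplete listing. The same guard pattern in isolation, with a hypothetical ListDir helper (not a RocksDB API):

    #include <string>
    #include <vector>

    struct Status {
      bool ok() const { return ok_; }
      bool ok_ = true;
    };

    // Stub for illustration: pretend the listing failed.
    Status ListDir(const std::string&, std::vector<std::string>* out) {
      out->clear();
      return Status{false};
    }

    // On a failed listing, degrade to deleting nothing rather than trusting
    // a partial result.
    size_t NumFilesToDelete(const std::string& dir, size_t requested) {
      std::vector<std::string> entries;
      Status s = ListDir(dir, &entries);
      if (!s.ok()) {
        return 0;  // the real code also logs a warning here
      }
      return requested > entries.size() ? entries.size() : requested;
    }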
@@ -69,9 +69,10 @@ class WalManagerTest : public testing::Test {
     assert(current_log_writer_.get() != nullptr);
     uint64_t seq = versions_->LastSequence() + 1;
     WriteBatch batch;
-    batch.Put(key, value);
+    ASSERT_OK(batch.Put(key, value));
     WriteBatchInternal::SetSequence(&batch, seq);
-    current_log_writer_->AddRecord(WriteBatchInternal::Contents(&batch));
+    ASSERT_OK(
+        current_log_writer_->AddRecord(WriteBatchInternal::Contents(&batch)));
     versions_->SetLastAllocatedSequence(seq);
     versions_->SetLastPublishedSequence(seq);
     versions_->SetLastSequence(seq);

@@ -140,9 +141,9 @@ TEST_F(WalManagerTest, ReadFirstRecordCache) {
   log::Writer writer(std::move(file_writer), 1,
                      db_options_.recycle_log_file_num > 0);
   WriteBatch batch;
-  batch.Put("foo", "bar");
+  ASSERT_OK(batch.Put("foo", "bar"));
   WriteBatchInternal::SetSequence(&batch, 10);
-  writer.AddRecord(WriteBatchInternal::Contents(&batch));
+  ASSERT_OK(writer.AddRecord(WriteBatchInternal::Contents(&batch)));

   // TODO(icanadi) move SpecialEnv outside of db_test, so we can reuse it here.
   // Waiting for lei to finish with db_test

@@ -167,14 +168,14 @@ namespace {
 uint64_t GetLogDirSize(std::string dir_path, Env* env) {
   uint64_t dir_size = 0;
   std::vector<std::string> files;
-  env->GetChildren(dir_path, &files);
+  EXPECT_OK(env->GetChildren(dir_path, &files));
   for (auto& f : files) {
     uint64_t number;
     FileType type;
     if (ParseFileName(f, &number, &type) && type == kWalFile) {
       std::string const file_path = dir_path + "/" + f;
       uint64_t file_size;
-      env->GetFileSize(file_path, &file_size);
+      EXPECT_OK(env->GetFileSize(file_path, &file_size));
       dir_size += file_size;
     }
   }

@@ -184,9 +185,9 @@ std::vector<std::uint64_t> ListSpecificFiles(
     Env* env, const std::string& path, const FileType expected_file_type) {
   std::vector<std::string> files;
   std::vector<uint64_t> file_numbers;
-  env->GetChildren(path, &files);
   uint64_t number;
   FileType type;
+  EXPECT_OK(env->GetChildren(path, &files));
   for (size_t i = 0; i < files.size(); ++i) {
     if (ParseFileName(files[i], &number, &type)) {
       if (type == expected_file_type) {

@@ -209,6 +210,7 @@ int CountRecords(TransactionLogIterator* iter) {
     EXPECT_OK(iter->status());
     iter->Next();
   }
+  EXPECT_OK(iter->status());
   return count;
 }
 }  // namespace
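[Editor's note] CountRecords now re-checks iter->status() once the loop exits: an iteration loop ends either because the data ran out or because the iterator hit an error, and only a final status() call tells the two apart. The same idiom against the public iterator API, as a sketch with error handling reduced to an assert:

    #include <cassert>
    #include <memory>
    #include "rocksdb/db.h"

    size_t CountKeys(rocksdb::DB* db) {
      size_t n = 0;
      std::unique_ptr<rocksdb::Iterator> it(
          db->NewIterator(rocksdb::ReadOptions()));
      for (it->SeekToFirst(); it->Valid(); it->Next()) {
        ++n;
      }
      assert(it->status().ok());  // loop may have ended on an error, not EOF
      return n;
    }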
@@ -111,7 +111,7 @@ TEST_P(WriteCallbackPTest, WriteWithCallbackTest) {

     void Put(const string& key, const string& val) {
       kvs_.push_back(std::make_pair(key, val));
-      write_batch_.Put(key, val);
+      ASSERT_OK(write_batch_.Put(key, val));
     }

     void Clear() {

@@ -319,7 +319,7 @@ TEST_P(WriteCallbackPTest, WriteWithCallbackTest) {
           DBImpl* db_impl_;
         } publish_seq_callback(db_impl);
         // seq_per_batch_ requires a natural batch separator or Noop
-        WriteBatchInternal::InsertNoop(&write_op.write_batch_);
+        ASSERT_OK(WriteBatchInternal::InsertNoop(&write_op.write_batch_));
         const size_t ONE_BATCH = 1;
         s = db_impl->WriteImpl(woptions, &write_op.write_batch_,
                                &write_op.callback_, nullptr, 0, false, nullptr,

@@ -396,8 +396,8 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) {

   WriteBatch wb;

-  wb.Put("a", "value.a");
-  wb.Delete("x");
+  ASSERT_OK(wb.Put("a", "value.a"));
+  ASSERT_OK(wb.Delete("x"));

   // Test a simple Write
   s = db->Write(write_options, &wb);

@@ -411,7 +411,7 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) {
   WriteCallbackTestWriteCallback1 callback1;
   WriteBatch wb2;

-  wb2.Put("a", "value.a2");
+  ASSERT_OK(wb2.Put("a", "value.a2"));

   s = db_impl->WriteWithCallback(write_options, &wb2, &callback1);
   ASSERT_OK(s);

@@ -425,7 +425,7 @@ TEST_F(WriteCallbackTest, WriteCallBackTest) {
   WriteCallbackTestWriteCallback2 callback2;
   WriteBatch wb3;

-  wb3.Put("a", "value.a3");
+  ASSERT_OK(wb3.Put("a", "value.a3"));

   s = db_impl->WriteWithCallback(write_options, &wb3, &callback2);
   ASSERT_NOK(s);
@@ -56,14 +56,13 @@ DeleteScheduler::~DeleteScheduler() {
 Status DeleteScheduler::DeleteFile(const std::string& file_path,
                                    const std::string& dir_to_sync,
                                    const bool force_bg) {
-  Status s;
   if (rate_bytes_per_sec_.load() <= 0 || (!force_bg &&
       total_trash_size_.load() >
           sst_file_manager_->GetTotalSize() * max_trash_db_ratio_.load())) {
     // Rate limiting is disabled or trash size makes up more than
     // max_trash_db_ratio_ (default 25%) of the total DB size
     TEST_SYNC_POINT("DeleteScheduler::DeleteFile");
-    s = fs_->DeleteFile(file_path, IOOptions(), nullptr);
+    Status s = fs_->DeleteFile(file_path, IOOptions(), nullptr);
     if (s.ok()) {
       s = sst_file_manager_->OnDeleteFile(file_path);
       ROCKS_LOG_INFO(info_log_,

@@ -79,7 +78,7 @@ Status DeleteScheduler::DeleteFile(const std::string& file_path,

   // Move file to trash
   std::string trash_file;
-  s = MarkAsTrash(file_path, &trash_file);
+  Status s = MarkAsTrash(file_path, &trash_file);
   ROCKS_LOG_INFO(info_log_, "Mark file: %s as trash -- %s", trash_file.c_str(),
                  s.ToString().c_str());

@@ -99,7 +98,10 @@ Status DeleteScheduler::DeleteFile(const std::string& file_path,

   // Update the total trash size
   uint64_t trash_file_size = 0;
-  fs_->GetFileSize(trash_file, IOOptions(), &trash_file_size, nullptr);
+  Status ignored =
+      fs_->GetFileSize(trash_file, IOOptions(), &trash_file_size, nullptr);
+  ignored.PermitUncheckedError();  //**TODO: What should we do if we failed to
+                                   // get the file size?
   total_trash_size_.fetch_add(trash_file_size);

   // Add file to delete queue

@@ -169,17 +171,17 @@ Status DeleteScheduler::MarkAsTrash(const std::string& file_path,
     return Status::InvalidArgument("file_path is corrupted");
   }

-  Status s;
   if (DeleteScheduler::IsTrashFile(file_path)) {
     // This is already a trash file
     *trash_file = file_path;
-    return s;
+    return Status::OK();
   }

   *trash_file = file_path + kTrashExtension;
   // TODO(tec) : Implement Env::RenameFileIfNotExist and remove
   // file_move_mu mutex.
   int cnt = 0;
+  Status s;
   InstrumentedMutexLock l(&file_move_mu_);
   while (true) {
     s = fs_->FileExists(*trash_file, IOOptions(), nullptr);

@@ -197,7 +199,9 @@ Status DeleteScheduler::MarkAsTrash(const std::string& file_path,
     cnt++;
   }
   if (s.ok()) {
-    sst_file_manager_->OnMoveFile(file_path, *trash_file);
+    //**TODO: What should we do if this returns an error?
+    sst_file_manager_->OnMoveFile(file_path, *trash_file)
+        .PermitUncheckedError();
   }
   return s;
 }
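[Editor's note] Besides the PermitUncheckedError() calls, the DeleteScheduler hunks retire the function-wide "Status s;" in favor of declaring each status where it is produced, or returning Status::OK() outright. A long-lived default-constructed Status that merely falls through to "return s;" obscures which call produced it and is easy to leave unchecked. The before/after shape, with a hypothetical DoStep stand-in:

    struct Status {
      static Status OK() { return Status{true}; }
      bool ok() const { return ok_; }
      bool ok_ = true;
    };

    Status DoStep() { return Status::OK(); }  // hypothetical stand-in

    // After the refactor: the trivial path returns Status::OK() directly,
    // and the real status is scoped to the call that produced it.
    Status Process(bool already_done) {
      if (already_done) {
        return Status::OK();
      }
      Status s = DoStep();  // declared at the point of use
      return s;
    }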
@@ -579,13 +579,13 @@ void TestBoundary(InternalKey& ik1, std::string& v1, InternalKey& ik2,
       0 /*uniq_id*/, ioptions.allow_mmap_reads)));
   const bool kSkipFilters = true;
   const bool kImmortal = true;
-  ioptions.table_factory->NewTableReader(
+  ASSERT_OK(ioptions.table_factory->NewTableReader(
       TableReaderOptions(ioptions, moptions.prefix_extractor.get(), soptions,
                          internal_comparator, !kSkipFilters, !kImmortal,
                          level_),
       std::move(file_reader),
       test::GetStringSinkFromLegacyWriter(file_writer.get())->contents().size(),
-      &table_reader);
+      &table_reader));
   // Search using Get()
   ReadOptions ro;

@@ -82,6 +82,8 @@ CuckooTableBuilder::CuckooTableBuilder(
   properties_.column_family_name = column_family_name;
   properties_.db_id = db_id;
   properties_.db_session_id = db_session_id;
+  status_.PermitUncheckedError();
+  io_status_.PermitUncheckedError();
 }

 void CuckooTableBuilder::Add(const Slice& key, const Slice& value) {

@@ -250,7 +252,6 @@ Status CuckooTableBuilder::Finish() {
   assert(!closed_);
   closed_ = true;
   std::vector<CuckooBucket> buckets;
-  Status s;
   std::string unused_bucket;
   if (num_entries_ > 0) {
     // Calculate the real hash size if module hash is enabled.

@@ -2016,12 +2016,16 @@ void ReduceDBLevelsCommand::DoCommand() {
   assert(db_ != nullptr);
   // Compact the whole DB to put all files to the highest level.
   fprintf(stdout, "Compacting the db...\n");
-  db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);
+  st =
+      db_->CompactRange(CompactRangeOptions(), GetCfHandle(), nullptr, nullptr);

   CloseDB();

-  EnvOptions soptions;
-  st = VersionSet::ReduceNumberOfLevels(db_path_, &options_, soptions,
-                                        new_levels_);
+  if (st.ok()) {
+    EnvOptions soptions;
+    st = VersionSet::ReduceNumberOfLevels(db_path_, &options_, soptions,
+                                          new_levels_);
+  }
   if (!st.ok()) {
     exec_state_ = LDBCommandExecuteResult::Failed(st.ToString());
     return;

@@ -107,7 +107,7 @@ bool ReduceLevelTest::ReduceLevels(int target_level) {
 TEST_F(ReduceLevelTest, Last_Level) {
   ASSERT_OK(OpenDB(true, 4));
   ASSERT_OK(Put("aaaa", "11111"));
-  Flush();
+  ASSERT_OK(Flush());
   MoveL0FileToLevel(3);
   ASSERT_EQ(FilesOnLevel(3), 1);
   CloseDB();

@@ -126,7 +126,7 @@ TEST_F(ReduceLevelTest, Last_Level) {
 TEST_F(ReduceLevelTest, Top_Level) {
   ASSERT_OK(OpenDB(true, 5));
   ASSERT_OK(Put("aaaa", "11111"));
-  Flush();
+  ASSERT_OK(Flush());
   ASSERT_EQ(FilesOnLevel(0), 1);
   CloseDB();

@@ -111,7 +111,8 @@ void GenericRateLimiter::Request(int64_t bytes, const Env::IOPriority pri,
     std::chrono::microseconds now(NowMicrosMonotonic(env_));
     if (now - tuned_time_ >=
         kRefillsPerTune * std::chrono::microseconds(refill_period_us_)) {
-      Tune();
+      Status s = Tune();
+      s.PermitUncheckedError();  //**TODO: What to do on error?
     }
   }
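[Editor's note] The CuckooTableBuilder constructor above pre-marks its member status_ and io_status_ as checked. A builder can be constructed and destroyed without anyone ever reading those members (both start out OK), which would otherwise trip the unchecked-status detector on destruction. A sketch of the pattern, reusing the simplified check-on-destruction Status modeled earlier and assumed to expose PermitUncheckedError():

    // Assumes a Status with PermitUncheckedError(), as sketched above.
    class TableBuilderSketch {
     public:
      TableBuilderSketch() {
        // Members may be destroyed unread if the builder is never used,
        // so clear their "must check" obligation up front.
        status_.PermitUncheckedError();
        io_status_.PermitUncheckedError();
      }
      bool Ok() const { return status_.ok() && io_status_.ok(); }

     private:
      Status status_;
      Status io_status_;
    };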
@@ -145,7 +145,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
   std::vector<ThreadStatus> thread_list;

   // Verify the number of running threads in each pool.
-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   int running_count[ThreadStatus::NUM_THREAD_TYPES] = {0};
   for (auto thread_status : thread_list) {
     if (thread_status.cf_name == "pikachu" &&

@@ -166,7 +166,7 @@ TEST_F(ThreadListTest, SimpleColumnFamilyInfoTest) {
   running_task.WaitUntilDone();

   // Verify none of the threads are running
-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));

   for (int i = 0; i < ThreadStatus::NUM_THREAD_TYPES; ++i) {
     running_count[i] = 0;

@@ -281,7 +281,7 @@ TEST_F(ThreadListTest, SimpleEventTest) {
   int state_counts[ThreadStatus::NUM_STATE_TYPES] = {0};

   std::vector<ThreadStatus> thread_list;
-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   UpdateStatusCounts(thread_list, operation_counts, state_counts);
   VerifyAndResetCounts(correct_operation_counts, operation_counts,
                        ThreadStatus::NUM_OP_TYPES);

@@ -293,7 +293,7 @@ TEST_F(ThreadListTest, SimpleEventTest) {
   UpdateCount(correct_operation_counts, ThreadStatus::OP_COMPACTION,
               ThreadStatus::OP_UNKNOWN, kCompactionWaitTasks);

-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   UpdateStatusCounts(thread_list, operation_counts, state_counts);
   VerifyAndResetCounts(correct_operation_counts, operation_counts,
                        ThreadStatus::NUM_OP_TYPES);

@@ -305,7 +305,7 @@ TEST_F(ThreadListTest, SimpleEventTest) {
   UpdateCount(correct_operation_counts, ThreadStatus::OP_FLUSH,
               ThreadStatus::OP_UNKNOWN, kFlushWriteTasks);

-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   UpdateStatusCounts(thread_list, operation_counts, state_counts);
   VerifyAndResetCounts(correct_operation_counts, operation_counts,
                        ThreadStatus::NUM_OP_TYPES);

@@ -317,7 +317,7 @@ TEST_F(ThreadListTest, SimpleEventTest) {
   UpdateCount(correct_operation_counts, ThreadStatus::OP_COMPACTION,
               ThreadStatus::OP_UNKNOWN, kCompactionWriteTasks);

-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   UpdateStatusCounts(thread_list, operation_counts, state_counts);
   VerifyAndResetCounts(correct_operation_counts, operation_counts,
                        ThreadStatus::NUM_OP_TYPES);

@@ -329,7 +329,7 @@ TEST_F(ThreadListTest, SimpleEventTest) {
   UpdateCount(correct_operation_counts, ThreadStatus::OP_COMPACTION,
               ThreadStatus::OP_UNKNOWN, kCompactionReadTasks);

-  env->GetThreadList(&thread_list);
+  ASSERT_OK(env->GetThreadList(&thread_list));
   UpdateStatusCounts(thread_list, operation_counts, state_counts);
   VerifyAndResetCounts(correct_operation_counts, operation_counts,
                        ThreadStatus::NUM_OP_TYPES);
@@ -58,14 +58,17 @@ class CassandraStore {
     }
   }

-  void Flush() {
-    dbfull()->TEST_FlushMemTable();
-    dbfull()->TEST_WaitForCompact();
+  Status Flush() {
+    Status s = dbfull()->TEST_FlushMemTable();
+    if (s.ok()) {
+      s = dbfull()->TEST_WaitForCompact();
+    }
+    return s;
   }

-  void Compact() {
-    dbfull()->TEST_CompactRange(
-        0, nullptr, nullptr, db_->DefaultColumnFamily());
+  Status Compact() {
+    return dbfull()->TEST_CompactRange(0, nullptr, nullptr,
+                                       db_->DefaultColumnFamily());
   }

   std::tuple<bool, RowValue> Get(const std::string& key){
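[Editor's note] CassandraStore::Flush() and Compact() change from void helpers that dropped their statuses to Status-returning helpers, which is what lets every call site in the hunks below be wrapped in ASSERT_OK. The refactor shape in isolation, with hypothetical FlushImpl/WaitImpl stand-ins for the TEST_ hooks:

    struct Status {
      static Status OK() { return Status{true}; }
      bool ok() const { return ok_; }
      bool ok_ = true;
    };

    Status FlushImpl() { return Status::OK(); }  // hypothetical
    Status WaitImpl() { return Status::OK(); }   // hypothetical

    // Before: void Flush() { FlushImpl(); WaitImpl(); }  -- both lost.
    // After: stop at the first failure and hand the result to the caller.
    Status Flush() {
      Status s = FlushImpl();
      if (s.ok()) {
        s = WaitImpl();
      }
      return s;
    }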
@@ -189,15 +192,15 @@ TEST_F(CassandraFunctionalTest,
     CreateTestColumnSpec(kTombstone, 3, ToMicroSeconds(now))
   }));

-  store.Flush();
+  ASSERT_OK(store.Flush());

   store.Append("k1",CreateTestRowValue({
     CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), //expired
     CreateTestColumnSpec(kColumn, 2, ToMicroSeconds(now))
   }));

-  store.Flush();
-  store.Compact();
+  ASSERT_OK(store.Flush());
+  ASSERT_OK(store.Compact());

   auto ret = store.Get("k1");
   ASSERT_TRUE(std::get<0>(ret));

@@ -226,15 +229,15 @@ TEST_F(CassandraFunctionalTest,
     CreateTestColumnSpec(kTombstone, 3, ToMicroSeconds(now))
   }));

-  store.Flush();
+  ASSERT_OK(store.Flush());

   store.Append("k1",CreateTestRowValue({
     CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)), //expired
     CreateTestColumnSpec(kColumn, 2, ToMicroSeconds(now))
   }));

-  store.Flush();
-  store.Compact();
+  ASSERT_OK(store.Flush());
+  ASSERT_OK(store.Compact());

   auto ret = store.Get("k1");
   ASSERT_TRUE(std::get<0>(ret));

@@ -259,14 +262,14 @@ TEST_F(CassandraFunctionalTest,
     CreateTestColumnSpec(kExpiringColumn, 1, ToMicroSeconds(now - kTtl - 20)),
   }));

-  store.Flush();
+  ASSERT_OK(store.Flush());

   store.Append("k1",CreateTestRowValue({
     CreateTestColumnSpec(kExpiringColumn, 0, ToMicroSeconds(now - kTtl - 10)),
   }));

-  store.Flush();
-  store.Compact();
+  ASSERT_OK(store.Flush());
+  ASSERT_OK(store.Compact());
   ASSERT_FALSE(std::get<0>(store.Get("k1")));
 }

@@ -285,14 +288,14 @@ TEST_F(CassandraFunctionalTest,
     CreateTestColumnSpec(kColumn, 0, ToMicroSeconds(now))
   }));

-  store.Flush();
+  ASSERT_OK(store.Flush());

   store.Append("k1",CreateTestRowValue({
     CreateTestColumnSpec(kColumn, 1, ToMicroSeconds(now)),
   }));

-  store.Flush();
-  store.Compact();
+  ASSERT_OK(store.Flush());
+  ASSERT_OK(store.Compact());

   auto ret = store.Get("k1");
   ASSERT_TRUE(std::get<0>(ret));

@@ -310,8 +313,8 @@ TEST_F(CassandraFunctionalTest, CompactionShouldRemoveTombstoneFromPut) {
     CreateTestColumnSpec(kTombstone, 0, ToMicroSeconds(now - gc_grace_period_in_seconds_ - 1)),
   }));

-  store.Flush();
-  store.Compact();
+  ASSERT_OK(store.Flush());
+  ASSERT_OK(store.Compact());
   ASSERT_FALSE(std::get<0>(store.Get("k1")));
 }
@@ -60,9 +60,9 @@ Status CompactToLevel(const Options& options, const std::string& dbname,
     // generate one output file
     cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
   }
-  db->CompactRange(cro, nullptr, nullptr);
+  s = db->CompactRange(cro, nullptr, nullptr);

-  if (need_reopen) {
+  if (s.ok() && need_reopen) {
     // Need to restart DB to rewrite the manifest file.
     // In order to open a DB with specific num_levels, the manifest file should
     // contain no record that mentiones any level beyond num_levels. Issuing a

@@ -72,8 +72,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate1) {
   for (int num = 0; num < 20; num++) {
     GenerateNewFile(&rnd, &key_idx);
   }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   // Will make sure exactly those keys are in the DB after migration.
   std::set<std::string> keys;

@@ -100,8 +100,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate1) {
   Reopen(new_options);

   // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   Reopen(new_options);

   {

@@ -140,8 +140,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate2) {
   for (int num = 0; num < 20; num++) {
     GenerateNewFile(&rnd, &key_idx);
   }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   // Will make sure exactly those keys are in the DB after migration.
   std::set<std::string> keys;

@@ -168,8 +168,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate2) {
   ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
   Reopen(new_options);
   // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   Reopen(new_options);

   {

@@ -207,16 +207,16 @@ TEST_P(DBOptionChangeMigrationTests, Migrate3) {
       ASSERT_OK(Put(Key(num * 100 + i), rnd.RandomString(900)));
     }
     Flush();
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
     if (num == 9) {
       // Issue a full compaction to generate some zero-out files
       CompactRangeOptions cro;
       cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-      dbfull()->CompactRange(cro, nullptr, nullptr);
+      ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr));
     }
   }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   // Will make sure exactly those keys are in the DB after migration.
   std::set<std::string> keys;

@@ -243,8 +243,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate3) {
   Reopen(new_options);

   // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   Reopen(new_options);

   {

@@ -281,16 +281,16 @@ TEST_P(DBOptionChangeMigrationTests, Migrate4) {
       ASSERT_OK(Put(Key(num * 100 + i), rnd.RandomString(900)));
     }
     Flush();
-    dbfull()->TEST_WaitForCompact();
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
     if (num == 9) {
       // Issue a full compaction to generate some zero-out files
       CompactRangeOptions cro;
       cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-      dbfull()->CompactRange(cro, nullptr, nullptr);
+      ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr));
     }
   }
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());

   // Will make sure exactly those keys are in the DB after migration.
   std::set<std::string> keys;

@@ -317,8 +317,8 @@ TEST_P(DBOptionChangeMigrationTests, Migrate4) {
   ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
   Reopen(new_options);
   // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   Reopen(new_options);

   {

@@ -381,7 +381,7 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) {
   Flush();
   CompactRangeOptions cro;
   cro.bottommost_level_compaction = BottommostLevelCompaction::kForce;
-  dbfull()->CompactRange(cro, nullptr, nullptr);
+  ASSERT_OK(dbfull()->CompactRange(cro, nullptr, nullptr));

   // Will make sure exactly those keys are in the DB after migration.
   std::set<std::string> keys;

@@ -404,8 +404,8 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) {
   ASSERT_OK(OptionChangeMigration(dbname_, old_options, new_options));
   Reopen(new_options);
   // Wait for compaction to finish and make sure it can reopen
-  dbfull()->TEST_WaitForFlushMemTable();
-  dbfull()->TEST_WaitForCompact();
+  ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+  ASSERT_OK(dbfull()->TEST_WaitForCompact());
   Reopen(new_options);

   {

@@ -417,6 +417,7 @@ TEST_F(DBOptionChangeMigrationTest, CompactedSrcToUniversal) {
       it->Next();
     }
     ASSERT_TRUE(!it->Valid());
+    ASSERT_OK(it->status());
   }
 }
@@ -38,10 +38,11 @@ TEST(CompactOnDeletionCollector, DeletionRatio) {
       factory->CreateTablePropertiesCollector(context));
   for (size_t i = 0; i < kTotalEntries; i++) {
     // All entries are deletion entries.
-    collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0);
+    ASSERT_OK(
+        collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0));
     ASSERT_FALSE(collector->NeedCompact());
   }
-  collector->Finish(nullptr);
+  ASSERT_OK(collector->Finish(nullptr));
   ASSERT_FALSE(collector->NeedCompact());
 }
 }

@@ -58,13 +59,15 @@ TEST(CompactOnDeletionCollector, DeletionRatio) {
       factory->CreateTablePropertiesCollector(context));
   for (size_t i = 0; i < kTotalEntries; i++) {
     if (i < actual_deletion_entries) {
-      collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0);
+      ASSERT_OK(
+          collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0));
     } else {
-      collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0);
+      ASSERT_OK(
+          collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0));
     }
     ASSERT_FALSE(collector->NeedCompact());
   }
-  collector->Finish(nullptr);
+  ASSERT_OK(collector->Finish(nullptr));
   if (delta >= 0) {
     // >= deletion_ratio
     ASSERT_TRUE(collector->NeedCompact());

@@ -123,10 +126,12 @@ TEST(CompactOnDeletionCollector, SlidingWindow) {
     int deletions = 0;
     for (int i = 0; i < kPaddedWindowSize; ++i) {
       if (i % kSample < delete_rate) {
-        collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0);
+        ASSERT_OK(
+            collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0));
         deletions++;
       } else {
-        collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0);
+        ASSERT_OK(
+            collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0));
      }
    }
    if (collector->NeedCompact() !=

@@ -138,7 +143,7 @@ TEST(CompactOnDeletionCollector, SlidingWindow) {
               kWindowSize, kNumDeletionTrigger);
       ASSERT_TRUE(false);
     }
-    collector->Finish(nullptr);
+    ASSERT_OK(collector->Finish(nullptr));
   }
 }

@@ -154,21 +159,25 @@ TEST(CompactOnDeletionCollector, SlidingWindow) {
     for (int section = 0; section < 5; ++section) {
      int initial_entries = rnd.Uniform(kWindowSize) + kWindowSize;
      for (int i = 0; i < initial_entries; ++i) {
-        collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0);
+        ASSERT_OK(
+            collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0));
      }
    }
    for (int i = 0; i < kPaddedWindowSize; ++i) {
      if (i % kSample < delete_rate) {
-        collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0);
+        ASSERT_OK(
+            collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0));
        deletions++;
      } else {
-        collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0);
+        ASSERT_OK(
+            collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0));
      }
    }
    for (int section = 0; section < 5; ++section) {
      int ending_entries = rnd.Uniform(kWindowSize) + kWindowSize;
      for (int i = 0; i < ending_entries; ++i) {
-        collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0);
+        ASSERT_OK(
+            collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0));
      }
    }
    if (collector->NeedCompact() != (deletions >= kNumDeletionTrigger) &&

@@ -180,7 +189,7 @@ TEST(CompactOnDeletionCollector, SlidingWindow) {
               kNumDeletionTrigger);
       ASSERT_TRUE(false);
     }
-    collector->Finish(nullptr);
+    ASSERT_OK(collector->Finish(nullptr));
   }
 }

@@ -199,9 +208,11 @@ TEST(CompactOnDeletionCollector, SlidingWindow) {
     for (int section = 0; section < 200; ++section) {
       for (int i = 0; i < kPaddedWindowSize; ++i) {
         if (i < kDeletionsPerSection) {
-          collector->AddUserKey("hello", "rocksdb", kEntryDelete, 0, 0);
+          ASSERT_OK(collector->AddUserKey("hello", "rocksdb", kEntryDelete,
+                                          0, 0));
         } else {
-          collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0);
+          ASSERT_OK(
+              collector->AddUserKey("hello", "rocksdb", kEntryPut, 0, 0));
         }
       }
     }

@@ -212,7 +223,7 @@ TEST(CompactOnDeletionCollector, SlidingWindow) {
               kWindowSize, kNumDeletionTrigger);
       ASSERT_TRUE(false);
     }
-    collector->Finish(nullptr);
+    ASSERT_OK(collector->Finish(nullptr));
   }
 }
 }
|
@ -208,10 +208,10 @@ Status DBWithTTLImpl::Put(const WriteOptions& options,
|
|||||||
const Slice& val) {
|
const Slice& val) {
|
||||||
WriteBatch batch;
|
WriteBatch batch;
|
||||||
Status st = batch.Put(column_family, key, val);
|
Status st = batch.Put(column_family, key, val);
|
||||||
if (!st.ok()) {
|
if (st.ok()) {
|
||||||
return st;
|
st = Write(options, &batch);
|
||||||
}
|
}
|
||||||
return Write(options, &batch);
|
return st;
|
||||||
}
|
}
|
||||||
|
|
||||||
Status DBWithTTLImpl::Get(const ReadOptions& options,
|
Status DBWithTTLImpl::Get(const ReadOptions& options,
|
||||||
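[Editor's note] The Put() rewrite above (and the Merge() rewrite below) is behavior-preserving: the batch is still only written when batch.Put()/batch.Merge() succeeded, but the function now has a single exit, so exactly one Status crosses the boundary. A tiny demo of the control flow, with hypothetical stand-ins:

    #include <cstdio>

    struct Status {
      bool ok() const { return ok_; }
      bool ok_ = true;
    };

    Status BatchPut(bool fail) { std::puts("batch.Put"); return Status{!fail}; }
    Status DoWrite() { std::puts("Write"); return Status{true}; }

    Status PutShape(bool fail_batch) {
      Status st = BatchPut(fail_batch);
      if (st.ok()) {
        st = DoWrite();  // runs only when the batch was built successfully
      }
      return st;
    }

    int main() {
      PutShape(false).ok();  // prints: batch.Put, Write
      PutShape(true).ok();   // prints: batch.Put   (Write is skipped)
      return 0;
    }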
@@ -264,10 +264,10 @@ Status DBWithTTLImpl::Merge(const WriteOptions& options,
                             const Slice& value) {
   WriteBatch batch;
   Status st = batch.Merge(column_family, key, value);
-  if (!st.ok()) {
-    return st;
+  if (st.ok()) {
+    st = Write(options, &batch);
   }
-  return Write(options, &batch);
+  return st;
 }

 Status DBWithTTLImpl::Write(const WriteOptions& opts, WriteBatch* updates) {

@@ -27,7 +27,7 @@ enum BatchOperation { OP_PUT = 0, OP_DELETE = 1 };
 class SpecialTimeEnv : public EnvWrapper {
  public:
  explicit SpecialTimeEnv(Env* base) : EnvWrapper(base) {
-    base->GetCurrentTime(&current_time_);
+    EXPECT_OK(base->GetCurrentTime(&current_time_));
  }

  void Sleep(int64_t sleep_time) { current_time_ += sleep_time; }