From ce60d0cbe5ef23d28389af4972fc67c1a931021f Mon Sep 17 00:00:00 2001 From: Peter Dillinger Date: Fri, 4 Mar 2022 12:32:30 -0800 Subject: [PATCH] Test refactoring for Backups+Temperatures (#9655) Summary: In preparation for more support for file Temperatures in BackupEngine, this change does some test refactoring: * Move DBTest2::BackupFileTemperature test to BackupEngineTest::FileTemperatures, with some updates to make it work in the new home. This test will soon be expanded for deeper backup work. * Move FileTemperatureTestFS from db_test2.cc to db_test_util.h, to support sharing because of above moved test, but split off the "no link" part to the test needing it. * Use custom FileSystems in backupable_db_test rather than custom Envs, because going through Env file interfaces doesn't support temperatures. * Fix RemapFileSystem to map DirFsyncOptions::renamed_new_name parameter to FsyncWithDirOptions, which was required because this limitation caused a crash only after moving to higher fidelity of FileSystem interface (vs. LegacyDirectoryWrapper throwing away some parameter details) * `backupable_options_` -> `engine_options_` as part of the ongoing work to get rid of the obsolete "backupable" naming. Pull Request resolved: https://github.com/facebook/rocksdb/pull/9655 Test Plan: test code updates only Reviewed By: jay-zhuang Differential Revision: D34622183 Pulled By: pdillinger fbshipit-source-id: f24b7a596a89b9e089e960f4e5d772575513e93f --- db/db_test2.cc | 127 +---- db/db_test_util.h | 44 ++ env/fs_remap.cc | 39 +- utilities/backupable/backupable_db_test.cc | 510 ++++++++++++--------- 4 files changed, 389 insertions(+), 331 deletions(-) diff --git a/db/db_test2.cc b/db/db_test2.cc index 7cecc2911..da9975ed2 100644 --- a/db/db_test2.cc +++ b/db/db_test2.cc @@ -21,7 +21,6 @@ #include "rocksdb/persistent_cache.h" #include "rocksdb/trace_record.h" #include "rocksdb/trace_record_result.h" -#include "rocksdb/utilities/backup_engine.h" #include "rocksdb/utilities/replayer.h" #include "rocksdb/wal_filter.h" #include "test_util/testutil.h" @@ -6909,126 +6908,18 @@ TEST_F(DBTest2, LastLevelStatistics) { options.statistics->getTickerCount(WARM_FILE_READ_COUNT)); } -class FileTemperatureTestFS : public FileSystemWrapper { - public: - explicit FileTemperatureTestFS(SpecialEnv* env) - : FileSystemWrapper(env->GetFileSystem()) {} +TEST_F(DBTest2, CheckpointFileTemperature) { + class NoLinkTestFS : public FileTemperatureTestFS { + using FileTemperatureTestFS::FileTemperatureTestFS; - static const char* kClassName() { return "TestFileSystem"; } - const char* Name() const override { return kClassName(); } - - IOStatus NewSequentialFile(const std::string& fname, const FileOptions& opts, - std::unique_ptr* result, - IODebugContext* dbg) override { - auto filename = GetFileName(fname); - uint64_t number; - FileType type; - auto r = ParseFileName(filename, &number, &type); - assert(r); - if (type == kTableFile) { - auto emplaced = - requested_sst_file_temperatures_.emplace(number, opts.temperature); - assert(emplaced.second); // assume no duplication - } - return target()->NewSequentialFile(fname, opts, result, dbg); - } - - IOStatus LinkFile(const std::string& s, const std::string& t, - const IOOptions& options, IODebugContext* dbg) override { - auto filename = GetFileName(s); - uint64_t number; - FileType type; - auto r = ParseFileName(filename, &number, &type); - assert(r); - // return not supported to force checkpoint copy the file instead of just - // link - if (type == kTableFile) { + IOStatus 
LinkFile(const std::string&, const std::string&, const IOOptions&, + IODebugContext*) override { + // return not supported to force checkpoint copy the file instead of just + // link return IOStatus::NotSupported(); } - return target()->LinkFile(s, t, options, dbg); - } - - const std::map& RequestedSstFileTemperatures() { - return requested_sst_file_temperatures_; - } - - void ClearRequestedFileTemperatures() { - requested_sst_file_temperatures_.clear(); - } - - private: - std::map requested_sst_file_temperatures_; - - std::string GetFileName(const std::string& fname) { - auto filename = fname.substr(fname.find_last_of(kFilePathSeparator) + 1); - // workaround only for Windows that the file path could contain both Windows - // FilePathSeparator and '/' - filename = filename.substr(filename.find_last_of('/') + 1); - return filename; - } -}; - -TEST_F(DBTest2, BackupFileTemperature) { - std::shared_ptr test_fs = - std::make_shared(env_); - std::unique_ptr backup_env(new CompositeEnvWrapper(env_, test_fs)); - Options options = CurrentOptions(); - options.bottommost_temperature = Temperature::kWarm; - options.level0_file_num_compaction_trigger = 2; - Reopen(options); - - // generate a bottommost file and a non-bottommost file - ASSERT_OK(Put("foo", "bar")); - ASSERT_OK(Put("bar", "bar")); - ASSERT_OK(Flush()); - ASSERT_OK(Put("foo", "bar")); - ASSERT_OK(Put("bar", "bar")); - ASSERT_OK(Flush()); - ASSERT_OK(dbfull()->TEST_WaitForCompact()); - ASSERT_OK(Put("foo", "bar")); - ASSERT_OK(Put("bar", "bar")); - ASSERT_OK(Flush()); - auto size = GetSstSizeHelper(Temperature::kWarm); - ASSERT_GT(size, 0); - - std::map temperatures; - std::vector infos; - ASSERT_OK( - dbfull()->GetLiveFilesStorageInfo(LiveFilesStorageInfoOptions(), &infos)); - for (auto info : infos) { - temperatures.emplace(info.file_number, info.temperature); - } - - std::unique_ptr backup_engine; - { - BackupEngine* backup_engine_raw_ptr; - auto backup_options = BackupEngineOptions( - dbname_ + kFilePathSeparator + "tempbk", backup_env.get()); - ASSERT_OK(BackupEngine::Open(backup_env.get(), backup_options, - &backup_engine_raw_ptr)); - backup_engine.reset(backup_engine_raw_ptr); - } - ASSERT_OK(backup_engine->CreateNewBackup(db_)); - - // checking src file src_temperature hints: 2 sst files: 1 sst is kWarm, - // another is kUnknown - auto file_temperatures = test_fs->RequestedSstFileTemperatures(); - ASSERT_EQ(file_temperatures.size(), 2); - bool has_only_one_warm_sst = false; - for (const auto& file_temperature : file_temperatures) { - ASSERT_EQ(temperatures.at(file_temperature.first), file_temperature.second); - if (file_temperature.second == Temperature::kWarm) { - ASSERT_FALSE(has_only_one_warm_sst); - has_only_one_warm_sst = true; - } - } - ASSERT_TRUE(has_only_one_warm_sst); - Close(); -} - -TEST_F(DBTest2, CheckpointFileTemperature) { - std::shared_ptr test_fs = - std::make_shared(env_); + }; + auto test_fs = std::make_shared(env_->GetFileSystem()); std::unique_ptr env(new CompositeEnvWrapper(env_, test_fs)); Options options = CurrentOptions(); options.bottommost_temperature = Temperature::kWarm; diff --git a/db/db_test_util.h b/db/db_test_util.h index affa2ba38..548c4c1ab 100644 --- a/db/db_test_util.h +++ b/db/db_test_util.h @@ -687,6 +687,50 @@ class SpecialEnv : public EnvWrapper { }; #ifndef ROCKSDB_LITE +class FileTemperatureTestFS : public FileSystemWrapper { + public: + explicit FileTemperatureTestFS(const std::shared_ptr& fs) + : FileSystemWrapper(fs) {} + + static const char* kClassName() { return 
"FileTemperatureTestFS"; } + const char* Name() const override { return kClassName(); } + + IOStatus NewSequentialFile(const std::string& fname, const FileOptions& opts, + std::unique_ptr* result, + IODebugContext* dbg) override { + auto filename = GetFileName(fname); + uint64_t number; + FileType type; + auto r = ParseFileName(filename, &number, &type); + assert(r); + if (type == kTableFile) { + auto emplaced = + requested_sst_file_temperatures_.emplace(number, opts.temperature); + assert(emplaced.second); // assume no duplication + } + return target()->NewSequentialFile(fname, opts, result, dbg); + } + + const std::map& RequestedSstFileTemperatures() { + return requested_sst_file_temperatures_; + } + + void ClearRequestedFileTemperatures() { + requested_sst_file_temperatures_.clear(); + } + + protected: + std::map requested_sst_file_temperatures_; + + std::string GetFileName(const std::string& fname) { + auto filename = fname.substr(fname.find_last_of(kFilePathSeparator) + 1); + // workaround only for Windows that the file path could contain both Windows + // FilePathSeparator and '/' + filename = filename.substr(filename.find_last_of('/') + 1); + return filename; + } +}; + class OnFileDeletionListener : public EventListener { public: OnFileDeletionListener() : matched_count_(0), expected_file_name_("") {} diff --git a/env/fs_remap.cc b/env/fs_remap.cc index 026f83cd1..b75121f44 100644 --- a/env/fs_remap.cc +++ b/env/fs_remap.cc @@ -110,12 +110,45 @@ IOStatus RemapFileSystem::NewDirectory(const std::string& dir, const IOOptions& options, std::unique_ptr* result, IODebugContext* dbg) { + // A hassle to remap DirFsyncOptions::renamed_new_name + class RemapFSDirectory : public FSDirectoryWrapper { + public: + RemapFSDirectory(RemapFileSystem* fs, std::unique_ptr&& t) + : FSDirectoryWrapper(std::move(t)), fs_(fs) {} + IOStatus FsyncWithDirOptions( + const IOOptions& options, IODebugContext* dbg, + const DirFsyncOptions& dir_fsync_options) override { + if (dir_fsync_options.renamed_new_name.empty()) { + return FSDirectoryWrapper::FsyncWithDirOptions(options, dbg, + dir_fsync_options); + } else { + auto status_and_enc_path = + fs_->EncodePath(dir_fsync_options.renamed_new_name); + if (status_and_enc_path.first.ok()) { + DirFsyncOptions mapped_options = dir_fsync_options; + mapped_options.renamed_new_name = status_and_enc_path.second; + return FSDirectoryWrapper::FsyncWithDirOptions(options, dbg, + mapped_options); + } else { + return status_and_enc_path.first; + } + } + } + + private: + RemapFileSystem* const fs_; + }; + auto status_and_enc_path = EncodePathWithNewBasename(dir); if (!status_and_enc_path.first.ok()) { return status_and_enc_path.first; } - return FileSystemWrapper::NewDirectory(status_and_enc_path.second, options, - result, dbg); + IOStatus ios = FileSystemWrapper::NewDirectory(status_and_enc_path.second, + options, result, dbg); + if (ios.ok()) { + *result = std::make_unique(this, std::move(*result)); + } + return ios; } IOStatus RemapFileSystem::FileExists(const std::string& fname, @@ -293,7 +326,7 @@ IOStatus RemapFileSystem::GetAbsolutePath(const std::string& db_path, const IOOptions& options, std::string* output_path, IODebugContext* dbg) { - auto status_and_enc_path = EncodePath(db_path); + auto status_and_enc_path = EncodePathWithNewBasename(db_path); if (!status_and_enc_path.first.ok()) { return status_and_enc_path.first; } diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc index f8ff2b34e..2b71f0b7e 100644 --- 
a/utilities/backupable/backupable_db_test.cc +++ b/utilities/backupable/backupable_db_test.cc @@ -22,6 +22,7 @@ #include "db/db_impl/db_impl.h" #include "db/db_test_util.h" +#include "env/composite_env_wrapper.h" #include "env/env_chroot.h" #include "file/filename.h" #include "port/port.h" @@ -141,13 +142,13 @@ class DummyDB : public StackableDB { info.file_number = number; info.file_type = type; if (type == kDescriptorFile) { - info.size = 100; // See TestEnv::GetChildrenFileAttributes below + info.size = 100; // See TestFs::GetChildrenFileAttributes below info.trim_to_size = true; } else if (type == kCurrentFile) { info.size = 0; info.trim_to_size = true; } else { - info.size = 200; // See TestEnv::GetChildrenFileAttributes below + info.size = 200; // See TestFs::GetChildrenFileAttributes below } if (opts.include_checksum_info) { info.file_checksum = kUnknownFileChecksum; @@ -169,18 +170,20 @@ class DummyDB : public StackableDB { mutable SequenceNumber sequence_number_; }; // DummyDB -class TestEnv : public EnvWrapper { +class TestFs : public FileSystemWrapper { public: - explicit TestEnv(Env* t) : EnvWrapper(t) {} - const char* Name() const override { return "TestEnv"; } + explicit TestFs(const std::shared_ptr& t) + : FileSystemWrapper(t) {} + const char* Name() const override { return "TestFs"; } - class DummySequentialFile : public SequentialFile { + class DummySequentialFile : public FSSequentialFile { public: explicit DummySequentialFile(bool fail_reads) - : SequentialFile(), rnd_(5), fail_reads_(fail_reads) {} - Status Read(size_t n, Slice* result, char* scratch) override { + : FSSequentialFile(), rnd_(5), fail_reads_(fail_reads) {} + IOStatus Read(size_t n, const IOOptions&, Slice* result, char* scratch, + IODebugContext*) override { if (fail_reads_) { - return Status::IOError(); + return IOStatus::IOError(); } size_t read_size = (n > size_left) ? size_left : n; for (size_t i = 0; i < read_size; ++i) { @@ -188,12 +191,12 @@ class TestEnv : public EnvWrapper { } *result = Slice(scratch, read_size); size_left -= read_size; - return Status::OK(); + return IOStatus::OK(); } - Status Skip(uint64_t n) override { + IOStatus Skip(uint64_t n) override { size_left = (n > size_left) ? 
size_left - n : 0; - return Status::OK(); + return IOStatus::OK(); } private: @@ -202,16 +205,16 @@ class TestEnv : public EnvWrapper { bool fail_reads_; }; - Status NewSequentialFile(const std::string& f, - std::unique_ptr* r, - const EnvOptions& options) override { + IOStatus NewSequentialFile(const std::string& f, const FileOptions& file_opts, + std::unique_ptr* r, + IODebugContext* dbg) override { MutexLock l(&mutex_); if (dummy_sequential_file_) { r->reset( - new TestEnv::DummySequentialFile(dummy_sequential_file_fail_reads_)); - return Status::OK(); + new TestFs::DummySequentialFile(dummy_sequential_file_fail_reads_)); + return IOStatus::OK(); } else { - Status s = EnvWrapper::NewSequentialFile(f, r, options); + IOStatus s = FileSystemWrapper::NewSequentialFile(f, file_opts, r, dbg); if (s.ok()) { if ((*r)->use_direct_io()) { ++num_direct_seq_readers_; @@ -222,15 +225,16 @@ class TestEnv : public EnvWrapper { } } - Status NewWritableFile(const std::string& f, std::unique_ptr* r, - const EnvOptions& options) override { + IOStatus NewWritableFile(const std::string& f, const FileOptions& file_opts, + std::unique_ptr* r, + IODebugContext* dbg) override { MutexLock l(&mutex_); written_files_.push_back(f); if (limit_written_files_ <= 0) { - return Status::NotSupported("Sorry, can't do this"); + return IOStatus::NotSupported("Sorry, can't do this"); } limit_written_files_--; - Status s = EnvWrapper::NewWritableFile(f, r, options); + IOStatus s = FileSystemWrapper::NewWritableFile(f, file_opts, r, dbg); if (s.ok()) { if ((*r)->use_direct_io()) { ++num_direct_writers_; @@ -240,13 +244,14 @@ class TestEnv : public EnvWrapper { return s; } - Status NewRandomAccessFile(const std::string& fname, - std::unique_ptr* result, - const EnvOptions& options) override { + IOStatus NewRandomAccessFile(const std::string& f, + const FileOptions& file_opts, + std::unique_ptr* r, + IODebugContext* dbg) override { MutexLock l(&mutex_); - Status s = EnvWrapper::NewRandomAccessFile(fname, result, options); + IOStatus s = FileSystemWrapper::NewRandomAccessFile(f, file_opts, r, dbg); if (s.ok()) { - if ((*result)->use_direct_io()) { + if ((*r)->use_direct_io()) { ++num_direct_rand_readers_; } ++num_rand_readers_; @@ -254,22 +259,24 @@ class TestEnv : public EnvWrapper { return s; } - Status DeleteFile(const std::string& fname) override { + IOStatus DeleteFile(const std::string& f, const IOOptions& options, + IODebugContext* dbg) override { MutexLock l(&mutex_); if (fail_delete_files_) { - return Status::IOError(); + return IOStatus::IOError(); } EXPECT_GT(limit_delete_files_, 0U); limit_delete_files_--; - return EnvWrapper::DeleteFile(fname); + return FileSystemWrapper::DeleteFile(f, options, dbg); } - Status DeleteDir(const std::string& dirname) override { + IOStatus DeleteDir(const std::string& d, const IOOptions& options, + IODebugContext* dbg) override { MutexLock l(&mutex_); if (fail_delete_files_) { - return Status::IOError(); + return IOStatus::IOError(); } - return EnvWrapper::DeleteDir(dirname); + return FileSystemWrapper::DeleteDir(d, options, dbg); } void AssertWrittenFiles(std::vector& should_have_written) { @@ -310,12 +317,13 @@ class TestEnv : public EnvWrapper { } void SetGetChildrenFailure(bool fail) { get_children_failure_ = fail; } - Status GetChildren(const std::string& dir, - std::vector* r) override { + IOStatus GetChildren(const std::string& dir, const IOOptions& io_opts, + std::vector* r, + IODebugContext* dbg) override { if (get_children_failure_) { - return 
Status::IOError("SimulatedFailure"); + return IOStatus::IOError("SimulatedFailure"); } - return EnvWrapper::GetChildren(dir, r); + return FileSystemWrapper::GetChildren(dir, io_opts, r, dbg); } // Some test cases do not actually create the test files (e.g., see @@ -324,54 +332,61 @@ class TestEnv : public EnvWrapper { void SetFilenamesForMockedAttrs(const std::vector& filenames) { filenames_for_mocked_attrs_ = filenames; } - Status GetChildrenFileAttributes( - const std::string& dir, std::vector* r) override { + IOStatus GetChildrenFileAttributes(const std::string& dir, + const IOOptions& options, + std::vector* result, + IODebugContext* dbg) override { if (filenames_for_mocked_attrs_.size() > 0) { for (const auto& filename : filenames_for_mocked_attrs_) { - uint64_t size_bytes = 200; // Match TestEnv + uint64_t size_bytes = 200; // Match TestFs if (filename.find("MANIFEST") == 0) { size_bytes = 100; // Match DummyDB::GetLiveFiles } - r->push_back({dir + "/" + filename, size_bytes}); + result->push_back({dir + "/" + filename, size_bytes}); } - return Status::OK(); + return IOStatus::OK(); } - return EnvWrapper::GetChildrenFileAttributes(dir, r); + return FileSystemWrapper::GetChildrenFileAttributes(dir, options, result, + dbg); } - Status GetFileSize(const std::string& path, uint64_t* size_bytes) override { + + IOStatus GetFileSize(const std::string& f, const IOOptions& options, + uint64_t* s, IODebugContext* dbg) override { if (filenames_for_mocked_attrs_.size() > 0) { - auto fname = path.substr(path.find_last_of('/') + 1); + auto fname = f.substr(f.find_last_of('/') + 1); auto filename_iter = std::find(filenames_for_mocked_attrs_.begin(), filenames_for_mocked_attrs_.end(), fname); if (filename_iter != filenames_for_mocked_attrs_.end()) { - *size_bytes = 200; // Match TestEnv + *s = 200; // Match TestFs if (fname.find("MANIFEST") == 0) { - *size_bytes = 100; // Match DummyDB::GetLiveFiles + *s = 100; // Match DummyDB::GetLiveFiles } - return Status::OK(); + return IOStatus::OK(); } - return Status::NotFound(fname); + return IOStatus::NotFound(fname); } - return EnvWrapper::GetFileSize(path, size_bytes); + return FileSystemWrapper::GetFileSize(f, options, s, dbg); } void SetCreateDirIfMissingFailure(bool fail) { create_dir_if_missing_failure_ = fail; } - Status CreateDirIfMissing(const std::string& d) override { + IOStatus CreateDirIfMissing(const std::string& d, const IOOptions& options, + IODebugContext* dbg) override { if (create_dir_if_missing_failure_) { - return Status::IOError("SimulatedFailure"); + return IOStatus::IOError("SimulatedFailure"); } - return EnvWrapper::CreateDirIfMissing(d); + return FileSystemWrapper::CreateDirIfMissing(d, options, dbg); } void SetNewDirectoryFailure(bool fail) { new_directory_failure_ = fail; } - Status NewDirectory(const std::string& name, - std::unique_ptr* result) override { + IOStatus NewDirectory(const std::string& name, const IOOptions& io_opts, + std::unique_ptr* result, + IODebugContext* dbg) override { if (new_directory_failure_) { - return Status::IOError("SimulatedFailure"); + return IOStatus::IOError("SimulatedFailure"); } - return EnvWrapper::NewDirectory(name, result); + return FileSystemWrapper::NewDirectory(name, io_opts, result, dbg); } void ClearFileOpenCounters() { @@ -413,7 +428,7 @@ class TestEnv : public EnvWrapper { std::atomic num_direct_seq_readers_; std::atomic num_writers_; std::atomic num_direct_writers_; -}; // TestEnv +}; // TestFs class FileManager : public EnvWrapper { public: @@ -634,30 +649,21 @@ class 
BackupEngineTest : public testing::Test { backupdir_ = "/tempbk"; latest_backup_ = backupdir_ + "/LATEST_BACKUP"; - // set up envs - db_chroot_env_.reset(NewChrootEnv(Env::Default(), db_chroot)); - backup_chroot_env_.reset(NewChrootEnv(Env::Default(), backup_chroot)); - test_db_env_.reset(new TestEnv(db_chroot_env_.get())); - test_backup_env_.reset(new TestEnv(backup_chroot_env_.get())); - file_manager_.reset(new FileManager(backup_chroot_env_.get())); - db_file_manager_.reset(new FileManager(db_chroot_env_.get())); + // set up FileSystem & Envs + db_chroot_fs_ = NewChrootFileSystem(FileSystem::Default(), db_chroot); + backup_chroot_fs_ = + NewChrootFileSystem(FileSystem::Default(), backup_chroot); + test_db_fs_ = std::make_shared(db_chroot_fs_); + test_backup_fs_ = std::make_shared(backup_chroot_fs_); + SetEnvsFromFileSystems(); // set up db options options_.create_if_missing = true; options_.paranoid_checks = true; options_.write_buffer_size = 1 << 17; // 128KB - options_.env = test_db_env_.get(); options_.wal_dir = dbname_; options_.enable_blob_files = true; - // Create logger - DBOptions logger_options; - logger_options.env = db_chroot_env_.get(); - // TODO: This should really be an EXPECT_OK, but this CreateLogger fails - // regularly in some environments with "no such directory" - CreateLoggerFromOptions(dbname_, logger_options, &logger_) - .PermitUncheckedError(); - // The sync option is not easily testable in unit tests, but should be // smoke tested across all the other backup tests. However, it is // certainly not worth doubling the runtime of backup tests for it. @@ -671,12 +677,12 @@ class BackupEngineTest : public testing::Test { #endif // ROCKSDB_MODIFY_NPHASH // set up backup db options - backupable_options_.reset(new BackupEngineOptions( + engine_options_.reset(new BackupEngineOptions( backupdir_, test_backup_env_.get(), /*share_table_files*/ true, logger_.get(), kUseSync)); // most tests will use multi-threaded backups - backupable_options_->max_background_operations = 7; + engine_options_->max_background_operations = 7; // delete old files in db DestroyDB(dbname_, options_); @@ -686,6 +692,27 @@ class BackupEngineTest : public testing::Test { backup_chroot_env_->DeleteFile(latest_backup_).PermitUncheckedError(); } + void SetEnvsFromFileSystems() { + db_chroot_env_.reset( + new CompositeEnvWrapper(Env::Default(), db_chroot_fs_)); + backup_chroot_env_.reset( + new CompositeEnvWrapper(Env::Default(), backup_chroot_fs_)); + test_db_env_.reset(new CompositeEnvWrapper(Env::Default(), test_db_fs_)); + options_.env = test_db_env_.get(); + test_backup_env_.reset( + new CompositeEnvWrapper(Env::Default(), test_backup_fs_)); + if (engine_options_) { + engine_options_->backup_env = test_backup_env_.get(); + } + file_manager_.reset(new FileManager(backup_chroot_env_.get())); + db_file_manager_.reset(new FileManager(db_chroot_env_.get())); + + // Create logger + DBOptions logger_options; + logger_options.env = db_chroot_env_.get(); + ASSERT_OK(CreateLoggerFromOptions(dbname_, logger_options, &logger_)); + } + DB* OpenDB() { DB* db; EXPECT_OK(DB::Open(options_, dbname_, &db)); @@ -697,7 +724,7 @@ class BackupEngineTest : public testing::Test { db_.reset(); // Open DB - test_db_env_->SetLimitWrittenFiles(1000000); + test_db_fs_->SetLimitWrittenFiles(1000000); DB* db; if (read_only) { ASSERT_OK(DB::OpenForReadOnly(options_, dbname_, &db)); @@ -709,8 +736,8 @@ class BackupEngineTest : public testing::Test { void InitializeDBAndBackupEngine(bool dummy = false) { // reset all the db env 
defaults - test_db_env_->SetLimitWrittenFiles(1000000); - test_db_env_->SetDummySequentialFile(dummy); + test_db_fs_->SetLimitWrittenFiles(1000000); + test_db_fs_->SetDummySequentialFile(dummy); DB* db; if (dummy) { @@ -727,10 +754,10 @@ class BackupEngineTest : public testing::Test { ShareOption shared_option = kShareNoChecksum) { InitializeDBAndBackupEngine(dummy); // reset backup env defaults - test_backup_env_->SetLimitWrittenFiles(1000000); - backupable_options_->destroy_old_data = destroy_old_data; - backupable_options_->share_table_files = shared_option != kNoShare; - backupable_options_->share_files_with_checksum = + test_backup_fs_->SetLimitWrittenFiles(1000000); + engine_options_->destroy_old_data = destroy_old_data; + engine_options_->share_table_files = shared_option != kNoShare; + engine_options_->share_files_with_checksum = shared_option == kShareWithChecksum; OpenBackupEngine(destroy_old_data); } @@ -741,9 +768,10 @@ class BackupEngineTest : public testing::Test { } void OpenBackupEngine(bool destroy_old_data = false) { - backupable_options_->destroy_old_data = destroy_old_data; + engine_options_->destroy_old_data = destroy_old_data; + engine_options_->info_log = logger_.get(); BackupEngine* backup_engine; - ASSERT_OK(BackupEngine::Open(test_db_env_.get(), *backupable_options_, + ASSERT_OK(BackupEngine::Open(test_db_env_.get(), *engine_options_, &backup_engine)); backup_engine_.reset(backup_engine); } @@ -970,11 +998,17 @@ class BackupEngineTest : public testing::Test { // which uses a raw pointer to the logger, executes first. std::shared_ptr logger_; - // envs + // FileSystems + std::shared_ptr db_chroot_fs_; + std::shared_ptr backup_chroot_fs_; + std::shared_ptr test_db_fs_; + std::shared_ptr test_backup_fs_; + + // Env wrappers std::unique_ptr db_chroot_env_; std::unique_ptr backup_chroot_env_; - std::unique_ptr test_db_env_; - std::unique_ptr test_backup_env_; + std::unique_ptr test_db_env_; + std::unique_ptr test_backup_env_; std::unique_ptr file_manager_; std::unique_ptr db_file_manager_; @@ -987,7 +1021,7 @@ class BackupEngineTest : public testing::Test { Options options_; protected: - std::unique_ptr backupable_options_; + std::unique_ptr engine_options_; }; // BackupEngineTest void AppendPath(const std::string& path, std::vector& v) { @@ -1000,16 +1034,16 @@ class BackupEngineTestWithParam : public BackupEngineTest, public testing::WithParamInterface { public: BackupEngineTestWithParam() { - backupable_options_->share_files_with_checksum = GetParam(); + engine_options_->share_files_with_checksum = GetParam(); } void OpenDBAndBackupEngine( bool destroy_old_data = false, bool dummy = false, ShareOption shared_option = kShareNoChecksum) override { BackupEngineTest::InitializeDBAndBackupEngine(dummy); // reset backup env defaults - test_backup_env_->SetLimitWrittenFiles(1000000); - backupable_options_->destroy_old_data = destroy_old_data; - backupable_options_->share_table_files = shared_option != kNoShare; + test_backup_fs_->SetLimitWrittenFiles(1000000); + engine_options_->destroy_old_data = destroy_old_data; + engine_options_->share_table_files = shared_option != kNoShare; // NOTE: keep share_files_with_checksum setting from constructor OpenBackupEngine(destroy_old_data); } @@ -1209,30 +1243,30 @@ TEST_F(BackupEngineTest, NoDoubleCopy_And_AutoGC) { OpenDBAndBackupEngine(true, true); // should write 5 DB files + one meta file - test_backup_env_->SetLimitWrittenFiles(7); - test_backup_env_->ClearWrittenFiles(); - test_db_env_->SetLimitWrittenFiles(0); + 
test_backup_fs_->SetLimitWrittenFiles(7); + test_backup_fs_->ClearWrittenFiles(); + test_db_fs_->SetLimitWrittenFiles(0); dummy_db_->live_files_ = {"00010.sst", "00011.sst", "CURRENT", "MANIFEST-01", "00011.log"}; - test_db_env_->SetFilenamesForMockedAttrs(dummy_db_->live_files_); + test_db_fs_->SetFilenamesForMockedAttrs(dummy_db_->live_files_); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), false)); std::vector should_have_written = { "/shared/.00010.sst.tmp", "/shared/.00011.sst.tmp", "/private/1/CURRENT", "/private/1/MANIFEST-01", "/private/1/00011.log", "/meta/.1.tmp"}; AppendPath(backupdir_, should_have_written); - test_backup_env_->AssertWrittenFiles(should_have_written); + test_backup_fs_->AssertWrittenFiles(should_have_written); char db_number = '1'; for (std::string other_sst : {"00015.sst", "00017.sst", "00019.sst"}) { // should write 4 new DB files + one meta file // should not write/copy 00010.sst, since it's already there! - test_backup_env_->SetLimitWrittenFiles(6); - test_backup_env_->ClearWrittenFiles(); + test_backup_fs_->SetLimitWrittenFiles(6); + test_backup_fs_->ClearWrittenFiles(); dummy_db_->live_files_ = {"00010.sst", other_sst, "CURRENT", "MANIFEST-01", "00011.log"}; - test_db_env_->SetFilenamesForMockedAttrs(dummy_db_->live_files_); + test_db_fs_->SetFilenamesForMockedAttrs(dummy_db_->live_files_); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), false)); // should not open 00010.sst - it's already there @@ -1243,7 +1277,7 @@ TEST_F(BackupEngineTest, NoDoubleCopy_And_AutoGC) { private_dir + "/MANIFEST-01", private_dir + "/00011.log", std::string("/meta/.") + db_number + ".tmp"}; AppendPath(backupdir_, should_have_written); - test_backup_env_->AssertWrittenFiles(should_have_written); + test_backup_fs_->AssertWrittenFiles(should_have_written); } ASSERT_OK(backup_engine_->DeleteBackup(1)); @@ -1325,11 +1359,11 @@ TEST_F(BackupEngineTest, CorruptionsTest) { // ---------- case 1. - fail a write ----------- // try creating backup 6, but fail a write FillDB(db_.get(), keys_iteration * 5, keys_iteration * 6); - test_backup_env_->SetLimitWrittenFiles(2); + test_backup_fs_->SetLimitWrittenFiles(2); // should fail s = backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)); ASSERT_NOK(s); - test_backup_env_->SetLimitWrittenFiles(1000000); + test_backup_fs_->SetLimitWrittenFiles(1000000); // latest backup should have all the keys CloseDBAndBackupEngine(); AssertBackupConsistency(0, 0, keys_iteration * 5, keys_iteration * 6); @@ -1497,7 +1531,7 @@ TEST_P(BackupEngineTestWithParam, CorruptBlobFileMaintainSize) { std::vector children; std::string dir = backupdir_; - if (backupable_options_->share_files_with_checksum) { + if (engine_options_->share_files_with_checksum) { dir += "/shared_checksum"; } else { dir += "/shared"; @@ -1653,8 +1687,7 @@ TEST_P(BackupEngineTestWithParam, BlobFileCorruptedBeforeBackup) { TEST_F(BackupEngineTest, TableFileWithoutDbChecksumCorruptedDuringBackup) { const int keys_iteration = 50000; - backupable_options_->share_files_with_checksum_naming = - kLegacyCrc32cAndFileSize; + engine_options_->share_files_with_checksum_naming = kLegacyCrc32cAndFileSize; // When share_files_with_checksum is on, we calculate checksums of table // files before and after copying. So we can test whether a corruption has // happened during the file is copied to backup directory. 
@@ -1740,8 +1773,8 @@ TEST_F(BackupEngineTest, InterruptCreationTest) { OpenDBAndBackupEngine(true /* destroy_old_data */); FillDB(db_.get(), 0, keys_iteration); - test_backup_env_->SetLimitWrittenFiles(2); - test_backup_env_->SetDeleteFileFailure(true); + test_backup_fs_->SetLimitWrittenFiles(2); + test_backup_fs_->SetDeleteFileFailure(true); // should fail creation ASSERT_NOK(backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2))); CloseDBAndBackupEngine(); @@ -1749,8 +1782,8 @@ TEST_F(BackupEngineTest, InterruptCreationTest) { ASSERT_OK(backup_chroot_env_->FileExists(backupdir_ + "/private/1/")); OpenDBAndBackupEngine(false /* destroy_old_data */); - test_backup_env_->SetLimitWrittenFiles(1000000); - test_backup_env_->SetDeleteFileFailure(false); + test_backup_fs_->SetLimitWrittenFiles(1000000); + test_backup_fs_->SetDeleteFileFailure(false); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2))); // latest backup should have all the keys CloseDBAndBackupEngine(); @@ -1884,11 +1917,10 @@ TEST_F(BackupEngineTest, NoDeleteWithReadOnly) { CloseDBAndBackupEngine(); ASSERT_OK(file_manager_->WriteToFile(latest_backup_, "4")); - backupable_options_->destroy_old_data = false; + engine_options_->destroy_old_data = false; BackupEngineReadOnly* read_only_backup_engine; - ASSERT_OK(BackupEngineReadOnly::Open(backup_chroot_env_.get(), - *backupable_options_, - &read_only_backup_engine)); + ASSERT_OK(BackupEngineReadOnly::Open( + backup_chroot_env_.get(), *engine_options_, &read_only_backup_engine)); // assert that data from backup 5 is still here (even though LATEST_BACKUP // says 4 is latest) @@ -2017,7 +2049,7 @@ TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsTransition) { // Verify backup and restore with various naming options, check names TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNaming) { - ASSERT_TRUE(backupable_options_->share_files_with_checksum_naming == + ASSERT_TRUE(engine_options_->share_files_with_checksum_naming == kNamingDefault); const int keys_iteration = 5000; @@ -2040,7 +2072,7 @@ TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNaming) { for (const auto& pair : option_to_expected) { CloseAndReopenDB(); - backupable_options_->share_files_with_checksum_naming = pair.first; + engine_options_->share_files_with_checksum_naming = pair.first; OpenBackupEngine(true /*destroy_old_data*/); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get())); CloseDBAndBackupEngine(); @@ -2083,7 +2115,7 @@ TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsOldFileNaming) { for (ShareFilesNaming option : {kNamingDefault, kUseDbSessionId}) { CloseAndReopenDB(); - backupable_options_->share_files_with_checksum_naming = option; + engine_options_->share_files_with_checksum_naming = option; OpenBackupEngine(true /*destroy_old_data*/); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get())); CloseDBAndBackupEngine(); @@ -2111,7 +2143,7 @@ TEST_F(BackupEngineTest, TableFileCorruptionBeforeIncremental) { auto share = option == share_no_checksum ? kShareNoChecksum : kShareWithChecksum; if (option != share_no_checksum) { - backupable_options_->share_files_with_checksum_naming = option; + engine_options_->share_files_with_checksum_naming = option; } OpenDBAndBackupEngine(true, false, share); DBImpl* dbi = static_cast(db_.get()); @@ -2220,7 +2252,7 @@ TEST_F(BackupEngineTest, FileSizeForIncremental) { auto share = option == share_no_checksum ? 
kShareNoChecksum : kShareWithChecksum; if (option != share_no_checksum) { - backupable_options_->share_files_with_checksum_naming = option; + engine_options_->share_files_with_checksum_naming = option; } OpenDBAndBackupEngine(true, false, share); @@ -2341,7 +2373,7 @@ TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNamingTransition) { // We may set share_files_with_checksum_naming to kLegacyCrc32cAndFileSize // here but even if we don't, it should have no effect when // share_files_with_checksum is false - ASSERT_TRUE(backupable_options_->share_files_with_checksum_naming == + ASSERT_TRUE(engine_options_->share_files_with_checksum_naming == kNamingDefault); // set share_files_with_checksum to false OpenDBAndBackupEngine(true, false, kShareNoChecksum); @@ -2359,7 +2391,7 @@ TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNamingTransition) { // set share_files_with_checksum to true and do some more backups // and use session id in the name of SST file backup - ASSERT_TRUE(backupable_options_->share_files_with_checksum_naming == + ASSERT_TRUE(engine_options_->share_files_with_checksum_naming == kNamingDefault); OpenDBAndBackupEngine(false /* destroy_old_data */, false, kShareWithChecksum); @@ -2381,7 +2413,7 @@ TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNamingTransition) { // For an extra challenge, make sure that GarbageCollect / DeleteBackup // is OK even if we open without share_table_files but with // share_files_with_checksum_naming based on kUseDbSessionId - ASSERT_TRUE(backupable_options_->share_files_with_checksum_naming == + ASSERT_TRUE(engine_options_->share_files_with_checksum_naming == kNamingDefault); OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare); ASSERT_OK(backup_engine_->DeleteBackup(1)); @@ -2394,8 +2426,7 @@ TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNamingTransition) { // Use checksum and file size for backup table file names and open without // share_table_files // Again, make sure that GarbageCollect / DeleteBackup is OK - backupable_options_->share_files_with_checksum_naming = - kLegacyCrc32cAndFileSize; + engine_options_->share_files_with_checksum_naming = kLegacyCrc32cAndFileSize; OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare); ASSERT_OK(backup_engine_->DeleteBackup(2)); ASSERT_OK(backup_engine_->GarbageCollect()); @@ -2411,8 +2442,7 @@ TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNamingTransition) { // Verify backup and restore with share_files_with_checksum on and transition // from kLegacyCrc32cAndFileSize to kUseDbSessionId TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNamingUpgrade) { - backupable_options_->share_files_with_checksum_naming = - kLegacyCrc32cAndFileSize; + engine_options_->share_files_with_checksum_naming = kLegacyCrc32cAndFileSize; const int keys_iteration = 5000; // set share_files_with_checksum to true OpenDBAndBackupEngine(true, false, kShareWithChecksum); @@ -2428,7 +2458,7 @@ TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNamingUpgrade) { keys_iteration * (j + 1)); } - backupable_options_->share_files_with_checksum_naming = kUseDbSessionId; + engine_options_->share_files_with_checksum_naming = kUseDbSessionId; OpenDBAndBackupEngine(false /* destroy_old_data */, false, kShareWithChecksum); FillDB(db_.get(), keys_iteration * j, keys_iteration * (j + 1)); @@ -2459,8 +2489,7 @@ TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNamingUpgrade) { // Use checksum and file size for backup table file names and open without // 
share_table_files // Again, make sure that GarbageCollect / DeleteBackup is OK - backupable_options_->share_files_with_checksum_naming = - kLegacyCrc32cAndFileSize; + engine_options_->share_files_with_checksum_naming = kLegacyCrc32cAndFileSize; OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare); ASSERT_OK(backup_engine_->DeleteBackup(2)); ASSERT_OK(backup_engine_->GarbageCollect()); @@ -2554,7 +2583,7 @@ TEST_F(BackupEngineTest, DeleteTmpFiles) { } TEST_F(BackupEngineTest, KeepLogFiles) { - backupable_options_->backup_log_files = false; + engine_options_->backup_log_files = false; // basically infinite options_.WAL_ttl_seconds = 24 * 60 * 60; OpenDBAndBackupEngine(true); @@ -2603,8 +2632,8 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimiting) { bool makeThrottler = std::get<0>(GetParam()); if (makeThrottler) { - backupable_options_->backup_rate_limiter = backupThrottler; - backupable_options_->restore_rate_limiter = restoreThrottler; + engine_options_->backup_rate_limiter = backupThrottler; + engine_options_->restore_rate_limiter = restoreThrottler; } // iter 0 -- single threaded @@ -2618,10 +2647,10 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimiting) { backupThrottler->SetBytesPerSecond(limit.first); restoreThrottler->SetBytesPerSecond(limit.second); } else { - backupable_options_->backup_rate_limit = limit.first; - backupable_options_->restore_rate_limit = limit.second; + engine_options_->backup_rate_limit = limit.first; + engine_options_->restore_rate_limit = limit.second; } - backupable_options_->max_background_operations = (iter == 0) ? 1 : 10; + engine_options_->max_background_operations = (iter == 0) ? 1 : 10; options_.compression = kNoCompression; OpenDBAndBackupEngine(true); size_t bytes_written = FillDB(db_.get(), 0, 100000); @@ -2654,18 +2683,18 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) { bool makeThrottler = std::get<0>(GetParam()); if (makeThrottler) { - backupable_options_->backup_rate_limiter = backupThrottler; + engine_options_->backup_rate_limiter = backupThrottler; } bool is_single_threaded = std::get<1>(GetParam()) == 0 ? true : false; - backupable_options_->max_background_operations = is_single_threaded ? 1 : 10; + engine_options_->max_background_operations = is_single_threaded ? 1 : 10; const std::uint64_t backup_rate_limiter_limit = std::get<2>(GetParam()).first; if (makeThrottler) { - backupable_options_->backup_rate_limiter->SetBytesPerSecond( + engine_options_->backup_rate_limiter->SetBytesPerSecond( backup_rate_limiter_limit); } else { - backupable_options_->backup_rate_limit = backup_rate_limiter_limit; + engine_options_->backup_rate_limit = backup_rate_limiter_limit; } DestroyDB(dbname_, Options()); @@ -2706,13 +2735,13 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingVerifyBackup) { TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInBackup) { bool is_single_threaded = std::get<1>(GetParam()) == 0 ? true : false; - backupable_options_->max_background_operations = is_single_threaded ? 1 : 10; + engine_options_->max_background_operations = is_single_threaded ? 
1 : 10; const std::uint64_t backup_rate_limiter_limit = std::get<2>(GetParam()).first; std::shared_ptr backup_rate_limiter(NewGenericRateLimiter( backup_rate_limiter_limit, 100 * 1000 /* refill_period_us */, 10 /* fairness */, RateLimiter::Mode::kWritesOnly /* mode */)); - backupable_options_->backup_rate_limiter = backup_rate_limiter; + engine_options_->backup_rate_limiter = backup_rate_limiter; DestroyDB(dbname_, Options()); OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */, @@ -2727,7 +2756,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInBackup) { backup_rate_limiter.reset(NewGenericRateLimiter( backup_rate_limiter_limit, 100 * 1000 /* refill_period_us */, 10 /* fairness */, RateLimiter::Mode::kAllIo /* mode */)); - backupable_options_->backup_rate_limiter = backup_rate_limiter; + engine_options_->backup_rate_limiter = backup_rate_limiter; OpenBackupEngine(true); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), @@ -2743,14 +2772,14 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInBackup) { TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) { bool is_single_threaded = std::get<1>(GetParam()) == 0 ? true : false; - backupable_options_->max_background_operations = is_single_threaded ? 1 : 10; + engine_options_->max_background_operations = is_single_threaded ? 1 : 10; const std::uint64_t restore_rate_limiter_limit = std::get<2>(GetParam()).second; std::shared_ptr restore_rate_limiter(NewGenericRateLimiter( restore_rate_limiter_limit, 100 * 1000 /* refill_period_us */, 10 /* fairness */, RateLimiter::Mode::kWritesOnly /* mode */)); - backupable_options_->restore_rate_limiter = restore_rate_limiter; + engine_options_->restore_rate_limiter = restore_rate_limiter; DestroyDB(dbname_, Options()); OpenDBAndBackupEngine(true /* destroy_old_data */); @@ -2770,7 +2799,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) { restore_rate_limiter.reset(NewGenericRateLimiter( restore_rate_limiter_limit, 100 * 1000 /* refill_period_us */, 10 /* fairness */, RateLimiter::Mode::kAllIo /* mode */)); - backupable_options_->restore_rate_limiter = restore_rate_limiter; + engine_options_->restore_rate_limiter = restore_rate_limiter; OpenBackupEngine(false /* destroy_old_data */); ASSERT_OK(backup_engine_->RestoreDBFromLatestBackup(dbname_, dbname_)); @@ -2786,13 +2815,13 @@ TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInRestore) { TEST_P(BackupEngineRateLimitingTestWithParam, RateLimitingChargeReadInInitialize) { bool is_single_threaded = std::get<1>(GetParam()) == 0 ? true : false; - backupable_options_->max_background_operations = is_single_threaded ? 1 : 10; + engine_options_->max_background_operations = is_single_threaded ? 
1 : 10; const std::uint64_t backup_rate_limiter_limit = std::get<2>(GetParam()).first; std::shared_ptr backup_rate_limiter(NewGenericRateLimiter( backup_rate_limiter_limit, 100 * 1000 /* refill_period_us */, 10 /* fairness */, RateLimiter::Mode::kAllIo /* mode */)); - backupable_options_->backup_rate_limiter = backup_rate_limiter; + engine_options_->backup_rate_limiter = backup_rate_limiter; DestroyDB(dbname_, Options()); OpenDBAndBackupEngine(true /* destroy_old_data */); @@ -2803,12 +2832,12 @@ TEST_P(BackupEngineRateLimitingTestWithParam, AssertBackupConsistency(1, 0, 10, 20); std::int64_t total_bytes_through_before_initialize = - backupable_options_->backup_rate_limiter->GetTotalBytesThrough(); + engine_options_->backup_rate_limiter->GetTotalBytesThrough(); OpenDBAndBackupEngine(false /* destroy_old_data */); // We charge read in BackupEngineImpl::BackupMeta::LoadFromFile, // which is called in BackupEngineImpl::Initialize() during // OpenBackupEngine(false) - EXPECT_GT(backupable_options_->backup_rate_limiter->GetTotalBytesThrough(), + EXPECT_GT(engine_options_->backup_rate_limiter->GetTotalBytesThrough(), total_bytes_through_before_initialize); CloseDBAndBackupEngine(); DestroyDB(dbname_, Options()); @@ -2833,7 +2862,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2, RateLimitingWithLowRefillBytesPerPeriod) { SpecialEnv special_env(Env::Default(), /*time_elapse_only_sleep*/ true); - backupable_options_->max_background_operations = 1; + engine_options_->max_background_operations = 1; const uint64_t backup_rate_limiter_limit = std::get<0>(GetParam()).first; std::shared_ptr backup_rate_limiter( std::make_shared( @@ -2841,7 +2870,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2, 10 /* fairness */, RateLimiter::Mode::kAllIo /* mode */, special_env.GetSystemClock(), false /* auto_tuned */)); - backupable_options_->backup_rate_limiter = backup_rate_limiter; + engine_options_->backup_rate_limiter = backup_rate_limiter; const uint64_t restore_rate_limiter_limit = std::get<0>(GetParam()).second; std::shared_ptr restore_rate_limiter( @@ -2850,7 +2879,7 @@ TEST_P(BackupEngineRateLimitingTestWithParam2, 10 /* fairness */, RateLimiter::Mode::kAllIo /* mode */, special_env.GetSystemClock(), false /* auto_tuned */)); - backupable_options_->restore_rate_limiter = restore_rate_limiter; + engine_options_->restore_rate_limiter = restore_rate_limiter; // Rate limiter uses `CondVar::TimedWait()`, which does not have access to the // `Env` to advance its time according to the fake wait duration. 
The @@ -2868,11 +2897,11 @@ TEST_P(BackupEngineRateLimitingTestWithParam2, FillDB(db_.get(), 0, 100); int64_t total_bytes_through_before_backup = - backupable_options_->backup_rate_limiter->GetTotalBytesThrough(); + engine_options_->backup_rate_limiter->GetTotalBytesThrough(); EXPECT_OK(backup_engine_->CreateNewBackup(db_.get(), false /* flush_before_backup */)); int64_t total_bytes_through_after_backup = - backupable_options_->backup_rate_limiter->GetTotalBytesThrough(); + engine_options_->backup_rate_limiter->GetTotalBytesThrough(); ASSERT_GT(total_bytes_through_after_backup, total_bytes_through_before_backup); @@ -2885,11 +2914,11 @@ TEST_P(BackupEngineRateLimitingTestWithParam2, ASSERT_OK(backup_engine_->GetBackupInfo(backup_id, &backup_info, true /* include_file_details */)); int64_t total_bytes_through_before_verify_backup = - backupable_options_->backup_rate_limiter->GetTotalBytesThrough(); + engine_options_->backup_rate_limiter->GetTotalBytesThrough(); EXPECT_OK( backup_engine_->VerifyBackup(backup_id, true /* verify_with_checksum */)); int64_t total_bytes_through_after_verify_backup = - backupable_options_->backup_rate_limiter->GetTotalBytesThrough(); + engine_options_->backup_rate_limiter->GetTotalBytesThrough(); ASSERT_GT(total_bytes_through_after_verify_backup, total_bytes_through_before_verify_backup); @@ -2897,13 +2926,13 @@ TEST_P(BackupEngineRateLimitingTestWithParam2, AssertBackupConsistency(backup_id, 0, 100, 101); int64_t total_bytes_through_before_initialize = - backupable_options_->backup_rate_limiter->GetTotalBytesThrough(); + engine_options_->backup_rate_limiter->GetTotalBytesThrough(); OpenDBAndBackupEngine(false /* destroy_old_data */); // We charge read in BackupEngineImpl::BackupMeta::LoadFromFile, // which is called in BackupEngineImpl::Initialize() during // OpenBackupEngine(false) int64_t total_bytes_through_after_initialize = - backupable_options_->backup_rate_limiter->GetTotalBytesThrough(); + engine_options_->backup_rate_limiter->GetTotalBytesThrough(); ASSERT_GT(total_bytes_through_after_initialize, total_bytes_through_before_initialize); CloseDBAndBackupEngine(); @@ -2911,10 +2940,10 @@ TEST_P(BackupEngineRateLimitingTestWithParam2, DestroyDB(dbname_, Options()); OpenBackupEngine(false /* destroy_old_data */); int64_t total_bytes_through_before_restore = - backupable_options_->restore_rate_limiter->GetTotalBytesThrough(); + engine_options_->restore_rate_limiter->GetTotalBytesThrough(); EXPECT_OK(backup_engine_->RestoreDBFromLatestBackup(dbname_, dbname_)); int64_t total_bytes_through_after_restore = - backupable_options_->restore_rate_limiter->GetTotalBytesThrough(); + engine_options_->restore_rate_limiter->GetTotalBytesThrough(); ASSERT_GT(total_bytes_through_after_restore, total_bytes_through_before_restore); CloseBackupEngine(); @@ -2938,12 +2967,12 @@ TEST_F(BackupEngineTest, ReadOnlyBackupEngine) { CloseDBAndBackupEngine(); DestroyDB(dbname_, options_); - backupable_options_->destroy_old_data = false; - test_backup_env_->ClearWrittenFiles(); - test_backup_env_->SetLimitDeleteFiles(0); + engine_options_->destroy_old_data = false; + test_backup_fs_->ClearWrittenFiles(); + test_backup_fs_->SetLimitDeleteFiles(0); BackupEngineReadOnly* read_only_backup_engine; - ASSERT_OK(BackupEngineReadOnly::Open( - db_chroot_env_.get(), *backupable_options_, &read_only_backup_engine)); + ASSERT_OK(BackupEngineReadOnly::Open(db_chroot_env_.get(), *engine_options_, + &read_only_backup_engine)); std::vector backup_info; 
read_only_backup_engine->GetBackupInfo(&backup_info); ASSERT_EQ(backup_info.size(), 2U); @@ -2953,7 +2982,7 @@ TEST_F(BackupEngineTest, ReadOnlyBackupEngine) { dbname_, dbname_, restore_options)); delete read_only_backup_engine; std::vector should_have_written; - test_backup_env_->AssertWrittenFiles(should_have_written); + test_backup_fs_->AssertWrittenFiles(should_have_written); DB* db = OpenDB(); AssertExists(db, 0, 200); @@ -3029,7 +3058,7 @@ TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) { TEST_F(BackupEngineTest, ProgressCallbackDuringBackup) { DestroyDB(dbname_, options_); // Too big for this small DB - backupable_options_->callback_trigger_interval_size = 100000; + engine_options_->callback_trigger_interval_size = 100000; OpenDBAndBackupEngine(true); FillDB(db_.get(), 0, 100); bool is_callback_invoked = false; @@ -3040,7 +3069,7 @@ TEST_F(BackupEngineTest, ProgressCallbackDuringBackup) { CloseBackupEngine(); // Easily small enough for this small DB - backupable_options_->callback_trigger_interval_size = 1000; + engine_options_->callback_trigger_interval_size = 1000; OpenBackupEngine(); ASSERT_OK(backup_engine_->CreateNewBackup( db_.get(), true, @@ -3081,26 +3110,26 @@ TEST_F(BackupEngineTest, EnvFailures) { // get children failure { - test_backup_env_->SetGetChildrenFailure(true); - ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *backupable_options_, + test_backup_fs_->SetGetChildrenFailure(true); + ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *engine_options_, &backup_engine)); - test_backup_env_->SetGetChildrenFailure(false); + test_backup_fs_->SetGetChildrenFailure(false); } // created dir failure { - test_backup_env_->SetCreateDirIfMissingFailure(true); - ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *backupable_options_, + test_backup_fs_->SetCreateDirIfMissingFailure(true); + ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *engine_options_, &backup_engine)); - test_backup_env_->SetCreateDirIfMissingFailure(false); + test_backup_fs_->SetCreateDirIfMissingFailure(false); } // new directory failure { - test_backup_env_->SetNewDirectoryFailure(true); - ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *backupable_options_, + test_backup_fs_->SetNewDirectoryFailure(true); + ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *engine_options_, &backup_engine)); - test_backup_env_->SetNewDirectoryFailure(false); + test_backup_fs_->SetNewDirectoryFailure(false); } // Read from meta-file failure @@ -3110,18 +3139,18 @@ TEST_F(BackupEngineTest, EnvFailures) { FillDB(db_.get(), 0, 100); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true)); CloseDBAndBackupEngine(); - test_backup_env_->SetDummySequentialFile(true); - test_backup_env_->SetDummySequentialFileFailReads(true); - backupable_options_->destroy_old_data = false; - ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *backupable_options_, + test_backup_fs_->SetDummySequentialFile(true); + test_backup_fs_->SetDummySequentialFileFailReads(true); + engine_options_->destroy_old_data = false; + ASSERT_NOK(BackupEngine::Open(test_db_env_.get(), *engine_options_, &backup_engine)); - test_backup_env_->SetDummySequentialFile(false); - test_backup_env_->SetDummySequentialFileFailReads(false); + test_backup_fs_->SetDummySequentialFile(false); + test_backup_fs_->SetDummySequentialFileFailReads(false); } // no failure { - ASSERT_OK(BackupEngine::Open(test_db_env_.get(), *backupable_options_, + ASSERT_OK(BackupEngine::Open(test_db_env_.get(), *engine_options_, &backup_engine)); delete backup_engine; } @@ -3174,11 +3203,11 @@ 
TEST_F(BackupEngineTest, ChangeManifestDuringBackupCreation) { // see https://github.com/facebook/rocksdb/issues/921 TEST_F(BackupEngineTest, Issue921Test) { BackupEngine* backup_engine; - backupable_options_->share_table_files = false; + engine_options_->share_table_files = false; ASSERT_OK( - backup_chroot_env_->CreateDirIfMissing(backupable_options_->backup_dir)); - backupable_options_->backup_dir += "/new_dir"; - ASSERT_OK(BackupEngine::Open(backup_chroot_env_.get(), *backupable_options_, + backup_chroot_env_->CreateDirIfMissing(engine_options_->backup_dir)); + engine_options_->backup_dir += "/new_dir"; + ASSERT_OK(BackupEngine::Open(backup_chroot_env_.get(), *engine_options_, &backup_engine)); delete backup_engine; @@ -3446,8 +3475,8 @@ TEST_F(BackupEngineTest, Concurrency) { // To check for a race condition in handling buffer size based on byte // burst limit, we need a (generous) rate limiter std::shared_ptr limiter{NewGenericRateLimiter(1000000000)}; - backupable_options_->backup_rate_limiter = limiter; - backupable_options_->restore_rate_limiter = limiter; + engine_options_->backup_rate_limiter = limiter; + engine_options_->restore_rate_limiter = limiter; OpenDBAndBackupEngine(true, false, kShareWithChecksum); @@ -3465,7 +3494,7 @@ TEST_F(BackupEngineTest, Concurrency) { Options db_opts = options_; db_opts.wal_dir = ""; db_opts.create_if_missing = false; - BackupEngineOptions be_opts = *backupable_options_; + BackupEngineOptions be_opts = *engine_options_; be_opts.destroy_old_data = false; std::mt19937 rng{std::random_device()()}; @@ -3636,12 +3665,11 @@ TEST_F(BackupEngineTest, LimitBackupsOpened) { } CloseDBAndBackupEngine(); - backupable_options_->max_valid_backups_to_open = 2; - backupable_options_->destroy_old_data = false; + engine_options_->max_valid_backups_to_open = 2; + engine_options_->destroy_old_data = false; BackupEngineReadOnly* read_only_backup_engine; - ASSERT_OK(BackupEngineReadOnly::Open(backup_chroot_env_.get(), - *backupable_options_, - &read_only_backup_engine)); + ASSERT_OK(BackupEngineReadOnly::Open( + backup_chroot_env_.get(), *engine_options_, &read_only_backup_engine)); std::vector backup_infos; read_only_backup_engine->GetBackupInfo(&backup_infos); @@ -3673,7 +3701,7 @@ TEST_F(BackupEngineTest, IgnoreLimitBackupsOpenedWhenNotReadOnly) { } CloseDBAndBackupEngine(); - backupable_options_->max_valid_backups_to_open = 2; + engine_options_->max_valid_backups_to_open = 2; OpenDBAndBackupEngine(); std::vector backup_infos; backup_engine_->GetBackupInfo(&backup_infos); @@ -3732,7 +3760,7 @@ TEST_F(BackupEngineTest, WriteOnlyEngineNoSharedFileDeletion) { ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true)); CloseDBAndBackupEngine(); - backupable_options_->max_valid_backups_to_open = 0; + engine_options_->max_valid_backups_to_open = 0; OpenDBAndBackupEngine(); switch (i) { case 0: @@ -3749,7 +3777,7 @@ TEST_F(BackupEngineTest, WriteOnlyEngineNoSharedFileDeletion) { } CloseDBAndBackupEngine(); - backupable_options_->max_valid_backups_to_open = port::kMaxInt32; + engine_options_->max_valid_backups_to_open = port::kMaxInt32; AssertBackupConsistency(i + 1, 0, (i + 1) * kNumKeys); } } @@ -3773,8 +3801,8 @@ TEST_P(BackupEngineTestWithParam, BackupUsingDirectIO) { // Clear the file open counters and then do a bunch of backup engine ops. // For all ops, files should be opened in direct mode. 
- test_backup_env_->ClearFileOpenCounters(); - test_db_env_->ClearFileOpenCounters(); + test_backup_fs_->ClearFileOpenCounters(); + test_db_fs_->ClearFileOpenCounters(); CloseBackupEngine(); OpenBackupEngine(); ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), @@ -3787,15 +3815,15 @@ TEST_P(BackupEngineTestWithParam, BackupUsingDirectIO) { ASSERT_EQ(static_cast(i + 1), backup_infos.size()); // Verify backup engine always opened files with direct I/O - ASSERT_EQ(0, test_db_env_->num_writers()); - ASSERT_GE(test_db_env_->num_direct_rand_readers(), 0); - ASSERT_GT(test_db_env_->num_direct_seq_readers(), 0); + ASSERT_EQ(0, test_db_fs_->num_writers()); + ASSERT_GE(test_db_fs_->num_direct_rand_readers(), 0); + ASSERT_GT(test_db_fs_->num_direct_seq_readers(), 0); // Currently the DB doesn't support reading WALs or manifest with direct // I/O, so subtract two. - ASSERT_EQ(test_db_env_->num_seq_readers() - 2, - test_db_env_->num_direct_seq_readers()); - ASSERT_EQ(test_db_env_->num_rand_readers(), - test_db_env_->num_direct_rand_readers()); + ASSERT_EQ(test_db_fs_->num_seq_readers() - 2, + test_db_fs_->num_direct_seq_readers()); + ASSERT_EQ(test_db_fs_->num_rand_readers(), + test_db_fs_->num_direct_rand_readers()); } CloseDBAndBackupEngine(); @@ -3817,7 +3845,7 @@ TEST_F(BackupEngineTest, BackgroundThreadCpuPriority) { // 1 thread is easier to test, otherwise, we may not be sure which thread // actually does the work during CreateNewBackup. - backupable_options_->max_background_operations = 1; + engine_options_->max_background_operations = 1; OpenDBAndBackupEngine(true); { @@ -3979,6 +4007,68 @@ TEST_F(BackupEngineTest, IOStats) { 2 * options_.statistics->getTickerCount(BACKUP_READ_BYTES)); } +TEST_F(BackupEngineTest, FileTemperatures) { + CloseDBAndBackupEngine(); + // More file IO instrumentation + auto my_db_fs = std::make_shared(db_chroot_fs_); + test_db_fs_ = std::make_shared(my_db_fs); + SetEnvsFromFileSystems(); + + // Use temperatures + options_.bottommost_temperature = Temperature::kWarm; + options_.level0_file_num_compaction_trigger = 2; + + OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */, + kShareWithChecksum); + + // generate a bottommost file and a non-bottommost file + DBImpl* dbi = static_cast_with_check(db_.get()); + ASSERT_OK(db_->Put(WriteOptions(), "a", "val")); + ASSERT_OK(db_->Put(WriteOptions(), "c", "val")); + ASSERT_OK(db_->Flush(FlushOptions())); + ASSERT_OK(db_->Put(WriteOptions(), "b", "val")); + ASSERT_OK(db_->Put(WriteOptions(), "d", "val")); + ASSERT_OK(db_->Flush(FlushOptions())); + ASSERT_OK(dbi->TEST_WaitForCompact()); + ASSERT_OK(db_->Put(WriteOptions(), "e", "val")); + ASSERT_OK(db_->Flush(FlushOptions())); + + std::map manifest_temps; + std::map manifest_temp_counts; + std::vector infos; + ASSERT_OK( + db_->GetLiveFilesStorageInfo(LiveFilesStorageInfoOptions(), &infos)); + for (auto info : infos) { + if (info.file_type == kTableFile) { + manifest_temps.emplace(info.file_number, info.temperature); + manifest_temp_counts[info.temperature]++; + } + } + + // Verify expected manifest temperatures + ASSERT_EQ(manifest_temp_counts.size(), 2); + ASSERT_EQ(manifest_temp_counts[Temperature::kWarm], 1); + ASSERT_EQ(manifest_temp_counts[Temperature::kUnknown], 1); + + // Sample requested temperatures in opening files for backup + my_db_fs->ClearRequestedFileTemperatures(); + ASSERT_OK(backup_engine_->CreateNewBackup(db_.get())); + + // checking src file src_temperature hints: 2 sst files: 1 sst is kWarm, + // another is kUnknown + auto requested_temps 
= my_db_fs->RequestedSstFileTemperatures(); + ASSERT_EQ(requested_temps.size(), 2); + bool has_only_one_warm_sst = false; + for (const auto& requested_temp : requested_temps) { + ASSERT_EQ(manifest_temps.at(requested_temp.first), requested_temp.second); + if (requested_temp.second == Temperature::kWarm) { + ASSERT_FALSE(has_only_one_warm_sst); + has_only_one_warm_sst = true; + } + } + ASSERT_TRUE(has_only_one_warm_sst); +} + } // anon namespace } // namespace ROCKSDB_NAMESPACE
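Illustrative note, not part of the patch: the wiring pattern used throughout the refactored tests is that one FileSystem wrapper is shared by the DB (through a composite Env installed in Options::env) and by BackupEngine (through the backup_env passed to BackupEngineOptions), so the temperature hints observed on either side land in a single place. The sketch below shows that pattern outside the test harness. It is a minimal sketch under stated assumptions: TemperatureLoggingFS, the /tmp paths, and the error handling are hypothetical, and it assumes the public NewCompositeEnv() helper from rocksdb/env.h is available instead of the internal CompositeEnvWrapper header the tests include directly.

// temperature_backup_sketch.cc -- illustrative sketch only, not code from
// this patch. Class name, paths, and error handling are hypothetical.
#include <iostream>
#include <map>
#include <memory>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/file_system.h"
#include "rocksdb/utilities/backup_engine.h"

using namespace ROCKSDB_NAMESPACE;

// Minimal stand-in for FileTemperatureTestFS: record the Temperature hint
// passed via FileOptions whenever a file is opened for sequential reads.
// Not thread-safe; fine for a single-threaded sketch.
class TemperatureLoggingFS : public FileSystemWrapper {
 public:
  explicit TemperatureLoggingFS(const std::shared_ptr<FileSystem>& base)
      : FileSystemWrapper(base) {}
  const char* Name() const override { return "TemperatureLoggingFS"; }

  IOStatus NewSequentialFile(const std::string& fname, const FileOptions& opts,
                             std::unique_ptr<FSSequentialFile>* result,
                             IODebugContext* dbg) override {
    requested_[fname] = opts.temperature;  // remember the hint per file
    return target()->NewSequentialFile(fname, opts, result, dbg);
  }

  std::map<std::string, Temperature> requested_;
};

int main() {
  auto fs = std::make_shared<TemperatureLoggingFS>(FileSystem::Default());
  std::unique_ptr<Env> env = NewCompositeEnv(fs);  // Env facade over the FS

  Options options;
  options.create_if_missing = true;
  options.env = env.get();  // DB I/O goes through the instrumented FS
  DB* db = nullptr;
  Status s = DB::Open(options, "/tmp/temperature_sketch_db", &db);
  if (s.ok()) s = db->Put(WriteOptions(), "key", "value");
  if (s.ok()) s = db->Flush(FlushOptions());

  // Backup I/O goes through the same instrumented FS via backup_env.
  BackupEngine* raw_backup_engine = nullptr;
  if (s.ok()) {
    BackupEngineOptions backup_opts("/tmp/temperature_sketch_backup",
                                    env.get());
    s = BackupEngine::Open(env.get(), backup_opts, &raw_backup_engine);
  }
  std::unique_ptr<BackupEngine> backup_engine(raw_backup_engine);
  if (s.ok()) s = backup_engine->CreateNewBackup(db);

  if (!s.ok()) {
    std::cerr << "sketch failed: " << s.ToString() << std::endl;
  }
  for (const auto& entry : fs->requested_) {
    std::cout << entry.first << " opened with temperature hint "
              << static_cast<int>(entry.second) << std::endl;
  }
  delete db;
  return s.ok() ? 0 : 1;
}

Going through a FileSystem rather than an Env is what makes the hint observable at all: as the summary notes, the legacy Env file interfaces do not carry temperatures, which is why backupable_db_test switches from custom Envs to custom FileSystems in this change.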