From c26b75baa5d20575171e1650663ca3e9b7ace881 Mon Sep 17 00:00:00 2001 From: Peter Dillinger Date: Fri, 7 May 2021 13:52:09 -0700 Subject: [PATCH] Deprecate obsolete "backupable db" from public APIs (#8274) Summary: An early design of BackupEngine used stackable DB, so I guess a DB had to opt-in to being backupable. Unfortunately the naming of that obsolete design still infects our public API and implementation. This change fixes the public API, with a deprecated backward-compatibility header. `BackupableDBOptions` is renamed to `BackupEngineOptions` (copy-replace in the public header) and backup_engine.h replaces backupable_db.h (present for backward compatibility). The only other change in backupable_db.h -> backup_engine.h is cleaning up headers. Later changes will fix the internal implementation. Pull Request resolved: https://github.com/facebook/rocksdb/pull/8274 Test Plan: The internal implementation of BackupEngine uses the name BackupEngineOptions, while the unit tests use the old name BackupableDBOptions. This gives me confidence that both still work. Reviewed By: mrambacher Differential Revision: D28259471 Pulled By: pdillinger fbshipit-source-id: a25dbe327b9772143488e7bb0ec7139ee42d0613 --- HISTORY.md | 1 + include/rocksdb/utilities/backup_engine.h | 606 +++++++++++++++++++++ include/rocksdb/utilities/backupable_db.h | 602 +------------------- utilities/backupable/backupable_db.cc | 31 +- utilities/backupable/backupable_db_test.cc | 150 ++--- 5 files changed, 707 insertions(+), 683 deletions(-) create mode 100644 include/rocksdb/utilities/backup_engine.h diff --git a/HISTORY.md b/HISTORY.md index b31c855f1..64d2d681f 100644 --- a/HISTORY.md +++ b/HISTORY.md @@ -21,6 +21,7 @@ * Removed a parameter from TableFactory::NewTableBuilder, which should not be called by user code because TableBuilder is not a public API. * The `skip_filters` parameter to SstFileWriter is now considered deprecated. Use `BlockBasedTableOptions::filter_policy` to control generation of filters. * ClockCache is known to have bugs that could lead to crash or corruption, so should not be used until fixed. Use NewLRUCache instead. +* Deprecated backupable_db.h and BackupableDBOptions in favor of new versions with appropriate names: backup_engine.h and BackupEngineOptions. Old API compatibility is preserved. ### Default Option Change * When options.arena_block_size <= 0 (default value 0), still use writer_buffer_size / 8 but cap to 1MB. Too large alloation size might not be friendly to allocator and might cause performance issues in extreme cases. diff --git a/include/rocksdb/utilities/backup_engine.h b/include/rocksdb/utilities/backup_engine.h new file mode 100644 index 000000000..d6a7764e6 --- /dev/null +++ b/include/rocksdb/utilities/backup_engine.h @@ -0,0 +1,606 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under both the GPLv2 (found in the +// COPYING file in the root directory) and Apache 2.0 License +// (found in the LICENSE.Apache file in the root directory). +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#ifndef ROCKSDB_LITE + +#include +#include +#include +#include +#include + +#include "rocksdb/env.h" +#include "rocksdb/options.h" +#include "rocksdb/status.h" + +namespace ROCKSDB_NAMESPACE { + +// The default DB file checksum function name. 
+constexpr char kDbFileChecksumFuncName[] = "FileChecksumCrc32c"; +// The default BackupEngine file checksum function name. +constexpr char kBackupFileChecksumFuncName[] = "crc32c"; + +struct BackupEngineOptions { + // Where to keep the backup files. Has to be different than dbname_ + // Best to set this to dbname_ + "/backups" + // Required + std::string backup_dir; + + // Backup Env object. It will be used for backup file I/O. If it's + // nullptr, backups will be written out using DBs Env. If it's + // non-nullptr, backup's I/O will be performed using this object. + // If you want to have backups on HDFS, use HDFS Env here! + // Default: nullptr + Env* backup_env; + + // share_table_files supports table and blob files. + // + // If share_table_files == true, the backup directory will share table and + // blob files among backups, to save space among backups of the same DB and to + // enable incremental backups by only copying new files. + // If share_table_files == false, each backup will be on its own and will not + // share any data with other backups. + // + // default: true + bool share_table_files; + + // Backup info and error messages will be written to info_log + // if non-nullptr. + // Default: nullptr + Logger* info_log; + + // If sync == true, we can guarantee you'll get consistent backup even + // on a machine crash/reboot. Backup process is slower with sync enabled. + // If sync == false, we don't guarantee anything on machine reboot. However, + // chances are some of the backups are consistent. + // Default: true + bool sync; + + // If true, it will delete whatever backups there are already + // Default: false + bool destroy_old_data; + + // If false, we won't backup log files. This option can be useful for backing + // up in-memory databases where log file are persisted, but table files are in + // memory. + // Default: true + bool backup_log_files; + + // Max bytes that can be transferred in a second during backup. + // If 0, go as fast as you can + // Default: 0 + uint64_t backup_rate_limit; + + // Backup rate limiter. Used to control transfer speed for backup. If this is + // not null, backup_rate_limit is ignored. + // Default: nullptr + std::shared_ptr backup_rate_limiter{nullptr}; + + // Max bytes that can be transferred in a second during restore. + // If 0, go as fast as you can + // Default: 0 + uint64_t restore_rate_limit; + + // Restore rate limiter. Used to control transfer speed during restore. If + // this is not null, restore_rate_limit is ignored. + // Default: nullptr + std::shared_ptr restore_rate_limiter{nullptr}; + + // share_files_with_checksum supports table and blob files. + // + // Only used if share_table_files is set to true. Setting to false is + // DEPRECATED and potentially dangerous because in that case BackupEngine + // can lose data if backing up databases with distinct or divergent + // history, for example if restoring from a backup other than the latest, + // writing to the DB, and creating another backup. Setting to true (default) + // prevents these issues by ensuring that different table files (SSTs) and + // blob files with the same number are treated as distinct. See + // share_files_with_checksum_naming and ShareFilesNaming. 
+  //
+  // Default: true
+  bool share_files_with_checksum;
+
+  // Up to this many background threads will copy files for CreateNewBackup()
+  // and RestoreDBFromBackup()
+  // Default: 1
+  int max_background_operations;
+
+  // During backup user can get callback every time next
+  // callback_trigger_interval_size bytes being copied.
+  // Default: 4194304
+  uint64_t callback_trigger_interval_size;
+
+  // For BackupEngineReadOnly, Open() will open at most this many of the
+  // latest non-corrupted backups.
+  //
+  // Note: this setting is ignored (behaves like INT_MAX) for any kind of
+  // writable BackupEngine because it would inhibit accounting for shared
+  // files for proper backup deletion, including purging any incompletely
+  // created backups on creation of a new backup.
+  //
+  // Default: INT_MAX
+  int max_valid_backups_to_open;
+
+  // ShareFilesNaming describes possible naming schemes for backup
+  // table and blob file names when they are stored in the
+  // shared_checksum directory (i.e., both share_table_files and
+  // share_files_with_checksum are true).
+  enum ShareFilesNaming : uint32_t {
+    // Backup blob filenames are <file_number>_<crc32c>_<file_size>.blob and
+    // backup SST filenames are <file_number>_<crc32c>_<file_size>.sst
+    // where <crc32c> is an unsigned decimal integer. This is the
+    // original/legacy naming scheme for share_files_with_checksum,
+    // with two problems:
+    // * At massive scale, collisions on this triple with different file
+    //   contents is plausible.
+    // * Determining the name to use requires computing the checksum,
+    //   so generally requires reading the whole file even if the file
+    //   is already backed up.
+    //
+    // ** ONLY RECOMMENDED FOR PRESERVING OLD BEHAVIOR **
+    kLegacyCrc32cAndFileSize = 1U,
+
+    // Backup SST filenames are <file_number>_s<db_session_id>.sst. This
+    // pair of values should be very strongly unique for a given SST file
+    // and easily determined before computing a checksum. The 's' indicates
+    // the value is a DB session id, not a checksum.
+    //
+    // Exceptions:
+    // * For blob files, kLegacyCrc32cAndFileSize is used as currently
+    //   db_session_id is not supported by the blob file format.
+    // * For old SST files without a DB session id, kLegacyCrc32cAndFileSize
+    //   will be used instead, matching the names assigned by RocksDB versions
+    //   not supporting the newer naming scheme.
+    // * See also flags below.
+    kUseDbSessionId = 2U,
+
+    kMaskNoNamingFlags = 0xffffU,
+
+    // If not already part of the naming scheme, insert
+    //   _<file_size>
+    // before .sst and .blob in the name. In case of user code actually parsing
+    // the last _<file_size> before the .sst and .blob as the file size, this
+    // preserves that feature of kLegacyCrc32cAndFileSize. In other words, this
+    // option makes official that unofficial feature of the backup metadata.
+    //
+    // We do not consider SST and blob file sizes to have sufficient entropy to
+    // contribute significantly to naming uniqueness.
+    kFlagIncludeFileSize = 1U << 31,
+
+    kMaskNamingFlags = ~kMaskNoNamingFlags,
+  };
+
+  // Naming option for share_files_with_checksum table and blob files. See
+  // ShareFilesNaming for details.
+  //
+  // Modifying this option cannot introduce a downgrade compatibility issue
+  // because RocksDB can read, restore, and delete backups using different file
+  // names, and it's OK for a backup directory to use a mixture of table and
+  // blob files naming schemes.
+  //
+  // However, modifying this option and saving more backups to the same
+  // directory can lead to the same file getting saved again to that
+  // directory, under the new shared name in addition to the old shared
+  // name.
+  //
+  // Default: kUseDbSessionId | kFlagIncludeFileSize
+  //
+  // Note: This option comes into effect only if both share_files_with_checksum
+  // and share_table_files are true.
+  ShareFilesNaming share_files_with_checksum_naming;
+
+  void Dump(Logger* logger) const;
+
+  explicit BackupEngineOptions(
+      const std::string& _backup_dir, Env* _backup_env = nullptr,
+      bool _share_table_files = true, Logger* _info_log = nullptr,
+      bool _sync = true, bool _destroy_old_data = false,
+      bool _backup_log_files = true, uint64_t _backup_rate_limit = 0,
+      uint64_t _restore_rate_limit = 0, int _max_background_operations = 1,
+      uint64_t _callback_trigger_interval_size = 4 * 1024 * 1024,
+      int _max_valid_backups_to_open = INT_MAX,
+      ShareFilesNaming _share_files_with_checksum_naming =
+          static_cast<ShareFilesNaming>(kUseDbSessionId | kFlagIncludeFileSize))
+      : backup_dir(_backup_dir),
+        backup_env(_backup_env),
+        share_table_files(_share_table_files),
+        info_log(_info_log),
+        sync(_sync),
+        destroy_old_data(_destroy_old_data),
+        backup_log_files(_backup_log_files),
+        backup_rate_limit(_backup_rate_limit),
+        restore_rate_limit(_restore_rate_limit),
+        share_files_with_checksum(true),
+        max_background_operations(_max_background_operations),
+        callback_trigger_interval_size(_callback_trigger_interval_size),
+        max_valid_backups_to_open(_max_valid_backups_to_open),
+        share_files_with_checksum_naming(_share_files_with_checksum_naming) {
+    assert(share_table_files || !share_files_with_checksum);
+    assert((share_files_with_checksum_naming & kMaskNoNamingFlags) != 0);
+  }
+};
+
+inline BackupEngineOptions::ShareFilesNaming operator&(
+    BackupEngineOptions::ShareFilesNaming lhs,
+    BackupEngineOptions::ShareFilesNaming rhs) {
+  uint32_t l = static_cast<uint32_t>(lhs);
+  uint32_t r = static_cast<uint32_t>(rhs);
+  assert(r == BackupEngineOptions::kMaskNoNamingFlags ||
+         (r & BackupEngineOptions::kMaskNoNamingFlags) == 0);
+  return static_cast<BackupEngineOptions::ShareFilesNaming>(l & r);
+}
+
+inline BackupEngineOptions::ShareFilesNaming operator|(
+    BackupEngineOptions::ShareFilesNaming lhs,
+    BackupEngineOptions::ShareFilesNaming rhs) {
+  uint32_t l = static_cast<uint32_t>(lhs);
+  uint32_t r = static_cast<uint32_t>(rhs);
+  assert((r & BackupEngineOptions::kMaskNoNamingFlags) == 0);
+  return static_cast<BackupEngineOptions::ShareFilesNaming>(l | r);
+}
+
+struct CreateBackupOptions {
+  // Flush will always trigger if 2PC is enabled.
+  // If write-ahead logs are disabled, set flush_before_backup=true to
+  // avoid losing unflushed key/value pairs from the memtable.
+  bool flush_before_backup = false;
+
+  // Callback for reporting progress, based on callback_trigger_interval_size.
+  std::function<void()> progress_callback = []() {};
+
+  // If false, background_thread_cpu_priority is ignored.
+  // Otherwise, the cpu priority can be decreased,
+  // if you try to increase the priority, the priority will not change.
+  // The initial priority of the threads is CpuPriority::kNormal,
+  // so you can decrease to priorities lower than kNormal.
+  bool decrease_background_thread_cpu_priority = false;
+  CpuPriority background_thread_cpu_priority = CpuPriority::kNormal;
+};
+
+struct RestoreOptions {
+  // If true, restore won't overwrite the existing log files in wal_dir. It will
+  // also move all log files from archive directory to wal_dir.
Use this option + // in combination with BackupEngineOptions::backup_log_files = false for + // persisting in-memory databases. + // Default: false + bool keep_log_files; + + explicit RestoreOptions(bool _keep_log_files = false) + : keep_log_files(_keep_log_files) {} +}; + +struct BackupFileInfo { + // File name and path relative to the backup_dir directory. + std::string relative_filename; + + // Size of the file in bytes, not including filesystem overheads. + uint64_t size; +}; + +typedef uint32_t BackupID; + +struct BackupInfo { + BackupID backup_id = 0U; + // Creation time, according to GetCurrentTime + int64_t timestamp = 0; + + // Total size in bytes (based on file payloads, not including filesystem + // overheads or backup meta file) + uint64_t size = 0U; + + // Number of backed up files, some of which might be shared with other + // backups. Does not include backup meta file. + uint32_t number_files = 0U; + + // Backup API user metadata + std::string app_metadata; + + // Backup file details, if requested with include_file_details=true + std::vector file_details; + + // DB "name" (a directory in the backup_env) for opening this backup as a + // read-only DB. This should also be used as the DBOptions::wal_dir, such + // as by default setting wal_dir="". See also env_for_open. + // This field is only set if include_file_details=true + std::string name_for_open; + + // An Env(+FileSystem) for opening this backup as a read-only DB, with + // DB::OpenForReadOnly or similar. This field is only set if + // include_file_details=true. (The FileSystem in this Env takes care + // of making shared backup files openable from the `name_for_open` DB + // directory.) See also name_for_open. + // + // This Env might or might not be shared with other backups. To work + // around DBOptions::env being a raw pointer, this is a shared_ptr so + // that keeping either this BackupInfo, the BackupEngine, or a copy of + // this shared_ptr alive is sufficient to keep the Env alive for use by + // a read-only DB. + std::shared_ptr env_for_open; + + BackupInfo() {} + + BackupInfo(BackupID _backup_id, int64_t _timestamp, uint64_t _size, + uint32_t _number_files, const std::string& _app_metadata) + : backup_id(_backup_id), + timestamp(_timestamp), + size(_size), + number_files(_number_files), + app_metadata(_app_metadata) {} +}; + +class BackupStatistics { + public: + BackupStatistics() { + number_success_backup = 0; + number_fail_backup = 0; + } + + BackupStatistics(uint32_t _number_success_backup, + uint32_t _number_fail_backup) + : number_success_backup(_number_success_backup), + number_fail_backup(_number_fail_backup) {} + + ~BackupStatistics() {} + + void IncrementNumberSuccessBackup(); + void IncrementNumberFailBackup(); + + uint32_t GetNumberSuccessBackup() const; + uint32_t GetNumberFailBackup() const; + + std::string ToString() const; + + private: + uint32_t number_success_backup; + uint32_t number_fail_backup; +}; + +// Read-only functions of a BackupEngine. (Restore writes to another directory +// not the backup directory.) See BackupEngine comments for details on +// safe concurrent operations. +class BackupEngineReadOnlyBase { + public: + virtual ~BackupEngineReadOnlyBase() {} + + // Returns info about the latest good backup in backup_info, or NotFound + // no good backup exists. + // Setting include_file_details=true provides information about each + // backed-up file in BackupInfo::file_details and more. 
+ virtual Status GetLatestBackupInfo( + BackupInfo* backup_info, bool include_file_details = false) const = 0; + + // Returns info about a specific backup in backup_info, or NotFound + // or Corruption status if the requested backup id does not exist or is + // known corrupt. + // Setting include_file_details=true provides information about each + // backed-up file in BackupInfo::file_details and more. + virtual Status GetBackupInfo(BackupID backup_id, BackupInfo* backup_info, + bool include_file_details = false) const = 0; + + // Returns info about backups in backup_info + // Setting include_file_details=true provides information about each + // backed-up file in BackupInfo::file_details and more. + virtual void GetBackupInfo(std::vector* backup_info, + bool include_file_details = false) const = 0; + + // Returns info about corrupt backups in corrupt_backups. + // WARNING: Any write to the BackupEngine could trigger automatic + // GarbageCollect(), which could delete files that would be needed to + // manually recover a corrupt backup or to preserve an unrecognized (e.g. + // incompatible future version) backup. + virtual void GetCorruptedBackups( + std::vector* corrupt_backup_ids) const = 0; + + // Restore to specified db_dir and wal_dir from backup_id. + virtual Status RestoreDBFromBackup(const RestoreOptions& options, + BackupID backup_id, + const std::string& db_dir, + const std::string& wal_dir) const = 0; + + // keep for backward compatibility. + virtual Status RestoreDBFromBackup( + BackupID backup_id, const std::string& db_dir, const std::string& wal_dir, + const RestoreOptions& options = RestoreOptions()) const { + return RestoreDBFromBackup(options, backup_id, db_dir, wal_dir); + } + + // Like RestoreDBFromBackup but restores from latest non-corrupt backup_id + virtual Status RestoreDBFromLatestBackup( + const RestoreOptions& options, const std::string& db_dir, + const std::string& wal_dir) const = 0; + + // keep for backward compatibility. + virtual Status RestoreDBFromLatestBackup( + const std::string& db_dir, const std::string& wal_dir, + const RestoreOptions& options = RestoreOptions()) const { + return RestoreDBFromLatestBackup(options, db_dir, wal_dir); + } + + // If verify_with_checksum is true, this function + // inspects the current checksums and file sizes of backup files to see if + // they match our expectation. + // + // If verify_with_checksum is false, this function + // checks that each file exists and that the size of the file matches our + // expectation. It does not check file checksum. + // + // If this BackupEngine created the backup, it compares the files' current + // sizes (and current checksum) against the number of bytes written to + // them (and the checksum calculated) during creation. + // Otherwise, it compares the files' current sizes (and checksums) against + // their sizes (and checksums) when the BackupEngine was opened. + // + // Returns Status::OK() if all checks are good + virtual Status VerifyBackup(BackupID backup_id, + bool verify_with_checksum = false) const = 0; +}; + +// Append-only functions of a BackupEngine. See BackupEngine comment for +// details on distinction between Append and Write operations and safe +// concurrent operations. +class BackupEngineAppendOnlyBase { + public: + virtual ~BackupEngineAppendOnlyBase() {} + + // same as CreateNewBackup, but stores extra application metadata. 
+ virtual Status CreateNewBackupWithMetadata( + const CreateBackupOptions& options, DB* db, + const std::string& app_metadata, BackupID* new_backup_id = nullptr) = 0; + + // keep here for backward compatibility. + virtual Status CreateNewBackupWithMetadata( + DB* db, const std::string& app_metadata, bool flush_before_backup = false, + std::function progress_callback = []() {}) { + CreateBackupOptions options; + options.flush_before_backup = flush_before_backup; + options.progress_callback = progress_callback; + return CreateNewBackupWithMetadata(options, db, app_metadata); + } + + // Captures the state of the database by creating a new (latest) backup. + // On success (OK status), the BackupID of the new backup is saved to + // *new_backup_id when not nullptr. + virtual Status CreateNewBackup(const CreateBackupOptions& options, DB* db, + BackupID* new_backup_id = nullptr) { + return CreateNewBackupWithMetadata(options, db, "", new_backup_id); + } + + // keep here for backward compatibility. + virtual Status CreateNewBackup( + DB* db, bool flush_before_backup = false, + std::function progress_callback = []() {}) { + CreateBackupOptions options; + options.flush_before_backup = flush_before_backup; + options.progress_callback = progress_callback; + return CreateNewBackup(options, db); + } + + // Call this from another thread if you want to stop the backup + // that is currently happening. It will return immediately, will + // not wait for the backup to stop. + // The backup will stop ASAP and the call to CreateNewBackup will + // return Status::Incomplete(). It will not clean up after itself, but + // the state will remain consistent. The state will be cleaned up the + // next time you call CreateNewBackup or GarbageCollect. + virtual void StopBackup() = 0; + + // Will delete any files left over from incomplete creation or deletion of + // a backup. This is not normally needed as those operations also clean up + // after prior incomplete calls to the same kind of operation (create or + // delete). This does not delete corrupt backups but can delete files that + // would be needed to manually recover a corrupt backup or to preserve an + // unrecognized (e.g. incompatible future version) backup. + // NOTE: This is not designed to delete arbitrary files added to the backup + // directory outside of BackupEngine, and clean-up is always subject to + // permissions on and availability of the underlying filesystem. + // NOTE2: For concurrency and interference purposes (see BackupEngine + // comment), GarbageCollect (GC) is like other Append operations, even + // though it seems different. Although GC can delete physical data, it does + // not delete any logical data read by Read operations. GC can interfere + // with Append or Write operations in another BackupEngine on the same + // backup_dir, because temporary files will be treated as obsolete and + // deleted. + virtual Status GarbageCollect() = 0; +}; + +// A backup engine for organizing and managing backups. +// This class is not user-extensible. +// +// This class declaration adds "Write" operations in addition to the +// operations from BackupEngineAppendOnlyBase and BackupEngineReadOnlyBase. +// +// # Concurrency between threads on the same BackupEngine* object +// +// As of version 6.20, BackupEngine* operations are generally thread-safe, +// using a read-write lock, though single-thread operation is still +// recommended to avoid TOCTOU bugs. 
Specifically, particular kinds of +// concurrent operations behave like this: +// +// op1\op2| Read | Append | Write +// -------|-------|--------|-------- +// Read | conc | block | block +// Append | block | block | block +// Write | block | block | block +// +// conc = operations safely proceed concurrently +// block = one of the operations safely blocks until the other completes. +// There is generally no guarantee as to which completes first. +// +// StopBackup is the only operation that affects an ongoing operation. +// +// # Interleaving operations between BackupEngine* objects open on the +// same backup_dir +// +// It is recommended only to have one BackupEngine* object open for a given +// backup_dir, but it is possible to mix / interleave some operations +// (regardless of whether they are concurrent) with these caveats: +// +// op1\op2| Open | Read | Append | Write +// -------|--------|--------|--------|-------- +// Open | conc | conc | atomic | unspec +// Read | conc | conc | old | unspec +// Append | atomic | old | unspec | unspec +// Write | unspec | unspec | unspec | unspec +// +// Special case: Open with destroy_old_data=true is really a Write +// +// conc = operations safely proceed, concurrently when applicable +// atomic = operations are effectively atomic; if a concurrent Append +// operation has not completed at some key point during Open, the +// opened BackupEngine* will never see the result of the Append op. +// old = Read operations do not include any state changes from other +// BackupEngine* objects; they return the state at their Open time. +// unspec = Behavior is unspecified, including possibly trashing the +// backup_dir, but is "memory safe" (no C++ undefined behavior) +// +class BackupEngine : public BackupEngineReadOnlyBase, + public BackupEngineAppendOnlyBase { + public: + virtual ~BackupEngine() {} + + // BackupEngineOptions have to be the same as the ones used in previous + // BackupEngines for the same backup directory. + static Status Open(const BackupEngineOptions& options, Env* db_env, + BackupEngine** backup_engine_ptr); + + // keep for backward compatibility. + static Status Open(Env* db_env, const BackupEngineOptions& options, + BackupEngine** backup_engine_ptr) { + return BackupEngine::Open(options, db_env, backup_engine_ptr); + } + + // Deletes old backups, keeping latest num_backups_to_keep alive. + // See also DeleteBackup. + virtual Status PurgeOldBackups(uint32_t num_backups_to_keep) = 0; + + // Deletes a specific backup. If this operation (or PurgeOldBackups) + // is not completed due to crash, power failure, etc. the state + // will be cleaned up the next time you call DeleteBackup, + // PurgeOldBackups, or GarbageCollect. + virtual Status DeleteBackup(BackupID backup_id) = 0; +}; + +// A variant of BackupEngine that only allows "Read" operations. See +// BackupEngine comment for details. This class is not user-extensible. +class BackupEngineReadOnly : public BackupEngineReadOnlyBase { + public: + virtual ~BackupEngineReadOnly() {} + + static Status Open(const BackupEngineOptions& options, Env* db_env, + BackupEngineReadOnly** backup_engine_ptr); + // keep for backward compatibility. 
+ static Status Open(Env* db_env, const BackupEngineOptions& options, + BackupEngineReadOnly** backup_engine_ptr) { + return BackupEngineReadOnly::Open(options, db_env, backup_engine_ptr); + } +}; + +} // namespace ROCKSDB_NAMESPACE +#endif // ROCKSDB_LITE diff --git a/include/rocksdb/utilities/backupable_db.h b/include/rocksdb/utilities/backupable_db.h index 81f0b0e5d..de040b552 100644 --- a/include/rocksdb/utilities/backupable_db.h +++ b/include/rocksdb/utilities/backupable_db.h @@ -1,608 +1,26 @@ -// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. // This source code is licensed under both the GPLv2 (found in the // COPYING file in the root directory) and Apache 2.0 License // (found in the LICENSE.Apache file in the root directory). -// -// Copyright (c) 2011 The LevelDB Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// This is a DEPRECATED header for API backward compatibility. Please +// use backup_engine.h. #pragma once #ifndef ROCKSDB_LITE +// A legacy unnecessary include #include -#include -#include -#include -#include +#include "rocksdb/utilities/backup_engine.h" + +// A legacy unnecessary include #include "rocksdb/utilities/stackable_db.h" -#include "rocksdb/env.h" -#include "rocksdb/options.h" -#include "rocksdb/status.h" - namespace ROCKSDB_NAMESPACE { -// The default DB file checksum function name. -constexpr char kDbFileChecksumFuncName[] = "FileChecksumCrc32c"; -// The default BackupEngine file checksum function name. -constexpr char kBackupFileChecksumFuncName[] = "crc32c"; - -struct BackupableDBOptions { - // Where to keep the backup files. Has to be different than dbname_ - // Best to set this to dbname_ + "/backups" - // Required - std::string backup_dir; - - // Backup Env object. It will be used for backup file I/O. If it's - // nullptr, backups will be written out using DBs Env. If it's - // non-nullptr, backup's I/O will be performed using this object. - // If you want to have backups on HDFS, use HDFS Env here! - // Default: nullptr - Env* backup_env; - - // share_table_files supports table and blob files. - // - // If share_table_files == true, the backup directory will share table and - // blob files among backups, to save space among backups of the same DB and to - // enable incremental backups by only copying new files. - // If share_table_files == false, each backup will be on its own and will not - // share any data with other backups. - // - // default: true - bool share_table_files; - - // Backup info and error messages will be written to info_log - // if non-nullptr. - // Default: nullptr - Logger* info_log; - - // If sync == true, we can guarantee you'll get consistent backup even - // on a machine crash/reboot. Backup process is slower with sync enabled. - // If sync == false, we don't guarantee anything on machine reboot. However, - // chances are some of the backups are consistent. - // Default: true - bool sync; - - // If true, it will delete whatever backups there are already - // Default: false - bool destroy_old_data; - - // If false, we won't backup log files. This option can be useful for backing - // up in-memory databases where log file are persisted, but table files are in - // memory. - // Default: true - bool backup_log_files; - - // Max bytes that can be transferred in a second during backup. 
- // If 0, go as fast as you can - // Default: 0 - uint64_t backup_rate_limit; - - // Backup rate limiter. Used to control transfer speed for backup. If this is - // not null, backup_rate_limit is ignored. - // Default: nullptr - std::shared_ptr backup_rate_limiter{nullptr}; - - // Max bytes that can be transferred in a second during restore. - // If 0, go as fast as you can - // Default: 0 - uint64_t restore_rate_limit; - - // Restore rate limiter. Used to control transfer speed during restore. If - // this is not null, restore_rate_limit is ignored. - // Default: nullptr - std::shared_ptr restore_rate_limiter{nullptr}; - - // share_files_with_checksum supports table and blob files. - // - // Only used if share_table_files is set to true. Setting to false is - // DEPRECATED and potentially dangerous because in that case BackupEngine - // can lose data if backing up databases with distinct or divergent - // history, for example if restoring from a backup other than the latest, - // writing to the DB, and creating another backup. Setting to true (default) - // prevents these issues by ensuring that different table files (SSTs) and - // blob files with the same number are treated as distinct. See - // share_files_with_checksum_naming and ShareFilesNaming. - // - // Default: true - bool share_files_with_checksum; - - // Up to this many background threads will copy files for CreateNewBackup() - // and RestoreDBFromBackup() - // Default: 1 - int max_background_operations; - - // During backup user can get callback every time next - // callback_trigger_interval_size bytes being copied. - // Default: 4194304 - uint64_t callback_trigger_interval_size; - - // For BackupEngineReadOnly, Open() will open at most this many of the - // latest non-corrupted backups. - // - // Note: this setting is ignored (behaves like INT_MAX) for any kind of - // writable BackupEngine because it would inhibit accounting for shared - // files for proper backup deletion, including purging any incompletely - // created backups on creation of a new backup. - // - // Default: INT_MAX - int max_valid_backups_to_open; - - // ShareFilesNaming describes possible naming schemes for backup - // table and blob file names when they are stored in the - // shared_checksum directory (i.e., both share_table_files and - // share_files_with_checksum are true). - enum ShareFilesNaming : uint32_t { - // Backup blob filenames are __.blob and - // backup SST filenames are __.sst - // where is an unsigned decimal integer. This is the - // original/legacy naming scheme for share_files_with_checksum, - // with two problems: - // * At massive scale, collisions on this triple with different file - // contents is plausible. - // * Determining the name to use requires computing the checksum, - // so generally requires reading the whole file even if the file - // is already backed up. - // - // ** ONLY RECOMMENDED FOR PRESERVING OLD BEHAVIOR ** - kLegacyCrc32cAndFileSize = 1U, - - // Backup SST filenames are _s.sst. This - // pair of values should be very strongly unique for a given SST file - // and easily determined before computing a checksum. The 's' indicates - // the value is a DB session id, not a checksum. - // - // Exceptions: - // * For blob files, kLegacyCrc32cAndFileSize is used as currently - // db_session_id is not supported by the blob file format. 
- // * For old SST files without a DB session id, kLegacyCrc32cAndFileSize - // will be used instead, matching the names assigned by RocksDB versions - // not supporting the newer naming scheme. - // * See also flags below. - kUseDbSessionId = 2U, - - kMaskNoNamingFlags = 0xffffU, - - // If not already part of the naming scheme, insert - // _ - // before .sst and .blob in the name. In case of user code actually parsing - // the last _ before the .sst and .blob as the file size, this - // preserves that feature of kLegacyCrc32cAndFileSize. In other words, this - // option makes official that unofficial feature of the backup metadata. - // - // We do not consider SST and blob file sizes to have sufficient entropy to - // contribute significantly to naming uniqueness. - kFlagIncludeFileSize = 1U << 31, - - kMaskNamingFlags = ~kMaskNoNamingFlags, - }; - - // Naming option for share_files_with_checksum table and blob files. See - // ShareFilesNaming for details. - // - // Modifying this option cannot introduce a downgrade compatibility issue - // because RocksDB can read, restore, and delete backups using different file - // names, and it's OK for a backup directory to use a mixture of table and - // blob files naming schemes. - // - // However, modifying this option and saving more backups to the same - // directory can lead to the same file getting saved again to that - // directory, under the new shared name in addition to the old shared - // name. - // - // Default: kUseDbSessionId | kFlagIncludeFileSize - // - // Note: This option comes into effect only if both share_files_with_checksum - // and share_table_files are true. - ShareFilesNaming share_files_with_checksum_naming; - - void Dump(Logger* logger) const; - - explicit BackupableDBOptions( - const std::string& _backup_dir, Env* _backup_env = nullptr, - bool _share_table_files = true, Logger* _info_log = nullptr, - bool _sync = true, bool _destroy_old_data = false, - bool _backup_log_files = true, uint64_t _backup_rate_limit = 0, - uint64_t _restore_rate_limit = 0, int _max_background_operations = 1, - uint64_t _callback_trigger_interval_size = 4 * 1024 * 1024, - int _max_valid_backups_to_open = INT_MAX, - ShareFilesNaming _share_files_with_checksum_naming = - static_cast(kUseDbSessionId | kFlagIncludeFileSize)) - : backup_dir(_backup_dir), - backup_env(_backup_env), - share_table_files(_share_table_files), - info_log(_info_log), - sync(_sync), - destroy_old_data(_destroy_old_data), - backup_log_files(_backup_log_files), - backup_rate_limit(_backup_rate_limit), - restore_rate_limit(_restore_rate_limit), - share_files_with_checksum(true), - max_background_operations(_max_background_operations), - callback_trigger_interval_size(_callback_trigger_interval_size), - max_valid_backups_to_open(_max_valid_backups_to_open), - share_files_with_checksum_naming(_share_files_with_checksum_naming) { - assert(share_table_files || !share_files_with_checksum); - assert((share_files_with_checksum_naming & kMaskNoNamingFlags) != 0); - } -}; - -inline BackupableDBOptions::ShareFilesNaming operator&( - BackupableDBOptions::ShareFilesNaming lhs, - BackupableDBOptions::ShareFilesNaming rhs) { - uint32_t l = static_cast(lhs); - uint32_t r = static_cast(rhs); - assert(r == BackupableDBOptions::kMaskNoNamingFlags || - (r & BackupableDBOptions::kMaskNoNamingFlags) == 0); - return static_cast(l & r); -} - -inline BackupableDBOptions::ShareFilesNaming operator|( - BackupableDBOptions::ShareFilesNaming lhs, - BackupableDBOptions::ShareFilesNaming rhs) { - uint32_t 
l = static_cast(lhs); - uint32_t r = static_cast(rhs); - assert((r & BackupableDBOptions::kMaskNoNamingFlags) == 0); - return static_cast(l | r); -} - -struct CreateBackupOptions { - // Flush will always trigger if 2PC is enabled. - // If write-ahead logs are disabled, set flush_before_backup=true to - // avoid losing unflushed key/value pairs from the memtable. - bool flush_before_backup = false; - - // Callback for reporting progress, based on callback_trigger_interval_size. - std::function progress_callback = []() {}; - - // If false, background_thread_cpu_priority is ignored. - // Otherwise, the cpu priority can be decreased, - // if you try to increase the priority, the priority will not change. - // The initial priority of the threads is CpuPriority::kNormal, - // so you can decrease to priorities lower than kNormal. - bool decrease_background_thread_cpu_priority = false; - CpuPriority background_thread_cpu_priority = CpuPriority::kNormal; -}; - -struct RestoreOptions { - // If true, restore won't overwrite the existing log files in wal_dir. It will - // also move all log files from archive directory to wal_dir. Use this option - // in combination with BackupableDBOptions::backup_log_files = false for - // persisting in-memory databases. - // Default: false - bool keep_log_files; - - explicit RestoreOptions(bool _keep_log_files = false) - : keep_log_files(_keep_log_files) {} -}; - -struct BackupFileInfo { - // File name and path relative to the backup_dir directory. - std::string relative_filename; - - // Size of the file in bytes, not including filesystem overheads. - uint64_t size; -}; - -typedef uint32_t BackupID; - -struct BackupInfo { - BackupID backup_id = 0U; - // Creation time, according to GetCurrentTime - int64_t timestamp = 0; - - // Total size in bytes (based on file payloads, not including filesystem - // overheads or backup meta file) - uint64_t size = 0U; - - // Number of backed up files, some of which might be shared with other - // backups. Does not include backup meta file. - uint32_t number_files = 0U; - - // Backup API user metadata - std::string app_metadata; - - // Backup file details, if requested with include_file_details=true - std::vector file_details; - - // DB "name" (a directory in the backup_env) for opening this backup as a - // read-only DB. This should also be used as the DBOptions::wal_dir, such - // as by default setting wal_dir="". See also env_for_open. - // This field is only set if include_file_details=true - std::string name_for_open; - - // An Env(+FileSystem) for opening this backup as a read-only DB, with - // DB::OpenForReadOnly or similar. This field is only set if - // include_file_details=true. (The FileSystem in this Env takes care - // of making shared backup files openable from the `name_for_open` DB - // directory.) See also name_for_open. - // - // This Env might or might not be shared with other backups. To work - // around DBOptions::env being a raw pointer, this is a shared_ptr so - // that keeping either this BackupInfo, the BackupEngine, or a copy of - // this shared_ptr alive is sufficient to keep the Env alive for use by - // a read-only DB. 
- std::shared_ptr env_for_open; - - BackupInfo() {} - - BackupInfo(BackupID _backup_id, int64_t _timestamp, uint64_t _size, - uint32_t _number_files, const std::string& _app_metadata) - : backup_id(_backup_id), - timestamp(_timestamp), - size(_size), - number_files(_number_files), - app_metadata(_app_metadata) {} -}; - -class BackupStatistics { - public: - BackupStatistics() { - number_success_backup = 0; - number_fail_backup = 0; - } - - BackupStatistics(uint32_t _number_success_backup, - uint32_t _number_fail_backup) - : number_success_backup(_number_success_backup), - number_fail_backup(_number_fail_backup) {} - - ~BackupStatistics() {} - - void IncrementNumberSuccessBackup(); - void IncrementNumberFailBackup(); - - uint32_t GetNumberSuccessBackup() const; - uint32_t GetNumberFailBackup() const; - - std::string ToString() const; - - private: - uint32_t number_success_backup; - uint32_t number_fail_backup; -}; - -// Read-only functions of a BackupEngine. (Restore writes to another directory -// not the backup directory.) See BackupEngine comments for details on -// safe concurrent operations. -class BackupEngineReadOnlyBase { - public: - virtual ~BackupEngineReadOnlyBase() {} - - // Returns info about the latest good backup in backup_info, or NotFound - // no good backup exists. - // Setting include_file_details=true provides information about each - // backed-up file in BackupInfo::file_details and more. - virtual Status GetLatestBackupInfo( - BackupInfo* backup_info, bool include_file_details = false) const = 0; - - // Returns info about a specific backup in backup_info, or NotFound - // or Corruption status if the requested backup id does not exist or is - // known corrupt. - // Setting include_file_details=true provides information about each - // backed-up file in BackupInfo::file_details and more. - virtual Status GetBackupInfo(BackupID backup_id, BackupInfo* backup_info, - bool include_file_details = false) const = 0; - - // Returns info about backups in backup_info - // Setting include_file_details=true provides information about each - // backed-up file in BackupInfo::file_details and more. - virtual void GetBackupInfo(std::vector* backup_info, - bool include_file_details = false) const = 0; - - // Returns info about corrupt backups in corrupt_backups. - // WARNING: Any write to the BackupEngine could trigger automatic - // GarbageCollect(), which could delete files that would be needed to - // manually recover a corrupt backup or to preserve an unrecognized (e.g. - // incompatible future version) backup. - virtual void GetCorruptedBackups( - std::vector* corrupt_backup_ids) const = 0; - - // Restore to specified db_dir and wal_dir from backup_id. - virtual Status RestoreDBFromBackup(const RestoreOptions& options, - BackupID backup_id, - const std::string& db_dir, - const std::string& wal_dir) const = 0; - - // keep for backward compatibility. - virtual Status RestoreDBFromBackup( - BackupID backup_id, const std::string& db_dir, const std::string& wal_dir, - const RestoreOptions& options = RestoreOptions()) const { - return RestoreDBFromBackup(options, backup_id, db_dir, wal_dir); - } - - // Like RestoreDBFromBackup but restores from latest non-corrupt backup_id - virtual Status RestoreDBFromLatestBackup( - const RestoreOptions& options, const std::string& db_dir, - const std::string& wal_dir) const = 0; - - // keep for backward compatibility. 
- virtual Status RestoreDBFromLatestBackup( - const std::string& db_dir, const std::string& wal_dir, - const RestoreOptions& options = RestoreOptions()) const { - return RestoreDBFromLatestBackup(options, db_dir, wal_dir); - } - - // If verify_with_checksum is true, this function - // inspects the current checksums and file sizes of backup files to see if - // they match our expectation. - // - // If verify_with_checksum is false, this function - // checks that each file exists and that the size of the file matches our - // expectation. It does not check file checksum. - // - // If this BackupEngine created the backup, it compares the files' current - // sizes (and current checksum) against the number of bytes written to - // them (and the checksum calculated) during creation. - // Otherwise, it compares the files' current sizes (and checksums) against - // their sizes (and checksums) when the BackupEngine was opened. - // - // Returns Status::OK() if all checks are good - virtual Status VerifyBackup(BackupID backup_id, - bool verify_with_checksum = false) const = 0; -}; - -// Append-only functions of a BackupEngine. See BackupEngine comment for -// details on distinction between Append and Write operations and safe -// concurrent operations. -class BackupEngineAppendOnlyBase { - public: - virtual ~BackupEngineAppendOnlyBase() {} - - // same as CreateNewBackup, but stores extra application metadata. - virtual Status CreateNewBackupWithMetadata( - const CreateBackupOptions& options, DB* db, - const std::string& app_metadata, BackupID* new_backup_id = nullptr) = 0; - - // keep here for backward compatibility. - virtual Status CreateNewBackupWithMetadata( - DB* db, const std::string& app_metadata, bool flush_before_backup = false, - std::function progress_callback = []() {}) { - CreateBackupOptions options; - options.flush_before_backup = flush_before_backup; - options.progress_callback = progress_callback; - return CreateNewBackupWithMetadata(options, db, app_metadata); - } - - // Captures the state of the database by creating a new (latest) backup. - // On success (OK status), the BackupID of the new backup is saved to - // *new_backup_id when not nullptr. - virtual Status CreateNewBackup(const CreateBackupOptions& options, DB* db, - BackupID* new_backup_id = nullptr) { - return CreateNewBackupWithMetadata(options, db, "", new_backup_id); - } - - // keep here for backward compatibility. - virtual Status CreateNewBackup(DB* db, bool flush_before_backup = false, - std::function progress_callback = - []() {}) { - CreateBackupOptions options; - options.flush_before_backup = flush_before_backup; - options.progress_callback = progress_callback; - return CreateNewBackup(options, db); - } - - // Call this from another thread if you want to stop the backup - // that is currently happening. It will return immediately, will - // not wait for the backup to stop. - // The backup will stop ASAP and the call to CreateNewBackup will - // return Status::Incomplete(). It will not clean up after itself, but - // the state will remain consistent. The state will be cleaned up the - // next time you call CreateNewBackup or GarbageCollect. - virtual void StopBackup() = 0; - - // Will delete any files left over from incomplete creation or deletion of - // a backup. This is not normally needed as those operations also clean up - // after prior incomplete calls to the same kind of operation (create or - // delete). 
This does not delete corrupt backups but can delete files that - // would be needed to manually recover a corrupt backup or to preserve an - // unrecognized (e.g. incompatible future version) backup. - // NOTE: This is not designed to delete arbitrary files added to the backup - // directory outside of BackupEngine, and clean-up is always subject to - // permissions on and availability of the underlying filesystem. - // NOTE2: For concurrency and interference purposes (see BackupEngine - // comment), GarbageCollect (GC) is like other Append operations, even - // though it seems different. Although GC can delete physical data, it does - // not delete any logical data read by Read operations. GC can interfere - // with Append or Write operations in another BackupEngine on the same - // backup_dir, because temporary files will be treated as obsolete and - // deleted. - virtual Status GarbageCollect() = 0; -}; - -// A backup engine for organizing and managing backups. -// This class is not user-extensible. -// -// This class declaration adds "Write" operations in addition to the -// operations from BackupEngineAppendOnlyBase and BackupEngineReadOnlyBase. -// -// # Concurrency between threads on the same BackupEngine* object -// -// As of version 6.20, BackupEngine* operations are generally thread-safe, -// using a read-write lock, though single-thread operation is still -// recommended to avoid TOCTOU bugs. Specifically, particular kinds of -// concurrent operations behave like this: -// -// op1\op2| Read | Append | Write -// -------|-------|--------|-------- -// Read | conc | block | block -// Append | block | block | block -// Write | block | block | block -// -// conc = operations safely proceed concurrently -// block = one of the operations safely blocks until the other completes. -// There is generally no guarantee as to which completes first. -// -// StopBackup is the only operation that affects an ongoing operation. -// -// # Interleaving operations between BackupEngine* objects open on the -// same backup_dir -// -// It is recommended only to have one BackupEngine* object open for a given -// backup_dir, but it is possible to mix / interleave some operations -// (regardless of whether they are concurrent) with these caveats: -// -// op1\op2| Open | Read | Append | Write -// -------|--------|--------|--------|-------- -// Open | conc | conc | atomic | unspec -// Read | conc | conc | old | unspec -// Append | atomic | old | unspec | unspec -// Write | unspec | unspec | unspec | unspec -// -// Special case: Open with destroy_old_data=true is really a Write -// -// conc = operations safely proceed, concurrently when applicable -// atomic = operations are effectively atomic; if a concurrent Append -// operation has not completed at some key point during Open, the -// opened BackupEngine* will never see the result of the Append op. -// old = Read operations do not include any state changes from other -// BackupEngine* objects; they return the state at their Open time. -// unspec = Behavior is unspecified, including possibly trashing the -// backup_dir, but is "memory safe" (no C++ undefined behavior) -// -class BackupEngine : public BackupEngineReadOnlyBase, - public BackupEngineAppendOnlyBase { - public: - virtual ~BackupEngine() {} - - // BackupableDBOptions have to be the same as the ones used in previous - // BackupEngines for the same backup directory. 
- static Status Open(const BackupableDBOptions& options, Env* db_env, - BackupEngine** backup_engine_ptr); - - // keep for backward compatibility. - static Status Open(Env* db_env, const BackupableDBOptions& options, - BackupEngine** backup_engine_ptr) { - return BackupEngine::Open(options, db_env, backup_engine_ptr); - } - - // Deletes old backups, keeping latest num_backups_to_keep alive. - // See also DeleteBackup. - virtual Status PurgeOldBackups(uint32_t num_backups_to_keep) = 0; - - // Deletes a specific backup. If this operation (or PurgeOldBackups) - // is not completed due to crash, power failure, etc. the state - // will be cleaned up the next time you call DeleteBackup, - // PurgeOldBackups, or GarbageCollect. - virtual Status DeleteBackup(BackupID backup_id) = 0; -}; - -// A variant of BackupEngine that only allows "Read" operations. See -// BackupEngine comment for details. This class is not user-extensible. -class BackupEngineReadOnly : public BackupEngineReadOnlyBase { - public: - virtual ~BackupEngineReadOnly() {} - - static Status Open(const BackupableDBOptions& options, Env* db_env, - BackupEngineReadOnly** backup_engine_ptr); - // keep for backward compatibility. - static Status Open(Env* db_env, const BackupableDBOptions& options, - BackupEngineReadOnly** backup_engine_ptr) { - return BackupEngineReadOnly::Open(options, db_env, backup_engine_ptr); - } -}; +using BackupableDBOptions = BackupEngineOptions; } // namespace ROCKSDB_NAMESPACE + #endif // ROCKSDB_LITE diff --git a/utilities/backupable/backupable_db.cc b/utilities/backupable/backupable_db.cc index c06fe3f96..b98c56bc6 100644 --- a/utilities/backupable/backupable_db.cc +++ b/utilities/backupable/backupable_db.cc @@ -9,11 +9,10 @@ #ifndef ROCKSDB_LITE -#include - #include #include #include +#include #include #include #include @@ -50,7 +49,7 @@ namespace ROCKSDB_NAMESPACE { namespace { -using ShareFilesNaming = BackupableDBOptions::ShareFilesNaming; +using ShareFilesNaming = BackupEngineOptions::ShareFilesNaming; constexpr BackupID kLatestBackupIDMarker = static_cast(-2); @@ -100,7 +99,7 @@ std::string BackupStatistics::ToString() const { return result; } -void BackupableDBOptions::Dump(Logger* logger) const { +void BackupEngineOptions::Dump(Logger* logger) const { ROCKS_LOG_INFO(logger, " Options.backup_dir: %s", backup_dir.c_str()); ROCKS_LOG_INFO(logger, " Options.backup_env: %p", backup_env); @@ -124,7 +123,7 @@ void BackupableDBOptions::Dump(Logger* logger) const { // -------- BackupEngineImpl class --------- class BackupEngineImpl { public: - BackupEngineImpl(const BackupableDBOptions& options, Env* db_env, + BackupEngineImpl(const BackupEngineOptions& options, Env* db_env, bool read_only = false); ~BackupEngineImpl(); @@ -168,11 +167,11 @@ class BackupEngineImpl { ShareFilesNaming GetNamingNoFlags() const { return options_.share_files_with_checksum_naming & - BackupableDBOptions::kMaskNoNamingFlags; + BackupEngineOptions::kMaskNoNamingFlags; } ShareFilesNaming GetNamingFlags() const { return options_.share_files_with_checksum_naming & - BackupableDBOptions::kMaskNamingFlags; + BackupEngineOptions::kMaskNamingFlags; } private: @@ -494,7 +493,7 @@ class BackupEngineImpl { } inline bool UseLegacyNaming(const std::string& sid) const { return GetNamingNoFlags() == - BackupableDBOptions::kLegacyCrc32cAndFileSize || + BackupEngineOptions::kLegacyCrc32cAndFileSize || sid.empty(); } inline std::string GetSharedFileWithChecksum( @@ -509,7 +508,7 @@ class BackupEngineImpl { ToString(file_size)); } else { 
file_copy.insert(file_copy.find_last_of('.'), "_s" + db_session_id); - if (GetNamingFlags() & BackupableDBOptions::kFlagIncludeFileSize) { + if (GetNamingFlags() & BackupEngineOptions::kFlagIncludeFileSize) { file_copy.insert(file_copy.find_last_of('.'), "_" + ToString(file_size)); } @@ -775,7 +774,7 @@ class BackupEngineImpl { std::atomic stop_backup_; // options data - BackupableDBOptions options_; + BackupEngineOptions options_; Env* db_env_; Env* backup_env_; @@ -803,7 +802,7 @@ class BackupEngineImpl { class BackupEngineImplThreadSafe : public BackupEngine, public BackupEngineReadOnly { public: - BackupEngineImplThreadSafe(const BackupableDBOptions& options, Env* db_env, + BackupEngineImplThreadSafe(const BackupEngineOptions& options, Env* db_env, bool read_only = false) : impl_(options, db_env, read_only) {} ~BackupEngineImplThreadSafe() override {} @@ -902,7 +901,7 @@ class BackupEngineImplThreadSafe : public BackupEngine, BackupEngineImpl impl_; }; -Status BackupEngine::Open(const BackupableDBOptions& options, Env* env, +Status BackupEngine::Open(const BackupEngineOptions& options, Env* env, BackupEngine** backup_engine_ptr) { std::unique_ptr backup_engine( new BackupEngineImplThreadSafe(options, env)); @@ -915,7 +914,7 @@ Status BackupEngine::Open(const BackupableDBOptions& options, Env* env, return Status::OK(); } -BackupEngineImpl::BackupEngineImpl(const BackupableDBOptions& options, +BackupEngineImpl::BackupEngineImpl(const BackupEngineOptions& options, Env* db_env, bool read_only) : initialized_(false), threads_cpu_priority_(), @@ -1256,7 +1255,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata( if (options_.share_table_files && !options_.share_files_with_checksum) { ROCKS_LOG_WARN(options_.info_log, - "BackupableDBOptions::share_files_with_checksum=false is " + "BackupEngineOptions::share_files_with_checksum=false is " "DEPRECATED and could lead to data loss."); } @@ -1971,7 +1970,7 @@ Status BackupEngineImpl::AddBackupFileWorkItem( // Step 1: Prepare the relative path to destination if (shared && shared_checksum) { - if (GetNamingNoFlags() != BackupableDBOptions::kLegacyCrc32cAndFileSize && + if (GetNamingNoFlags() != BackupEngineOptions::kLegacyCrc32cAndFileSize && file_type != kBlobFile) { // Prepare db_session_id to add to the file name // Ignore the returned status @@ -2834,7 +2833,7 @@ Status BackupEngineImpl::BackupMeta::StoreToFile( return s; } -Status BackupEngineReadOnly::Open(const BackupableDBOptions& options, Env* env, +Status BackupEngineReadOnly::Open(const BackupEngineOptions& options, Env* env, BackupEngineReadOnly** backup_engine_ptr) { if (options.destroy_old_data) { return Status::InvalidArgument( diff --git a/utilities/backupable/backupable_db_test.cc b/utilities/backupable/backupable_db_test.cc index b4c62c7df..a74409512 100644 --- a/utilities/backupable/backupable_db_test.cc +++ b/utilities/backupable/backupable_db_test.cc @@ -598,7 +598,7 @@ void AssertEmpty(DB* db, int from, int to) { } } // namespace -class BackupableDBTest : public testing::Test { +class BackupEngineTest : public testing::Test { public: enum ShareOption { kNoShare, @@ -609,10 +609,10 @@ class BackupableDBTest : public testing::Test { const std::vector kAllShareOptions = { kNoShare, kShareNoChecksum, kShareWithChecksum}; - BackupableDBTest() { + BackupEngineTest() { // set up files - std::string db_chroot = test::PerThreadDBPath("backupable_db"); - std::string backup_chroot = test::PerThreadDBPath("backupable_db_backup"); + std::string db_chroot = 
test::PerThreadDBPath("db_for_backup"); + std::string backup_chroot = test::PerThreadDBPath("db_backups"); EXPECT_OK(Env::Default()->CreateDirIfMissing(db_chroot)); EXPECT_OK(Env::Default()->CreateDirIfMissing(backup_chroot)); dbname_ = "/tempdb"; @@ -946,7 +946,7 @@ class BackupableDBTest : public testing::Test { std::unique_ptr db_file_manager_; // all the dbs! - DummyDB* dummy_db_; // BackupableDB owns dummy_db_ + DummyDB* dummy_db_; // owned as db_ when present std::unique_ptr db_; std::unique_ptr backup_engine_; @@ -955,7 +955,7 @@ class BackupableDBTest : public testing::Test { protected: std::unique_ptr backupable_options_; -}; // BackupableDBTest +}; // BackupEngineTest void AppendPath(const std::string& path, std::vector& v) { for (auto& f : v) { @@ -963,16 +963,16 @@ void AppendPath(const std::string& path, std::vector& v) { } } -class BackupableDBTestWithParam : public BackupableDBTest, +class BackupEngineTestWithParam : public BackupEngineTest, public testing::WithParamInterface { public: - BackupableDBTestWithParam() { + BackupEngineTestWithParam() { backupable_options_->share_files_with_checksum = GetParam(); } void OpenDBAndBackupEngine( bool destroy_old_data = false, bool dummy = false, ShareOption shared_option = kShareNoChecksum) override { - BackupableDBTest::InitializeDBAndBackupEngine(dummy); + BackupEngineTest::InitializeDBAndBackupEngine(dummy); // reset backup env defaults test_backup_env_->SetLimitWrittenFiles(1000000); backupable_options_->destroy_old_data = destroy_old_data; @@ -982,7 +982,7 @@ class BackupableDBTestWithParam : public BackupableDBTest, } }; -TEST_F(BackupableDBTest, FileCollision) { +TEST_F(BackupEngineTest, FileCollision) { const int keys_iteration = 5000; for (const auto& sopt : kAllShareOptions) { OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */, sopt); @@ -1019,7 +1019,7 @@ TEST_F(BackupableDBTest, FileCollision) { // This test verifies that the verifyBackup method correctly identifies // invalid backups -TEST_P(BackupableDBTestWithParam, VerifyBackup) { +TEST_P(BackupEngineTestWithParam, VerifyBackup) { const int keys_iteration = 5000; OpenDBAndBackupEngine(true); // create five backups @@ -1049,7 +1049,7 @@ TEST_P(BackupableDBTestWithParam, VerifyBackup) { } // open DB, write, close DB, backup, restore, repeat -TEST_P(BackupableDBTestWithParam, OfflineIntegrationTest) { +TEST_P(BackupEngineTestWithParam, OfflineIntegrationTest) { // has to be a big number, so that it triggers the memtable flush const int keys_iteration = 5000; const int max_key = keys_iteration * 4 + 10; @@ -1097,7 +1097,7 @@ TEST_P(BackupableDBTestWithParam, OfflineIntegrationTest) { } // open DB, write, backup, write, backup, close, restore -TEST_P(BackupableDBTestWithParam, OnlineIntegrationTest) { +TEST_P(BackupEngineTestWithParam, OnlineIntegrationTest) { // has to be a big number, so that it triggers the memtable flush const int keys_iteration = 5000; const int max_key = keys_iteration * 4 + 10; @@ -1159,11 +1159,11 @@ TEST_P(BackupableDBTestWithParam, OnlineIntegrationTest) { CloseBackupEngine(); } -INSTANTIATE_TEST_CASE_P(BackupableDBTestWithParam, BackupableDBTestWithParam, +INSTANTIATE_TEST_CASE_P(BackupEngineTestWithParam, BackupEngineTestWithParam, ::testing::Bool()); // this will make sure that backup does not copy the same file twice -TEST_F(BackupableDBTest, NoDoubleCopy_And_AutoGC) { +TEST_F(BackupEngineTest, NoDoubleCopy_And_AutoGC) { OpenDBAndBackupEngine(true, true); // should write 5 DB files + one meta file @@ -1270,7 +1270,7 @@ 
TEST_F(BackupableDBTest, NoDoubleCopy_And_AutoGC) { // fine // 3. Corrupted checksum value - if the checksum is not a valid uint32_t, // db open should fail, otherwise, it aborts during the restore process. -TEST_F(BackupableDBTest, CorruptionsTest) { +TEST_F(BackupEngineTest, CorruptionsTest) { const int keys_iteration = 5000; Random rnd(6); Status s; @@ -1371,7 +1371,7 @@ TEST_F(BackupableDBTest, CorruptionsTest) { } // Corrupt a file but maintain its size -TEST_F(BackupableDBTest, CorruptFileMaintainSize) { +TEST_F(BackupEngineTest, CorruptFileMaintainSize) { const int keys_iteration = 5000; OpenDBAndBackupEngine(true); // create a backup @@ -1437,7 +1437,7 @@ TEST_F(BackupableDBTest, CorruptFileMaintainSize) { } // Corrupt a blob file but maintain its size -TEST_P(BackupableDBTestWithParam, CorruptBlobFileMaintainSize) { +TEST_P(BackupEngineTestWithParam, CorruptBlobFileMaintainSize) { const int keys_iteration = 5000; OpenDBAndBackupEngine(true); // create a backup @@ -1483,7 +1483,7 @@ TEST_P(BackupableDBTestWithParam, CorruptBlobFileMaintainSize) { // Test if BackupEngine will fail to create new backup if some table has been // corrupted and the table file checksum is stored in the DB manifest -TEST_F(BackupableDBTest, TableFileCorruptedBeforeBackup) { +TEST_F(BackupEngineTest, TableFileCorruptedBeforeBackup) { const int keys_iteration = 50000; OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */, @@ -1516,7 +1516,7 @@ TEST_F(BackupableDBTest, TableFileCorruptedBeforeBackup) { // Test if BackupEngine will fail to create new backup if some blob files has // been corrupted and the blob file checksum is stored in the DB manifest -TEST_F(BackupableDBTest, BlobFileCorruptedBeforeBackup) { +TEST_F(BackupEngineTest, BlobFileCorruptedBeforeBackup) { const int keys_iteration = 50000; OpenDBAndBackupEngine(true /* destroy_old_data */, false /* dummy */, @@ -1551,7 +1551,7 @@ TEST_F(BackupableDBTest, BlobFileCorruptedBeforeBackup) { // Test if BackupEngine will fail to create new backup if some table has been // corrupted and the table file checksum is stored in the DB manifest for the // case when backup table files will be stored in a shared directory -TEST_P(BackupableDBTestWithParam, TableFileCorruptedBeforeBackup) { +TEST_P(BackupEngineTestWithParam, TableFileCorruptedBeforeBackup) { const int keys_iteration = 50000; OpenDBAndBackupEngine(true /* destroy_old_data */); @@ -1581,7 +1581,7 @@ TEST_P(BackupableDBTestWithParam, TableFileCorruptedBeforeBackup) { // Test if BackupEngine will fail to create new backup if some blob files have // been corrupted and the blob file checksum is stored in the DB manifest for // the case when backup blob files will be stored in a shared directory -TEST_P(BackupableDBTestWithParam, BlobFileCorruptedBeforeBackup) { +TEST_P(BackupEngineTestWithParam, BlobFileCorruptedBeforeBackup) { const int keys_iteration = 50000; OpenDBAndBackupEngine(true /* destroy_old_data */); FillDB(db_.get(), 0, keys_iteration); @@ -1607,7 +1607,7 @@ TEST_P(BackupableDBTestWithParam, BlobFileCorruptedBeforeBackup) { CloseDBAndBackupEngine(); } -TEST_F(BackupableDBTest, TableFileWithoutDbChecksumCorruptedDuringBackup) { +TEST_F(BackupEngineTest, TableFileWithoutDbChecksumCorruptedDuringBackup) { const int keys_iteration = 50000; backupable_options_->share_files_with_checksum_naming = kLegacyCrc32cAndFileSize; @@ -1648,7 +1648,7 @@ TEST_F(BackupableDBTest, TableFileWithoutDbChecksumCorruptedDuringBackup) { ASSERT_OK(DestroyDB(dbname_, options_)); } 
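
The corrupted-table and corrupted-blob tests renamed above only trigger when the DB records per-file checksums in its MANIFEST. A minimal sketch of enabling that from the application side, under assumed paths and with error handling reduced to asserts; the factory call is the same stock crc32c generator these tests set on options_:

#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/file_checksum.h"
#include "rocksdb/options.h"
#include "rocksdb/utilities/backup_engine.h"

using namespace ROCKSDB_NAMESPACE;

int main() {
  Options db_opts;
  db_opts.create_if_missing = true;
  // Record a crc32c checksum for each table file in the MANIFEST, so that
  // backup creation can reject a table file that no longer matches it.
  db_opts.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory();

  DB* db = nullptr;
  assert(DB::Open(db_opts, "/tmp/checksummed_db", &db).ok());

  BackupEngineOptions backup_opts("/tmp/checksummed_db_backups");
  BackupEngine* engine = nullptr;
  assert(BackupEngine::Open(backup_opts, Env::Default(), &engine).ok());
  // Expected to return a non-OK status if a live table file was corrupted
  // after its checksum was recorded, which is what the tests above verify.
  Status s = engine->CreateNewBackup(db);

  delete engine;
  delete db;
  return s.ok() ? 0 : 1;
}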
-TEST_F(BackupableDBTest, TableFileWithDbChecksumCorruptedDuringBackup) { +TEST_F(BackupEngineTest, TableFileWithDbChecksumCorruptedDuringBackup) { const int keys_iteration = 50000; options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory(); for (auto& sopt : kAllShareOptions) { @@ -1688,7 +1688,7 @@ TEST_F(BackupableDBTest, TableFileWithDbChecksumCorruptedDuringBackup) { } } -TEST_F(BackupableDBTest, InterruptCreationTest) { +TEST_F(BackupEngineTest, InterruptCreationTest) { // Interrupt backup creation by failing new writes and failing cleanup of the // partial state. Then verify a subsequent backup can still succeed. const int keys_iteration = 5000; @@ -1713,7 +1713,7 @@ TEST_F(BackupableDBTest, InterruptCreationTest) { AssertBackupConsistency(0, 0, keys_iteration); } -TEST_F(BackupableDBTest, FlushCompactDuringBackupCheckpoint) { +TEST_F(BackupEngineTest, FlushCompactDuringBackupCheckpoint) { const int keys_iteration = 5000; options_.file_checksum_gen_factory = GetFileChecksumGenCrc32cFactory(); for (const auto& sopt : kAllShareOptions) { @@ -1722,13 +1722,13 @@ TEST_F(BackupableDBTest, FlushCompactDuringBackupCheckpoint) { // That FillDB leaves a mix of flushed and unflushed data SyncPoint::GetInstance()->LoadDependency( {{"CheckpointImpl::CreateCustomCheckpoint:AfterGetLive1", - "BackupableDBTest::FlushCompactDuringBackupCheckpoint:Before"}, - {"BackupableDBTest::FlushCompactDuringBackupCheckpoint:After", + "BackupEngineTest::FlushCompactDuringBackupCheckpoint:Before"}, + {"BackupEngineTest::FlushCompactDuringBackupCheckpoint:After", "CheckpointImpl::CreateCustomCheckpoint:AfterGetLive2"}}); SyncPoint::GetInstance()->EnableProcessing(); ROCKSDB_NAMESPACE::port::Thread flush_thread{[this]() { TEST_SYNC_POINT( - "BackupableDBTest::FlushCompactDuringBackupCheckpoint:Before"); + "BackupEngineTest::FlushCompactDuringBackupCheckpoint:Before"); FillDB(db_.get(), keys_iteration, 2 * keys_iteration); ASSERT_OK(db_->Flush(FlushOptions())); DBImpl* dbi = static_cast(db_.get()); @@ -1736,7 +1736,7 @@ TEST_F(BackupableDBTest, FlushCompactDuringBackupCheckpoint) { ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); ASSERT_OK(dbi->TEST_WaitForCompact()); TEST_SYNC_POINT( - "BackupableDBTest::FlushCompactDuringBackupCheckpoint:After"); + "BackupEngineTest::FlushCompactDuringBackupCheckpoint:After"); }}; ASSERT_OK(backup_engine_->CreateNewBackup(db_.get())); flush_thread.join(); @@ -1774,7 +1774,7 @@ inline std::string OptionsPath(std::string ret, int backupID) { // Backup the LATEST options file to // "/private//OPTIONS" -TEST_F(BackupableDBTest, BackupOptions) { +TEST_F(BackupEngineTest, BackupOptions) { OpenDBAndBackupEngine(true); for (int i = 1; i < 5; i++) { std::string name; @@ -1799,17 +1799,17 @@ TEST_F(BackupableDBTest, BackupOptions) { CloseDBAndBackupEngine(); } -TEST_F(BackupableDBTest, SetOptionsBackupRaceCondition) { +TEST_F(BackupEngineTest, SetOptionsBackupRaceCondition) { OpenDBAndBackupEngine(true); SyncPoint::GetInstance()->LoadDependency( {{"CheckpointImpl::CreateCheckpoint:SavedLiveFiles1", - "BackupableDBTest::SetOptionsBackupRaceCondition:BeforeSetOptions"}, - {"BackupableDBTest::SetOptionsBackupRaceCondition:AfterSetOptions", + "BackupEngineTest::SetOptionsBackupRaceCondition:BeforeSetOptions"}, + {"BackupEngineTest::SetOptionsBackupRaceCondition:AfterSetOptions", "CheckpointImpl::CreateCheckpoint:SavedLiveFiles2"}}); SyncPoint::GetInstance()->EnableProcessing(); ROCKSDB_NAMESPACE::port::Thread setoptions_thread{[this]() { TEST_SYNC_POINT( - 
"BackupableDBTest::SetOptionsBackupRaceCondition:BeforeSetOptions"); + "BackupEngineTest::SetOptionsBackupRaceCondition:BeforeSetOptions"); DBImpl* dbi = static_cast(db_.get()); // Change arbitrary option to trigger OPTIONS file deletion ASSERT_OK(dbi->SetOptions(dbi->DefaultColumnFamily(), @@ -1819,7 +1819,7 @@ TEST_F(BackupableDBTest, SetOptionsBackupRaceCondition) { ASSERT_OK(dbi->SetOptions(dbi->DefaultColumnFamily(), {{"paranoid_file_checks", "false"}})); TEST_SYNC_POINT( - "BackupableDBTest::SetOptionsBackupRaceCondition:AfterSetOptions"); + "BackupEngineTest::SetOptionsBackupRaceCondition:AfterSetOptions"); }}; ASSERT_OK(backup_engine_->CreateNewBackup(db_.get())); setoptions_thread.join(); @@ -1828,7 +1828,7 @@ TEST_F(BackupableDBTest, SetOptionsBackupRaceCondition) { // This test verifies we don't delete the latest backup when read-only option is // set -TEST_F(BackupableDBTest, NoDeleteWithReadOnly) { +TEST_F(BackupEngineTest, NoDeleteWithReadOnly) { const int keys_iteration = 5000; Random rnd(6); @@ -1860,7 +1860,7 @@ TEST_F(BackupableDBTest, NoDeleteWithReadOnly) { delete read_only_backup_engine; } -TEST_F(BackupableDBTest, FailOverwritingBackups) { +TEST_F(BackupEngineTest, FailOverwritingBackups) { options_.write_buffer_size = 1024 * 1024 * 1024; // 1GB options_.disable_auto_compactions = true; @@ -1898,7 +1898,7 @@ TEST_F(BackupableDBTest, FailOverwritingBackups) { CloseDBAndBackupEngine(); } -TEST_F(BackupableDBTest, NoShareTableFiles) { +TEST_F(BackupEngineTest, NoShareTableFiles) { const int keys_iteration = 5000; OpenDBAndBackupEngine(true, false, kNoShare); for (int i = 0; i < 5; ++i) { @@ -1914,7 +1914,7 @@ TEST_F(BackupableDBTest, NoShareTableFiles) { } // Verify that you can backup and restore with share_files_with_checksum on -TEST_F(BackupableDBTest, ShareTableFilesWithChecksums) { +TEST_F(BackupEngineTest, ShareTableFilesWithChecksums) { const int keys_iteration = 5000; OpenDBAndBackupEngine(true, false, kShareWithChecksum); for (int i = 0; i < 5; ++i) { @@ -1931,7 +1931,7 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksums) { // Verify that you can backup and restore using share_files_with_checksum set to // false and then transition this option to true -TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsTransition) { +TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsTransition) { const int keys_iteration = 5000; // set share_files_with_checksum to false OpenDBAndBackupEngine(true, false, kShareNoChecksum); @@ -1973,7 +1973,7 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsTransition) { } // Verify backup and restore with various naming options, check names -TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNaming) { +TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNaming) { ASSERT_TRUE(backupable_options_->share_files_with_checksum_naming == kNamingDefault); @@ -2017,7 +2017,7 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNaming) { // Mimic SST file generated by pre-6.12 releases and verify that // old names are always used regardless of naming option. -TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsOldFileNaming) { +TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsOldFileNaming) { const int keys_iteration = 5000; // Pre-6.12 release did not include db id and db session id properties. 
@@ -2058,7 +2058,7 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsOldFileNaming) { // Test how naming options interact with detecting DB corruption // between incremental backups -TEST_F(BackupableDBTest, TableFileCorruptionBeforeIncremental) { +TEST_F(BackupEngineTest, TableFileCorruptionBeforeIncremental) { const auto share_no_checksum = static_cast(0); for (bool corrupt_before_first_backup : {false, true}) { @@ -2166,7 +2166,7 @@ TEST_F(BackupableDBTest, TableFileCorruptionBeforeIncremental) { // Test how naming options interact with detecting file size corruption // between incremental backups -TEST_F(BackupableDBTest, FileSizeForIncremental) { +TEST_F(BackupEngineTest, FileSizeForIncremental) { const auto share_no_checksum = static_cast(0); // TODO: enable blob files once Integrated BlobDB supports DB session id. options_.enable_blob_files = false; @@ -2292,7 +2292,7 @@ TEST_F(BackupableDBTest, FileSizeForIncremental) { // Verify backup and restore with share_files_with_checksum off and then // transition this option to on and share_files_with_checksum_naming to be // based on kUseDbSessionId -TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNamingTransition) { +TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNamingTransition) { const int keys_iteration = 5000; // We may set share_files_with_checksum_naming to kLegacyCrc32cAndFileSize // here but even if we don't, it should have no effect when @@ -2366,7 +2366,7 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNamingTransition) { // Verify backup and restore with share_files_with_checksum on and transition // from kLegacyCrc32cAndFileSize to kUseDbSessionId -TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNamingUpgrade) { +TEST_F(BackupEngineTest, ShareTableFilesWithChecksumsNewNamingUpgrade) { backupable_options_->share_files_with_checksum_naming = kLegacyCrc32cAndFileSize; const int keys_iteration = 5000; @@ -2431,7 +2431,7 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNamingUpgrade) { // This test simulates cleaning up after aborted or incomplete creation // of a new backup. 
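
The incremental-backup tests above compare file counts and sizes across successive backups. Listing that information through the public API looks roughly like the sketch below; the BackupInfo fields used (backup_id, number_files, size) are long-standing public fields, and the path handling is illustrative only:

#include <cstdio>
#include <string>
#include <vector>

#include "rocksdb/env.h"
#include "rocksdb/utilities/backup_engine.h"

using namespace ROCKSDB_NAMESPACE;

// Print id, file count, and total size for every backup in a directory.
// With table-file sharing enabled, a later (incremental) backup mostly
// re-references files from earlier backups, so its unique footprint is small.
void DumpBackupSizes(const std::string& backup_dir) {
  BackupEngineOptions opts(backup_dir);
  BackupEngine* engine = nullptr;
  if (!BackupEngine::Open(opts, Env::Default(), &engine).ok()) {
    return;
  }
  std::vector<BackupInfo> backups;
  engine->GetBackupInfo(&backups);  // sorted by ascending backup id
  for (const BackupInfo& b : backups) {
    std::printf("backup %u: %u files, %llu bytes\n",
                static_cast<unsigned>(b.backup_id),
                static_cast<unsigned>(b.number_files),
                static_cast<unsigned long long>(b.size));
  }
  delete engine;
}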
-TEST_F(BackupableDBTest, DeleteTmpFiles) { +TEST_F(BackupEngineTest, DeleteTmpFiles) { for (int cleanup_fn : {1, 2, 3, 4}) { for (ShareOption shared_option : kAllShareOptions) { OpenDBAndBackupEngine(false /* destroy_old_data */, false /* dummy */, @@ -2509,7 +2509,7 @@ TEST_F(BackupableDBTest, DeleteTmpFiles) { } } -TEST_F(BackupableDBTest, KeepLogFiles) { +TEST_F(BackupEngineTest, KeepLogFiles) { backupable_options_->backup_log_files = false; // basically infinite options_.WAL_ttl_seconds = 24 * 60 * 60; @@ -2526,20 +2526,20 @@ TEST_F(BackupableDBTest, KeepLogFiles) { AssertBackupConsistency(0, 0, 500, 600, true); } -class BackupableDBRateLimitingTestWithParam - : public BackupableDBTest, +class BackupEngineRateLimitingTestWithParam + : public BackupEngineTest, public testing::WithParamInterface< std::tuple /* limits */>> { public: - BackupableDBRateLimitingTestWithParam() {} + BackupEngineRateLimitingTestWithParam() {} }; uint64_t const MB = 1024 * 1024; INSTANTIATE_TEST_CASE_P( - RateLimiting, BackupableDBRateLimitingTestWithParam, + RateLimiting, BackupEngineRateLimitingTestWithParam, ::testing::Values(std::make_tuple(false, 0, std::make_pair(1 * MB, 5 * MB)), std::make_tuple(false, 0, std::make_pair(2 * MB, 3 * MB)), std::make_tuple(false, 1, std::make_pair(1 * MB, 5 * MB)), @@ -2550,7 +2550,7 @@ INSTANTIATE_TEST_CASE_P( std::make_tuple(true, 1, std::make_pair(2 * MB, 3 * MB)))); -TEST_P(BackupableDBRateLimitingTestWithParam, RateLimiting) { +TEST_P(BackupEngineRateLimitingTestWithParam, RateLimiting) { size_t const kMicrosPerSec = 1000 * 1000LL; std::shared_ptr backupThrottler(NewGenericRateLimiter(1)); @@ -2601,7 +2601,7 @@ TEST_P(BackupableDBRateLimitingTestWithParam, RateLimiting) { AssertBackupConsistency(0, 0, 100000, 100010); } -TEST_F(BackupableDBTest, ReadOnlyBackupEngine) { +TEST_F(BackupEngineTest, ReadOnlyBackupEngine) { DestroyDB(dbname_, options_); OpenDBAndBackupEngine(true); FillDB(db_.get(), 0, 100); @@ -2633,7 +2633,7 @@ TEST_F(BackupableDBTest, ReadOnlyBackupEngine) { delete db; } -TEST_F(BackupableDBTest, OpenBackupAsReadOnlyDB) { +TEST_F(BackupEngineTest, OpenBackupAsReadOnlyDB) { DestroyDB(dbname_, options_); options_.write_dbid_to_manifest = false; @@ -2699,7 +2699,7 @@ TEST_F(BackupableDBTest, OpenBackupAsReadOnlyDB) { ASSERT_TRUE(DB::Open(opts, name, &db).IsIOError()); } -TEST_F(BackupableDBTest, ProgressCallbackDuringBackup) { +TEST_F(BackupEngineTest, ProgressCallbackDuringBackup) { DestroyDB(dbname_, options_); OpenDBAndBackupEngine(true); FillDB(db_.get(), 0, 100); @@ -2713,7 +2713,7 @@ TEST_F(BackupableDBTest, ProgressCallbackDuringBackup) { DestroyDB(dbname_, options_); } -TEST_F(BackupableDBTest, GarbageCollectionBeforeBackup) { +TEST_F(BackupEngineTest, GarbageCollectionBeforeBackup) { DestroyDB(dbname_, options_); OpenDBAndBackupEngine(true); @@ -2739,7 +2739,7 @@ TEST_F(BackupableDBTest, GarbageCollectionBeforeBackup) { } // Test that we properly propagate Env failures -TEST_F(BackupableDBTest, EnvFailures) { +TEST_F(BackupEngineTest, EnvFailures) { BackupEngine* backup_engine; // get children failure @@ -2792,7 +2792,7 @@ TEST_F(BackupableDBTest, EnvFailures) { // Verify manifest can roll while a backup is being created with the old // manifest. 
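
The rate-limiting tests renamed above are parameterized over both the legacy numeric limits and RateLimiter objects. A sketch of setting both on the options; the byte rates are arbitrary figures, not values taken from this patch:

#include <memory>

#include "rocksdb/rate_limiter.h"
#include "rocksdb/utilities/backup_engine.h"

using namespace ROCKSDB_NAMESPACE;

// Throttle backup and restore I/O. The plain *_rate_limit fields are the
// legacy knobs; when the corresponding RateLimiter object is non-null it
// takes precedence, which is one axis of the parameterized tests above.
void ConfigureRateLimits(BackupEngineOptions* opts) {
  opts->backup_rate_limit = 2 * 1024 * 1024;   // 2 MB/s while backing up
  opts->restore_rate_limit = 3 * 1024 * 1024;  // 3 MB/s while restoring
  // Object-based limiters (preferred when present):
  opts->backup_rate_limiter.reset(NewGenericRateLimiter(2 * 1024 * 1024));
  opts->restore_rate_limiter.reset(NewGenericRateLimiter(3 * 1024 * 1024));
}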
-TEST_F(BackupableDBTest, ChangeManifestDuringBackupCreation) { +TEST_F(BackupEngineTest, ChangeManifestDuringBackupCreation) { DestroyDB(dbname_, options_); options_.max_manifest_file_size = 0; // always rollover manifest for file add OpenDBAndBackupEngine(true); @@ -2831,7 +2831,7 @@ TEST_F(BackupableDBTest, ChangeManifestDuringBackupCreation) { } // see https://github.com/facebook/rocksdb/issues/921 -TEST_F(BackupableDBTest, Issue921Test) { +TEST_F(BackupEngineTest, Issue921Test) { BackupEngine* backup_engine; backupable_options_->share_table_files = false; ASSERT_OK( @@ -2843,7 +2843,7 @@ TEST_F(BackupableDBTest, Issue921Test) { delete backup_engine; } -TEST_F(BackupableDBTest, BackupWithMetadata) { +TEST_F(BackupEngineTest, BackupWithMetadata) { const int keys_iteration = 5000; OpenDBAndBackupEngine(true); // create five backups @@ -2881,7 +2881,7 @@ TEST_F(BackupableDBTest, BackupWithMetadata) { DestroyDB(dbname_, options_); } -TEST_F(BackupableDBTest, BinaryMetadata) { +TEST_F(BackupEngineTest, BinaryMetadata) { OpenDBAndBackupEngine(true); std::string binaryMetadata = "abc\ndef"; binaryMetadata.push_back('\0'); @@ -2899,7 +2899,7 @@ TEST_F(BackupableDBTest, BinaryMetadata) { DestroyDB(dbname_, options_); } -TEST_F(BackupableDBTest, MetadataTooLarge) { +TEST_F(BackupEngineTest, MetadataTooLarge) { OpenDBAndBackupEngine(true); std::string largeMetadata(1024 * 1024 + 1, 0); ASSERT_NOK( @@ -2908,7 +2908,7 @@ TEST_F(BackupableDBTest, MetadataTooLarge) { DestroyDB(dbname_, options_); } -TEST_F(BackupableDBTest, FutureMetaSchemaVersion2_SizeCorruption) { +TEST_F(BackupEngineTest, FutureMetaSchemaVersion2_SizeCorruption) { OpenDBAndBackupEngine(true); // Backup 1: no future schema, no sizes, with checksums @@ -2971,7 +2971,7 @@ TEST_F(BackupableDBTest, FutureMetaSchemaVersion2_SizeCorruption) { CloseBackupEngine(); } -TEST_F(BackupableDBTest, FutureMetaSchemaVersion2_NotSupported) { +TEST_F(BackupEngineTest, FutureMetaSchemaVersion2_NotSupported) { TEST_FutureSchemaVersion2Options test_opts; std::string app_metadata = "abc\ndef"; @@ -3023,7 +3023,7 @@ TEST_F(BackupableDBTest, FutureMetaSchemaVersion2_NotSupported) { CloseBackupEngine(); } -TEST_F(BackupableDBTest, FutureMetaSchemaVersion2_Restore) { +TEST_F(BackupEngineTest, FutureMetaSchemaVersion2_Restore) { TEST_FutureSchemaVersion2Options test_opts; const int keys_iteration = 5000; @@ -3090,7 +3090,7 @@ TEST_F(BackupableDBTest, FutureMetaSchemaVersion2_Restore) { } } -TEST_F(BackupableDBTest, Concurrency) { +TEST_F(BackupEngineTest, Concurrency) { // Check that we can simultaneously: // * Run several read operations in different threads on a single // BackupEngine object, and @@ -3226,7 +3226,7 @@ TEST_F(BackupableDBTest, Concurrency) { CloseDBAndBackupEngine(); } -TEST_F(BackupableDBTest, LimitBackupsOpened) { +TEST_F(BackupEngineTest, LimitBackupsOpened) { // Verify the specified max backups are opened, including skipping over // corrupted backups. // @@ -3263,7 +3263,7 @@ TEST_F(BackupableDBTest, LimitBackupsOpened) { delete read_only_backup_engine; } -TEST_F(BackupableDBTest, IgnoreLimitBackupsOpenedWhenNotReadOnly) { +TEST_F(BackupEngineTest, IgnoreLimitBackupsOpenedWhenNotReadOnly) { // Verify the specified max_valid_backups_to_open is ignored if the engine // is not read-only. 
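
BackupWithMetadata, BinaryMetadata, and MetadataTooLarge above cover the application-metadata round trip. A sketch of that round trip against the public API; the label value is hypothetical, and the behavioral notes in the comment restate what those tests assert:

#include <cassert>
#include <string>
#include <vector>

#include "rocksdb/db.h"
#include "rocksdb/utilities/backup_engine.h"

using namespace ROCKSDB_NAMESPACE;

// Attach an application-defined string to a backup and read it back via
// BackupInfo::app_metadata. Binary data (embedded NUL and newline bytes)
// round-trips; strings larger than 1MB are rejected.
void BackupWithLabel(BackupEngine* engine, DB* db, const std::string& label) {
  assert(engine->CreateNewBackupWithMetadata(db, label).ok());

  std::vector<BackupInfo> backups;
  engine->GetBackupInfo(&backups);
  assert(!backups.empty());
  assert(backups.back().app_metadata == label);  // most recent backup
}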
// @@ -3297,7 +3297,7 @@ TEST_F(BackupableDBTest, IgnoreLimitBackupsOpenedWhenNotReadOnly) { DestroyDB(dbname_, options_); } -TEST_F(BackupableDBTest, CreateWhenLatestBackupCorrupted) { +TEST_F(BackupEngineTest, CreateWhenLatestBackupCorrupted) { // we should pick an ID greater than corrupted backups' IDs so creation can // succeed even when latest backup is corrupted. const int kNumKeys = 5000; @@ -3334,7 +3334,7 @@ TEST_F(BackupableDBTest, CreateWhenLatestBackupCorrupted) { backup_engine_->GetBackupInfo(999999U, &backup_info).IsNotFound()); } -TEST_F(BackupableDBTest, WriteOnlyEngineNoSharedFileDeletion) { +TEST_F(BackupEngineTest, WriteOnlyEngineNoSharedFileDeletion) { // Verifies a write-only BackupEngine does not delete files belonging to valid // backups when GarbageCollect, PurgeOldBackups, or DeleteBackup are called. const int kNumKeys = 5000; @@ -3366,7 +3366,7 @@ TEST_F(BackupableDBTest, WriteOnlyEngineNoSharedFileDeletion) { } } -TEST_P(BackupableDBTestWithParam, BackupUsingDirectIO) { +TEST_P(BackupEngineTestWithParam, BackupUsingDirectIO) { // Tests direct I/O on the backup engine's reads and writes on the DB env and // backup env // We use ChrootEnv underneath so the below line checks for direct I/O support @@ -3419,7 +3419,7 @@ TEST_P(BackupableDBTestWithParam, BackupUsingDirectIO) { } } -TEST_F(BackupableDBTest, BackgroundThreadCpuPriority) { +TEST_F(BackupEngineTest, BackgroundThreadCpuPriority) { std::atomic priority(CpuPriority::kNormal); ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack( "BackupEngineImpl::Initialize:SetCpuPriority", [&](void* new_priority) { @@ -3518,7 +3518,7 @@ int main(int argc, char** argv) { #include int main(int /*argc*/, char** /*argv*/) { - fprintf(stderr, "SKIPPED as BackupableDB is not supported in ROCKSDB_LITE\n"); + fprintf(stderr, "SKIPPED as BackupEngine is not supported in ROCKSDB_LITE\n"); return 0; }
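
Putting the rename together, the intended application-side migration is a header and type-name swap; the old names continue to compile through the compatibility alias. A minimal end-to-end sketch under assumed paths, ending with the read-only engine that the read-only tests above exercise:

#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "rocksdb/utilities/backup_engine.h"  // was: backupable_db.h

using namespace ROCKSDB_NAMESPACE;

int main() {
  Options options;
  options.create_if_missing = true;
  DB* db = nullptr;
  assert(DB::Open(options, "/tmp/mydb", &db).ok());

  // BackupEngineOptions is the new name for BackupableDBOptions.
  BackupEngineOptions backup_opts("/tmp/mydb_backups");
  BackupEngine* engine = nullptr;
  assert(BackupEngine::Open(backup_opts, Env::Default(), &engine).ok());
  assert(engine->CreateNewBackup(db).ok());
  assert(engine->PurgeOldBackups(/*num_backups_to_keep=*/2).ok());
  delete engine;
  delete db;

  // Restore elsewhere with the read-only engine, which can verify and
  // restore but never creates or deletes backups.
  BackupEngineReadOnly* ro_engine = nullptr;
  assert(
      BackupEngineReadOnly::Open(backup_opts, Env::Default(), &ro_engine).ok());
  assert(ro_engine->RestoreDBFromLatestBackup("/tmp/mydb_restored",
                                              "/tmp/mydb_restored")
             .ok());
  delete ro_engine;
  return 0;
}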