Add more tests to the ASC pass list (#7834)

Summary: Fixed the following to now pass ASC checks:
* `ttl_test`
* `blob_db_test`
* `backupable_db_test`
* `delete_scheduler_test`

Pull Request resolved: https://github.com/facebook/rocksdb/pull/7834
Reviewed By: jay-zhuang
Differential Revision: D25795398
Pulled By: ajkr
fbshipit-source-id: a10037817deda4fc7cbb353a2e00b62ed89b6476
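The pattern applied throughout the diff below is the one ASSERT_STATUS_CHECKED builds enforce: every Status returned by an API must be consumed before it is destroyed. A minimal sketch (not part of the commit) of the three recurring idioms — propagating an error, deliberately ignoring a best-effort call with PermitUncheckedError(), and consuming statuses in tests through ASSERT_OK/ASSERT_NOK — follows; DoWork and StatusCheckedDemo are hypothetical names used only for illustration:

#include "rocksdb/env.h"
#include "test_util/testharness.h"

using ROCKSDB_NAMESPACE::Env;
using ROCKSDB_NAMESPACE::Status;

// Hypothetical helper showing the idioms this commit applies.
Status DoWork(Env* env, const std::string& dir) {
  // Idiom 1: capture the Status and propagate it instead of dropping it.
  Status s = env->CreateDirIfMissing(dir);
  if (!s.ok()) {
    return s;
  }
  // Idiom 2: best-effort cleanup whose failure is acceptable; mark the
  // Status as deliberately ignored so checked builds do not flag it.
  env->DeleteFile(dir + "/leftover.tmp").PermitUncheckedError();
  return Status::OK();
}

TEST(StatusCheckedDemo, DoWorkSucceeds) {
  // Idiom 3: tests consume every Status via ASSERT_OK / ASSERT_NOK.
  ASSERT_OK(DoWork(Env::Default(), "/tmp/asc_demo"));
}

With ASSERT_STATUS_CHECKED=1, a Status destroyed without ok()/PermitUncheckedError() aborts the test, which is why each hunk below either propagates, permits, or asserts on the returned value.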
This commit is contained in:
parent 8f7b6c8339
commit cc2a180d00

Makefile | 4
@@ -659,6 +659,7 @@ ifdef ASSERT_STATUS_CHECKED
   external_sst_file_basic_test \
   auto_roll_logger_test \
   file_indexer_test \
+  delete_scheduler_test \
   flush_job_test \
   hash_table_test \
   hash_test \
@@ -747,6 +748,9 @@ ifdef ASSERT_STATUS_CHECKED
   cuckoo_table_reader_test \
   memory_test \
   table_test \
+  backupable_db_test \
+  blob_db_test \
+  ttl_test \
   write_batch_test \
   write_batch_with_index_test \
@@ -98,12 +98,18 @@ Status ListColumnFamiliesHandler::ApplyVersionEdit(
 Status FileChecksumRetriever::ApplyVersionEdit(VersionEdit& edit,
                                                ColumnFamilyData** /*unused*/) {
   for (const auto& deleted_file : edit.GetDeletedFiles()) {
-    file_checksum_list_.RemoveOneFileChecksum(deleted_file.second);
+    Status s = file_checksum_list_.RemoveOneFileChecksum(deleted_file.second);
+    if (!s.ok()) {
+      return s;
+    }
   }
   for (const auto& new_file : edit.GetNewFiles()) {
-    file_checksum_list_.InsertOneFileChecksum(
+    Status s = file_checksum_list_.InsertOneFileChecksum(
         new_file.second.fd.GetNumber(), new_file.second.file_checksum,
         new_file.second.file_checksum_func_name);
+    if (!s.ok()) {
+      return s;
+    }
   }
   return Status::OK();
 }
@@ -51,6 +51,9 @@ DeleteScheduler::~DeleteScheduler() {
   if (bg_thread_) {
     bg_thread_->join();
   }
+  for (const auto& it : bg_errors_) {
+    it.second.PermitUncheckedError();
+  }
 }
 
 Status DeleteScheduler::DeleteFile(const std::string& file_path,
@@ -26,7 +26,7 @@ class SstFileManagerImpl;
 
 // DeleteScheduler allows the DB to enforce a rate limit on file deletion,
 // Instead of deleteing files immediately, files are marked as trash
-// and deleted in a background thread that apply sleep penlty between deletes
+// and deleted in a background thread that apply sleep penalty between deletes
 // if they are happening in a rate faster than rate_bytes_per_sec,
 //
 // Rate limiting can be turned off by setting rate_bytes_per_sec = 0, In this
@@ -48,7 +48,7 @@ class DeleteScheduler {
     MaybeCreateBackgroundThread();
   }
 
-  // Mark file as trash directory and schedule it's deletion. If force_bg is
+  // Mark file as trash directory and schedule its deletion. If force_bg is
   // set, it forces the file to always be deleted in the background thread,
   // except when rate limiting is disabled
   Status DeleteFile(const std::string& fname, const std::string& dir_to_sync,
@@ -78,7 +78,7 @@ class DeleteScheduler {
   static const std::string kTrashExtension;
   static bool IsTrashFile(const std::string& file_path);
 
-  // Check if there are any .trash filse in path, and schedule their deletion
+  // Check if there are any .trash files in path, and schedule their deletion
   // Or delete immediately if sst_file_manager is nullptr
   static Status CleanupDirectory(Env* env, SstFileManagerImpl* sfm,
                                  const std::string& path);
@@ -423,7 +423,9 @@ TEST_F(DeleteSchedulerTest, BackgroundError) {
   delete_scheduler_->WaitForEmptyTrash();
   auto bg_errors = delete_scheduler_->GetBackgroundErrors();
   ASSERT_EQ(bg_errors.size(), 10);
+  for (const auto& it : bg_errors) {
+    ASSERT_TRUE(it.second.IsPathNotFound());
+  }
   ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
 }
 
@@ -667,7 +669,7 @@ TEST_F(DeleteSchedulerTest, ImmediateDeleteOn25PercDBSize) {
   }
 
   for (std::string& file_name : generated_files) {
-    delete_scheduler_->DeleteFile(file_name, "");
+    ASSERT_OK(delete_scheduler_->DeleteFile(file_name, ""));
   }
 
   // When we end up with 26 files in trash we will start
@@ -215,9 +215,7 @@ class BackupEngineImpl : public BackupEngine {
 
     ~BackupMeta() {}
 
-    void RecordTimestamp() {
-      env_->GetCurrentTime(&timestamp_);
-    }
+    Status RecordTimestamp() { return env_->GetCurrentTime(&timestamp_); }
     int64_t GetTimestamp() const {
       return timestamp_;
     }
@@ -402,6 +400,16 @@ class BackupEngineImpl : public BackupEngine {
       std::string* db_session_id);
 
   struct CopyOrCreateResult {
+    ~CopyOrCreateResult() {
+      // The Status needs to be ignored here for two reasons.
+      // First, if the BackupEngineImpl shuts down with jobs outstanding, then
+      // it is possible that the Status in the future/promise is never read,
+      // resulting in an unchecked Status. Second, if there are items in the
+      // channel when the BackupEngineImpl is shutdown, these will also have
+      // Status that have not been checked. This
+      // TODO: Fix those issues so that the Status
+      status.PermitUncheckedError();
+    }
     uint64_t size;
     std::string checksum_hex;
     std::string db_id;
@@ -670,6 +678,9 @@ BackupEngineImpl::~BackupEngineImpl() {
     t.join();
   }
   LogFlush(options_.info_log);
+  for (const auto& it : corrupt_backups_) {
+    it.second.first.PermitUncheckedError();
+  }
 }
 
 Status BackupEngineImpl::Initialize() {
@@ -783,7 +794,9 @@ Status BackupEngineImpl::Initialize() {
   for (const auto& rel_dir :
        {GetSharedFileRel(), GetSharedFileWithChecksumRel()}) {
     const auto abs_dir = GetAbsolutePath(rel_dir);
-    InsertPathnameToSizeBytes(abs_dir, backup_env_, &abs_path_to_size);
+    // TODO: What to do on error?
+    InsertPathnameToSizeBytes(abs_dir, backup_env_, &abs_path_to_size)
+        .PermitUncheckedError();
   }
   // load the backups if any, until valid_backups_to_open of the latest
   // non-corrupted backups have been successfully opened.
@@ -801,11 +814,13 @@ Status BackupEngineImpl::Initialize() {
 
     // Insert files and their sizes in backup sub-directories
     // (private/backup_id) to abs_path_to_size
-    InsertPathnameToSizeBytes(
+    Status s = InsertPathnameToSizeBytes(
         GetAbsolutePath(GetPrivateFileRel(backup_iter->first)), backup_env_,
         &abs_path_to_size);
-    Status s = backup_iter->second->LoadFromFile(options_.backup_dir,
-                                                 abs_path_to_size);
+    if (s.ok()) {
+      s = backup_iter->second->LoadFromFile(options_.backup_dir,
+                                            abs_path_to_size);
+    }
     if (s.IsCorruption()) {
       ROCKS_LOG_INFO(options_.info_log, "Backup %u corrupted -- %s",
                      backup_iter->first, s.ToString().c_str());
@@ -961,7 +976,8 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
                          &backuped_file_infos_, backup_env_))));
   assert(ret.second == true);
   auto& new_backup = ret.first->second;
-  new_backup->RecordTimestamp();
+  // TODO: What should we do on error here?
+  new_backup->RecordTimestamp().PermitUncheckedError();
   new_backup->SetAppMetadata(app_metadata);
 
   auto start_backup = backup_env_->NowMicros();
@@ -986,7 +1002,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
 
   std::vector<BackupAfterCopyOrCreateWorkItem> backup_items_to_finish;
   // Add a CopyOrCreateWorkItem to the channel for each live file
-  db->DisableFileDeletions();
+  Status disabled = db->DisableFileDeletions();
   if (s.ok()) {
     CheckpointImpl checkpoint(db);
     uint64_t sequence_number = 0;
@@ -1091,8 +1107,9 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
   }
 
   // we copied all the files, enable file deletions
-  db->EnableFileDeletions(false);
+  if (disabled.ok()) {  // If we successfully disabled file deletions
+    db->EnableFileDeletions(false).PermitUncheckedError();
+  }
   auto backup_time = backup_env_->NowMicros() - start_backup;
 
   if (s.ok()) {
@@ -1133,7 +1150,7 @@ Status BackupEngineImpl::CreateNewBackupWithMetadata(
                    backup_statistics_.ToString().c_str());
     // delete files that we might have already written
     might_need_garbage_collect_ = true;
-    DeleteBackup(new_backup_id);
+    DeleteBackup(new_backup_id).PermitUncheckedError();
     return s;
   }
 
@@ -1201,6 +1218,7 @@ Status BackupEngineImpl::DeleteBackup(BackupID backup_id) {
   }
 
   if (!s1.ok()) {
+    s2.PermitUncheckedError();  // What to do?
     return s1;
   } else {
     return s2;
@@ -1229,6 +1247,7 @@ Status BackupEngineImpl::DeleteBackupInternal(BackupID backup_id) {
     if (!s.ok()) {
       return s;
     }
+    corrupt->second.first.PermitUncheckedError();
    corrupt_backups_.erase(corrupt);
   }
 
@@ -1310,8 +1329,8 @@ Status BackupEngineImpl::RestoreDBFromBackup(const RestoreOptions& options,
            static_cast<int>(options.keep_log_files));
 
   // just in case. Ignore errors
-  db_env_->CreateDirIfMissing(db_dir);
-  db_env_->CreateDirIfMissing(wal_dir);
+  db_env_->CreateDirIfMissing(db_dir).PermitUncheckedError();
+  db_env_->CreateDirIfMissing(wal_dir).PermitUncheckedError();
 
   if (options.keep_log_files) {
     // delete files in db_dir, but keep all the log files
@@ -1319,7 +1338,8 @@ Status BackupEngineImpl::RestoreDBFromBackup(const RestoreOptions& options,
     // move all the files from archive dir to wal_dir
     std::string archive_dir = ArchivalDirectory(wal_dir);
     std::vector<std::string> archive_files;
-    db_env_->GetChildren(archive_dir, &archive_files);  // ignore errors
+    db_env_->GetChildren(archive_dir, &archive_files)
+        .PermitUncheckedError();  // ignore errors
     for (const auto& f : archive_files) {
       uint64_t number;
       FileType type;
@@ -1439,7 +1459,9 @@ Status BackupEngineImpl::VerifyBackup(BackupID backup_id,
   for (const auto& rel_dir : {GetPrivateFileRel(backup_id), GetSharedFileRel(),
                               GetSharedFileWithChecksumRel()}) {
     const auto abs_dir = GetAbsolutePath(rel_dir);
-    InsertPathnameToSizeBytes(abs_dir, backup_env_, &curr_abs_path_to_size);
+    // TODO: What to do on error?
+    InsertPathnameToSizeBytes(abs_dir, backup_env_, &curr_abs_path_to_size)
+        .PermitUncheckedError();
   }
 
   // For all files registered in backup
@@ -1463,9 +1485,11 @@ Status BackupEngineImpl::VerifyBackup(BackupID backup_id,
       std::string checksum_hex;
       ROCKS_LOG_INFO(options_.info_log, "Verifying %s checksum...\n",
                      abs_path.c_str());
-      ReadFileAndComputeChecksum(abs_path, backup_env_, EnvOptions(),
+      Status s = ReadFileAndComputeChecksum(abs_path, backup_env_, EnvOptions(),
                                  0 /* size_limit */, &checksum_hex);
-      if (file_info->checksum_hex != checksum_hex) {
+      if (!s.ok()) {
+        return s;
+      } else if (file_info->checksum_hex != checksum_hex) {
         std::string checksum_info(
             "Expected checksum is " + file_info->checksum_hex +
             " while computed checksum is " + checksum_hex);
@@ -1589,7 +1613,6 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
 
   std::string dst_relative = fname.substr(1);
   std::string dst_relative_tmp;
-  Status s;
   std::string checksum_hex;
   std::string db_id;
   std::string db_session_id;
@@ -1622,15 +1645,16 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
     // Ignore the returned status
     // In the failed cases, db_id and db_session_id will be empty
     GetFileDbIdentities(db_env_, src_env_options, src_dir + fname, &db_id,
-                        &db_session_id);
+                        &db_session_id)
+        .PermitUncheckedError();
   }
   // Calculate checksum if checksum and db session id are not available.
   // If db session id is available, we will not calculate the checksum
   // since the session id should suffice to avoid file name collision in
   // the shared_checksum directory.
   if (!has_checksum && db_session_id.empty()) {
-    s = ReadFileAndComputeChecksum(src_dir + fname, db_env_, src_env_options,
-                                   size_limit, &checksum_hex);
+    Status s = ReadFileAndComputeChecksum(
+        src_dir + fname, db_env_, src_env_options, size_limit, &checksum_hex);
     if (!s.ok()) {
       return s;
     }
@@ -1692,7 +1716,6 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
     } else if (exist.IsNotFound()) {
       file_exists = false;
     } else {
-      assert(s.IsIOError());
       return exist;
     }
   }
@@ -1710,7 +1733,8 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
                  "overwrite the file.",
                  fname.c_str());
         need_to_copy = true;
-        backup_env_->DeleteFile(final_dest_path);
+        //**TODO: What to do on error?
+        backup_env_->DeleteFile(final_dest_path).PermitUncheckedError();
       } else {
         // file exists and referenced
         if (!has_checksum) {
@@ -1739,9 +1763,9 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
           // same_path should not happen for a standard DB, so OK to
           // read file contents to check for checksum mismatch between
           // two files from same DB getting same name.
-          s = ReadFileAndComputeChecksum(src_dir + fname, db_env_,
+          Status s = ReadFileAndComputeChecksum(src_dir + fname, db_env_,
                                          src_env_options, size_limit,
                                          &checksum_hex);
           if (!s.ok()) {
             return s;
           }
@@ -1783,14 +1807,14 @@ Status BackupEngineImpl::AddBackupFileWorkItem(
         temp_dest_path, final_dest_path, dst_relative);
     backup_items_to_finish.push_back(std::move(after_copy_or_create_work_item));
     CopyOrCreateResult result;
-    result.status = s;
+    result.status = Status::OK();
     result.size = size_bytes;
     result.checksum_hex = std::move(checksum_hex);
     result.db_id = std::move(db_id);
     result.db_session_id = std::move(db_session_id);
     promise_result.set_value(std::move(result));
   }
-  return s;
+  return Status::OK();
 }
 
 Status BackupEngineImpl::ReadFileAndComputeChecksum(
@@ -1893,7 +1917,7 @@ Status BackupEngineImpl::GetFileDbIdentities(Env* src_env,
 void BackupEngineImpl::DeleteChildren(const std::string& dir,
                                       uint32_t file_type_filter) {
   std::vector<std::string> children;
-  db_env_->GetChildren(dir, &children);  // ignore errors
+  db_env_->GetChildren(dir, &children).PermitUncheckedError();  // ignore errors
 
   for (const auto& f : children) {
     uint64_t number;
@@ -1903,7 +1927,7 @@ void BackupEngineImpl::DeleteChildren(const std::string& dir,
       // don't delete this file
       continue;
     }
-    db_env_->DeleteFile(dir + "/" + f);  // ignore errors
+    db_env_->DeleteFile(dir + "/" + f).PermitUncheckedError();  // ignore errors
   }
 }
 
@@ -2016,18 +2040,19 @@ Status BackupEngineImpl::GarbageCollect() {
       std::string full_private_path =
           GetAbsolutePath(GetPrivateFileRel(backup_id));
       std::vector<std::string> subchildren;
-      backup_env_->GetChildren(full_private_path, &subchildren);
+      if (backup_env_->GetChildren(full_private_path, &subchildren).ok()) {
       for (auto& subchild : subchildren) {
         if (subchild == "." || subchild == "..") {
          continue;
        }
        Status s = backup_env_->DeleteFile(full_private_path + subchild);
        ROCKS_LOG_INFO(options_.info_log, "Deleting %s -- %s",
                       (full_private_path + subchild).c_str(),
                       s.ToString().c_str());
        if (!s.ok()) {
          // Trying again later might work
          might_need_garbage_collect_ = true;
+        }
        }
      }
      // finally delete the private dir
@@ -408,8 +408,10 @@ class FileManager : public EnvWrapper {
   Status GetRandomFileInDir(const std::string& dir, std::string* fname,
                             uint64_t* fsize) {
     std::vector<FileAttributes> children;
-    GetChildrenFileAttributes(dir, &children);
-    if (children.size() <= 2) {  // . and ..
+    auto s = GetChildrenFileAttributes(dir, &children);
+    if (!s.ok()) {
+      return s;
+    } else if (children.size() <= 2) {  // . and ..
       return Status::NotFound("Empty directory: " + dir);
     }
     assert(fname != nullptr);
@@ -428,8 +430,10 @@ class FileManager : public EnvWrapper {
 
   Status DeleteRandomFileInDir(const std::string& dir) {
     std::vector<std::string> children;
-    GetChildren(dir, &children);
-    if (children.size() <= 2) {  // . and ..
+    Status s = GetChildren(dir, &children);
+    if (!s.ok()) {
+      return s;
+    } else if (children.size() <= 2) {  // . and ..
       return Status::NotFound("");
     }
     while (true) {
@@ -446,8 +450,10 @@ class FileManager : public EnvWrapper {
   Status AppendToRandomFileInDir(const std::string& dir,
                                  const std::string& data) {
     std::vector<std::string> children;
-    GetChildren(dir, &children);
-    if (children.size() <= 2) {
+    Status s = GetChildren(dir, &children);
+    if (!s.ok()) {
+      return s;
+    } else if (children.size() <= 2) {
      return Status::NotFound("");
    }
    while (true) {
@@ -617,8 +623,8 @@ class BackupableDBTest : public testing::Test {
     // set up files
     std::string db_chroot = test::PerThreadDBPath("backupable_db");
     std::string backup_chroot = test::PerThreadDBPath("backupable_db_backup");
-    Env::Default()->CreateDir(db_chroot);
-    Env::Default()->CreateDir(backup_chroot);
+    EXPECT_OK(Env::Default()->CreateDirIfMissing(db_chroot));
+    EXPECT_OK(Env::Default()->CreateDirIfMissing(backup_chroot));
     dbname_ = "/tempdb";
     backupdir_ = "/tempbk";
 
@@ -640,7 +646,10 @@ class BackupableDBTest : public testing::Test {
     // Create logger
     DBOptions logger_options;
     logger_options.env = db_chroot_env_.get();
-    CreateLoggerFromOptions(dbname_, logger_options, &logger_);
+    // TODO: This should really be an EXPECT_OK, but this CreateLogger fails
+    // regularly in some environments with "no such directory"
+    CreateLoggerFromOptions(dbname_, logger_options, &logger_)
+        .PermitUncheckedError();
 
     // set up backup db options
     backupable_options_.reset(new BackupableDBOptions(
@@ -748,13 +757,13 @@ class BackupableDBTest : public testing::Test {
 
   void DeleteLogFiles() {
     std::vector<std::string> delete_logs;
-    db_chroot_env_->GetChildren(dbname_, &delete_logs);
+    ASSERT_OK(db_chroot_env_->GetChildren(dbname_, &delete_logs));
     for (auto f : delete_logs) {
       uint64_t number;
       FileType type;
       bool ok = ParseFileName(f, &number, &type);
       if (ok && type == kWalFile) {
-        db_chroot_env_->DeleteFile(dbname_ + "/" + f);
+        ASSERT_OK(db_chroot_env_->DeleteFile(dbname_ + "/" + f));
       }
     }
   }
@@ -943,7 +952,6 @@ TEST_F(BackupableDBTest, FileCollision) {
 // invalid backups
 TEST_P(BackupableDBTestWithParam, VerifyBackup) {
   const int keys_iteration = 5000;
-  Status s;
   OpenDBAndBackupEngine(true);
   // create five backups
   for (int i = 0; i < 5; ++i) {
@@ -957,13 +965,13 @@ TEST_P(BackupableDBTestWithParam, VerifyBackup) {
   ASSERT_TRUE(backup_engine_->VerifyBackup(1).ok());
 
   // ---------- case 2. - delete a file -----------i
-  file_manager_->DeleteRandomFileInDir(backupdir_ + "/private/1");
+  ASSERT_OK(file_manager_->DeleteRandomFileInDir(backupdir_ + "/private/1"));
   ASSERT_TRUE(backup_engine_->VerifyBackup(1).IsNotFound());
 
   // ---------- case 3. - corrupt a file -----------
   std::string append_data = "Corrupting a random file";
-  file_manager_->AppendToRandomFileInDir(backupdir_ + "/private/2",
-                                         append_data);
+  ASSERT_OK(file_manager_->AppendToRandomFileInDir(backupdir_ + "/private/2",
+                                                   append_data));
   ASSERT_TRUE(backup_engine_->VerifyBackup(2).IsCorruption());
 
   // ---------- case 4. - invalid backup -----------
@@ -1139,9 +1147,11 @@ TEST_F(BackupableDBTest, NoDoubleCopy_And_AutoGC) {
 
     // MANIFEST file size should be only 100
     uint64_t size = 0;
-    test_backup_env_->GetFileSize(backupdir_ + "/private/2/MANIFEST-01", &size);
+    ASSERT_OK(test_backup_env_->GetFileSize(backupdir_ + "/private/2/MANIFEST-01",
+                                            &size));
     ASSERT_EQ(100UL, size);
-    test_backup_env_->GetFileSize(backupdir_ + "/shared/00015.sst", &size);
+    ASSERT_OK(
+        test_backup_env_->GetFileSize(backupdir_ + "/shared/00015.sst", &size));
     ASSERT_EQ(200UL, size);
 
     CloseBackupEngine();
@@ -1209,7 +1219,7 @@ TEST_F(BackupableDBTest, CorruptionsTest) {
   test_backup_env_->SetLimitWrittenFiles(2);
   // should fail
   s = backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2));
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(s);
   test_backup_env_->SetLimitWrittenFiles(1000000);
   // latest backup should have all the keys
   CloseDBAndBackupEngine();
@@ -1221,7 +1231,7 @@ TEST_F(BackupableDBTest, CorruptionsTest) {
   AssertBackupConsistency(0, 0, keys_iteration * 4, keys_iteration * 5);
   OpenBackupEngine();
   s = backup_engine_->RestoreDBFromBackup(5, dbname_, dbname_);
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(s);
   CloseBackupEngine();
   ASSERT_OK(file_manager_->DeleteRandomFileInDir(backupdir_ + "/private/4"));
   // 4 is corrupted, 3 is the latest backup now
@@ -1229,7 +1239,7 @@ TEST_F(BackupableDBTest, CorruptionsTest) {
   OpenBackupEngine();
   s = backup_engine_->RestoreDBFromBackup(4, dbname_, dbname_);
   CloseBackupEngine();
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(s);
 
   // --------- case 3. corrupted checksum value ----
   ASSERT_OK(file_manager_->CorruptChecksum(backupdir_ + "/meta/3", false));
@@ -1243,7 +1253,7 @@ TEST_F(BackupableDBTest, CorruptionsTest) {
   OpenBackupEngine();
   ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/2"));
   s = backup_engine_->RestoreDBFromBackup(2, dbname_, dbname_);
-  ASSERT_TRUE(!s.ok());
+  ASSERT_NOK(s);
 
   // make sure that no corrupt backups have actually been deleted!
   ASSERT_OK(file_manager_->FileExists(backupdir_ + "/meta/1"));
@@ -1280,7 +1290,6 @@ TEST_F(BackupableDBTest, CorruptionsTest) {
             file_manager_->FileExists(backupdir_ + "/meta/2"));
   ASSERT_EQ(Status::NotFound(),
             file_manager_->FileExists(backupdir_ + "/private/2"));
 
   CloseBackupEngine();
   AssertBackupConsistency(0, 0, keys_iteration * 1, keys_iteration * 5);
 
@@ -1295,7 +1304,6 @@ TEST_F(BackupableDBTest, CorruptionsTest) {
 // Corrupt a file but maintain its size
 TEST_F(BackupableDBTest, CorruptFileMaintainSize) {
   const int keys_iteration = 5000;
-  Status s;
   OpenDBAndBackupEngine(true);
   // create a backup
   FillDB(db_.get(), 0, keys_iteration);
@@ -1514,8 +1522,7 @@ TEST_F(BackupableDBTest, InterruptCreationTest) {
   test_backup_env_->SetLimitWrittenFiles(2);
   test_backup_env_->SetDeleteFileFailure(true);
   // should fail creation
-  ASSERT_FALSE(
-      backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)).ok());
+  ASSERT_NOK(backup_engine_->CreateNewBackup(db_.get(), !!(rnd.Next() % 2)));
   CloseDBAndBackupEngine();
   // should also fail cleanup so the tmp directory stays behind
   ASSERT_OK(backup_chroot_env_->FileExists(backupdir_ + "/private/1/"));
@@ -1548,9 +1555,9 @@ TEST_F(BackupableDBTest, FlushCompactDuringBackupCheckpoint) {
         FillDB(db_.get(), keys_iteration, 2 * keys_iteration);
         ASSERT_OK(db_->Flush(FlushOptions()));
         DBImpl* dbi = static_cast<DBImpl*>(db_.get());
-        dbi->TEST_WaitForFlushMemTable();
+        ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
         ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
-        dbi->TEST_WaitForCompact();
+        ASSERT_OK(dbi->TEST_WaitForCompact());
         TEST_SYNC_POINT(
             "BackupableDBTest::FlushCompactDuringBackupCheckpoint:After");
       }};
@@ -1600,10 +1607,11 @@ TEST_F(BackupableDBTest, BackupOptions) {
     db_.reset();
     db_.reset(OpenDB());
     ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
-    ROCKSDB_NAMESPACE::GetLatestOptionsFileName(db_->GetName(), options_.env,
-                                                &name);
+    ASSERT_OK(ROCKSDB_NAMESPACE::GetLatestOptionsFileName(db_->GetName(),
+                                                          options_.env, &name));
     ASSERT_OK(file_manager_->FileExists(OptionsPath(backupdir_, i) + name));
-    backup_chroot_env_->GetChildren(OptionsPath(backupdir_, i), &filenames);
+    ASSERT_OK(backup_chroot_env_->GetChildren(OptionsPath(backupdir_, i),
+                                              &filenames));
     for (auto fn : filenames) {
       if (fn.compare(0, 7, "OPTIONS") == 0) {
         ASSERT_EQ(name, fn);
@@ -1646,7 +1654,6 @@ TEST_F(BackupableDBTest, SetOptionsBackupRaceCondition) {
 TEST_F(BackupableDBTest, NoDeleteWithReadOnly) {
   const int keys_iteration = 5000;
   Random rnd(6);
-  Status s;
 
   OpenDBAndBackupEngine(true);
   // create five backups
@@ -1777,8 +1784,8 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsTransition) {
   // For an extra challenge, make sure that GarbageCollect / DeleteBackup
   // is OK even if we open without share_table_files
   OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare);
-  backup_engine_->DeleteBackup(1);
-  backup_engine_->GarbageCollect();
+  ASSERT_OK(backup_engine_->DeleteBackup(1));
+  ASSERT_OK(backup_engine_->GarbageCollect());
   CloseDBAndBackupEngine();
 
   // Verify rest (not deleted)
@@ -1939,7 +1946,7 @@ TEST_F(BackupableDBTest, TableFileCorruptionBeforeIncremental) {
     // And a bigger one
     ASSERT_OK(dbi->Put(WriteOptions(), "y", Random(42).RandomString(500)));
     ASSERT_OK(dbi->Flush(FlushOptions()));
-    dbi->TEST_WaitForFlushMemTable();
+    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
     CloseDBAndBackupEngine();
 
     std::vector<FileAttributes> table_files;
@@ -2198,8 +2205,8 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNamingTransition) {
   ASSERT_TRUE(backupable_options_->share_files_with_checksum_naming ==
               kNamingDefault);
   OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare);
-  backup_engine_->DeleteBackup(1);
-  backup_engine_->GarbageCollect();
+  ASSERT_OK(backup_engine_->DeleteBackup(1));
+  ASSERT_OK(backup_engine_->GarbageCollect());
   CloseDBAndBackupEngine();
 
   // Verify second (about to delete)
@@ -2211,8 +2218,8 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNamingTransition) {
   backupable_options_->share_files_with_checksum_naming =
       kLegacyCrc32cAndFileSize;
   OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare);
-  backup_engine_->DeleteBackup(2);
-  backup_engine_->GarbageCollect();
+  ASSERT_OK(backup_engine_->DeleteBackup(2));
+  ASSERT_OK(backup_engine_->GarbageCollect());
   CloseDBAndBackupEngine();
 
   // Verify rest (not deleted)
@@ -2263,8 +2270,8 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNamingUpgrade) {
   // For an extra challenge, make sure that GarbageCollect / DeleteBackup
   // is OK even if we open without share_table_files
   OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare);
-  backup_engine_->DeleteBackup(1);
-  backup_engine_->GarbageCollect();
+  ASSERT_OK(backup_engine_->DeleteBackup(1));
+  ASSERT_OK(backup_engine_->GarbageCollect());
   CloseDBAndBackupEngine();
 
   // Verify second (about to delete)
@@ -2276,8 +2283,8 @@ TEST_F(BackupableDBTest, ShareTableFilesWithChecksumsNewNamingUpgrade) {
   backupable_options_->share_files_with_checksum_naming =
       kLegacyCrc32cAndFileSize;
   OpenDBAndBackupEngine(false /* destroy_old_data */, false, kNoShare);
-  backup_engine_->DeleteBackup(2);
-  backup_engine_->GarbageCollect();
+  ASSERT_OK(backup_engine_->DeleteBackup(2));
+  ASSERT_OK(backup_engine_->GarbageCollect());
   CloseDBAndBackupEngine();
 
   // Verify rest (not deleted)
@@ -2322,11 +2329,11 @@ TEST_F(BackupableDBTest, DeleteTmpFiles) {
            std::make_pair(next_private, std::string("00003.sst")),
        }) {
     std::string dir = backupdir_ + "/" + dir_and_file.first;
-    file_manager_->CreateDir(dir);
+    ASSERT_OK(file_manager_->CreateDirIfMissing(dir));
     ASSERT_OK(file_manager_->FileExists(dir));
 
     std::string file = dir + "/" + dir_and_file.second;
-    file_manager_->WriteToFile(file, "tmp");
+    ASSERT_OK(file_manager_->WriteToFile(file, "tmp"));
     ASSERT_OK(file_manager_->FileExists(file));
 
     tmp_files_and_dirs.push_back(file);
@@ -2509,7 +2516,7 @@ TEST_F(BackupableDBTest, GarbageCollectionBeforeBackup) {
   DestroyDB(dbname_, options_);
   OpenDBAndBackupEngine(true);
 
-  backup_chroot_env_->CreateDirIfMissing(backupdir_ + "/shared");
+  ASSERT_OK(backup_chroot_env_->CreateDirIfMissing(backupdir_ + "/shared"));
   std::string file_five = backupdir_ + "/shared/000008.sst";
   std::string file_five_contents = "I'm not really a sst file";
   // this depends on the fact that 00008.sst is the first file created by the DB
@@ -2517,7 +2524,7 @@ TEST_F(BackupableDBTest, GarbageCollectionBeforeBackup) {
 
   FillDB(db_.get(), 0, 100);
   // backup overwrites file 000008.sst
-  ASSERT_TRUE(backup_engine_->CreateNewBackup(db_.get(), true).ok());
+  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
 
   std::string new_file_five_contents;
   ASSERT_OK(ReadFileToString(backup_chroot_env_.get(), file_five,
@@ -2563,7 +2570,7 @@ TEST_F(BackupableDBTest, EnvFailures) {
   DestroyDB(dbname_, options_);
   OpenDBAndBackupEngine(true);
   FillDB(db_.get(), 0, 100);
-  ASSERT_TRUE(backup_engine_->CreateNewBackup(db_.get(), true).ok());
+  ASSERT_OK(backup_engine_->CreateNewBackup(db_.get(), true));
   CloseDBAndBackupEngine();
   test_backup_env_->SetDummySequentialFile(true);
   test_backup_env_->SetDummySequentialFileFailReads(true);
@@ -2626,7 +2633,8 @@ TEST_F(BackupableDBTest, ChangeManifestDuringBackupCreation) {
 TEST_F(BackupableDBTest, Issue921Test) {
   BackupEngine* backup_engine;
   backupable_options_->share_table_files = false;
-  backup_chroot_env_->CreateDirIfMissing(backupable_options_->backup_dir);
+  ASSERT_OK(
+      backup_chroot_env_->CreateDirIfMissing(backupable_options_->backup_dir));
   backupable_options_->backup_dir += "/new_dir";
   ASSERT_OK(BackupEngine::Open(backup_chroot_env_.get(), *backupable_options_,
                                &backup_engine));
@@ -2814,6 +2822,7 @@ TEST_P(BackupableDBTestWithParam, BackupUsingDirectIO) {
   // We use ChrootEnv underneath so the below line checks for direct I/O support
   // in the chroot directory, not the true filesystem root.
   if (!test::IsDirectIOSupported(test_db_env_.get(), "/")) {
+    ROCKSDB_GTEST_SKIP("Test requires Direct I/O Support");
     return;
   }
   const int kNumKeysPerBackup = 100;
@@ -2055,21 +2055,21 @@ Status DestroyBlobDB(const std::string& dbname, const Options& options,
                             : bdb_options.blob_dir;
 
   std::vector<std::string> filenames;
-  env->GetChildren(blobdir, &filenames);
-
-  for (const auto& f : filenames) {
-    uint64_t number;
-    FileType type;
-    if (ParseFileName(f, &number, &type) && type == kBlobFile) {
-      Status del = DeleteDBFile(&soptions, blobdir + "/" + f, blobdir, true,
-                                /*force_fg=*/false);
-      if (status.ok() && !del.ok()) {
-        status = del;
+  if (env->GetChildren(blobdir, &filenames).ok()) {
+    for (const auto& f : filenames) {
+      uint64_t number;
+      FileType type;
+      if (ParseFileName(f, &number, &type) && type == kBlobFile) {
+        Status del = DeleteDBFile(&soptions, blobdir + "/" + f, blobdir, true,
+                                  /*force_fg=*/false);
+        if (status.ok() && !del.ok()) {
+          status = del;
+        }
       }
     }
+    // TODO: What to do if we cannot delete the directory?
+    env->DeleteDir(blobdir).PermitUncheckedError();
   }
-  env->DeleteDir(blobdir);
 
   Status destroy = DestroyDB(dbname, options);
   if (status.ok() && !destroy.ok()) {
     status = destroy;
@@ -236,7 +236,7 @@ class BlobDBTest : public testing::Test {
     DB *db = blob_db_->GetRootDB();
     const size_t kMaxKeys = 10000;
     std::vector<KeyVersion> versions;
-    GetAllKeyVersions(db, "", "", kMaxKeys, &versions);
+    ASSERT_OK(GetAllKeyVersions(db, "", "", kMaxKeys, &versions));
     ASSERT_EQ(expected_versions.size(), versions.size());
     size_t i = 0;
     for (auto &key_version : expected_versions) {
@@ -412,8 +412,8 @@ TEST_F(BlobDBTest, GetExpiration) {
   bdb_options.disable_background_tasks = true;
   mock_env_->set_current_time(100);
   Open(bdb_options, options);
-  Put("key1", "value1");
-  PutWithTTL("key2", "value2", 200);
+  ASSERT_OK(Put("key1", "value1"));
+  ASSERT_OK(PutWithTTL("key2", "value2", 200));
   PinnableSlice value;
   uint64_t expiration;
   ASSERT_OK(blob_db_->Get(ReadOptions(), "key1", &value, &expiration));
@@ -466,7 +466,8 @@ TEST_F(BlobDBTest, WriteBatch) {
     for (size_t j = 0; j < 10; j++) {
       PutRandomToWriteBatch("key" + ToString(j * 100 + i), &rnd, &batch, &data);
     }
-    blob_db_->Write(WriteOptions(), &batch);
+
+    ASSERT_OK(blob_db_->Write(WriteOptions(), &batch));
   }
   VerifyDB(data);
 }
@@ -498,7 +499,7 @@ TEST_F(BlobDBTest, DeleteBatch) {
   }
   WriteBatch batch;
   for (size_t i = 0; i < 100; i++) {
-    batch.Delete("key" + ToString(i));
+    ASSERT_OK(batch.Delete("key" + ToString(i)));
   }
   ASSERT_OK(blob_db_->Write(WriteOptions(), &batch));
   // DB should be empty.
@@ -540,7 +541,7 @@ TEST_F(BlobDBTest, Compression) {
       PutRandomToWriteBatch("write-batch-key" + ToString(j * 100 + i), &rnd,
                             &batch, &data);
     }
-    blob_db_->Write(WriteOptions(), &batch);
+    ASSERT_OK(blob_db_->Write(WriteOptions(), &batch));
   }
   VerifyDB(data);
 }
@@ -732,7 +733,7 @@ TEST_F(BlobDBTest, MultipleWriters) {
         } else {
           WriteBatch batch;
           PutRandomToWriteBatch(key, &rnd, &batch, &data_set[id]);
-          blob_db_->Write(WriteOptions(), &batch);
+          ASSERT_OK(blob_db_->Write(WriteOptions(), &batch));
         }
       }
     },
@@ -775,7 +776,7 @@ TEST_F(BlobDBTest, SstFileManager) {
   Open(bdb_options, db_options);
 
   // Create one obselete file and clean it.
-  blob_db_->Put(WriteOptions(), "foo", "bar");
+  ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "bar"));
   auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
   ASSERT_EQ(1, blob_files.size());
   std::shared_ptr<BlobFile> bfile = blob_files[0];
@@ -820,14 +821,15 @@ TEST_F(BlobDBTest, SstFileManagerRestart) {
 
   Open(bdb_options, db_options);
   std::string blob_dir = blob_db_impl()->TEST_blob_dir();
-  blob_db_->Put(WriteOptions(), "foo", "bar");
+  ASSERT_OK(blob_db_->Put(WriteOptions(), "foo", "bar"));
   Close();
 
   // Create 3 dummy trash files under the blob_dir
   const auto &fs = db_options.env->GetFileSystem();
-  CreateFile(fs, blob_dir + "/000666.blob.trash", "", false);
-  CreateFile(fs, blob_dir + "/000888.blob.trash", "", true);
-  CreateFile(fs, blob_dir + "/something_not_match.trash", "", false);
+  ASSERT_OK(CreateFile(fs, blob_dir + "/000666.blob.trash", "", false));
+  ASSERT_OK(CreateFile(fs, blob_dir + "/000888.blob.trash", "", true));
+  ASSERT_OK(
+      CreateFile(fs, blob_dir + "/something_not_match.trash", "", false));
 
   // Make sure that reopening the DB rescan the existing trash files
   Open(bdb_options, db_options);
@@ -938,8 +940,8 @@ TEST_F(BlobDBTest, ColumnFamilyNotSupported) {
   ASSERT_TRUE(blob_db_->PutUntil(WriteOptions(), handle, "k", "v", 100)
                   .IsNotSupported());
   WriteBatch batch;
-  batch.Put("k1", "v1");
-  batch.Put(handle, "k2", "v2");
+  ASSERT_OK(batch.Put("k1", "v1"));
+  ASSERT_OK(batch.Put(handle, "k2", "v2"));
   ASSERT_TRUE(blob_db_->Write(WriteOptions(), &batch).IsNotSupported());
   ASSERT_TRUE(blob_db_->Get(ReadOptions(), "k1", &value).IsNotFound());
   ASSERT_TRUE(
@ -1538,7 +1540,7 @@ TEST_F(BlobDBTest, FilterExpiredBlobIndex) {
|
|||||||
// Verify expired blob index are filtered.
|
// Verify expired blob index are filtered.
|
||||||
std::vector<KeyVersion> versions;
|
std::vector<KeyVersion> versions;
|
||||||
const size_t kMaxKeys = 10000;
|
const size_t kMaxKeys = 10000;
|
||||||
GetAllKeyVersions(blob_db_, "", "", kMaxKeys, &versions);
|
ASSERT_OK(GetAllKeyVersions(blob_db_, "", "", kMaxKeys, &versions));
|
||||||
ASSERT_EQ(data_after_compact.size(), versions.size());
|
ASSERT_EQ(data_after_compact.size(), versions.size());
|
||||||
for (auto &version : versions) {
|
for (auto &version : versions) {
|
||||||
ASSERT_TRUE(data_after_compact.count(version.user_key) > 0);
|
ASSERT_TRUE(data_after_compact.count(version.user_key) > 0);
|
||||||
@ -1889,8 +1891,8 @@ TEST_F(BlobDBTest, GarbageCollectionFailure) {
|
|||||||
Open(bdb_options, db_options);
|
Open(bdb_options, db_options);
|
||||||
|
|
||||||
// Write a couple of valid blobs.
|
// Write a couple of valid blobs.
|
||||||
Put("foo", "bar");
|
ASSERT_OK(Put("foo", "bar"));
|
||||||
Put("dead", "beef");
|
ASSERT_OK(Put("dead", "beef"));
|
||||||
|
|
||||||
// Write a fake blob reference into the base DB that cannot be parsed.
|
// Write a fake blob reference into the base DB that cannot be parsed.
|
||||||
WriteBatch batch;
|
WriteBatch batch;
|
||||||
@@ -2325,7 +2327,7 @@ TEST_F(BlobDBTest, SyncBlobFileBeforeClose) {

   Open(blob_options, options);

-  Put("foo", "bar");
+  ASSERT_OK(Put("foo", "bar"));

   auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
   ASSERT_EQ(blob_files.size(), 1);
@@ -2345,7 +2347,7 @@ TEST_F(BlobDBTest, SyncBlobFileBeforeCloseIOError) {

   Open(blob_options, options);

-  Put("foo", "bar");
+  ASSERT_OK(Put("foo", "bar"));

   auto blob_files = blob_db_impl()->TEST_GetBlobFiles();
   ASSERT_EQ(blob_files.size(), 1);
@@ -158,8 +158,9 @@ Status BlobFile::GetReader(Env* env, const EnvOptions& env_options,
   assert(fresh_open != nullptr);
   *fresh_open = false;
   int64_t current_time = 0;
-  env->GetCurrentTime(&current_time);
-  last_access_.store(current_time);
+  if (env->GetCurrentTime(&current_time).ok()) {
+    last_access_.store(current_time);
+  }
   Status s;

   {
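In library code such as BlobFile::GetReader (utilities/blob_db/blob_file.cc), there is no test macro to lean on; the fix instead branches on the result, which consumes the Status, and on failure simply leaves last_access_ at its previous value. A hedged sketch of the same shape against the public Env API (the function name here is illustrative, not from the commit):

#include <atomic>
#include <cstdint>
#include "rocksdb/env.h"

// Update an access timestamp only when the clock read succeeds; calling
// ok() on the returned Status marks it checked whether it succeeded or not.
void TouchLastAccess(rocksdb::Env* env, std::atomic<int64_t>* last_access) {
  int64_t current_time = 0;
  if (env->GetCurrentTime(&current_time).ok()) {
    last_access->store(current_time);
  }
}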
@@ -95,7 +95,7 @@ class TtlTest : public testing::Test {
   void CloseTtlHelper(bool close_db) {
     if (db_ttl_ != nullptr) {
       if (close_db) {
-        db_ttl_->Close();
+        EXPECT_OK(db_ttl_->Close());
       }
       delete db_ttl_;
       db_ttl_ = nullptr;
@@ -137,17 +137,17 @@ class TtlTest : public testing::Test {
     for (int64_t i = 0; i < num_ops && kv_it_ != kvmap_.end(); i++, ++kv_it_) {
       switch (batch_ops[i]) {
         case OP_PUT:
-          batch.Put(kv_it_->first, kv_it_->second);
+          ASSERT_OK(batch.Put(kv_it_->first, kv_it_->second));
           break;
         case OP_DELETE:
-          batch.Delete(kv_it_->first);
+          ASSERT_OK(batch.Delete(kv_it_->first));
           break;
         default:
           FAIL();
       }
     }
-    db_ttl_->Write(wopts, &batch);
-    db_ttl_->Flush(flush_opts);
+    ASSERT_OK(db_ttl_->Write(wopts, &batch));
+    ASSERT_OK(db_ttl_->Flush(flush_opts));
   }

   // Puts num_entries starting from start_pos_map from kvmap_ into the database
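Note that the WriteBatch mutations themselves are wrapped, not just the final Write: WriteBatch::Put and WriteBatch::Delete return Status in this codebase, so they leave an unchecked value behind just like a DB call would. A hedged sketch of a fully checked batch write, with assert standing in for the test harness's ASSERT_OK:

#include <cassert>
#include "rocksdb/db.h"
#include "rocksdb/write_batch.h"

void WriteTwo(rocksdb::DB* db) {  // assumes an already-open DB
  rocksdb::WriteBatch batch;
  assert(batch.Put("k1", "v1").ok());     // batch mutations return Status
  assert(batch.Delete("k0").ok());
  assert(db->Write(rocksdb::WriteOptions(), &batch).ok());
}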
@@ -170,19 +170,19 @@ class TtlTest : public testing::Test {
                        : db_ttl_->Put(wopts, cf, "keymock", "valuemock"));
     if (flush) {
       if (cf == nullptr) {
-        db_ttl_->Flush(flush_opts);
+        ASSERT_OK(db_ttl_->Flush(flush_opts));
       } else {
-        db_ttl_->Flush(flush_opts, cf);
+        ASSERT_OK(db_ttl_->Flush(flush_opts, cf));
       }
     }
   }

   // Runs a manual compaction
-  void ManualCompact(ColumnFamilyHandle* cf = nullptr) {
+  Status ManualCompact(ColumnFamilyHandle* cf = nullptr) {
     if (cf == nullptr) {
-      db_ttl_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+      return db_ttl_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
     } else {
-      db_ttl_->CompactRange(CompactRangeOptions(), cf, nullptr, nullptr);
+      return db_ttl_->CompactRange(CompactRangeOptions(), cf, nullptr, nullptr);
     }
   }

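The ManualCompact change illustrates the second recurring pattern in ttl_test: a void helper that used to swallow the CompactRange result now returns it, so each call site decides how to consume it (ASSERT_OK where success is expected, IsNotSupported() in the read-only test near the end of this diff). A hedged, self-contained sketch of the conversion outside the test fixture:

#include <cassert>
#include "rocksdb/db.h"

// Before (sketch): void Compact(DB* db) { db->CompactRange(...); }
// After: return the Status and let the caller consume it.
rocksdb::Status Compact(rocksdb::DB* db) {
  return db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);
}

void Caller(rocksdb::DB* db) {  // assumes an already-open DB
  assert(Compact(db).ok());     // ASSERT_OK(Compact(db)) inside a gtest body
}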
@@ -225,18 +225,9 @@ class TtlTest : public testing::Test {
     }
   }

-  // Sleeps for slp_tim then runs a manual compaction
-  // Checks span starting from st_pos from kvmap_ in the db and
-  // Gets should return true if check is true and false otherwise
-  // Also checks that value that we got is the same as inserted; and =kNewValue
-  // if test_compaction_change is true
-  void SleepCompactCheck(int slp_tim, int64_t st_pos, int64_t span,
-                         bool check = true, bool test_compaction_change = false,
-                         ColumnFamilyHandle* cf = nullptr) {
-    ASSERT_TRUE(db_ttl_);
-
-    env_->Sleep(slp_tim);
-    ManualCompact(cf);
+  void CompactCheck(int64_t st_pos, int64_t span, bool check = true,
+                    bool test_compaction_change = false,
+                    ColumnFamilyHandle* cf = nullptr) {
     static ReadOptions ropts;
     kv_it_ = kvmap_.begin();
     advance(kv_it_, st_pos);
@@ -267,13 +258,27 @@ class TtlTest : public testing::Test {
       }
     }
   }
+  // Sleeps for slp_tim then runs a manual compaction
+  // Checks span starting from st_pos from kvmap_ in the db and
+  // Gets should return true if check is true and false otherwise
+  // Also checks that value that we got is the same as inserted; and =kNewValue
+  // if test_compaction_change is true
+  void SleepCompactCheck(int slp_tim, int64_t st_pos, int64_t span,
+                         bool check = true, bool test_compaction_change = false,
+                         ColumnFamilyHandle* cf = nullptr) {
+    ASSERT_TRUE(db_ttl_);
+
+    env_->Sleep(slp_tim);
+    ASSERT_OK(ManualCompact(cf));
+    CompactCheck(st_pos, span, check, test_compaction_change, cf);
+  }

   // Similar as SleepCompactCheck but uses TtlIterator to read from db
   void SleepCompactCheckIter(int slp, int st_pos, int64_t span,
                              bool check = true) {
     ASSERT_TRUE(db_ttl_);
     env_->Sleep(slp);
-    ManualCompact();
+    ASSERT_OK(ManualCompact());
     static ReadOptions ropts;
     Iterator *dbiter = db_ttl_->NewIterator(ropts);
     kv_it_ = kvmap_.begin();
@@ -292,6 +297,7 @@ class TtlTest : public testing::Test {
       dbiter->Next();
     }
   }
+    ASSERT_OK(dbiter->status());
     delete dbiter;
   }

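The added ASSERT_OK(dbiter->status()) matters beyond ASC hygiene: a RocksDB iterator that hits an error during a scan simply becomes !Valid(), so without a final status() check an I/O failure is indistinguishable from a clean end-of-data, and the status is left unchecked. A hedged sketch of the full pattern (function name illustrative):

#include <cassert>
#include <memory>
#include "rocksdb/db.h"

void ScanAll(rocksdb::DB* db) {  // assumes an already-open DB
  std::unique_ptr<rocksdb::Iterator> it(
      db->NewIterator(rocksdb::ReadOptions()));
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // consume it->key() / it->value() here
  }
  assert(it->status().ok());  // clean end of data, not a swallowed error
}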
@@ -534,11 +540,16 @@ TEST_F(TtlTest, ReadOnlyPresentForever) {
   MakeKVMap(kSampleSize_);

   OpenTtl(1);                  // T=0:Open the db normally
   PutValues(0, kSampleSize_);  // T=0:Insert Set1. Delete at t=1
   CloseTtl();

   OpenReadOnlyTtl(1);
-  SleepCompactCheck(2, 0, kSampleSize_);  // T=2:Set1 should still be there
+  ASSERT_TRUE(db_ttl_);
+
+  env_->Sleep(2);
+  Status s = ManualCompact();  // T=2:Set1 should still be there
+  ASSERT_TRUE(s.IsNotSupported());
+  CompactCheck(0, kSampleSize_);
   CloseTtl();
 }

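ReadOnlyPresentForever is the one caller that cannot go through the updated SleepCompactCheck: on a DB opened read-only, CompactRange fails, so the helper's ASSERT_OK(ManualCompact(cf)) would abort the test. The test therefore inlines the helper's steps and consumes the Status by asserting the specific error it expects; checking for an expected failure satisfies ASSERT_STATUS_CHECKED just as ok() does. A hedged sketch of that expected-error consumption:

#include <cassert>
#include "rocksdb/db.h"

void CompactReadOnly(rocksdb::DB* read_only_db) {  // assumes read-only open
  rocksdb::Status s = read_only_db->CompactRange(
      rocksdb::CompactRangeOptions(), nullptr, nullptr);
  assert(s.IsNotSupported());  // inspecting the error marks it checked
}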