// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).

#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif

#include <inttypes.h>
#include <atomic>
#include <thread>
#include <vector>

#include "rocksdb/env.h"
#include "rocksdb/options.h"
#include "util/delete_scheduler.h"
#include "util/sst_file_manager_impl.h"
#include "util/string_util.h"
#include "util/sync_point.h"
#include "util/testharness.h"
#include "util/testutil.h"

#ifndef ROCKSDB_LITE

namespace rocksdb {

class DeleteSchedulerTest : public testing::Test {
 public:
  DeleteSchedulerTest() : env_(Env::Default()) {
    dummy_files_dir_ = test::TmpDir(env_) + "/delete_scheduler_dummy_data_dir";
    DestroyAndCreateDir(dummy_files_dir_);
  }

  ~DeleteSchedulerTest() {
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
    rocksdb::SyncPoint::GetInstance()->LoadDependency({});
    rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
    test::DestroyDir(env_, dummy_files_dir_);
  }

  void DestroyAndCreateDir(const std::string& dir) {
    ASSERT_OK(test::DestroyDir(env_, dir));
    EXPECT_OK(env_->CreateDir(dir));
  }

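  // Count the files in dummy_files_dir_ that are not trash files (as judged
  // by DeleteScheduler::IsTrashFile()), skipping the "." and ".." entries.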
  int CountNormalFiles() {
    std::vector<std::string> files_in_dir;
    EXPECT_OK(env_->GetChildren(dummy_files_dir_, &files_in_dir));

    int normal_cnt = 0;
    for (auto& f : files_in_dir) {
      if (!DeleteScheduler::IsTrashFile(f) && f != "." && f != "..") {
        printf("%s\n", f.c_str());
        normal_cnt++;
      }
    }
    return normal_cnt;
  }

  int CountTrashFiles() {
    std::vector<std::string> files_in_dir;
    EXPECT_OK(env_->GetChildren(dummy_files_dir_, &files_in_dir));

    int trash_cnt = 0;
    for (auto& f : files_in_dir) {
      if (DeleteScheduler::IsTrashFile(f)) {
        trash_cnt++;
      }
    }
    return trash_cnt;
  }

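  // Create a file of `size` bytes filled with 'A' under dummy_files_dir_,
  // register it with the SstFileManager so its size is tracked, and return
  // its full path.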
  std::string NewDummyFile(const std::string& file_name, uint64_t size = 1024) {
    std::string file_path = dummy_files_dir_ + "/" + file_name;
    std::unique_ptr<WritableFile> f;
    env_->NewWritableFile(file_path, &f, EnvOptions());
    std::string data(size, 'A');
    EXPECT_OK(f->Append(data));
    EXPECT_OK(f->Close());
    sst_file_mgr_->OnAddFile(file_path);
    return file_path;
  }

  void NewDeleteScheduler() {
    // Tests in this file are for the DeleteScheduler component and don't
    // create any DBs, so we need to set max_trash_db_ratio to above 100%
    // (instead of the default 25%).
    sst_file_mgr_.reset(
        new SstFileManagerImpl(env_, nullptr, rate_bytes_per_sec_,
                               /* max_trash_db_ratio= */ 1.1));
    delete_scheduler_ = sst_file_mgr_->delete_scheduler();
  }

  Env* env_;
  std::string dummy_files_dir_;
  int64_t rate_bytes_per_sec_;
  DeleteScheduler* delete_scheduler_;
  std::unique_ptr<SstFileManagerImpl> sst_file_mgr_;
};

// Test the basic functionality of DeleteScheduler (Rate Limiting).
// 1- Create 100 dummy files
// 2- Delete the 100 dummy files using DeleteScheduler
// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
// 3- Wait for DeleteScheduler to delete all files in trash
// 4- Verify that BackgroundEmptyTrash used the correct penalties for the files
// 5- Make sure that all created files were completely deleted
TEST_F(DeleteSchedulerTest, BasicRateLimiting) {
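  // The dependency below makes DeleteScheduler::BackgroundEmptyTrash wait
  // until this test reaches the BasicRateLimiting:1 sync point, so all files
  // are queued in trash before the background thread starts draining it.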
  rocksdb::SyncPoint::GetInstance()->LoadDependency({
      {"DeleteSchedulerTest::BasicRateLimiting:1",
       "DeleteScheduler::BackgroundEmptyTrash"},
  });

  std::vector<uint64_t> penalties;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::BackgroundEmptyTrash:Wait",
      [&](void* arg) { penalties.push_back(*(static_cast<uint64_t*>(arg))); });

  int num_files = 100;        // 100 files
  uint64_t file_size = 1024;  // every file is 1 KB
  std::vector<uint64_t> delete_kbs_per_sec = {512, 200, 100, 50, 25};

  for (size_t t = 0; t < delete_kbs_per_sec.size(); t++) {
    penalties.clear();
    rocksdb::SyncPoint::GetInstance()->ClearTrace();
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();

    DestroyAndCreateDir(dummy_files_dir_);
    rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
    NewDeleteScheduler();

    // Create 100 dummy files, every file is 1 KB
    std::vector<std::string> generated_files;
    for (int i = 0; i < num_files; i++) {
      std::string file_name = "file" + ToString(i) + ".data";
      generated_files.push_back(NewDummyFile(file_name, file_size));
    }

    // Delete dummy files and measure time spent to empty trash
    for (int i = 0; i < num_files; i++) {
      ASSERT_OK(delete_scheduler_->DeleteFile(generated_files[i]));
    }
    ASSERT_EQ(CountNormalFiles(), 0);

    uint64_t delete_start_time = env_->NowMicros();
    TEST_SYNC_POINT("DeleteSchedulerTest::BasicRateLimiting:1");
    delete_scheduler_->WaitForEmptyTrash();
    uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;

    auto bg_errors = delete_scheduler_->GetBackgroundErrors();
    ASSERT_EQ(bg_errors.size(), 0);

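    // Each scheduled deletion i is expected to report a cumulative penalty of
    //   (bytes queued so far) * 1000000 / rate_bytes_per_sec_
    // microseconds, which the loop below checks against the values captured
    // by the BackgroundEmptyTrash:Wait callback.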
    uint64_t total_files_size = 0;
    uint64_t expected_penlty = 0;
    ASSERT_EQ(penalties.size(), num_files);
    for (int i = 0; i < num_files; i++) {
      total_files_size += file_size;
      expected_penlty = ((total_files_size * 1000000) / rate_bytes_per_sec_);
      ASSERT_EQ(expected_penlty, penalties[i]);
    }
    ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);

    ASSERT_EQ(CountTrashFiles(), 0);
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  }
}

// Same as the BasicRateLimiting test but deletes files in multiple threads.
// 1- Create 100 dummy files
// 2- Delete the 100 dummy files using DeleteScheduler from 10 threads
// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
// 3- Wait for DeleteScheduler to delete all files in queue
// 4- Verify that BackgroundEmptyTrash used the correct penalties for the files
// 5- Make sure that all created files were completely deleted
TEST_F(DeleteSchedulerTest, RateLimitingMultiThreaded) {
  rocksdb::SyncPoint::GetInstance()->LoadDependency({
      {"DeleteSchedulerTest::RateLimitingMultiThreaded:1",
       "DeleteScheduler::BackgroundEmptyTrash"},
  });

  std::vector<uint64_t> penalties;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::BackgroundEmptyTrash:Wait",
      [&](void* arg) { penalties.push_back(*(static_cast<uint64_t*>(arg))); });

  int thread_cnt = 10;
  int num_files = 10;         // 10 files per thread
  uint64_t file_size = 1024;  // every file is 1 KB

  std::vector<uint64_t> delete_kbs_per_sec = {512, 200, 100, 50, 25};
  for (size_t t = 0; t < delete_kbs_per_sec.size(); t++) {
    penalties.clear();
    rocksdb::SyncPoint::GetInstance()->ClearTrace();
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();

    DestroyAndCreateDir(dummy_files_dir_);
    rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
    NewDeleteScheduler();

    // Create 100 dummy files, every file is 1 KB
    std::vector<std::string> generated_files;
    for (int i = 0; i < num_files * thread_cnt; i++) {
      std::string file_name = "file" + ToString(i) + ".data";
      generated_files.push_back(NewDummyFile(file_name, file_size));
    }

    // Delete dummy files using 10 threads and measure time spent to empty trash
    std::atomic<int> thread_num(0);
    std::vector<port::Thread> threads;
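    // Each thread claims a disjoint range of num_files files (via the shared
    // atomic counter) and schedules them for deletion.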
    std::function<void()> delete_thread = [&]() {
      int idx = thread_num.fetch_add(1);
      int range_start = idx * num_files;
      int range_end = range_start + num_files;
      for (int j = range_start; j < range_end; j++) {
        ASSERT_OK(delete_scheduler_->DeleteFile(generated_files[j]));
      }
    };

    for (int i = 0; i < thread_cnt; i++) {
      threads.emplace_back(delete_thread);
    }

    for (size_t i = 0; i < threads.size(); i++) {
      threads[i].join();
    }

    uint64_t delete_start_time = env_->NowMicros();
    TEST_SYNC_POINT("DeleteSchedulerTest::RateLimitingMultiThreaded:1");
    delete_scheduler_->WaitForEmptyTrash();
    uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;

    auto bg_errors = delete_scheduler_->GetBackgroundErrors();
    ASSERT_EQ(bg_errors.size(), 0);

    uint64_t total_files_size = 0;
    uint64_t expected_penlty = 0;
    ASSERT_EQ(penalties.size(), num_files * thread_cnt);
    for (int i = 0; i < num_files * thread_cnt; i++) {
      total_files_size += file_size;
      expected_penlty = ((total_files_size * 1000000) / rate_bytes_per_sec_);
      ASSERT_EQ(expected_penlty, penalties[i]);
    }
    ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);

    ASSERT_EQ(CountNormalFiles(), 0);
    ASSERT_EQ(CountTrashFiles(), 0);
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  }
}

// Disable rate limiting by setting rate_bytes_per_sec_ to 0 and make sure
// that when DeleteScheduler deletes a file it deletes it immediately and
// doesn't move it to trash
TEST_F(DeleteSchedulerTest, DisableRateLimiting) {
  int bg_delete_file = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::DeleteTrashFile:DeleteFile",
      [&](void* arg) { bg_delete_file++; });

  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  rate_bytes_per_sec_ = 0;
  NewDeleteScheduler();

  for (int i = 0; i < 10; i++) {
    // Every file we delete will be deleted immediately
    std::string dummy_file = NewDummyFile("dummy.data");
    ASSERT_OK(delete_scheduler_->DeleteFile(dummy_file));
    ASSERT_TRUE(env_->FileExists(dummy_file).IsNotFound());
    ASSERT_EQ(CountNormalFiles(), 0);
    ASSERT_EQ(CountTrashFiles(), 0);
  }

  ASSERT_EQ(bg_delete_file, 0);

  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

// Testing that moving files to trash with the same name is not a problem
// 1- Create 10 files with the same name "conflict.data"
// 2- Delete the 10 files using DeleteScheduler
// 3- Make sure that the trash directory contains 10 files ("conflict.data" x 10)
// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
// 4- Make sure that files are deleted from trash
TEST_F(DeleteSchedulerTest, ConflictNames) {
  rocksdb::SyncPoint::GetInstance()->LoadDependency({
      {"DeleteSchedulerTest::ConflictNames:1",
       "DeleteScheduler::BackgroundEmptyTrash"},
  });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  rate_bytes_per_sec_ = 1024 * 1024;  // 1 MB/sec
  NewDeleteScheduler();

  // Create "conflict.data" and move it to trash 10 times
  for (int i = 0; i < 10; i++) {
    std::string dummy_file = NewDummyFile("conflict.data");
    ASSERT_OK(delete_scheduler_->DeleteFile(dummy_file));
  }
  ASSERT_EQ(CountNormalFiles(), 0);
  // 10 files ("conflict.data" x 10) in trash
  ASSERT_EQ(CountTrashFiles(), 10);

  // Hold BackgroundEmptyTrash
  TEST_SYNC_POINT("DeleteSchedulerTest::ConflictNames:1");
  delete_scheduler_->WaitForEmptyTrash();
  ASSERT_EQ(CountTrashFiles(), 0);

  auto bg_errors = delete_scheduler_->GetBackgroundErrors();
  ASSERT_EQ(bg_errors.size(), 0);

  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

// 1- Create 10 dummy files
// 2- Delete the 10 files using DeleteScheduler (move them to trash)
// 3- Delete the 10 files directly (using env_->DeleteFile)
// --- Hold DeleteScheduler::BackgroundEmptyTrash ---
// 4- Make sure that DeleteScheduler failed to delete the 10 files and
// reported 10 background errors
TEST_F(DeleteSchedulerTest, BackgroundError) {
  rocksdb::SyncPoint::GetInstance()->LoadDependency({
      {"DeleteSchedulerTest::BackgroundError:1",
       "DeleteScheduler::BackgroundEmptyTrash"},
  });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  rate_bytes_per_sec_ = 1024 * 1024;  // 1 MB/sec
  NewDeleteScheduler();

  // Generate 10 dummy files and move them to trash
  for (int i = 0; i < 10; i++) {
    std::string file_name = "data_" + ToString(i) + ".data";
    ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
  }
  ASSERT_EQ(CountNormalFiles(), 0);
  ASSERT_EQ(CountTrashFiles(), 10);

  // Delete the 10 files from trash directly. This will cause background
  // errors in BackgroundEmptyTrash since we already deleted the files it was
  // going to delete.
  for (int i = 0; i < 10; i++) {
    std::string file_name = "data_" + ToString(i) + ".data.trash";
    ASSERT_OK(env_->DeleteFile(dummy_files_dir_ + "/" + file_name));
  }

  // Hold BackgroundEmptyTrash
  TEST_SYNC_POINT("DeleteSchedulerTest::BackgroundError:1");
  delete_scheduler_->WaitForEmptyTrash();
  auto bg_errors = delete_scheduler_->GetBackgroundErrors();
  ASSERT_EQ(bg_errors.size(), 10);

  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

// 1- Create 10 dummy files
// 2- Delete the 10 dummy files using DeleteScheduler
// 3- Wait for DeleteScheduler to delete all files in queue
// 4- Make sure all files in trash directory were deleted
// 5- Repeat previous steps 5 times
TEST_F(DeleteSchedulerTest, StartBGEmptyTrashMultipleTimes) {
  int bg_delete_file = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::DeleteTrashFile:DeleteFile",
      [&](void* arg) { bg_delete_file++; });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  rate_bytes_per_sec_ = 1024 * 1024;  // 1 MB/sec
  NewDeleteScheduler();

  // Move files to trash, wait for empty trash, start again
  for (int run = 1; run <= 5; run++) {
    // Generate 10 dummy files and move them to trash
    for (int i = 0; i < 10; i++) {
      std::string file_name = "data_" + ToString(i) + ".data";
      ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
    }
    ASSERT_EQ(CountNormalFiles(), 0);
    delete_scheduler_->WaitForEmptyTrash();
    ASSERT_EQ(bg_delete_file, 10 * run);
    ASSERT_EQ(CountTrashFiles(), 0);

    auto bg_errors = delete_scheduler_->GetBackgroundErrors();
    ASSERT_EQ(bg_errors.size(), 0);
  }

  ASSERT_EQ(bg_delete_file, 50);
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

// 1- Create a DeleteScheduler with a very slow rate limit (1 byte/sec)
// 2- Delete 100 files using DeleteScheduler
// 3- Delete the DeleteScheduler (call the destructor while queue is not empty)
// 4- Make sure that not all files were deleted from trash and that the
// DeleteScheduler background thread did not delete all files
TEST_F(DeleteSchedulerTest, DestructorWithNonEmptyQueue) {
  int bg_delete_file = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::DeleteTrashFile:DeleteFile",
      [&](void* arg) { bg_delete_file++; });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  rate_bytes_per_sec_ = 1;  // 1 byte/sec
  NewDeleteScheduler();

  for (int i = 0; i < 100; i++) {
    std::string file_name = "data_" + ToString(i) + ".data";
    ASSERT_OK(delete_scheduler_->DeleteFile(NewDummyFile(file_name)));
  }

  // Deleting the 100 files (100 * 1024 bytes at 1 byte/sec) would need more
  // than 28 hours, so we destroy the DeleteScheduler while the delete queue
  // is still not empty.
  sst_file_mgr_.reset();

  ASSERT_LT(bg_delete_file, 100);
  ASSERT_GT(CountTrashFiles(), 0);

  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

TEST_F(DeleteSchedulerTest, DISABLED_DynamicRateLimiting1) {
  std::vector<uint64_t> penalties;
  int bg_delete_file = 0;
  int fg_delete_file = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::DeleteTrashFile:DeleteFile",
      [&](void* arg) { bg_delete_file++; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::DeleteFile",
      [&](void* arg) { fg_delete_file++; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::BackgroundEmptyTrash:Wait",
      [&](void* arg) { penalties.push_back(*(static_cast<uint64_t*>(arg))); });

  rocksdb::SyncPoint::GetInstance()->LoadDependency({
      {"DeleteSchedulerTest::DynamicRateLimiting1:1",
       "DeleteScheduler::BackgroundEmptyTrash"},
  });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  rate_bytes_per_sec_ = 0;  // Disable rate limiting initially
  NewDeleteScheduler();

  int num_files = 10;         // 10 files
  uint64_t file_size = 1024;  // every file is 1 KB

  std::vector<int64_t> delete_kbs_per_sec = {512, 200, 0, 100, 50, -2, 25};
  for (size_t t = 0; t < delete_kbs_per_sec.size(); t++) {
    penalties.clear();
    bg_delete_file = 0;
    fg_delete_file = 0;
    rocksdb::SyncPoint::GetInstance()->ClearTrace();
    rocksdb::SyncPoint::GetInstance()->EnableProcessing();

    DestroyAndCreateDir(dummy_files_dir_);
    rate_bytes_per_sec_ = delete_kbs_per_sec[t] * 1024;
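    // Adjust the rate on the existing scheduler rather than creating a new
    // one; a zero or negative rate disables rate limiting for this round, so
    // files are expected to be deleted in the foreground instead of queued.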
    delete_scheduler_->SetRateBytesPerSecond(rate_bytes_per_sec_);

    // Create 10 dummy files, every file is 1 KB
    std::vector<std::string> generated_files;
    for (int i = 0; i < num_files; i++) {
      std::string file_name = "file" + ToString(i) + ".data";
      generated_files.push_back(NewDummyFile(file_name, file_size));
    }

    // Delete dummy files and measure time spent to empty trash
    for (int i = 0; i < num_files; i++) {
      ASSERT_OK(delete_scheduler_->DeleteFile(generated_files[i]));
    }
    ASSERT_EQ(CountNormalFiles(), 0);

    if (rate_bytes_per_sec_ > 0) {
      uint64_t delete_start_time = env_->NowMicros();
      TEST_SYNC_POINT("DeleteSchedulerTest::DynamicRateLimiting1:1");
      delete_scheduler_->WaitForEmptyTrash();
      uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;

      auto bg_errors = delete_scheduler_->GetBackgroundErrors();
      ASSERT_EQ(bg_errors.size(), 0);

      uint64_t total_files_size = 0;
      uint64_t expected_penlty = 0;
      ASSERT_EQ(penalties.size(), num_files);
      for (int i = 0; i < num_files; i++) {
        total_files_size += file_size;
        expected_penlty = ((total_files_size * 1000000) / rate_bytes_per_sec_);
        ASSERT_EQ(expected_penlty, penalties[i]);
      }
      ASSERT_GT(time_spent_deleting, expected_penlty * 0.9);
      ASSERT_EQ(bg_delete_file, num_files);
      ASSERT_EQ(fg_delete_file, 0);
    } else {
      ASSERT_EQ(penalties.size(), 0);
      ASSERT_EQ(bg_delete_file, 0);
      ASSERT_EQ(fg_delete_file, num_files);
    }

    ASSERT_EQ(CountTrashFiles(), 0);
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  }
}

TEST_F(DeleteSchedulerTest, ImmediateDeleteOn25PercDBSize) {
  int bg_delete_file = 0;
  int fg_delete_file = 0;
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::DeleteTrashFile:DeleteFile",
      [&](void* arg) { bg_delete_file++; });
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DeleteScheduler::DeleteFile", [&](void* arg) { fg_delete_file++; });

  rocksdb::SyncPoint::GetInstance()->EnableProcessing();

  int num_files = 100;             // 100 files
  uint64_t file_size = 1024 * 10;  // 10 KB per file
  rate_bytes_per_sec_ = 1;         // 1 byte/sec (very slow trash delete)

  NewDeleteScheduler();
  delete_scheduler_->SetMaxTrashDBRatio(0.25);

  std::vector<std::string> generated_files;
  for (int i = 0; i < num_files; i++) {
    std::string file_name = "file" + ToString(i) + ".data";
    generated_files.push_back(NewDummyFile(file_name, file_size));
  }

  for (std::string& file_name : generated_files) {
    delete_scheduler_->DeleteFile(file_name);
  }

  // When we end up with 26 files in trash we will start
  // deleting new files immediately
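  // (The 100 tracked files total roughly 1000 KB, so with a 0.25 trash ratio
  // only about the first quarter of the deletions can be queued in trash; the
  // remaining 74 fall back to immediate deletion.)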
  ASSERT_EQ(fg_delete_file, 74);

  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
}

TEST_F(DeleteSchedulerTest, IsTrashCheck) {
  // Trash files
  ASSERT_TRUE(DeleteScheduler::IsTrashFile("x.trash"));
  ASSERT_TRUE(DeleteScheduler::IsTrashFile(".trash"));
  ASSERT_TRUE(DeleteScheduler::IsTrashFile("abc.sst.trash"));
  ASSERT_TRUE(DeleteScheduler::IsTrashFile("/a/b/c/abc..sst.trash"));
  ASSERT_TRUE(DeleteScheduler::IsTrashFile("log.trash"));
  ASSERT_TRUE(DeleteScheduler::IsTrashFile("^^^^^.log.trash"));
  ASSERT_TRUE(DeleteScheduler::IsTrashFile("abc.t.trash"));

  // Not trash files
  ASSERT_FALSE(DeleteScheduler::IsTrashFile("abc.sst"));
  ASSERT_FALSE(DeleteScheduler::IsTrashFile("abc.txt"));
  ASSERT_FALSE(DeleteScheduler::IsTrashFile("/a/b/c/abc.sst"));
  ASSERT_FALSE(DeleteScheduler::IsTrashFile("/a/b/c/abc.sstrash"));
  ASSERT_FALSE(DeleteScheduler::IsTrashFile("^^^^^.trashh"));
  ASSERT_FALSE(DeleteScheduler::IsTrashFile("abc.ttrash"));
  ASSERT_FALSE(DeleteScheduler::IsTrashFile(".ttrash"));
  ASSERT_FALSE(DeleteScheduler::IsTrashFile("abc.trashx"));
}

}  // namespace rocksdb

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

#else
int main(int argc, char** argv) {
  printf("DeleteScheduler is not supported in ROCKSDB_LITE\n");
  return 0;
}
#endif  // ROCKSDB_LITE