// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

#ifndef ROCKSDB_LITE

#include <stdlib.h>

#include <algorithm>
#include <map>
#include <string>
#include <unordered_set>
#include <vector>

#include "db/db_impl/db_impl.h"
#include "db/db_test_util.h"
#include "db/version_set.h"
#include "db/write_batch_internal.h"
#include "file/filename.h"
#include "port/stack_trace.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/transaction_log.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "util/string_util.h"

namespace ROCKSDB_NAMESPACE {

class ObsoleteFilesTest : public DBTestBase {
 public:
  ObsoleteFilesTest()
      : DBTestBase("/obsolete_files_test", /*env_do_fsync=*/true),
        wal_dir_(dbname_ + "/wal_files") {}

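  // Writes `numkeys` consecutive integer keys starting at `startkey`, using
  // each key as its own value. WAL syncing is disabled to keep writes cheap.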
  void AddKeys(int numkeys, int startkey) {
    WriteOptions options;
    options.sync = false;
    for (int i = startkey; i < (numkeys + startkey); i++) {
      std::string temp = ToString(i);
      Slice key(temp);
      Slice value(temp);
      ASSERT_OK(db_->Put(options, key, value));
    }
  }

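  // Creates `numFiles` level-0 SST files by writing `numKeysPerFile` keys and
  // flushing the memtable after each batch.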
  void createLevel0Files(int numFiles, int numKeysPerFile) {
    int startKey = 0;
    for (int i = 0; i < numFiles; i++) {
      AddKeys(numKeysPerFile, startKey);
      startKey += numKeysPerFile;
      ASSERT_OK(dbfull()->TEST_FlushMemTable());
      ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
    }
  }

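  // Asserts that `dir` contains exactly the expected number of WAL, SST, and
  // MANIFEST files.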
  void CheckFileTypeCounts(const std::string& dir, int required_log,
                           int required_sst, int required_manifest) {
    std::vector<std::string> filenames;
    ASSERT_OK(env_->GetChildren(dir, &filenames));

    int log_cnt = 0;
    int sst_cnt = 0;
    int manifest_cnt = 0;
    for (auto file : filenames) {
      uint64_t number;
      FileType type;
      if (ParseFileName(file, &number, &type)) {
        log_cnt += (type == kWalFile);
        sst_cnt += (type == kTableFile);
        manifest_cnt += (type == kDescriptorFile);
      }
    }
    ASSERT_EQ(required_log, log_cnt);
    ASSERT_EQ(required_sst, sst_cnt);
    ASSERT_EQ(required_manifest, manifest_cnt);
  }

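  // Destroys any existing DB and reopens it with options tailored to these
  // tests: compaction is triggered by two level-0 files, every deletion pass
  // does a full purge, and WAL files live in a dedicated wal_dir_.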
  void ReopenDB() {
    Options options = CurrentOptions();
    options.create_if_missing = true;
    // Trigger compaction when the number of level 0 files reaches 2.
    options.level0_file_num_compaction_trigger = 2;
    options.disable_auto_compactions = false;
    options.delete_obsolete_files_period_micros = 0;  // always do full purge
    options.enable_thread_tracking = true;
    options.write_buffer_size = 1024 * 1024 * 1000;
    options.target_file_size_base = 1024 * 1024 * 1000;
    options.max_bytes_for_level_base = 1024 * 1024 * 1000;
    options.WAL_ttl_seconds = 300;  // Used to test log files
    options.WAL_size_limit_MB = 1024;  // Used to test log files
    options.wal_dir = wal_dir_;

    // Note: the following prevents an otherwise harmless data race between the
    // test setup code (AddBlobFile) in ObsoleteFilesTest.BlobFiles and the
    // periodic stat dumping thread.
    options.stats_dump_period_sec = 0;

    Destroy(options);
    Reopen(options);
  }

  const std::string wal_dir_;
};

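// Races a user-initiated FindObsoleteFiles/PurgeObsoleteFiles pass against the
// background compaction's own purge, and verifies that every deletion succeeds
// and that no file is left in the purge queue when the DB is closed.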
TEST_F(ObsoleteFilesTest, RaceForObsoleteFileDeletion) {
  ReopenDB();
  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->LoadDependency({
      {"DBImpl::BackgroundCallCompaction:FoundObsoleteFiles",
       "ObsoleteFilesTest::RaceForObsoleteFileDeletion:1"},
      {"DBImpl::BackgroundCallCompaction:PurgedObsoleteFiles",
       "ObsoleteFilesTest::RaceForObsoleteFileDeletion:2"},
  });
  SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DeleteObsoleteFileImpl:AfterDeletion", [&](void* arg) {
        Status* p_status = reinterpret_cast<Status*>(arg);
        ASSERT_OK(*p_status);
      });
  SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::CloseHelper:PendingPurgeFinished", [&](void* arg) {
        std::unordered_set<uint64_t>* files_grabbed_for_purge_ptr =
            reinterpret_cast<std::unordered_set<uint64_t>*>(arg);
        ASSERT_TRUE(files_grabbed_for_purge_ptr->empty());
      });
  SyncPoint::GetInstance()->EnableProcessing();

  createLevel0Files(2, 50000);
  CheckFileTypeCounts(wal_dir_, 1, 0, 0);

  port::Thread user_thread([this]() {
    JobContext jobCxt(0);
    TEST_SYNC_POINT("ObsoleteFilesTest::RaceForObsoleteFileDeletion:1");
    dbfull()->TEST_LockMutex();
    dbfull()->FindObsoleteFiles(&jobCxt, true /* force=true */,
                                false /* no_full_scan=false */);
    dbfull()->TEST_UnlockMutex();
    TEST_SYNC_POINT("ObsoleteFilesTest::RaceForObsoleteFileDeletion:2");
    dbfull()->PurgeObsoleteFiles(jobCxt);
    jobCxt.Clean();
  });

  user_thread.join();
}

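// Every successful SetOptions() call writes a new OPTIONS file. This test
// verifies that stale OPTIONS files are purged so that exactly two remain
// after the final Close().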
TEST_F(ObsoleteFilesTest, DeleteObsoleteOptionsFile) {
  ReopenDB();
  SyncPoint::GetInstance()->DisableProcessing();
  std::vector<uint64_t> optsfiles_nums;
  std::vector<bool> optsfiles_keep;
  SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::PurgeObsoleteFiles:CheckOptionsFiles:1", [&](void* arg) {
        optsfiles_nums.push_back(*reinterpret_cast<uint64_t*>(arg));
      });
  SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::PurgeObsoleteFiles:CheckOptionsFiles:2", [&](void* arg) {
        optsfiles_keep.push_back(*reinterpret_cast<bool*>(arg));
      });
  SyncPoint::GetInstance()->EnableProcessing();

  createLevel0Files(2, 50000);
  CheckFileTypeCounts(wal_dir_, 1, 0, 0);

  ASSERT_OK(dbfull()->DisableFileDeletions());
  for (int i = 0; i != 4; ++i) {
    if (i % 2) {
      ASSERT_OK(dbfull()->SetOptions(dbfull()->DefaultColumnFamily(),
                                     {{"paranoid_file_checks", "false"}}));
    } else {
      ASSERT_OK(dbfull()->SetOptions(dbfull()->DefaultColumnFamily(),
                                     {{"paranoid_file_checks", "true"}}));
    }
  }
  ASSERT_OK(dbfull()->EnableFileDeletions(true /* force */));
  ASSERT_EQ(optsfiles_nums.size(), optsfiles_keep.size());

  Close();

  std::vector<std::string> files;
  int opts_file_count = 0;
  ASSERT_OK(env_->GetChildren(dbname_, &files));
  for (const auto& file : files) {
    uint64_t file_num;
    Slice dummy_info_log_name_prefix;
    FileType type;
    WalFileType log_type;
    if (ParseFileName(file, &file_num, dummy_info_log_name_prefix, &type,
                      &log_type) &&
        type == kOptionsFile) {
      opts_file_count++;
    }
  }
  ASSERT_EQ(2, opts_file_count);
}

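// Registers one obsolete and one live blob file directly with the version
// machinery, then verifies that FindObsoleteFiles/PurgeObsoleteFiles delete
// only the obsolete ones while keeping live and still-pending blob files.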
TEST_F(ObsoleteFilesTest, BlobFiles) {
  ReopenDB();

  VersionSet* const versions = dbfull()->TEST_GetVersionSet();
  assert(versions);
  assert(versions->GetColumnFamilySet());

  ColumnFamilyData* const cfd = versions->GetColumnFamilySet()->GetDefault();
  assert(cfd);

  const ImmutableCFOptions* const ioptions = cfd->ioptions();
  assert(ioptions);
  assert(!ioptions->cf_paths.empty());

  const std::string& path = ioptions->cf_paths.front().path;

  // Add an obsolete blob file.
  constexpr uint64_t first_blob_file_number = 234;
  versions->AddObsoleteBlobFile(first_blob_file_number, path);

  // Add a live blob file.
  Version* const version = cfd->current();
  assert(version);

  VersionStorageInfo* const storage_info = version->storage_info();
  assert(storage_info);

  constexpr uint64_t second_blob_file_number = 456;
  constexpr uint64_t second_total_blob_count = 100;
  constexpr uint64_t second_total_blob_bytes = 2000000;
  constexpr char second_checksum_method[] = "CRC32B";
  constexpr char second_checksum_value[] = "\x6d\xbd\xf2\x3a";

  auto shared_meta = SharedBlobFileMetaData::Create(
      second_blob_file_number, second_total_blob_count,
      second_total_blob_bytes, second_checksum_method, second_checksum_value);

  constexpr uint64_t second_garbage_blob_count = 0;
  constexpr uint64_t second_garbage_blob_bytes = 0;

  auto meta = BlobFileMetaData::Create(
      std::move(shared_meta), BlobFileMetaData::LinkedSsts(),
      second_garbage_blob_count, second_garbage_blob_bytes);

  storage_info->AddBlobFile(std::move(meta));

  // Check for obsolete files and make sure the first blob file is picked up
  // and grabbed for purge. The second blob file should be on the live list.
  constexpr int job_id = 0;
  JobContext job_context{job_id};

  dbfull()->TEST_LockMutex();
  constexpr bool force_full_scan = false;
  dbfull()->FindObsoleteFiles(&job_context, force_full_scan);
  dbfull()->TEST_UnlockMutex();

  ASSERT_TRUE(job_context.HaveSomethingToDelete());
  ASSERT_EQ(job_context.blob_delete_files.size(), 1);
  ASSERT_EQ(job_context.blob_delete_files[0].GetBlobFileNumber(),
            first_blob_file_number);

  const auto& files_grabbed_for_purge =
      dbfull()->TEST_GetFilesGrabbedForPurge();
  ASSERT_NE(files_grabbed_for_purge.find(first_blob_file_number),
            files_grabbed_for_purge.end());

  ASSERT_EQ(job_context.blob_live.size(), 1);
  ASSERT_EQ(job_context.blob_live[0], second_blob_file_number);

  // Hack the job context a bit by adding a few files to the full scan
  // list and adjusting the pending file number. We add the two files
  // above as well as two additional ones, where one is old
  // and should be cleaned up, and the other is still pending.
  constexpr uint64_t old_blob_file_number = 123;
  constexpr uint64_t pending_blob_file_number = 567;

  job_context.full_scan_candidate_files.emplace_back(
      BlobFileName(old_blob_file_number), path);
  job_context.full_scan_candidate_files.emplace_back(
      BlobFileName(first_blob_file_number), path);
  job_context.full_scan_candidate_files.emplace_back(
      BlobFileName(second_blob_file_number), path);
  job_context.full_scan_candidate_files.emplace_back(
      BlobFileName(pending_blob_file_number), path);

  job_context.min_pending_output = pending_blob_file_number;

  // Purge obsolete files and make sure we purge the old file and the first
  // file (and keep the second file and the pending file).
  std::vector<std::string> deleted_files;
  SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::DeleteObsoleteFileImpl::BeforeDeletion", [&](void* arg) {
        const std::string* file = static_cast<std::string*>(arg);
        assert(file);

        constexpr char blob_extension[] = ".blob";

        if (file->find(blob_extension) != std::string::npos) {
          deleted_files.emplace_back(*file);
        }
      });
  SyncPoint::GetInstance()->EnableProcessing();

  dbfull()->PurgeObsoleteFiles(job_context);
  job_context.Clean();

  SyncPoint::GetInstance()->DisableProcessing();
  SyncPoint::GetInstance()->ClearAllCallBacks();

  ASSERT_EQ(files_grabbed_for_purge.find(first_blob_file_number),
            files_grabbed_for_purge.end());

  std::sort(deleted_files.begin(), deleted_files.end());
  const std::vector<std::string> expected_deleted_files{
      BlobFileName(path, old_blob_file_number),
      BlobFileName(path, first_blob_file_number)};

  ASSERT_EQ(deleted_files, expected_deleted_files);
}

}  // namespace ROCKSDB_NAMESPACE

#ifdef ROCKSDB_UNITTESTS_WITH_CUSTOM_OBJECTS_FROM_STATIC_LIBS
extern "C" {
void RegisterCustomObjects(int argc, char** argv);
}
#else
void RegisterCustomObjects(int /*argc*/, char** /*argv*/) {}
#endif  // !ROCKSDB_UNITTESTS_WITH_CUSTOM_OBJECTS_FROM_STATIC_LIBS

int main(int argc, char** argv) {
  ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  RegisterCustomObjects(argc, argv);
  return RUN_ALL_TESTS();
}

#else
#include <stdio.h>

int main(int /*argc*/, char** /*argv*/) {
  fprintf(stderr,
          "SKIPPED as DBImpl::DeleteFile is not supported in ROCKSDB_LITE\n");
  return 0;
}

#endif  // !ROCKSDB_LITE