// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.

// Syncpoint prevents us from building and running tests in release mode
#ifndef ROCKSDB_LITE

#include "rocksdb/utilities/checkpoint.h"

#ifndef OS_WIN
#include <unistd.h>
#endif
#include <iostream>
#include <thread>
#include <utility>

#include "db/db_impl/db_impl.h"
#include "file/file_util.h"
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/db.h"
#include "rocksdb/env.h"
#include "rocksdb/utilities/transaction_db.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
|
2020-07-09 23:33:42 +02:00
|
|
|
#include "utilities/fault_injection_env.h"
|
2021-05-05 21:53:42 +02:00
|
|
|
#include "utilities/fault_injection_fs.h"
|
2015-06-20 01:08:31 +02:00
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|

namespace ROCKSDB_NAMESPACE {
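// Test harness for Checkpoint and column family export. Each test gets a
// fresh per-thread DB path plus a snapshot path and an export path; all of
// them are destroyed again in the destructor.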
class CheckpointTest : public testing::Test {
 protected:
  // Sequence of option configurations to try
  enum OptionConfig {
    kDefault = 0,
  };
  int option_config_;

 public:
  std::string dbname_;
  std::string alternative_wal_dir_;
  Env* env_;
  DB* db_;
  Options last_options_;
  std::vector<ColumnFamilyHandle*> handles_;
  std::string snapshot_name_;
  std::string export_path_;
  ColumnFamilyHandle* cfh_reverse_comp_;
  ExportImportFilesMetaData* metadata_;

  CheckpointTest() : env_(Env::Default()) {
    env_->SetBackgroundThreads(1, Env::LOW);
    env_->SetBackgroundThreads(1, Env::HIGH);
    dbname_ = test::PerThreadDBPath(env_, "checkpoint_test");
    alternative_wal_dir_ = dbname_ + "/wal";
    auto options = CurrentOptions();
    auto delete_options = options;
    delete_options.wal_dir = alternative_wal_dir_;
    EXPECT_OK(DestroyDB(dbname_, delete_options));
    // Destroy the DB again in case no alternative WAL dir was used.
    EXPECT_OK(DestroyDB(dbname_, options));
    db_ = nullptr;
    snapshot_name_ = test::PerThreadDBPath(env_, "snapshot");
    std::string snapshot_tmp_name = snapshot_name_ + ".tmp";
    EXPECT_OK(DestroyDB(snapshot_name_, options));
    test::DeleteDir(env_, snapshot_name_);
    EXPECT_OK(DestroyDB(snapshot_tmp_name, options));
    test::DeleteDir(env_, snapshot_tmp_name);
    Reopen(options);
    export_path_ = test::PerThreadDBPath("/export");
    DestroyDir(env_, export_path_).PermitUncheckedError();
    cfh_reverse_comp_ = nullptr;
    metadata_ = nullptr;
  }

  ~CheckpointTest() override {
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency({});
    ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->ClearAllCallBacks();
    if (cfh_reverse_comp_) {
      EXPECT_OK(db_->DestroyColumnFamilyHandle(cfh_reverse_comp_));
      cfh_reverse_comp_ = nullptr;
    }
    if (metadata_) {
      delete metadata_;
      metadata_ = nullptr;
    }
    Close();
    Options options;
    options.db_paths.emplace_back(dbname_, 0);
    options.db_paths.emplace_back(dbname_ + "_2", 0);
    options.db_paths.emplace_back(dbname_ + "_3", 0);
    options.db_paths.emplace_back(dbname_ + "_4", 0);
    EXPECT_OK(DestroyDB(dbname_, options));
    EXPECT_OK(DestroyDB(snapshot_name_, options));
    DestroyDir(env_, export_path_).PermitUncheckedError();
  }

  // Return the current option configuration.
  Options CurrentOptions() {
    Options options;
    options.env = env_;
    options.create_if_missing = true;
    return options;
  }

  void CreateColumnFamilies(const std::vector<std::string>& cfs,
                            const Options& options) {
    ColumnFamilyOptions cf_opts(options);
    size_t cfi = handles_.size();
    handles_.resize(cfi + cfs.size());
    for (auto cf : cfs) {
      ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
    }
  }

  void CreateAndReopenWithCF(const std::vector<std::string>& cfs,
                             const Options& options) {
    CreateColumnFamilies(cfs, options);
    std::vector<std::string> cfs_plus_default = cfs;
    cfs_plus_default.insert(cfs_plus_default.begin(), kDefaultColumnFamilyName);
    ReopenWithColumnFamilies(cfs_plus_default, options);
  }

  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const std::vector<Options>& options) {
    ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
  }

  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const Options& options) {
    ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
  }

  Status TryReopenWithColumnFamilies(
      const std::vector<std::string>& cfs,
      const std::vector<Options>& options) {
    Close();
    EXPECT_EQ(cfs.size(), options.size());
    std::vector<ColumnFamilyDescriptor> column_families;
    for (size_t i = 0; i < cfs.size(); ++i) {
      column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
    }
    DBOptions db_opts = DBOptions(options[0]);
    return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);
  }

  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                     const Options& options) {
    Close();
    std::vector<Options> v_opts(cfs.size(), options);
    return TryReopenWithColumnFamilies(cfs, v_opts);
  }

  void Reopen(const Options& options) {
    ASSERT_OK(TryReopen(options));
  }

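  // Run a full-range compaction on every column family handle.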
  void CompactAll() {
    for (auto h : handles_) {
      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), h, nullptr, nullptr));
    }
  }

  void Close() {
    for (auto h : handles_) {
      delete h;
    }
    handles_.clear();
    delete db_;
    db_ = nullptr;
  }

  void DestroyAndReopen(const Options& options) {
    // Destroy using last options
    Destroy(last_options_);
    ASSERT_OK(TryReopen(options));
  }

  void Destroy(const Options& options) {
    Close();
    ASSERT_OK(DestroyDB(dbname_, options));
  }

  Status ReadOnlyReopen(const Options& options) {
    return DB::OpenForReadOnly(options, dbname_, &db_);
  }

  Status ReadOnlyReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                          const Options& options) {
    std::vector<ColumnFamilyDescriptor> column_families;
    for (const auto& cf : cfs) {
      column_families.emplace_back(cf, options);
    }
    return DB::OpenForReadOnly(options, dbname_, column_families, &handles_,
                               &db_);
  }

  Status TryReopen(const Options& options) {
    Close();
    last_options_ = options;
    return DB::Open(options, dbname_, &db_);
  }

  Status Flush(int cf = 0) {
    if (cf == 0) {
      return db_->Flush(FlushOptions());
    } else {
      return db_->Flush(FlushOptions(), handles_[cf]);
    }
  }

  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions()) {
    return db_->Put(wo, k, v);
  }

  Status Put(int cf, const Slice& k, const Slice& v,
             WriteOptions wo = WriteOptions()) {
    return db_->Put(wo, handles_[cf], k, v);
  }

  Status Delete(const std::string& k) {
    return db_->Delete(WriteOptions(), k);
  }

  Status Delete(int cf, const std::string& k) {
    return db_->Delete(WriteOptions(), handles_[cf], k);
  }

  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
    ReadOptions options;
    options.verify_checksums = true;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  std::string Get(int cf, const std::string& k,
                  const Snapshot* snapshot = nullptr) {
    ReadOptions options;
    options.verify_checksums = true;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, handles_[cf], k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }
};

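// GetSnapshotLink: creates a checkpoint (with and without a log_size_for_flush
// threshold) and verifies it returns the data as of checkpoint time, both
// while the original DB is still open and after the original DB is destroyed.
// The minimal checkpoint usage exercised below is:
//   Checkpoint* checkpoint;
//   Checkpoint::Create(db, &checkpoint);
//   checkpoint->CreateCheckpoint(checkpoint_dir);
//   delete checkpoint;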
TEST_F(CheckpointTest, GetSnapshotLink) {
  for (uint64_t log_size_for_flush : {0, 1000000}) {
    Options options;
    DB* snapshotDB;
    ReadOptions roptions;
    std::string result;
    Checkpoint* checkpoint;

    options = CurrentOptions();
    delete db_;
    db_ = nullptr;
    ASSERT_OK(DestroyDB(dbname_, options));

    // Create a database
    options.create_if_missing = true;
    ASSERT_OK(DB::Open(options, dbname_, &db_));
    std::string key = std::string("foo");
    ASSERT_OK(Put(key, "v1"));
    // Take a snapshot
    ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
    ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_, log_size_for_flush));
    ASSERT_OK(Put(key, "v2"));
    ASSERT_EQ("v2", Get(key));
    ASSERT_OK(Flush());
    ASSERT_EQ("v2", Get(key));
    // Open snapshot and verify contents while DB is running
    options.create_if_missing = false;
    ASSERT_OK(DB::Open(options, snapshot_name_, &snapshotDB));
    ASSERT_OK(snapshotDB->Get(roptions, key, &result));
    ASSERT_EQ("v1", result);
    delete snapshotDB;
    snapshotDB = nullptr;
    delete db_;
    db_ = nullptr;

    // Destroy original DB
    ASSERT_OK(DestroyDB(dbname_, options));

    // Open snapshot and verify contents
    options.create_if_missing = false;
    dbname_ = snapshot_name_;
    ASSERT_OK(DB::Open(options, dbname_, &db_));
    ASSERT_EQ("v1", Get(key));
    delete db_;
    db_ = nullptr;
    ASSERT_OK(DestroyDB(dbname_, options));
    delete checkpoint;

    // Restore DB name
    dbname_ = test::PerThreadDBPath(env_, "db_test");
  }
}

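// CheckpointWithBlob: verifies that a checkpoint of a DB with blob files
// enabled contains a blob file and that the blob value can be read back from
// the checkpoint.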
TEST_F(CheckpointTest, CheckpointWithBlob) {
  // Create a database with a blob file
  Options options = CurrentOptions();
  options.create_if_missing = true;
  options.enable_blob_files = true;
  options.min_blob_size = 0;

  Reopen(options);

  constexpr char key[] = "key";
  constexpr char blob[] = "blob";

  ASSERT_OK(Put(key, blob));
  ASSERT_OK(Flush());

  // Create a checkpoint
  Checkpoint* checkpoint = nullptr;
  ASSERT_OK(Checkpoint::Create(db_, &checkpoint));

  std::unique_ptr<Checkpoint> checkpoint_guard(checkpoint);

  ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));

  // Make sure it contains the blob file
  std::vector<std::string> files;
  ASSERT_OK(env_->GetChildren(snapshot_name_, &files));

  bool blob_file_found = false;
  for (const auto& file : files) {
    uint64_t number = 0;
    FileType type = kWalFile;

    if (ParseFileName(file, &number, &type) && type == kBlobFile) {
      blob_file_found = true;
      break;
    }
  }

  ASSERT_TRUE(blob_file_found);

  // Make sure the checkpoint can be opened and the blob value read
  options.create_if_missing = false;
  DB* checkpoint_db = nullptr;
  ASSERT_OK(DB::Open(options, snapshot_name_, &checkpoint_db));

  std::unique_ptr<DB> checkpoint_db_guard(checkpoint_db);

  PinnableSlice value;
  ASSERT_OK(checkpoint_db->Get(
      ReadOptions(), checkpoint_db->DefaultColumnFamily(), key, &value));

  ASSERT_EQ(value, blob);
}

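// ExportColumnFamilyWithLinks: Checkpoint::ExportColumnFamily() exports the
// live SST files of one column family into export_path_ (hard links when the
// export directory is on the same partition as the DB, copies otherwise) and
// returns their metadata. Verified below for the default column family,
// again after a compaction, and for a column family with a non-default
// comparator.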
TEST_F(CheckpointTest, ExportColumnFamilyWithLinks) {
  // Create a database
  auto options = CurrentOptions();
  options.create_if_missing = true;
  CreateAndReopenWithCF({}, options);

  // Helper to verify the number of files in metadata and export dir
  auto verify_files_exported = [&](const ExportImportFilesMetaData& metadata,
                                   int num_files_expected) {
    ASSERT_EQ(metadata.files.size(), num_files_expected);
    std::vector<std::string> subchildren;
    ASSERT_OK(env_->GetChildren(export_path_, &subchildren));
    ASSERT_EQ(subchildren.size(), num_files_expected);
  };

  // Test DefaultColumnFamily
  {
    const auto key = std::string("foo");
    ASSERT_OK(Put(key, "v1"));

    Checkpoint* checkpoint;
    ASSERT_OK(Checkpoint::Create(db_, &checkpoint));

    // Export the Tables and verify
    ASSERT_OK(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(),
                                             export_path_, &metadata_));
    verify_files_exported(*metadata_, 1);
    ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name());
    ASSERT_OK(DestroyDir(env_, export_path_));
    delete metadata_;
    metadata_ = nullptr;

    // Check again after compaction
    CompactAll();
    ASSERT_OK(Put(key, "v2"));
    ASSERT_OK(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(),
                                             export_path_, &metadata_));
    verify_files_exported(*metadata_, 2);
    ASSERT_EQ(metadata_->db_comparator_name, options.comparator->Name());
    ASSERT_OK(DestroyDir(env_, export_path_));
    delete metadata_;
    metadata_ = nullptr;
    delete checkpoint;
  }

  // Test non default column family with non default comparator
  {
    auto cf_options = CurrentOptions();
    cf_options.comparator = ReverseBytewiseComparator();
    ASSERT_OK(db_->CreateColumnFamily(cf_options, "yoyo", &cfh_reverse_comp_));

    const auto key = std::string("foo");
    ASSERT_OK(db_->Put(WriteOptions(), cfh_reverse_comp_, key, "v1"));

    Checkpoint* checkpoint;
    ASSERT_OK(Checkpoint::Create(db_, &checkpoint));

    // Export the Tables and verify
    ASSERT_OK(checkpoint->ExportColumnFamily(cfh_reverse_comp_, export_path_,
                                             &metadata_));
    verify_files_exported(*metadata_, 1);
    ASSERT_EQ(metadata_->db_comparator_name,
              ReverseBytewiseComparator()->Name());
    delete checkpoint;
  }
}

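// ExportColumnFamilyNegativeTest: ExportColumnFamily() must fail with
// InvalidArgument when the export directory already exists or when the
// directory specification is invalid (empty path).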
TEST_F(CheckpointTest, ExportColumnFamilyNegativeTest) {
  // Create a database
  auto options = CurrentOptions();
  options.create_if_missing = true;
  CreateAndReopenWithCF({}, options);

  const auto key = std::string("foo");
  ASSERT_OK(Put(key, "v1"));

  Checkpoint* checkpoint;
  ASSERT_OK(Checkpoint::Create(db_, &checkpoint));

  // Export onto existing directory
  ASSERT_OK(env_->CreateDirIfMissing(export_path_));
  ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(),
                                           export_path_, &metadata_),
            Status::InvalidArgument("Specified export_dir exists"));
  ASSERT_OK(DestroyDir(env_, export_path_));

  // Export with invalid directory specification
  export_path_ = "";
  ASSERT_EQ(checkpoint->ExportColumnFamily(db_->DefaultColumnFamily(),
                                           export_path_, &metadata_),
            Status::InvalidArgument("Specified export_dir invalid"));
  delete checkpoint;
}

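// CheckpointCF: takes a checkpoint on a separate thread while writes to six
// column families are in flight. Sync points interleave the writes with
// DBImpl::GetLiveFiles(), and the checkpoint is then reopened with all column
// families to verify its contents.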
TEST_F(CheckpointTest, CheckpointCF) {
|
2015-06-20 01:08:31 +02:00
|
|
|
Options options = CurrentOptions();
|
|
|
|
CreateAndReopenWithCF({"one", "two", "three", "four", "five"}, options);
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
|
2017-01-09 20:40:20 +01:00
|
|
|
{{"CheckpointTest::CheckpointCF:2", "DBImpl::GetLiveFiles:2"},
|
|
|
|
{"DBImpl::GetLiveFiles:1", "CheckpointTest::CheckpointCF:1"}});
|
2015-06-20 01:08:31 +02:00
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
2015-06-20 01:08:31 +02:00
|
|
|
|
|
|
|
ASSERT_OK(Put(0, "Default", "Default"));
|
|
|
|
ASSERT_OK(Put(1, "one", "one"));
|
|
|
|
ASSERT_OK(Put(2, "two", "two"));
|
|
|
|
ASSERT_OK(Put(3, "three", "three"));
|
|
|
|
ASSERT_OK(Put(4, "four", "four"));
|
|
|
|
ASSERT_OK(Put(5, "five", "five"));
|
|
|
|
|
|
|
|
DB* snapshotDB;
|
|
|
|
ReadOptions roptions;
|
|
|
|
std::string result;
|
|
|
|
std::vector<ColumnFamilyHandle*> cphandles;
|
|
|
|
|
|
|
|
// Take a snapshot
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::port::Thread t([&]() {
|
2015-06-20 01:08:31 +02:00
|
|
|
Checkpoint* checkpoint;
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
2018-01-10 21:14:18 +01:00
|
|
|
ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
|
2015-06-20 05:21:23 +02:00
|
|
|
delete checkpoint;
|
2015-06-20 01:08:31 +02:00
|
|
|
});
|
2017-01-09 20:40:20 +01:00
|
|
|
TEST_SYNC_POINT("CheckpointTest::CheckpointCF:1");
|
2015-06-20 01:08:31 +02:00
|
|
|
ASSERT_OK(Put(0, "Default", "Default1"));
|
|
|
|
ASSERT_OK(Put(1, "one", "eleven"));
|
|
|
|
ASSERT_OK(Put(2, "two", "twelve"));
|
|
|
|
ASSERT_OK(Put(3, "three", "thirteen"));
|
|
|
|
ASSERT_OK(Put(4, "four", "fourteen"));
|
|
|
|
ASSERT_OK(Put(5, "five", "fifteen"));
|
2017-01-09 20:40:20 +01:00
|
|
|
TEST_SYNC_POINT("CheckpointTest::CheckpointCF:2");
|
2015-06-20 01:08:31 +02:00
|
|
|
t.join();
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
2015-06-20 01:08:31 +02:00
|
|
|
ASSERT_OK(Put(1, "one", "twentyone"));
|
|
|
|
ASSERT_OK(Put(2, "two", "twentytwo"));
|
|
|
|
ASSERT_OK(Put(3, "three", "twentythree"));
|
|
|
|
ASSERT_OK(Put(4, "four", "twentyfour"));
|
|
|
|
ASSERT_OK(Put(5, "five", "twentyfive"));
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
|
|
|
|
// Open snapshot and verify contents while DB is running
|
|
|
|
options.create_if_missing = false;
|
|
|
|
std::vector<std::string> cfs;
|
2020-12-10 06:19:55 +01:00
|
|
|
cfs = {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"};
|
2015-06-20 01:08:31 +02:00
|
|
|
std::vector<ColumnFamilyDescriptor> column_families;
|
|
|
|
for (size_t i = 0; i < cfs.size(); ++i) {
|
|
|
|
column_families.push_back(ColumnFamilyDescriptor(cfs[i], options));
|
|
|
|
}
|
2018-01-10 21:14:18 +01:00
|
|
|
ASSERT_OK(DB::Open(options, snapshot_name_,
|
2015-06-20 01:08:31 +02:00
|
|
|
column_families, &cphandles, &snapshotDB));
|
|
|
|
ASSERT_OK(snapshotDB->Get(roptions, cphandles[0], "Default", &result));
|
|
|
|
ASSERT_EQ("Default1", result);
|
|
|
|
ASSERT_OK(snapshotDB->Get(roptions, cphandles[1], "one", &result));
|
|
|
|
ASSERT_EQ("eleven", result);
|
|
|
|
ASSERT_OK(snapshotDB->Get(roptions, cphandles[2], "two", &result));
ASSERT_EQ("twelve", result);
|
|
|
|
for (auto h : cphandles) {
|
|
|
|
delete h;
|
|
|
|
}
|
|
|
|
cphandles.clear();
|
|
|
|
delete snapshotDB;
|
|
|
|
snapshotDB = nullptr;
|
|
|
|
}
|
|
|
|
|
2017-03-22 01:53:21 +01:00
|
|
|
TEST_F(CheckpointTest, CheckpointCFNoFlush) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
CreateAndReopenWithCF({"one", "two", "three", "four", "five"}, options);
|
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
2017-03-22 01:53:21 +01:00
|
|
|
|
|
|
|
ASSERT_OK(Put(0, "Default", "Default"));
|
|
|
|
ASSERT_OK(Put(1, "one", "one"));
|
2020-12-10 06:19:55 +01:00
|
|
|
ASSERT_OK(Flush());
|
2017-03-22 01:53:21 +01:00
|
|
|
ASSERT_OK(Put(2, "two", "two"));
|
|
|
|
|
|
|
|
DB* snapshotDB;
|
|
|
|
ReadOptions roptions;
|
|
|
|
std::string result;
|
|
|
|
std::vector<ColumnFamilyHandle*> cphandles;
|
|
|
|
|
|
|
|
// Take a snapshot
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->SetCallBack(
|
2018-04-13 02:55:14 +02:00
|
|
|
"DBImpl::BackgroundCallFlush:start", [&](void* /*arg*/) {
|
2017-03-22 01:53:21 +01:00
|
|
|
// Flush should never trigger.
|
2017-07-17 06:23:33 +02:00
|
|
|
FAIL();
|
2017-03-22 01:53:21 +01:00
|
|
|
});
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
2017-03-22 01:53:21 +01:00
|
|
|
Checkpoint* checkpoint;
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
2018-01-10 21:14:18 +01:00
|
|
|
ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_, 1000000));
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
2017-03-22 01:53:21 +01:00
|
|
|
|
|
|
|
delete checkpoint;
|
|
|
|
ASSERT_OK(Put(1, "one", "two"));
|
|
|
|
ASSERT_OK(Flush(1));
|
|
|
|
ASSERT_OK(Put(2, "two", "twentytwo"));
|
|
|
|
Close();
|
|
|
|
EXPECT_OK(DestroyDB(dbname_, options));
|
|
|
|
|
|
|
|
// Open snapshot and verify contents while DB is running
|
|
|
|
options.create_if_missing = false;
|
|
|
|
std::vector<std::string> cfs;
|
|
|
|
cfs = {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"};
|
|
|
|
std::vector<ColumnFamilyDescriptor> column_families;
|
|
|
|
for (size_t i = 0; i < cfs.size(); ++i) {
|
|
|
|
column_families.push_back(ColumnFamilyDescriptor(cfs[i], options));
|
|
|
|
}
|
2018-01-10 21:14:18 +01:00
|
|
|
ASSERT_OK(DB::Open(options, snapshot_name_, column_families, &cphandles,
|
2017-03-22 01:53:21 +01:00
|
|
|
&snapshotDB));
|
|
|
|
ASSERT_OK(snapshotDB->Get(roptions, cphandles[0], "Default", &result));
|
|
|
|
ASSERT_EQ("Default", result);
|
|
|
|
ASSERT_OK(snapshotDB->Get(roptions, cphandles[1], "one", &result));
|
|
|
|
ASSERT_EQ("one", result);
|
|
|
|
ASSERT_OK(snapshotDB->Get(roptions, cphandles[2], "two", &result));
|
|
|
|
ASSERT_EQ("two", result);
|
|
|
|
for (auto h : cphandles) {
|
|
|
|
delete h;
|
|
|
|
}
|
|
|
|
cphandles.clear();
|
|
|
|
delete snapshotDB;
|
|
|
|
snapshotDB = nullptr;
|
|
|
|
}
|
|
|
|
|
2017-01-09 20:40:20 +01:00
|
|
|
TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing) {
|
2016-03-17 18:07:21 +01:00
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.max_manifest_file_size = 0; // always rollover manifest for file add
|
|
|
|
Reopen(options);
|
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
|
2016-03-17 18:07:21 +01:00
|
|
|
{// Get past the flush in the checkpoint thread before adding any keys to
|
|
|
|
// the db so the checkpoint thread won't hit the WriteManifest
|
|
|
|
// syncpoints.
|
2021-01-09 22:22:01 +01:00
|
|
|
{"CheckpointImpl::CreateCheckpoint:FlushDone",
|
2017-01-09 20:40:20 +01:00
|
|
|
"CheckpointTest::CurrentFileModifiedWhileCheckpointing:PrePut"},
|
2016-03-17 18:07:21 +01:00
|
|
|
// Roll the manifest during checkpointing right after live files are
|
|
|
|
// snapshotted.
|
|
|
|
{"CheckpointImpl::CreateCheckpoint:SavedLiveFiles1",
|
|
|
|
"VersionSet::LogAndApply:WriteManifest"},
|
|
|
|
{"VersionSet::LogAndApply:WriteManifestDone",
|
|
|
|
"CheckpointImpl::CreateCheckpoint:SavedLiveFiles2"}});
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
2016-03-17 18:07:21 +01:00
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::port::Thread t([&]() {
|
2016-03-17 18:07:21 +01:00
|
|
|
Checkpoint* checkpoint;
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
2018-01-10 21:14:18 +01:00
|
|
|
ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
|
2016-03-17 18:07:21 +01:00
|
|
|
delete checkpoint;
|
|
|
|
});
|
2017-01-09 20:40:20 +01:00
|
|
|
TEST_SYNC_POINT(
|
|
|
|
"CheckpointTest::CurrentFileModifiedWhileCheckpointing:PrePut");
|
2016-03-17 18:07:21 +01:00
|
|
|
ASSERT_OK(Put("Default", "Default1"));
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
t.join();
|
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
2016-03-17 18:07:21 +01:00
|
|
|
|
|
|
|
DB* snapshotDB;
|
|
|
|
// Successful Open() implies that CURRENT pointed to the manifest in the
|
|
|
|
// checkpoint.
|
2018-01-10 21:14:18 +01:00
|
|
|
ASSERT_OK(DB::Open(options, snapshot_name_, &snapshotDB));
|
2016-03-17 18:07:21 +01:00
|
|
|
delete snapshotDB;
|
|
|
|
snapshotDB = nullptr;
|
|
|
|
}
|
|
|
|
|
2017-01-09 20:40:20 +01:00
|
|
|
TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing2PC) {
|
|
|
|
Close();
|
2018-07-14 02:18:39 +02:00
|
|
|
const std::string dbname = test::PerThreadDBPath("transaction_testdb");
|
2016-12-28 20:53:29 +01:00
|
|
|
ASSERT_OK(DestroyDB(dbname, CurrentOptions()));
|
2020-12-10 06:19:55 +01:00
|
|
|
test::DeleteDir(env_, dbname);
|
2016-12-28 20:53:29 +01:00
|
|
|
|
|
|
|
Options options = CurrentOptions();
|
2017-01-09 20:40:20 +01:00
|
|
|
options.allow_2pc = true;
|
2016-12-28 20:53:29 +01:00
|
|
|
// allow_2pc is implicitly set with tx prepare
|
|
|
|
// options.allow_2pc = true;
|
|
|
|
TransactionDBOptions txn_db_options;
|
|
|
|
TransactionDB* txdb;
|
|
|
|
Status s = TransactionDB::Open(options, txn_db_options, dbname, &txdb);
|
2020-12-10 06:19:55 +01:00
|
|
|
ASSERT_OK(s);
|
2016-12-28 20:53:29 +01:00
|
|
|
ColumnFamilyHandle* cfa;
|
|
|
|
ColumnFamilyHandle* cfb;
|
|
|
|
ColumnFamilyOptions cf_options;
|
|
|
|
ASSERT_OK(txdb->CreateColumnFamily(cf_options, "CFA", &cfa));
|
|
|
|
|
|
|
|
WriteOptions write_options;
|
|
|
|
// Insert something into CFB so lots of log files will be kept
|
|
|
|
// before creating the checkpoint.
|
|
|
|
ASSERT_OK(txdb->CreateColumnFamily(cf_options, "CFB", &cfb));
|
|
|
|
ASSERT_OK(txdb->Put(write_options, cfb, "", ""));
|
|
|
|
|
|
|
|
ReadOptions read_options;
|
|
|
|
std::string value;
|
|
|
|
TransactionOptions txn_options;
|
|
|
|
Transaction* txn = txdb->BeginTransaction(write_options, txn_options);
|
|
|
|
s = txn->SetName("xid");
|
|
|
|
ASSERT_OK(s);
|
|
|
|
ASSERT_EQ(txdb->GetTransactionByName("xid"), txn);
|
|
|
|
|
|
|
|
s = txn->Put(Slice("foo"), Slice("bar"));
|
2020-12-10 06:19:55 +01:00
|
|
|
ASSERT_OK(s);
|
2016-12-28 20:53:29 +01:00
|
|
|
s = txn->Put(cfa, Slice("foocfa"), Slice("barcfa"));
|
|
|
|
ASSERT_OK(s);
|
|
|
|
// Write the prepare into the middle of the first WAL, then flush the WALs many times
|
|
|
|
for (int i = 1; i <= 100000; i++) {
|
|
|
|
Transaction* tx = txdb->BeginTransaction(write_options, txn_options);
|
|
|
|
ASSERT_OK(tx->SetName("x"));
|
|
|
|
ASSERT_OK(tx->Put(Slice(std::to_string(i)), Slice("val")));
|
|
|
|
ASSERT_OK(tx->Put(cfa, Slice("aaa"), Slice("111")));
|
|
|
|
ASSERT_OK(tx->Prepare());
|
|
|
|
ASSERT_OK(tx->Commit());
|
|
|
|
if (i % 10000 == 0) {
|
2020-12-10 06:19:55 +01:00
|
|
|
ASSERT_OK(txdb->Flush(FlushOptions()));
|
2016-12-28 20:53:29 +01:00
|
|
|
}
|
|
|
|
if (i == 88888) {
|
|
|
|
ASSERT_OK(txn->Prepare());
|
|
|
|
}
|
2017-01-09 20:40:20 +01:00
|
|
|
delete tx;
|
2016-12-28 20:53:29 +01:00
|
|
|
}
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->LoadDependency(
|
2016-12-28 20:53:29 +01:00
|
|
|
{{"CheckpointImpl::CreateCheckpoint:SavedLiveFiles1",
|
2017-01-09 20:40:20 +01:00
|
|
|
"CheckpointTest::CurrentFileModifiedWhileCheckpointing2PC:PreCommit"},
|
|
|
|
{"CheckpointTest::CurrentFileModifiedWhileCheckpointing2PC:PostCommit",
|
2016-12-28 20:53:29 +01:00
|
|
|
"CheckpointImpl::CreateCheckpoint:SavedLiveFiles2"}});
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->EnableProcessing();
|
|
|
|
ROCKSDB_NAMESPACE::port::Thread t([&]() {
|
2016-12-28 20:53:29 +01:00
|
|
|
Checkpoint* checkpoint;
|
|
|
|
ASSERT_OK(Checkpoint::Create(txdb, &checkpoint));
|
2018-01-10 21:14:18 +01:00
|
|
|
ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
|
2016-12-28 20:53:29 +01:00
|
|
|
delete checkpoint;
|
|
|
|
});
|
2017-01-09 20:40:20 +01:00
|
|
|
TEST_SYNC_POINT(
|
|
|
|
"CheckpointTest::CurrentFileModifiedWhileCheckpointing2PC:PreCommit");
|
2016-12-28 20:53:29 +01:00
|
|
|
ASSERT_OK(txn->Commit());
|
2017-01-09 20:40:20 +01:00
|
|
|
delete txn;
|
2016-12-28 20:53:29 +01:00
|
|
|
TEST_SYNC_POINT(
|
2017-01-09 20:40:20 +01:00
|
|
|
"CheckpointTest::CurrentFileModifiedWhileCheckpointing2PC:PostCommit");
|
2016-12-28 20:53:29 +01:00
|
|
|
t.join();
|
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::SyncPoint::GetInstance()->DisableProcessing();
|
2016-12-28 20:53:29 +01:00
|
|
|
|
|
|
|
// No more than three log files should exist.
|
|
|
|
std::vector<std::string> files;
|
2020-12-10 06:19:55 +01:00
|
|
|
ASSERT_OK(env_->GetChildren(snapshot_name_, &files));
|
2016-12-28 20:53:29 +01:00
|
|
|
int num_log_files = 0;
|
|
|
|
for (auto& file : files) {
|
|
|
|
uint64_t num;
|
|
|
|
FileType type;
|
|
|
|
WalFileType log_type;
|
2020-10-23 02:04:39 +02:00
|
|
|
if (ParseFileName(file, &num, &type, &log_type) && type == kWalFile) {
|
2016-12-28 20:53:29 +01:00
|
|
|
num_log_files++;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
// One flush after prepare + one outstanding file before checkpoint + one log
|
|
|
|
// file generated after checkpoint.
|
|
|
|
ASSERT_LE(num_log_files, 3);
|
|
|
|
|
|
|
|
TransactionDB* snapshotDB;
|
|
|
|
std::vector<ColumnFamilyDescriptor> column_families;
|
|
|
|
column_families.push_back(
|
|
|
|
ColumnFamilyDescriptor(kDefaultColumnFamilyName, ColumnFamilyOptions()));
|
|
|
|
column_families.push_back(
|
|
|
|
ColumnFamilyDescriptor("CFA", ColumnFamilyOptions()));
|
|
|
|
column_families.push_back(
|
|
|
|
ColumnFamilyDescriptor("CFB", ColumnFamilyOptions()));
|
2020-02-20 21:07:53 +01:00
|
|
|
std::vector<ROCKSDB_NAMESPACE::ColumnFamilyHandle*> cf_handles;
|
2018-01-10 21:14:18 +01:00
|
|
|
ASSERT_OK(TransactionDB::Open(options, txn_db_options, snapshot_name_,
|
2016-12-28 20:53:29 +01:00
|
|
|
column_families, &cf_handles, &snapshotDB));
|
|
|
|
ASSERT_OK(snapshotDB->Get(read_options, "foo", &value));
|
|
|
|
ASSERT_EQ(value, "bar");
|
|
|
|
ASSERT_OK(snapshotDB->Get(read_options, cf_handles[1], "foocfa", &value));
|
|
|
|
ASSERT_EQ(value, "barcfa");
|
|
|
|
|
|
|
|
delete cfa;
|
|
|
|
delete cfb;
|
|
|
|
delete cf_handles[0];
|
|
|
|
delete cf_handles[1];
|
|
|
|
delete cf_handles[2];
|
|
|
|
delete snapshotDB;
|
|
|
|
snapshotDB = nullptr;
|
2017-01-09 20:40:20 +01:00
|
|
|
delete txdb;
|
2016-12-28 20:53:29 +01:00
|
|
|
}
|
|
|
|
|
2018-02-21 01:42:06 +01:00
|
|
|
TEST_F(CheckpointTest, CheckpointInvalidDirectoryName) {
|
|
|
|
for (std::string checkpoint_dir : {"", "/", "////"}) {
|
|
|
|
Checkpoint* checkpoint;
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
|
|
|
ASSERT_TRUE(checkpoint->CreateCheckpoint("").IsInvalidArgument());
|
|
|
|
delete checkpoint;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-23 02:25:57 +01:00
|
|
|
TEST_F(CheckpointTest, CheckpointWithParallelWrites) {
|
|
|
|
// When run with TSAN, this exposes the data race fixed in
|
|
|
|
// https://github.com/facebook/rocksdb/pull/3603
|
|
|
|
ASSERT_OK(Put("key1", "val1"));
|
|
|
|
port::Thread thread([this]() { ASSERT_OK(Put("key2", "val2")); });
|
|
|
|
Checkpoint* checkpoint;
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
|
|
|
ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
|
|
|
|
delete checkpoint;
|
|
|
|
thread.join();
|
|
|
|
}
|
|
|
|
|
2018-08-28 21:35:17 +02:00
|
|
|
TEST_F(CheckpointTest, CheckpointWithUnsyncedDataDropped) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
std::unique_ptr<FaultInjectionTestEnv> env(new FaultInjectionTestEnv(env_));
|
|
|
|
options.env = env.get();
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_OK(Put("key1", "val1"));
|
|
|
|
Checkpoint* checkpoint;
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
|
|
|
ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
|
|
|
|
delete checkpoint;
|
2020-12-10 06:19:55 +01:00
|
|
|
ASSERT_OK(env->DropUnsyncedFileData());
|
2018-08-28 21:35:17 +02:00
|
|
|
|
|
|
|
// Make sure the checkpoint is openable even though any data that wasn't synced got
|
|
|
|
// dropped.
|
|
|
|
options.env = env_;
|
|
|
|
DB* snapshot_db;
|
|
|
|
ASSERT_OK(DB::Open(options, snapshot_name_, &snapshot_db));
|
|
|
|
ReadOptions read_opts;
|
|
|
|
std::string get_result;
|
|
|
|
ASSERT_OK(snapshot_db->Get(read_opts, "key1", &get_result));
|
|
|
|
ASSERT_EQ("val1", get_result);
|
|
|
|
delete snapshot_db;
|
|
|
|
delete db_;
|
|
|
|
db_ = nullptr;
|
|
|
|
}
|
|
|
|
|
2021-05-05 21:53:42 +02:00
|
|
|
TEST_F(CheckpointTest, CheckpointOptionsFileFailedToPersist) {
|
|
|
|
// Regression test for a bug where checkpoint failed on a DB where persisting
|
|
|
|
// OPTIONS file failed and the DB was opened with
|
|
|
|
// `fail_if_options_file_error == false`.
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
options.fail_if_options_file_error = false;
|
|
|
|
auto fault_fs = std::make_shared<FaultInjectionTestFS>(FileSystem::Default());
|
|
|
|
|
|
|
|
// Set up `FaultInjectionTestFS` and `SyncPoint` callbacks to fail one
|
|
|
|
// operation when inside the OPTIONS file persisting code.
|
|
|
|
std::unique_ptr<Env> fault_fs_env(NewCompositeEnv(fault_fs));
|
|
|
|
fault_fs->SetRandomMetadataWriteError(1 /* one_in */);
|
|
|
|
SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"PersistRocksDBOptions:start", [fault_fs](void* /* arg */) {
|
|
|
|
fault_fs->EnableMetadataWriteErrorInjection();
|
|
|
|
});
|
|
|
|
SyncPoint::GetInstance()->SetCallBack(
|
|
|
|
"FaultInjectionTestFS::InjectMetadataWriteError:Injected",
|
|
|
|
[fault_fs](void* /* arg */) {
|
|
|
|
fault_fs->DisableMetadataWriteErrorInjection();
|
|
|
|
});
|
|
|
|
options.env = fault_fs_env.get();
|
|
|
|
SyncPoint::GetInstance()->EnableProcessing();
|
|
|
|
|
|
|
|
Reopen(options);
|
|
|
|
ASSERT_OK(Put("key1", "val1"));
|
|
|
|
Checkpoint* checkpoint;
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
|
|
|
ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
|
|
|
|
delete checkpoint;
|
|
|
|
|
|
|
|
// Make sure it's usable.
|
|
|
|
options.env = env_;
|
|
|
|
DB* snapshot_db;
|
|
|
|
ASSERT_OK(DB::Open(options, snapshot_name_, &snapshot_db));
|
|
|
|
ReadOptions read_opts;
|
|
|
|
std::string get_result;
|
|
|
|
ASSERT_OK(snapshot_db->Get(read_opts, "key1", &get_result));
|
|
|
|
ASSERT_EQ("val1", get_result);
|
|
|
|
delete snapshot_db;
|
|
|
|
delete db_;
|
|
|
|
db_ = nullptr;
|
|
|
|
}
|
|
|
|
|
2018-12-08 02:03:49 +01:00
|
|
|
TEST_F(CheckpointTest, CheckpointReadOnlyDB) {
|
|
|
|
ASSERT_OK(Put("foo", "foo_value"));
|
|
|
|
ASSERT_OK(Flush());
|
|
|
|
Close();
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
ASSERT_OK(ReadOnlyReopen(options));
|
|
|
|
Checkpoint* checkpoint = nullptr;
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
|
|
|
ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
|
|
|
|
delete checkpoint;
|
|
|
|
checkpoint = nullptr;
|
|
|
|
Close();
|
|
|
|
DB* snapshot_db = nullptr;
|
|
|
|
ASSERT_OK(DB::Open(options, snapshot_name_, &snapshot_db));
|
|
|
|
ReadOptions read_opts;
|
|
|
|
std::string get_result;
|
|
|
|
ASSERT_OK(snapshot_db->Get(read_opts, "foo", &get_result));
|
|
|
|
ASSERT_EQ("foo_value", get_result);
|
|
|
|
delete snapshot_db;
|
|
|
|
}
|
|
|
|
|
|
|
|
TEST_F(CheckpointTest, CheckpointReadOnlyDBWithMultipleColumnFamilies) {
|
|
|
|
Options options = CurrentOptions();
|
|
|
|
CreateAndReopenWithCF({"pikachu", "eevee"}, options);
|
|
|
|
for (int i = 0; i != 3; ++i) {
|
|
|
|
ASSERT_OK(Put(i, "foo", "foo_value"));
|
|
|
|
ASSERT_OK(Flush(i));
|
|
|
|
}
|
|
|
|
Close();
|
|
|
|
Status s = ReadOnlyReopenWithColumnFamilies(
|
|
|
|
{kDefaultColumnFamilyName, "pikachu", "eevee"}, options);
|
|
|
|
ASSERT_OK(s);
|
|
|
|
Checkpoint* checkpoint = nullptr;
|
|
|
|
ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
|
|
|
|
ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
|
|
|
|
delete checkpoint;
|
|
|
|
checkpoint = nullptr;
|
|
|
|
Close();
|
|
|
|
|
|
|
|
std::vector<ColumnFamilyDescriptor> column_families{
|
|
|
|
{kDefaultColumnFamilyName, options},
|
|
|
|
{"pikachu", options},
|
|
|
|
{"eevee", options}};
|
|
|
|
DB* snapshot_db = nullptr;
|
|
|
|
std::vector<ColumnFamilyHandle*> snapshot_handles;
|
|
|
|
s = DB::Open(options, snapshot_name_, column_families, &snapshot_handles,
|
|
|
|
&snapshot_db);
|
|
|
|
ASSERT_OK(s);
|
|
|
|
ReadOptions read_opts;
|
|
|
|
for (int i = 0; i != 3; ++i) {
|
|
|
|
std::string get_result;
|
|
|
|
s = snapshot_db->Get(read_opts, snapshot_handles[i], "foo", &get_result);
|
|
|
|
ASSERT_OK(s);
|
|
|
|
ASSERT_EQ("foo_value", get_result);
|
|
|
|
}
|
|
|
|
|
|
|
|
for (auto snapshot_h : snapshot_handles) {
|
|
|
|
delete snapshot_h;
|
|
|
|
}
|
|
|
|
snapshot_handles.clear();
|
|
|
|
delete snapshot_db;
|
|
|
|
}
|
|
|
|
|
2020-02-20 21:07:53 +01:00
|
|
|
} // namespace ROCKSDB_NAMESPACE
|
2015-06-20 01:08:31 +02:00
|
|
|
|
|
|
|
int main(int argc, char** argv) {
|
2020-02-20 21:07:53 +01:00
|
|
|
ROCKSDB_NAMESPACE::port::InstallStackTraceHandler();
|
2015-06-20 01:08:31 +02:00
|
|
|
::testing::InitGoogleTest(&argc, argv);
|
|
|
|
return RUN_ALL_TESTS();
|
|
|
|
}
|
2015-07-20 20:24:54 +02:00
|
|
|
|
|
|
|
#else
|
|
|
|
#include <stdio.h>
|
|
|
|
|
2018-04-16 02:19:57 +02:00
|
|
|
int main(int /*argc*/, char** /*argv*/) {
|
2015-07-20 20:24:54 +02:00
|
|
|
fprintf(stderr, "SKIPPED as Checkpoint is not supported in ROCKSDB_LITE\n");
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif // !ROCKSDB_LITE
|